diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 2ee4362a384..534457e0992 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,6 +1,6 @@ [bumpversion] current_version = 2.2.1 -files = setup.py src/scancode/__init__.py +files = setup.py src/scancode_config.py commit = False tag = False diff --git a/.gitignore b/.gitignore index 8d35930fb9f..a39bc6d78ee 100644 --- a/.gitignore +++ b/.gitignore @@ -67,3 +67,4 @@ docs/_build # pyenv /.python-version +/man/ diff --git a/configure.bat b/configure.bat index 437c7d77983..8ba282a0275 100644 --- a/configure.bat +++ b/configure.bat @@ -1,6 +1,6 @@ @echo OFF -@rem Copyright (c) 2015 nexB Inc. http://www.nexb.com/ - All rights reserved. +@rem Copyright (c) 2018 nexB Inc. http://www.nexb.com/ - All rights reserved. @rem ################################ @rem # change these variables to customize this script locally @@ -44,7 +44,7 @@ if not exist "c:\python27\python.exe" ( echo Do NOT install Python v3 or any 64 bits edition. echo Instead download Python from this url and see the README.rst file for more details: echo( - echo https://www.python.org/ftp/python/2.7.10/python-2.7.10.msi + echo https://www.python.org/ftp/python/2.7.14/python-2.7.14.msi echo( exit /b 1 ) diff --git a/etc/conf/base.py b/etc/conf/base.py index 6f1b431d30a..d5643fb4246 100644 --- a/etc/conf/base.py +++ b/etc/conf/base.py @@ -5,17 +5,18 @@ import sys - """ Check that we run a supported OS and architecture. """ + def unsupported(platform): print('Unsupported OS/platform %r.' 
% platform) print('See https://github.com/nexB/scancode-toolkit/ for supported OS/platforms.') print('Enter a ticket https://github.com/nexB/scancode-toolkit/issues asking for support of your OS/platform combo.') sys.exit(1) + if sys.maxsize > 2 ** 32: arch = '64' else: @@ -31,29 +32,12 @@ def unsupported(platform): else: unsupported(sys_platform) - supported_combos = { 'linux': ['32', '64'], - 'win': ['32',], - 'mac': ['64',], + 'win': ['32', ], + 'mac': ['64', ], } arches = supported_combos[os] if arch not in arches: unsupported(os + arch) - - -""" -Re/build the license cache on every configure run. -""" - -def build_license_cache(): - """ - Force a rebuild of the license cache on configure. - """ - from licensedcode import cache - print('* Building license index...') - cache.reindex() - - -build_license_cache() diff --git a/etc/conf/dev/base.py b/etc/conf/dev/base.py index 588a475c0c2..e78bdd149f3 100644 --- a/etc/conf/dev/base.py +++ b/etc/conf/dev/base.py @@ -12,8 +12,8 @@ def setup_dev_mode(): not rely on license data to remain untouched and will always check the license index cache for consistency, rebuilding it if necessary. 
""" - from scancode import root_dir - with open(os.path.join(root_dir, 'SCANCODE_DEV_MODE'), 'wb') as sdm: + from scancode_config import scancode_root_dir + with open(os.path.join(scancode_root_dir, 'SCANCODE_DEV_MODE'), 'wb') as sdm: sdm.write('This is a tag file to notify that ScanCode is used in development mode.') @@ -21,14 +21,14 @@ def setup_vscode(): """ Add base settings for .vscode """ - from scancode import root_dir + from scancode_config import scancode_root_dir from commoncode.fileutils import create_dir from commoncode.fileutils import copyfile - settings = os.path.join(root_dir, 'etc', 'vscode', 'settings.json') + settings = os.path.join(scancode_root_dir, 'etc', 'vscode', 'settings.json') if os.path.exists(settings): - vscode = os.path.join(root_dir, '.vscode') + vscode = os.path.join(scancode_root_dir, '.vscode') create_dir(vscode) copyfile(settings, vscode) diff --git a/etc/configure.py b/etc/configure.py index 44cada3448a..207e7988f25 100644 --- a/etc/configure.py +++ b/etc/configure.py @@ -64,7 +64,6 @@ import shutil import subprocess - # platform-specific file base names sys_platform = str(sys.platform).lower() on_win = False @@ -79,7 +78,6 @@ raise Exception('Unsupported OS/platform %r' % sys_platform) platform_names = tuple() - # common file basenames for requirements and scripts base = ('base',) @@ -213,7 +211,7 @@ def create_virtualenv(std_python, root_dir, tpp_dirs, quiet=False): def activate(root_dir): """ Activate a virtualenv in the current process.""" - print("* Activating ...") + # print("* Activating...") bin_dir = os.path.join(root_dir, 'bin') activate_this = os.path.join(bin_dir, 'activate_this.py') with open(activate_this) as f: diff --git a/etc/release/release.sh b/etc/release/release.sh index 2e8684d6fe7..a4f03caf795 100755 --- a/etc/release/release.sh +++ b/etc/release/release.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright (c) 2017 nexB Inc. http://www.nexb.com/ - All rights reserved. +# Copyright (c) 2018 nexB Inc. 
http://www.nexb.com/ - All rights reserved. # # ScanCode release script @@ -52,17 +52,36 @@ function test_scan { # this is needed for the zip chmod o+x scancode extractcode - # minimal test: update when new scans are available - ./scancode --quiet -lcip apache-2.0.LICENSE test_scan.json - echo "TEST JSON passed: ./scancode --quiet -lcip apache-2.0.LICENSE test_scan.json" - ./scancode --quiet -lcip --format json-pp apache-2.0.LICENSE test_scan.json - echo "TEST JSON-PP passed: ./scancode --quiet -lcip --format json-pp apache-2.0.LICENSE test_scan.json" - ./scancode --quiet -lcip --format html apache-2.0.LICENSE test_scan.html - echo "TEST HTML passed: ./scancode --quiet -lcip --format html apache-2.0.LICENSE test_scan.html" - ./scancode --quiet -lcip --format html-app apache-2.0.LICENSE test_scan_app.html - echo "TEST HTML-APP passed: ./scancode --quiet -lcip --format html-app apache-2.0.LICENSE test_scan_app.html" - ./extractcode --quiet samples/arch - echo "TEST EXTRACTCODE passed: ./extractcode --quiet samples/arch" + # minimal tests: update when new scans are available + cmd="./scancode --quiet -lcip apache-2.0.LICENSE --json test_scan.json" + echo "RUNNING TEST: $cmd" + $cmd + echo "TEST PASSED" + + cmd="./scancode --quiet -lcip apache-2.0.LICENSE --json-pp test_scan.json" + echo "RUNNING TEST: $cmd" + $cmd + echo "TEST PASSED" + + cmd="./scancode --quiet -lcip apache-2.0.LICENSE --output-html test_scan.html" + echo "RUNNING TEST: $cmd" + $cmd + echo "TEST PASSED" + + cmd="./scancode --quiet -lcip apache-2.0.LICENSE --output-html-app test_scan_app.html" + echo "RUNNING TEST: $cmd" + $cmd + echo "TEST PASSED" + + cmd="./scancode --quiet -lcip apache-2.0.LICENSE --output-spdx-tv test_scan.spdx" + echo "RUNNING TEST: $cmd" + $cmd + echo "TEST PASSED" + + cmd="./extractcode --quiet samples/arch" + echo "RUNNING TEST: $cmd" + $cmd + echo "TEST PASSED" # cleanup cd .. 
diff --git a/etc/scripts/sch2js/sch2js.py b/etc/scripts/sch2js/sch2js.py index e99527a0b2e..812dd2ab4c9 100644 --- a/etc/scripts/sch2js/sch2js.py +++ b/etc/scripts/sch2js/sch2js.py @@ -46,10 +46,8 @@ from schematics.types.compound import ListType from schematics.types.compound import ModelType - __version__ = '1.0.1.patch' - SCHEMATIC_TYPE_TO_JSON_TYPE = { 'NumberType': 'number', 'IntType': 'integer', diff --git a/etc/scripts/synclic.py b/etc/scripts/synclic.py index 7bc7ae63a30..6fdd2902b74 100644 --- a/etc/scripts/synclic.py +++ b/etc/scripts/synclic.py @@ -31,22 +31,25 @@ from collections import OrderedDict import json import os +from os import mkdir +from os.path import exists +from os.path import join import zipfile import click +from os.path import realpath click.disable_unicode_literals_warning = True import requests -from commoncode import fileutils from commoncode import fetch +from commoncode import fileutils import licensedcode -from licensedcode.cache import get_licenses_db from licensedcode.cache import get_index +from licensedcode.cache import get_licenses_db from licensedcode.models import load_licenses from licensedcode.models import License - """ Sync and update the ScanCode licenses against: - the SPDX license list @@ -59,6 +62,7 @@ TRACE_DEEP = False TRACE_FETCH = False + class ExternalLicensesSource(object): """ Base class to provide (including possibly fetch) licenses from an @@ -80,30 +84,30 @@ def __init__(self, src_dir, match_text=False, match_approx=False): """ `src_dir` is where the License objects are dumped. 
""" - src_dir = os.path.realpath(src_dir) + src_dir = realpath(src_dir) self.src_dir = src_dir self.match_text = match_text self.match_approx = match_approx self.fetched = False - if os.path.exists(src_dir): + if exists(src_dir): # fetch ONLY if the directory is empty self.fetched = True else: - os.mkdir(src_dir) + mkdir(src_dir) self.update_dir = self.src_dir.rstrip('\\/') + '-update' - if not os.path.exists(self.update_dir): - os.mkdir(self.update_dir) + if not exists(self.update_dir): + mkdir(self.update_dir) self.new_dir = self.src_dir.rstrip('\\/') + '-new' - if not os.path.exists(self.new_dir): - os.mkdir(self.new_dir) + if not exists(self.new_dir): + mkdir(self.new_dir) self.del_dir = self.src_dir.rstrip('\\/') + '-del' - if not os.path.exists(self.del_dir): - os.mkdir(self.del_dir) + if not exists(self.del_dir): + mkdir(self.del_dir) self.scancodes_by_key = get_licenses_db() @@ -111,13 +115,15 @@ def __init__(self, src_dir, match_text=False, match_approx=False): for l in self.scancodes_by_key.values() if l.spdx_license_key} - composites_dir = os.path.join(licensedcode.data_dir, 'composites', 'licenses') + composites_dir = join( + licensedcode.models.data_dir, 'composites', 'licenses') self.composites_by_key = load_licenses(composites_dir, with_deprecated=True) self.composites_by_spdx_key = {l.spdx_license_key.lower(): l for l in self.composites_by_key.values() if l.spdx_license_key} - foreign_dir = os.path.join(licensedcode.data_dir, 'non-english', 'licenses') + foreign_dir = join( + licensedcode.models.data_dir, 'non-english', 'licenses') self.non_english_by_key = load_licenses(foreign_dir, with_deprecated=True) self.non_english_by_spdx_key = {l.spdx_license_key.lower(): l for l in self.non_english_by_key.values() @@ -449,8 +455,8 @@ def __init__(self, src_dir, match_text=False, match_approx=False, api_base_url=None, api_key=None): super(DejaSource, self).__init__(src_dir, match_text, match_approx) - self.api_base_url = api_base_url or 
os.environ.get('DEJACODE_API_URL', None) - self.api_key = api_key or os.environ.get('DEJACODE_API_KEY', None) + self.api_base_url = api_base_url or os.getenv('DEJACODE_API_URL') + self.api_key = api_key or os.getenv('DEJACODE_API_KEY') assert (self.api_key and self.api_base_url), ( 'You must set the DEJACODE_API_URL and DEJACODE_API_KEY ' + @@ -608,11 +614,13 @@ def merge_licenses(scancode_license, other_license, updatable_attributes): (attribute name, value before, value after) """ scancode_updated = [] + def update_sc(_attrib, _sc_val, _o_val): setattr(scancode_license, _attrib, _o_val) scancode_updated.append((_attrib, _sc_val, _o_val)) other_updated = [] + def update_ot(_attrib, _sc_val, _o_val): setattr(other_license, _attrib, _sc_val) other_updated.append((_attrib, _o_val, _sc_val)) @@ -781,7 +789,7 @@ def synchronize_licenses(external_source): if not TRACE:print('.', end='') # Create a new ScanCode license - sc_license = ot_license.relocate(licensedcode.licenses_data_dir, o_key) + sc_license = ot_license.relocate(licensedcode.models.data_dir, o_key) scancodes_added.add(sc_license.key) scancodes_by_key[sc_license.key] = sc_license if TRACE: print('Other license key not in ScanCode:', ot_license.key, 'created in ScanCode.') @@ -793,7 +801,6 @@ def synchronize_licenses(external_source): for k in others_changed | others_added: others_by_key[k].dump() - # TODO: at last: print report of incorrect OTHER licenses to submit # updates eg. make API calls to DejaCode to create or update # licenses and submit review request e.g. 
submit requests to SPDX diff --git a/etc/scripts/test_json2csv.py b/etc/scripts/test_json2csv.py index 8fcb96f8851..75b17e9cb2f 100644 --- a/etc/scripts/test_json2csv.py +++ b/etc/scripts/test_json2csv.py @@ -208,16 +208,17 @@ class TestJson2CSVWithLiveScans(FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata') def test_can_process_scan_from_json_scan(self): - import scancode + from scancode_config import scancode_root_dir from commoncode.command import execute test_dir = self.get_test_loc('livescan/scan') json_file = self.get_temp_file('json') - scan_cmd = os.path.join(scancode.root_dir, 'scancode') + scan_cmd = os.path.join(scancode_root_dir, 'scancode') rc, _stdout, _stderr = execute(scan_cmd, - ['-clip', '--email', '--url', '--strip-root', '--format', 'json', test_dir, json_file]) - assert rc == 0 + ['-clip', '--email', '--url', '--strip-root', test_dir, + '--json', json_file]) result_file = self.get_temp_file('.csv') with open(result_file, 'wb') as rf: json2csv.json_scan_to_csv(json_file, rf) expected_file = self.get_test_loc('livescan/expected.csv') check_csvs(result_file, expected_file, regen=False) + assert rc == 0 diff --git a/etc/scripts/testdata/livescan/expected.csv b/etc/scripts/testdata/livescan/expected.csv index 1e1003996a2..6950b3d7305 100644 --- a/etc/scripts/testdata/livescan/expected.csv +++ b/etc/scripts/testdata/livescan/expected.csv @@ -1,20 +1,20 @@ 
-Resource,type,name,base_name,extension,date,size,sha1,md5,files_count,mime_type,file_type,programming_language,is_binary,is_text,is_archive,is_media,is_source,is_script,scan_errors,license__key,license__score,license__short_name,license__category,license__owner,license__homepage_url,license__text_url,license__reference_url,license__spdx_license_key,license__spdx_url,start_line,end_line,matched_rule__identifier,matched_rule__license_choice,matched_rule__licenses,copyright,copyright_holder,email,url,package__type,package__name,package__version,package__primary_language,package__summary,package__description,package__size,package__release_date,package__authors,package__homepage_url,package__notes,package__download_urls,package__bug_tracking_url,package__vcs_repository,package__copyright_top_level -/package.json,file,package.json,package,.json,2017-10-03,2200,918376afce796ef90eeda1d6695f2289c90491ac,1f66239a9b850c5e60a9382dbe2162d2,,text/plain,"ASCII text, with very long lines",JSON,False,True,False,False,True,False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,mit,15.00,MIT License,Permissive,MIT,http://opensource.org/licenses/mit-license.php,http://opensource.org/licenses/mit-license.php,https://enterprise.dejacode.com/urn/urn:dje:license:mit,MIT,https://spdx.org/licenses/MIT,24,24,mit_27.RULE,False,[u'mit'],,,,,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,mit,100.00,MIT License,Permissive,MIT,http://opensource.org/licenses/mit-license.php,http://opensource.org/licenses/mit-license.php,https://enterprise.dejacode.com/urn/urn:dje:license:mit,MIT,https://spdx.org/licenses/MIT,24,24,mit.LICENSE,False,[u'mit'],,,,,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,23,26,,,,Copyright (c) 2012 LearnBoost < tj@learnboost.com>,,,,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,23,26,,,,,LearnBoost <,,,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,12,12,,,,,,tj@learnboost.com,,,,,,,,,,,,,,,, 
-/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,16,16,,,,,,,https://github.com/visionmedia/node-cookie-signature.git,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,27,27,,,,,,,https://github.com/visionmedia/node-cookie-signature/issues,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,TJ Holowaychuk,,,https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.3.tgz,,, -/json2csv.rb,file,json2csv.rb,json2csv,.rb,2017-10-03,1599,6cfb0bd0fb0b784f57164d15bdfca2b734ad87a6,f18e519b77bc7f3e4213215033db3857,,text/x-python,"Python script, ASCII text executable",Ruby,False,True,False,False,True,True,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -/json2csv.rb,,,,,,,,,,,,,,,,,,,,apache-2.0,98.45,Apache 2.0,Permissive,Apache Software Foundation,http://www.apache.org/licenses/,http://www.apache.org/licenses/LICENSE-2.0,https://enterprise.dejacode.com/urn/urn:dje:license:apache-2.0,Apache-2.0,https://spdx.org/licenses/Apache-2.0,5,24,apache-2.0_scancode.RULE,False,"[u'apache-2.0', u'scancode-acknowledgment']",,,,,,,,,,,,,,,,,,, -/json2csv.rb,,,,,,,,,,,,,,,,,,,,scancode-acknowledgment,98.45,ScanCode acknowledgment,Permissive,nexB,https://github.com/nexB/scancode-toolkit/,,https://enterprise.dejacode.com/urn/urn:dje:license:scancode-acknowledgment,,,5,24,apache-2.0_scancode.RULE,False,"[u'apache-2.0', u'scancode-acknowledgment']",,,,,,,,,,,,,,,,,,, -/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,3,,,,Copyright (c) 2017 nexB Inc. and others.,,,,,,,,,,,,,,,,,, -/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,3,,,,,nexB Inc. 
and others.,,,,,,,,,,,,,,,,, -/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,4,4,,,,,,,http://nexb.com/,,,,,,,,,,,,,,, -/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,4,4,,,,,,,https://github.com/nexB/scancode-toolkit/,,,,,,,,,,,,,,, -/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,10,10,,,,,,,http://apache.org/licenses/LICENSE-2.0,,,,,,,,,,,,,,, -/license,file,license,license,,2017-10-03,679,75c5490a718ddd45e40e0cc7ce0c756abc373123,b965a762efb9421cf1bf4405f336e278,,text/plain,ASCII text,,False,True,False,False,False,False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -/license,,,,,,,,,,,,,,,,,,,,gpl-2.0-plus,100.00,GPL 2.0 or later,Copyleft,Free Software Foundation (FSF),http://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html,http://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html,https://enterprise.dejacode.com/urn/urn:dje:license:gpl-2.0-plus,GPL-2.0+,https://spdx.org/licenses/GPL-2.0,1,12,gpl-2.0-plus.LICENSE,False,[u'gpl-2.0-plus'],,,,,,,,,,,,,,,,,,, +Resource,type,name,base_name,extension,size,date,sha1,md5,mime_type,file_type,programming_language,is_binary,is_text,is_archive,is_media,is_source,is_script,files_count,dirs_count,size_count,scan_errors,license__key,license__score,license__short_name,license__category,license__owner,license__homepage_url,license__text_url,license__reference_url,license__spdx_license_key,license__spdx_url,start_line,end_line,matched_rule__identifier,matched_rule__license_choice,matched_rule__licenses,copyright,copyright_holder,email,url,package__type,package__name,package__version,package__primary_language,package__summary,package__description,package__size,package__release_date,package__authors,package__homepage_url,package__notes,package__download_urls,package__bug_tracking_url,package__vcs_repository,package__copyright_top_level +/json2csv.rb,file,json2csv.rb,json2csv,.rb,1599,2017-10-03,6cfb0bd0fb0b784f57164d15bdfca2b734ad87a6,f18e519b77bc7f3e4213215033db3857,text/x-python,"Python script, ASCII text 
executable",Ruby,False,True,False,False,True,True,0,0,0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,apache-2.0,98.45,Apache 2.0,Permissive,Apache Software Foundation,http://www.apache.org/licenses/,http://www.apache.org/licenses/LICENSE-2.0,https://enterprise.dejacode.com/urn/urn:dje:license:apache-2.0,Apache-2.0,https://spdx.org/licenses/Apache-2.0,5,24,apache-2.0_scancode.RULE,False,"[u'apache-2.0', u'scancode-acknowledgment']",,,,,,,,,,,,,,,,,,, +/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,scancode-acknowledgment,98.45,ScanCode acknowledgment,Permissive,nexB,https://github.com/nexB/scancode-toolkit/,,https://enterprise.dejacode.com/urn/urn:dje:license:scancode-acknowledgment,,,5,24,apache-2.0_scancode.RULE,False,"[u'apache-2.0', u'scancode-acknowledgment']",,,,,,,,,,,,,,,,,,, +/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,3,,,,Copyright (c) 2017 nexB Inc. and others.,,,,,,,,,,,,,,,,,, +/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,3,,,,,nexB Inc. and others.,,,,,,,,,,,,,,,,, +/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,4,4,,,,,,,http://nexb.com/,,,,,,,,,,,,,,, +/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,4,4,,,,,,,https://github.com/nexB/scancode-toolkit/,,,,,,,,,,,,,,, +/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,10,10,,,,,,,http://apache.org/licenses/LICENSE-2.0,,,,,,,,,,,,,,, +/license,file,license,license,,679,2017-10-03,75c5490a718ddd45e40e0cc7ce0c756abc373123,b965a762efb9421cf1bf4405f336e278,text/plain,ASCII text,,False,True,False,False,False,False,0,0,0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +/license,,,,,,,,,,,,,,,,,,,,,,gpl-2.0-plus,100.00,GPL 2.0 or later,Copyleft,Free Software Foundation (FSF),http://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html,http://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html,https://enterprise.dejacode.com/urn/urn:dje:license:gpl-2.0-plus,GPL-2.0+,https://spdx.org/licenses/GPL-2.0,1,12,gpl-2.0-plus.LICENSE,False,[u'gpl-2.0-plus'],,,,,,,,,,,,,,,,,,, 
+/package.json,file,package.json,package,.json,2200,2017-10-03,918376afce796ef90eeda1d6695f2289c90491ac,1f66239a9b850c5e60a9382dbe2162d2,text/plain,"ASCII text, with very long lines",JSON,False,True,False,False,True,False,0,0,0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,mit,15.00,MIT License,Permissive,MIT,http://opensource.org/licenses/mit-license.php,http://opensource.org/licenses/mit-license.php,https://enterprise.dejacode.com/urn/urn:dje:license:mit,MIT,https://spdx.org/licenses/MIT,24,24,mit_27.RULE,False,[u'mit'],,,,,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,mit,100.00,MIT License,Permissive,MIT,http://opensource.org/licenses/mit-license.php,http://opensource.org/licenses/mit-license.php,https://enterprise.dejacode.com/urn/urn:dje:license:mit,MIT,https://spdx.org/licenses/MIT,24,24,mit.LICENSE,False,[u'mit'],,,,,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,23,26,,,,Copyright (c) 2012 LearnBoost < tj@learnboost.com>,,,,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,23,26,,,,,LearnBoost <,,,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,12,12,,,,,,tj@learnboost.com,,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,16,16,,,,,,,https://github.com/visionmedia/node-cookie-signature.git,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,27,27,,,,,,,https://github.com/visionmedia/node-cookie-signature/issues,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,TJ Holowaychuk,,,https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.3.tgz,,, diff --git a/setup.py b/setup.py index b3d63e9c7eb..4ccbc0ec9ff 100644 --- a/setup.py +++ b/setup.py @@ -161,7 +161,6 @@ def read(*names, **kwargs): 'pygments >= 2.0.1, <3.0.0', # packagedcode - 'attrs >=16.0, < 17.0', 'pymaven-patch >= 0.2.4', 'requests >= 2.7.0, < 3.0.0', 'schematics_patched', @@ -170,6 +169,8 @@ def read(*names, **kwargs): 'click >= 6.0.0, < 7.0.0', 'colorama >= 0.3.9', 
'pluggy >= 0.4.0, < 1.0', + 'attrs >=17.0, < 18.0', + 'typing >=3.6, < 3.7', # scancode outputs 'jinja2 >= 2.7.0, < 3.0.0', @@ -199,39 +200,81 @@ def read(*names, **kwargs): 'extractcode = scancode.extract_cli:extractcode', ], - # scancode_output_writers is an entry point to define plugins - # that write a scan output in a given format. - # See the plugincode.output module for details and doc. - # note: the "name" of the entrypoint (e.g "html") becomes the - # ScanCode command line --format option used to enable a given - # format plugin - 'scancode_output_writers': [ - 'html = formattedcode.format_templated:write_html', - 'html-app = formattedcode.format_templated:write_html_app', - 'json = formattedcode.format_json:write_json_compact', - 'json-pp = formattedcode.format_json:write_json_pretty_printed', - 'spdx-tv = formattedcode.format_spdx:write_spdx_tag_value', - 'spdx-rdf = formattedcode.format_spdx:write_spdx_rdf', - 'csv = formattedcode.format_csv:write_csv', - 'jsonlines = formattedcode.format_jsonlines:write_jsonlines', + # scancode_pre_scan is the entry point for pre_scan plugins executed + # before the scans. + # + # Each entry has this form: + # plugin-name = fully.qualified.module:PluginClass + # where plugin-name must be a unique name for this entrypoint. + # + # See also plugincode.pre_scan module for details and doc. + 'scancode_pre_scan': [ + 'ignore = scancode.plugin_ignore:ProcessIgnore', ], - # scancode_post_scan is an entry point for post_scan_plugins. - # See plugincode.post_scan module for details and doc. - # note: the "name" of the entrypoint (e.g only-findings) - # becomes the ScanCode CLI boolean flag used to enable a - # given post_scan plugin + # scancode_scan is the entry point for scan plugins that run a scan + # after the pre_scan plugins and before the post_scan plugins. + # + # Each entry has this form: + # plugin-name = fully.qualified.module:PluginClass + # where plugin-name must be a unique name for this entrypoint.
+ # + # IMPORTANT: The plugin-name is also the "scan key" used in scan results + # for this scanner. + # + # See also plugincode.scan module for details and doc. + 'scancode_scan': [ + 'info = scancode.plugin_info:InfoScanner', + 'licenses = scancode.plugin_license:LicenseScanner', + 'copyrights = scancode.plugin_copyright:CopyrightScanner', + 'packages = scancode.plugin_package:PackageScanner', + 'emails = scancode.plugin_email:EmailScanner', + 'urls = scancode.plugin_url:UrlScanner', + ], + + # scancode_post_scan is the entry point for post_scan plugins executed + # after the scan plugins and before the output plugins. + # + # Each entry has this form: + # plugin-name = fully.qualified.module:PluginClass + # where plugin-name must be a unique name for this entrypoint. + # + # See also plugincode.post_scan module for details and doc. 'scancode_post_scan': [ - 'only-findings = scancode.plugin_only_findings:process_only_findings', - 'mark-source = scancode.plugin_mark_source:process_mark_source', + 'mark-source = scancode.plugin_mark_source:MarkSource', ], - # scancode_pre_scan is an entry point to define pre_scan plugins. - # See plugincode.pre_scan module for details and doc. - # note: the "name" of the entrypoint (e.g ignore) will be used for - # the option name which passes the input to the given pre_scan plugin - 'scancode_pre_scan': [ - 'ignore = scancode.plugin_ignore:ProcessIgnore', - ] + # scancode_output_filter is the entry point for filter plugins executed + # after the post-scan plugins and used by the output plugins to + # exclude/filter certain files or directories from the codebase. + # + # Each entry has this form: + # plugin-name = fully.qualified.module:PluginClass + # where plugin-name must be a unique name for this entrypoint. + # + # See also plugincode.post_scan module for details and doc.
+ 'scancode_output_filter': [ + 'only-findings = scancode.plugin_only_findings:OnlyFindings', + ], + + # scancode_output is the entry point for output plugins that write a scan + # output in a given format at the end of a scan. + # + # Each entry has this form: + # plugin-name = fully.qualified.module:PluginClass + # where plugin-name must be a unique name for this entrypoint. + # + # See also plugincode.output module for details and doc. + 'scancode_output': [ + 'html = formattedcode.output_html:HtmlOutput', + 'html-app = formattedcode.output_html:HtmlAppOutput', + 'json = formattedcode.output_json:JsonCompactOutput', + 'json-pp = formattedcode.output_json:JsonPrettyOutput', + 'spdx-tv = formattedcode.output_spdx:SpdxTvOutput', + 'spdx-rdf = formattedcode.output_spdx:SpdxRdfOutput', + 'csv = formattedcode.output_csv:CsvOutput', + 'jsonlines = formattedcode.output_jsonlines:JsonLinesOutput', + 'template = formattedcode.output_html:CustomTemplateOutput', + ], }, ) diff --git a/src/cluecode/copyrights.py b/src/cluecode/copyrights.py index 28bc3db1c1c..a7bce5f9f99 100644 --- a/src/cluecode/copyrights.py +++ b/src/cluecode/copyrights.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -30,19 +30,19 @@ import os import re - +# important: this sets re._MAXCACHE import commoncode + from textcode import analysis from cluecode import copyrights_hint - -COPYRIGHT_TRACE = 0 +TRACE = 0 logger = logging.getLogger(__name__) -if os.environ.get('SCANCODE_COPYRIGHT_DEBUG'): +if os.environ.get('SCANCODE_DEBUG_COPYRIGHT'): import sys logging.basicConfig(stream=sys.stdout) logger.setLevel(logging.DEBUG) - COPYRIGHT_TRACE = 0 + TRACE = 1 """ Detect and collect copyright statements.
@@ -132,7 +132,6 @@ def detect(location): '\ ' # html entity sometimes are double escaped ')*') # repeated 0 or more times - _YEAR_PUNCT = _YEAR + _PUNCT _YEAR_YEAR_PUNCT = _YEAR_YEAR + _PUNCT _YEAR_SHORT_PUNCT = _YEAR_SHORT + _PUNCT @@ -1068,11 +1067,12 @@ class CopyrightDetector(object): """ Class to detect copyrights and authorship. """ + def __init__(self): from nltk import RegexpTagger from nltk import RegexpParser self.tagger = RegexpTagger(patterns) - self.chunker = RegexpParser(grammar, trace=COPYRIGHT_TRACE) + self.chunker = RegexpParser(grammar, trace=0) @classmethod def as_str(cls, node, ignores=()): @@ -1385,24 +1385,29 @@ def lowercase_well_known_word(text): lines_append(' '.join(words)) return '\n'.join(lines) - # FIXME: instead of using functions, use plain re and let the re cache do its work + def IGNORED_PUNCTUATION_RE(): return re.compile(r'[*#"%\[\]\{\}`]+', re.I | re.M | re.U) + def ASCII_LINE_DECO_RE(): return re.compile(r'[-_=!\\*]{2,}') + def ASCII_LINE_DECO2_RE(): return re.compile(r'/{3,}') + def WHITESPACE_RE(): return re.compile(r' +') + def MULTIQUOTES_RE(): return re.compile(r"\'{2,}") + # TODO: add debian POS name taggings def DEBIAN_COPYRIGHT_TAGS_RE(): return re.compile(r"(\|\)") @@ -1417,7 +1422,7 @@ def prepare_text_line(line): # strip whitespace line = line.strip() - #FIXME: how did we get line returns in this???? + # FIXME: how did we get line returns in this???? line = line.replace('\n', ' ') # remove some junk in man pages: \(co diff --git a/src/cluecode/copyrights_hint.py b/src/cluecode/copyrights_hint.py index 90f2be1be0f..33739cfb368 100644 --- a/src/cluecode/copyrights_hint.py +++ b/src/cluecode/copyrights_hint.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. 
# Data generated with ScanCode require an acknowledgment. @@ -23,7 +23,6 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. - from __future__ import absolute_import from datetime import datetime @@ -37,7 +36,6 @@ years = r'[\(\.,\-\)\s]+(' + '|'.join(years) + r')[\(\.,\-\)\s]+' years = re.compile(years).findall - statement_markers = u''' © cop @@ -54,7 +52,6 @@ devel '''.split() - # (various copyright/copyleft signs tm, r etc) http://en.wikipedia.org/wiki/Copyright_symbol # ™ U+2122 TRADE MARK SIGN, decimal: 8482, HTML: ™, UTF-8: 0xE2 0x84 0xA2, block: Letterlike Symbols, decomposition: U+0054 U+004D @@ -63,7 +60,6 @@ # � U+00AE (174) # � U+2122 (8482) - '''HTML Entity (decimal) © HTML Entity (hex) © HTML Entity (named) © @@ -79,13 +75,11 @@ Python source code u"\u00A9" ''' - end_of_statement = ''' rights reserve right reserve '''.split() - # others stuffs ''' ® diff --git a/src/cluecode/finder.py b/src/cluecode/finder.py index a1bc5aee525..5ba20343b63 100644 --- a/src/cluecode/finder.py +++ b/src/cluecode/finder.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -22,22 +22,36 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
-from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function -import logging import string import re import url as urlpy import ipaddress -from textcode import analysis from cluecode import finder_data +from textcode import analysis + +# Tracing flags +TRACE = False -LOG = logging.getLogger(__name__) +def logger_debug(*args): + pass -DEBUG = False + +if TRACE: + import logging + import sys + logger = logging.getLogger(__name__) + # logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) + logging.basicConfig(stream=sys.stdout) + logger.setLevel(logging.DEBUG) + + def logger_debug(*args): + return logger.debug(' '.join(isinstance(a, basestring) and a or repr(a) for a in args)) """ Find patterns in text lines such as a emails and URLs. @@ -53,18 +67,18 @@ def find(location, patterns): Note: the location can be a list of lines for testing convenience. """ - if DEBUG: + if TRACE: from pprint import pformat loc = pformat(location) - print('find(location=%(loc)r,\n patterns=%(patterns)r)' % locals()) + logger_debug('find(location=%(loc)r,\n patterns=%(patterns)r)' % locals()) for i, line in enumerate(analysis.text_lines(location)): lineno = i + 1 for key, pattern in patterns: for match in pattern.findall(line): - if DEBUG: - print('find: yielding match: key=%(key)r, ' + if TRACE: + logger_debug('find: yielding match: key=%(key)r, ' 'match=%(match)r,\n line=%(line)r' % locals()) yield key, unicode(match), line, lineno @@ -110,11 +124,12 @@ def build_regex_filter(pattern): Return a filter function using regex pattern, filtering out matches matching this regex. The pattern should be text, not a compiled re. 
""" + def re_filt(matches): for key, match, line, lineno in matches: if re.match(regex, match): - if DEBUG: - print('build_regex_filter(pattern=%(pattern)r: ' + if TRACE: + logger_debug('build_regex_filter(pattern=%(pattern)r: ' 'filtering match: %(match)r' % locals()) continue yield key, match, line, lineno @@ -122,7 +137,6 @@ def re_filt(matches): regex = re.compile(pattern, re.UNICODE | re.I) return re_filt - # A good reference page of email address regex is: # http://fightingforalostcause.net/misc/2006/compare-email-regex.php email # regex from http://www.regular-expressions.info/regexbuddy/email.html @@ -172,7 +186,6 @@ def uninteresting_emails_filter(matches): continue yield key, email, line, lineno - # TODO: consider: http://www.regexguru.com/2008/11/detecting-urls-in-a-block-of-text/ # TODO: consider: http://blog.codinghorror.com/the-problem-with-urls/ @@ -180,6 +193,7 @@ def uninteresting_emails_filter(matches): schemes = 'https?|ftps?|sftp|rsync|ssh|svn|git|hg|https?\+git|https?\+svn|https?\+hg' url_body = '[^\s<>\[\]"]' + def urls_regex(): # no space, no < >, no [ ] and no double quote return re.compile(r''' @@ -237,8 +251,8 @@ def empty_urls_filter(matches): for key, match, line, lineno in matches: junk = match.lower().strip(string.punctuation).strip() if not junk or junk in EMPTY_URLS: - if DEBUG: - print('empty_urls_filter: filtering match: %(match)r' + if TRACE: + logger_debug('empty_urls_filter: filtering match: %(match)r' % locals()) continue yield key, match, line, lineno @@ -328,8 +342,8 @@ def user_pass_cleaning_filter(matches): if is_filterable(match): host, _domain = url_host_domain(match) if not host: - if DEBUG: - print('user_pass_cleaning_filter: ' + if TRACE: + logger_debug('user_pass_cleaning_filter: ' 'filtering match(no host): %(match)r' % locals()) continue if '@' in host: @@ -362,14 +376,15 @@ def canonical_url_cleaner(matches): for key, match, line, lineno in matches: if is_filterable(match): match = canonical_url(match) - if DEBUG: 
- print('canonical_url_cleaner: ' + if TRACE: + logger_debug('canonical_url_cleaner: ' 'match=%(match)r, canonic=%(canonic)r' % locals()) yield key, match , line, lineno IP_V4_RE = r'^(\d{1,3}\.){0,3}\d{1,3}$' + def is_ip_v4(s): return re.compile(IP_V4_RE).match(s) @@ -449,7 +464,6 @@ def is_good_host(host): return False return finder_data.classify_ip(host) - # at this stage we have a host name, not an IP if '.' not in host: @@ -484,14 +498,14 @@ def junk_url_hosts_filter(matches): if is_filterable(match): host, domain = url_host_domain(match) if not is_good_host(host): - if DEBUG: - print('junk_url_hosts_filter: ' + if TRACE: + logger_debug('junk_url_hosts_filter: ' '!is_good_host:%(host)r): %(match)r' % locals()) continue if not is_good_host(domain) and not is_ip(host): - if DEBUG: - print('junk_url_hosts_filter: ''!is_good_host:%(domain)r ' + if TRACE: + logger_debug('junk_url_hosts_filter: ''!is_good_host:%(domain)r ' 'and !is_ip:%(host)r: %(match)r' % locals()) continue yield key, match, line, lineno @@ -506,8 +520,8 @@ def junk_urls_filter(matches): for key, match, line, lineno in matches: good_url = finder_data.classify_url(match) if not good_url: - if DEBUG: - print('junk_url_filter: %(match)r' % locals()) + if TRACE: + logger_debug('junk_url_filter: %(match)r' % locals()) continue yield key, match, line, lineno diff --git a/src/cluecode/finder_data.py b/src/cluecode/finder_data.py index cbecb6533ce..3baf0a8fef2 100644 --- a/src/cluecode/finder_data.py +++ b/src/cluecode/finder_data.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. 
@@ -40,7 +40,6 @@ def set_from_text(text): test.com ''') - JUNK_HOSTS_AND_DOMAINS = set_from_text(u''' exmaple.com example.com @@ -56,12 +55,10 @@ def set_from_text(text): hostname ''') - JUNK_IPS = set_from_text(u''' 1.2.3.4 ''') - JUNK_URLS = set_from_text(u''' http://www.adobe.com/2006/mxml http://www.w3.org/1999/XSL/Transform @@ -134,7 +131,6 @@ def set_from_text(text): http://gcc.gnu.org/bugs.html ''') - JUNK_URL_PREFIXES = tuple(set_from_text(''' http://www.springframework.org/dtd/ http://www.slickedit.com/dtd/ @@ -175,7 +171,6 @@ def set_from_text(text): http://www.oasis-open.org/docbook/xml/ ''')) - JUNK_URL_SUFFIXES = tuple(set_from_text(''' .png .jpg diff --git a/src/commoncode/__init__.py b/src/commoncode/__init__.py index 096c946c0db..702495e4b65 100644 --- a/src/commoncode/__init__.py +++ b/src/commoncode/__init__.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. 
@@ -26,19 +26,22 @@ from __future__ import print_function from __future__ import unicode_literals -# set re and fnmatch _MAXCACHE to 1M to cache regex compiled aggressively -# their default is 100 and many utilities and libraries use a lot of regex -import re -remax = getattr(re, '_MAXCACHE', 0) -if remax < 1000000: - setattr(re, '_MAXCACHE', 1000000) -del remax +def set_re_max_cache(max_cache=1000000): + """ + Set re and fnmatch _MAXCACHE to 1M to cache regex compiled aggressively + their default is 100 and many utilities and libraries use a lot of regex + """ + import re + import fnmatch -import fnmatch + remax = getattr(re, '_MAXCACHE', 0) + if remax < max_cache: + setattr(re, '_MAXCACHE', max_cache) -fnmatchmax = getattr(fnmatch, '_MAXCACHE', 0) -if fnmatchmax < 1000000: - setattr(fnmatch, '_MAXCACHE', 1000000) -del fnmatchmax -del re + fnmatchmax = getattr(fnmatch, '_MAXCACHE', 0) + if fnmatchmax < max_cache: + setattr(fnmatch, '_MAXCACHE', max_cache) + + +set_re_max_cache() diff --git a/src/commoncode/command.py b/src/commoncode/command.py index ba1b6fd8e53..4594091cabb 100644 --- a/src/commoncode/command.py +++ b/src/commoncode/command.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. 
@@ -27,14 +27,21 @@ from __future__ import unicode_literals import ctypes -import os +import os as _os_module +from os.path import abspath +from os.path import exists +from os.path import dirname +from os.path import join + import logging import signal import subprocess -from commoncode import fileutils -from commoncode.fileutils import path_to_bytes -from commoncode.fileutils import path_to_unicode +from commoncode.fileutils import chmod +from commoncode.fileutils import fsencode +from commoncode.fileutils import fsdecode +from commoncode.fileutils import get_temp_dir +from commoncode.fileutils import RX from commoncode import text from commoncode import system from commoncode.system import current_os_arch @@ -43,21 +50,14 @@ from commoncode.system import on_windows from commoncode.system import on_linux - # Python 2 and 3 support try: # Python 2 unicode - str = unicode + str = unicode # NOQA except NameError: # Python 3 - unicode = str - -try: - from os import fsencode -except ImportError: - from backports.os import fsencode - + unicode = str # NOQA """ Minimal wrapper for executing external commands in sub-processes. 
The approach @@ -81,7 +81,7 @@ # logger.setLevel(logging.DEBUG) # current directory is the root dir of this library -curr_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +curr_dir = dirname(dirname(abspath(__file__))) def execute(cmd, args, root_dir=None, cwd=None, env=None, to_files=False): @@ -108,9 +108,9 @@ def execute(cmd, args, root_dir=None, cwd=None, env=None, to_files=False): cwd = cwd or curr_dir # temp files for stderr and stdout - tmp_dir = fileutils.get_temp_dir(base_dir='cmd') - sop = os.path.join(tmp_dir, 'stdout') - sep = os.path.join(tmp_dir, 'stderr') + tmp_dir = get_temp_dir(prefix='scancode-cmd-') + sop = join(tmp_dir, 'stdout') + sep = join(tmp_dir, 'stderr') # shell==True is DANGEROUS but we are not running arbitrary commands # though we can execute command that just happen to be in the path @@ -144,7 +144,7 @@ def os_arch_dir(root_dir, _os_arch=current_os_arch): Return a sub-directory of `root_dir` tailored for the current OS and current processor architecture. """ - return os.path.join(root_dir, _os_arch) + return join(root_dir, _os_arch) def os_noarch_dir(root_dir, _os_noarch=current_os_noarch): @@ -152,7 +152,7 @@ def os_noarch_dir(root_dir, _os_noarch=current_os_noarch): Return a sub-directory of `root_dir` tailored for the current OS and NOT specific to a processor architecture. """ - return os.path.join(root_dir, _os_noarch) + return join(root_dir, _os_noarch) def noarch_dir(root_dir, _noarch=noarch): @@ -160,7 +160,7 @@ def noarch_dir(root_dir, _noarch=noarch): Return a sub-directory of `root_dir` that is NOT specific to an OS or processor architecture. """ - return os.path.join(root_dir, _noarch) + return join(root_dir, _noarch) def get_base_dirs(root_dir, @@ -185,14 +185,14 @@ def get_base_dirs(root_dir, binary of any given binary. This function resolves to an actual OS/arch location in this context. 
""" - if not root_dir or not os.path.exists(root_dir): + if not root_dir or not exists(root_dir): return [] dirs = [] def find_loc(fun, arg): loc = fun(root_dir, arg) - if os.path.exists(loc): + if exists(loc): dirs.append(loc) if _os_arch: @@ -217,17 +217,17 @@ def get_bin_lib_dirs(base_dir): if not base_dir: return None, None - bin_dir = os.path.join(base_dir, 'bin') + bin_dir = join(base_dir, 'bin') - if os.path.exists(bin_dir): - fileutils.chmod(bin_dir, fileutils.RX, recurse=True) + if exists(bin_dir): + chmod(bin_dir, RX, recurse=True) else: bin_dir = None - lib_dir = os.path.join(base_dir, 'lib') + lib_dir = join(base_dir, 'lib') - if os.path.exists(lib_dir): - fileutils.chmod(bin_dir, fileutils.RX, recurse=True) + if exists(lib_dir): + chmod(bin_dir, RX, recurse=True) else: # default to bin for lib if it exists lib_dir = bin_dir or None @@ -291,9 +291,9 @@ def get_locations(cmd, root_dir, for base_dir in get_base_dirs(root_dir, _os_arch, _os_noarch, _noarch): bin_dir, lib_dir = get_bin_lib_dirs(base_dir) - cmd_loc = os.path.join(bin_dir, cmd) - if os.path.exists(cmd_loc): - fileutils.chmod(cmd_loc, fileutils.RX, recurse=False) + cmd_loc = join(bin_dir, cmd) + if exists(cmd_loc): + chmod(cmd_loc, RX, recurse=False) return cmd_loc, bin_dir, lib_dir else: # we just care for getting the dirs and grab the first one @@ -326,7 +326,7 @@ def close_pipe(p): try: # Ensure process death otherwise proc.wait may hang in some cases # NB: this will run only on POSIX OSes supporting signals - os.kill(proc.pid, signal.SIGKILL) # @UndefinedVariable + os.kill(proc.pid, signal.SIGKILL) # NOQA except: pass @@ -341,12 +341,12 @@ def load_lib(libname, root_dir): """ os_dir = get_base_dirs(root_dir)[0] _bin_dir, lib_dir = get_bin_lib_dirs(os_dir) - so = os.path.join(lib_dir, libname + system.lib_ext) + so = join(lib_dir, libname + system.lib_ext) # add lib path to the front of the PATH env var update_path_environment(lib_dir) - if os.path.exists(so): + if exists(so): if not 
isinstance(so, bytes): # ensure that the path is not Unicode... so = fsencode(so) @@ -356,7 +356,7 @@ def load_lib(libname, root_dir): raise ImportError('Failed to load %(libname)s from %(so)r' % locals()) -def update_path_environment(new_path, _os_module=os): +def update_path_environment(new_path, _os_module=_os_module): """ Update the PATH environment variable by adding `new_path` to the front of PATH if `new_path` is not alreday in the PATH. @@ -379,12 +379,12 @@ def update_path_environment(new_path, _os_module=os): # ensure we use unicode or bytes depending on OSes if on_linux: - new_path = path_to_bytes(new_path) - path_env = path_to_bytes(path_env) + new_path = fsencode(new_path) + path_env = fsencode(path_env) sep = _os_module.pathsep else: - new_path = path_to_unicode(new_path) - path_env = path_to_unicode(path_env) + new_path = fsdecode(new_path) + path_env = fsdecode(path_env) sep = unicode(_os_module.pathsep) path_segments = path_env.split(sep) @@ -399,6 +399,6 @@ def update_path_environment(new_path, _os_module=os): if not on_linux: # recode to bytes using FS encoding - new_path_env = path_to_bytes(new_path_env) + new_path_env = fsencode(new_path_env) # ... and set the variable back as bytes _os_module.environ[b'PATH'] = new_path_env diff --git a/src/commoncode/dict_utils.py b/src/commoncode/dict_utils.py index d9df72d7af8..5c71159e0af 100644 --- a/src/commoncode/dict_utils.py +++ b/src/commoncode/dict_utils.py @@ -36,13 +36,11 @@ import collections import itertools - # Placeholder constants FREE = -1 DUMMY = -2 - class Dict(collections.MutableMapping): """ Space efficient dictionary with fast iteration and cheap resizes. diff --git a/src/commoncode/fetch.py b/src/commoncode/fetch.py index 6f4c34ae0a3..0a1e656a8ab 100644 --- a/src/commoncode/fetch.py +++ b/src/commoncode/fetch.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. 
# http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -35,7 +35,6 @@ from commoncode import fileutils import os - logger = logging.getLogger(__name__) # import sys # logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) @@ -65,7 +64,7 @@ def download_url(url, file_name=None, verify=True, timeout=10): logger.error(msg) raise Exception(msg) - tmp_dir = fileutils.get_temp_dir(base_dir='fetch') + tmp_dir = fileutils.get_temp_dir(prefix='scancode-fetch-') output_file = os.path.join(tmp_dir, file_name) with open(output_file, 'wb') as out: out.write(response.content) diff --git a/src/commoncode/fileset.py b/src/commoncode/fileset.py index 589feb88922..b36e2d51e07 100644 --- a/src/commoncode/fileset.py +++ b/src/commoncode/fileset.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -34,18 +34,15 @@ from commoncode import paths from commoncode.system import on_linux - DEBUG = False logger = logging.getLogger(__name__) # import sys # logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) # logger.setLevel(logging.DEBUG) - POSIX_PATH_SEP = b'/' if on_linux else '/' EMPTY_STRING = b'' if on_linux else '' - """ Match files and directories paths based on inclusion and exclusion glob-style patterns. diff --git a/src/commoncode/filetype.py b/src/commoncode/filetype.py index 9e24e00b12d..ca4db6f3117 100644 --- a/src/commoncode/filetype.py +++ b/src/commoncode/filetype.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. 
# http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -33,11 +33,11 @@ from commoncode.system import on_posix from commoncode.functional import memoize - """ Low level file type utilities, essentially a wrapper around os.path and stat. """ + def is_link(location): """ Return True if `location` is a symbolic link. @@ -192,6 +192,7 @@ def get_last_modified_date(location): 'file_size': os.path.getsize, } + @memoize def counter(location, counting_function): """ diff --git a/src/commoncode/fileutils.py b/src/commoncode/fileutils.py index a0c236212e1..9f45aee8b4a 100644 --- a/src/commoncode/fileutils.py +++ b/src/commoncode/fileutils.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -23,24 +23,24 @@ # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
from __future__ import absolute_import -from __future__ import unicode_literals from __future__ import print_function +from __future__ import unicode_literals # Python 2 and 3 support try: # Python 2 unicode - str = unicode + str = unicode # NOQA except NameError: # Python 3 - unicode = str + unicode = str # NOQA try: from os import fsencode + from os import fsdecode except ImportError: from backports.os import fsencode - from backports.os import fsdecode - + from backports.os import fsdecode # NOQA import codecs import errno @@ -52,19 +52,21 @@ import sys import tempfile +try: + from scancode_config import scancode_temp_dir +except ImportError: + scancode_temp_dir = None from commoncode import filetype from commoncode.filetype import is_rwx -from commoncode import system from commoncode.system import on_linux from commoncode import text # this exception is not available on posix try: - WindowsError # @UndefinedVariable + WindowsError # NOQA except NameError: - WindowsError = None # @ReservedAssignment - + WindowsError = None # NOQA TRACE = False @@ -72,9 +74,11 @@ logger = logging.getLogger(__name__) + def logger_debug(*args): pass + if TRACE: logging.basicConfig(stream=sys.stdout) logger.setLevel(logging.DEBUG) @@ -82,7 +86,6 @@ def logger_debug(*args): def logger_debug(*args): return logger.debug(' '.join(isinstance(a, basestring) and a or repr(a) for a in args)) - # Paths can only be sanely handled as raw bytes on Linux PATH_TYPE = bytes if on_linux else unicode POSIX_PATH_SEP = b'/' if on_linux else '/' @@ -98,6 +101,7 @@ def logger_debug(*args): # DIRECTORIES # + def create_dir(location): """ Create directory and all sub-directories recursively at location ensuring these @@ -115,7 +119,7 @@ def create_dir(location): # FIXME: consider using UNC ?\\ paths if on_linux: - location = path_to_bytes(location) + location = fsencode(location) try: os.makedirs(location) chmod(location, RW, recurse=False) @@ -137,37 +141,39 @@ def create_dir(location): raise -def 
system_temp_dir(): +def get_temp_dir(base_dir=scancode_temp_dir, prefix=''): """ - Return the global temp directory for the current user. + Return the path to a new existing unique temporary directory, created under + the `base_dir` base directory using the `prefix` prefix. + If `base_dir` is not provided, use the 'SCANCODE_TMP' env var or the system + temp directory. + + WARNING: do not change this code without changing scancode_config.py too """ - temp_dir = os.getenv('SCANCODE_TMP') - if not temp_dir: - sc = text.python_safe_name('scancode_' + system.username) - temp_dir = os.path.join(tempfile.gettempdir(), sc) - if on_linux: - temp_dir = path_to_bytes(temp_dir) - create_dir(temp_dir) - return temp_dir + has_base = bool(base_dir) + if not has_base: + base_dir = os.getenv('SCANCODE_TMP') + if not base_dir: + base_dir = tempfile.gettempdir() + else: + if on_linux: + base_dir = fsencode(base_dir) + create_dir(base_dir) + + if not has_base: + prefix = 'scancode-tk-' -def get_temp_dir(base_dir, prefix=''): - """ - Return the path to a new unique temporary directory, created under - the system-wide `system_temp_dir` temp directory as a subdir of the - base_dir path (a path relative to the `system_temp_dir`). - """ if on_linux: - base_dir = path_to_bytes(base_dir) - prefix = path_to_bytes(prefix) - base = os.path.join(system_temp_dir(), base_dir) - create_dir(base) - return tempfile.mkdtemp(prefix=prefix, dir=base) + prefix = fsencode(prefix) + + return tempfile.mkdtemp(prefix=prefix, dir=base_dir) # # FILE READING # + def file_chunks(file_object, chunk_size=1024): """ Yield a file piece by piece. Default chunk size: 1k. 
@@ -190,7 +196,7 @@ def _text(location, encoding, universal_new_lines=True): Python2.6 see http://bugs.python.org/issue691291 """ if on_linux: - location = path_to_bytes(location) + location = fsencode(location) with codecs.open(location, 'r', encoding) as f: text = f.read() if universal_new_lines: @@ -215,25 +221,6 @@ def read_text_file(location, universal_new_lines=True): # TODO: move these functions to paths.py or codecs.py -def path_to_unicode(path): - """ - Return a path string `path` as a unicode string. - """ - if isinstance(path, unicode): - return path - if TRACE: logger_debug('path_to_unicode:', fsdecode(path)) - return fsdecode(path) - - -def path_to_bytes(path): - """ - Return a `path` string as a byte string using the filesystem encoding. - """ - if isinstance(path, bytes): - return path - if TRACE: logger_debug('path_to_bytes:' , repr(fsencode(path))) - return fsencode(path) - def is_posixpath(location): """ @@ -328,6 +315,53 @@ def file_extension(path, force_posix=False): return splitext(path, force_posix)[1] + +def splitext_name(file_name, is_file=True): + """ + Return a tuple of Unicode strings (basename, extension) for a file name. The + basename is the file name minus its extension. Return an empty extension + string for a directory. Not the same as os.path.splitext. 
+ + For example: + >>> expected = 'path', '.ext' + >>> assert expected == splitext_name('path.ext') + + Directories even with dotted names have no extension: + >>> expected = 'path.ext', '' + >>> assert expected == splitext_name('path.ext', is_file=False) + + >>> expected = 'file', '.txt' + >>> assert expected == splitext_name('file.txt') + + Composite extensions for tarballs are properly handled: + >>> expected = 'archive', '.tar.gz' + >>> assert expected == splitext_name('archive.tar.gz') + + dotfiles are properly handled: + >>> expected = '.dotfile', '' + >>> assert expected == splitext_name('.dotfile') + >>> expected = '.dotfile', '.this' + >>> assert expected == splitext_name('.dotfile.this') + """ + + if not file_name: + return '', '' + file_name = fsdecode(file_name) + + if not is_file: + return file_name, '' + + if file_name.startswith('.') and '.' not in file_name[1:]: + # .dot files base name is the full name and they do not have an extension + return file_name, '' + + base_name, extension = posixpath.splitext(file_name) + # handle composed extensions of tar.gz, bz, xz, etc + if base_name.endswith('.tar'): + base_name, extension2 = posixpath.splitext(base_name) + extension = extension2 + extension + return base_name, extension + +# TODO: FIXME: this is badly broken!!!! def splitext(path, force_posix=False): """ Return a tuple of strings (basename, extension) for a path. The basename is @@ -382,6 +416,7 @@ def splitext(path, force_posix=False): # DIRECTORY AND FILES WALKING/ITERATION # + ignore_nothing = lambda _: False @@ -397,7 +432,7 @@ def walk(location, ignored=ignore_nothing): - location is a directory or a file: for a file, the file is returned. """ if on_linux: - location = path_to_bytes(location) + location = fsencode(location) # TODO: consider using the new "scandir" module for some speed-up. 
if TRACE: @@ -432,60 +467,28 @@ def walk(location, ignored=ignore_nothing): yield tripple -def file_iter(location, ignored=ignore_nothing): - """ - Return an iterable of files at `location` recursively. - - :param location: a file or a directory. - :param ignored: a callable accepting a location argument and returning True - if the location should be ignored. - :return: an iterable of file locations. - """ - if on_linux: - location = path_to_bytes(location) - - return resource_iter(location, ignored, with_dirs=False) - - -def dir_iter(location, ignored=ignore_nothing): +def resource_iter(location, ignored=ignore_nothing, with_dirs=True): """ - Return an iterable of directories at `location` recursively. - - :param location: a directory. - :param ignored: a callable accepting a location argument and returning True - if the location should be ignored. - :return: an iterable of directory locations. - """ - if on_linux: - location = path_to_bytes(location) - return resource_iter(location, ignored, with_files=False) - - -def resource_iter(location, ignored=ignore_nothing, with_files=True, with_dirs=True): - """ - Return an iterable of resources at `location` recursively. + Return an iterable of paths at `location` recursively. :param location: a file or a directory. :param ignored: a callable accepting a location argument and returning True if the location should be ignored. - :param with_dirs: If True, include the directories. - :param with_files: If True, include the files. :return: an iterable of file and directory locations. 
""" - assert with_dirs or with_files, "fileutils.resource_iter: One or both of 'with_dirs' and 'with_files' is required" if on_linux: - location = path_to_bytes(location) + location = fsencode(location) for top, dirs, files in walk(location, ignored): - if with_files: - for f in files: - yield os.path.join(top, f) if with_dirs: for d in dirs: yield os.path.join(top, d) + for f in files: + yield os.path.join(top, f) # # COPY # + def copytree(src, dst): """ Copy recursively the `src` directory to the `dst` directory. If `dst` is an @@ -501,8 +504,8 @@ def copytree(src, dst): function. See fileutils.py.ABOUT for details. """ if on_linux: - src = path_to_bytes(src) - dst = path_to_bytes(dst) + src = fsencode(src) + dst = fsencode(dst) if not filetype.is_readable(src): chmod(src, R, recurse=False) @@ -550,8 +553,8 @@ def copyfile(src, dst): for details. """ if on_linux: - src = path_to_bytes(src) - dst = path_to_bytes(dst) + src = fsencode(src) + dst = fsencode(dst) if not filetype.is_regular(src): return @@ -571,8 +574,8 @@ def copytime(src, dst): for details. """ if on_linux: - src = path_to_bytes(src) - dst = path_to_bytes(dst) + src = fsencode(src) + dst = fsencode(dst) errors = [] st = os.stat(src) @@ -591,6 +594,7 @@ def copytime(src, dst): # PERMISSIONS # + # modes: read, write, executable R = stat.S_IRUSR RW = stat.S_IRUSR | stat.S_IWUSR @@ -608,7 +612,7 @@ def chmod(location, flags, recurse=False): if not location or not os.path.exists(location): return if on_linux: - location = path_to_bytes(location) + location = fsencode(location) location = os.path.abspath(location) @@ -638,7 +642,7 @@ def chmod_tree(location, flags): Update permissions recursively in a directory tree `location`. 
""" if on_linux: - location = path_to_bytes(location) + location = fsencode(location) if filetype.is_dir(location): for top, dirs, files in walk(location): for d in dirs: @@ -650,13 +654,14 @@ def chmod_tree(location, flags): # DELETION # -def _rm_handler(function, path, excinfo): # @UnusedVariable + +def _rm_handler(function, path, excinfo): # NOQA """ shutil.rmtree handler invoked on error when deleting a directory tree. This retries deleting once before giving up. """ if on_linux: - path = path_to_bytes(path) + path = fsencode(path) if function == os.rmdir: try: chmod(path, RW, recurse=True) @@ -686,7 +691,7 @@ def delete(location, _err_handler=_rm_handler): return if on_linux: - location = path_to_bytes(location) + location = fsencode(location) if os.path.exists(location) or filetype.is_broken_link(location): chmod(os.path.dirname(location), RW, recurse=False) diff --git a/src/commoncode/functional.py b/src/commoncode/functional.py index 1175f98fd6c..93049018a06 100644 --- a/src/commoncode/functional.py +++ b/src/commoncode/functional.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -37,6 +37,7 @@ def flatten(seq): flat list of elements. For example:: + >>> flatten([7, (6, [5, [4, ['a'], 3]], 3), 2, 1]) [7, 6, 5, 4, 'a', 3, 3, 2, 1] >>> def gen(): @@ -68,6 +69,7 @@ def pair_chunks(iterable): must contain an even number of elements or it will truncated. For example:: + >>> list(pair_chunks([1, 2, 3, 4, 5, 6])) [(1, 2), (3, 4), (5, 6)] >>> list(pair_chunks([1, 2, 3, 4, 5, 6, 7])) @@ -78,10 +80,11 @@ def pair_chunks(iterable): def memoize(fun): """ - Decorate fun function and cache return values. Arguments must be - hashable. kwargs are not handled. 
Used to speed up some often executed - functions. - Usage example:: + Decorate `fun` function and cache return values. Arguments must be hashable. + Only args are supported, kwargs are not handled. Used to speed up some often + executed functions. + + For example:: >>> @memoize ... def expensive(*args, **kwargs): @@ -114,7 +117,7 @@ def memoized(*args, **kwargs): # calls with kwargs are not handled and not cached if kwargs: return fun(*args, **kwargs) - # convert any list arg to a tuple + # convert any list args to a tuple args = tuple(tuple(arg) if isinstance(arg, (ListType, tuple, array)) else arg for arg in args) try: @@ -128,10 +131,11 @@ def memoized(*args, **kwargs): def memoize_to_attribute(attr_name, _test=False): """ - Decorate a method and cache return values in attr_name of the parent object. + Decorate a method and cache return values in `attr_name` of the parent object. Used to speed up some often called methods that cache their values in instance variables. - Usage example:: + + For example:: >>> class Obj(object): ... def __init__(self): @@ -153,7 +157,9 @@ def memoize_to_attribute(attr_name, _test=False): The Obj().expensive property value will be cached to attr_name self._expensive and computed only once in the life of the Obj instance. """ + def memoized_to_attr(meth): + @functools.wraps(meth) def wrapper(self, *args, **kwargs): if getattr(self, attr_name) is None: @@ -162,6 +168,7 @@ def wrapper(self, *args, **kwargs): else: res = getattr(self, attr_name) return res + return wrapper return memoized_to_attr @@ -169,10 +176,11 @@ def wrapper(self, *args, **kwargs): def memoize_gen(fun): """ - Decorate fun generator function and cache return values. Arguments must be + Decorate `fun` generator function and cache return values. Arguments must be hashable. kwargs are not handled. Used to speed up some often executed functions. - Usage example:: + + For example:: >>> @memoize ... 
def expensive(*args, **kwargs): @@ -215,3 +223,34 @@ def memoized(*args, **kwargs): return memos[args] return functools.update_wrapper(memoized, fun) + + +def iter_skip(iterable, skip_first=False, skip_last=False): + """ + Given an iterable, return an iterable skipping the first item if skip_first + is True or the last item if skip_last is True. + For example: + >>> a = iter(range(10)) + >>> list(iter_skip(a, skip_first=True, skip_last=False)) + [1, 2, 3, 4, 5, 6, 7, 8, 9] + >>> a = iter(range(10)) + >>> list(iter_skip(a, skip_first=False, skip_last=True)) + [0, 1, 2, 3, 4, 5, 6, 7, 8] + >>> a = iter(range(10)) + >>> list(iter_skip(a, skip_first=True, skip_last=True)) + [1, 2, 3, 4, 5, 6, 7, 8] + >>> a = iter(range(10)) + >>> list(iter_skip(a, skip_first=False, skip_last=False)) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + >>> a = iter(range(10)) + >>> list(iter_skip(a)) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + """ + current = next(iterable) + if skip_first: + current = next(iterable) + for item in iterable: + yield current + current = item + if not skip_last: + yield current diff --git a/src/commoncode/hash.py b/src/commoncode/hash.py index d8f9ab94feb..d7b3f48ec40 100644 --- a/src/commoncode/hash.py +++ b/src/commoncode/hash.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -33,7 +33,6 @@ from commoncode.codec import urlsafe_b64encode from commoncode import filetype - """ Hashes and checksums. @@ -44,12 +43,15 @@ Checksums are operating on files. """ + def _hash_mod(bitsize, hmodule): """ Return a hashing class returning hashes with a `bitsize` bit length. The interface of this class is similar to the hash module API. 
""" + class hasher(object): + def __init__(self, msg=None): self.digest_size = bitsize // 8 self.h = msg and hmodule(msg).digest()[:self.digest_size] or None @@ -94,6 +96,7 @@ class sha1_git_hasher(object): """ Hash content using the git blob SHA1 convention. """ + def __init__(self, msg=None): self.digest_size = 160 // 8 self.h = msg and self._compute(msg) or None @@ -148,18 +151,23 @@ def checksum(location, name, base64=False): def md5(location): return checksum(location, name='md5', base64=False) + def sha1(location): return checksum(location, name='sha1', base64=False) + def b64sha1(location): return checksum(location, name='sha1', base64=True) + def sha256(location): return checksum(location, name='sha256', base64=False) + def sha512(location): return checksum(location, name='sha512', base64=False) + def sha1_git(location): return checksum(location, name='sha1_git', base64=False) diff --git a/src/commoncode/ignore.py b/src/commoncode/ignore.py index c4a86be930b..d04e4892342 100644 --- a/src/commoncode/ignore.py +++ b/src/commoncode/ignore.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -36,7 +36,7 @@ """ -def is_ignored(location, ignores, unignores, skip_special=True): +def is_ignored(location, ignores, unignores=None, skip_special=True): """ Return a tuple of (pattern , message) if a file at location is ignored or False otherwise. 
@@ -74,6 +74,7 @@ def get_ignores(location, include_defaults=True): # Default ignores # + ignores_MacOSX = { '.DS_Store': 'Default ignore: MacOSX artifact', '._.DS_Store': 'Default ignore: MacOSX artifact', @@ -293,7 +294,6 @@ def get_ignores(location, include_defaults=True): '/.ssh': 'Default ignore: SSH configuration', } - default_ignores = {} default_ignores.update(chain(*[d.items() for d in [ diff --git a/src/commoncode/misc.ABOUT b/src/commoncode/misc.ABOUT deleted file mode 100644 index 5cd542f93b1..00000000000 --- a/src/commoncode/misc.ABOUT +++ /dev/null @@ -1,8 +0,0 @@ -about_resource: misc.py -download_url: - - http://code.activestate.com/recipes/578433-mixin-for-pickling-objects-with-__slots__/ - -dje_license: mit -license_text_file: misc.LICENSE -copyright: Copyright (c) 2013 Oren Tirosh -owner: Oren Tirosh diff --git a/src/commoncode/misc.LICENSE b/src/commoncode/misc.LICENSE deleted file mode 100644 index 4a72b80190d..00000000000 --- a/src/commoncode/misc.LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2013 Oren Tirosh -# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation files -# (the "Software"), to deal in the Software without restriction, -# including without limitation the rights to use, copy, modify, merge, -# publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, -# subject to the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/src/commoncode/misc.py b/src/commoncode/misc.py deleted file mode 100644 index be957dfdaed..00000000000 --- a/src/commoncode/misc.py +++ /dev/null @@ -1,57 +0,0 @@ -# -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. -# http://nexb.com and https://github.com/nexB/scancode-toolkit/ -# The ScanCode software is licensed under the Apache License version 2.0. -# Data generated with ScanCode require an acknowledgment. -# ScanCode is a trademark of nexB Inc. -# -# You may not use this software except in compliance with the License. -# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -# -# When you publish or redistribute any data created with ScanCode or any ScanCode -# derivative work, you must accompany this data with the following acknowledgment: -# -# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES -# OR CONDITIONS OF ANY KIND, either express or implied. No content created from -# ScanCode should be considered or used as legal advice. Consult an Attorney -# for any legal advice. -# ScanCode is a free software code scanning tool from nexB Inc. and others. -# Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
- -from __future__ import absolute_import, print_function - - -class SlotPickleMixin(object): - # SlotPickelMixin is originally from: - # http://code.activestate.com/recipes/578433-mixin-for-pickling-objects-with-__slots__/ - # Copyright (c) 2013 Created by Oren Tirosh - # - # Permission is hereby granted, free of charge, to any person - # obtaining a copy of this software and associated documentation files - # (the "Software"), to deal in the Software without restriction, - # including without limitation the rights to use, copy, modify, merge, - # publish, distribute, sublicense, and/or sell copies of the Software, - # and to permit persons to whom the Software is furnished to do so, - # subject to the following conditions: - # - # The above copyright notice and this permission notice shall be - # included in all copies or substantial portions of the Software. - # - # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - # OTHER DEALINGS IN THE SOFTWARE. - def __getstate__(self): - return {slot: getattr(self, slot) for slot in self.__slots__ if hasattr(self, slot)} - - def __setstate__(self, state): - for slot, value in state.items(): - setattr(self, slot, value) diff --git a/src/commoncode/paths.py b/src/commoncode/paths.py index 903eba1e224..17defd15a52 100644 --- a/src/commoncode/paths.py +++ b/src/commoncode/paths.py @@ -38,13 +38,11 @@ from commoncode.fileutils import is_posixpath from commoncode.system import on_linux - """ Various path utilities such as common prefix and suffix functions, conversion to OS-safe paths and to POSIX paths. 
""" - POSIX_PATH_SEP = b'/' if on_linux else '/' WIN_PATH_SEP = b'\\' if on_linux else '\\' EMPTY_STRING = b'' if on_linux else '' @@ -52,6 +50,7 @@ # # Build OS-portable and safer paths + def safe_path(path, posix=False): """ Convert `path` to a safe and portable POSIX path usable on multiple OSes. The @@ -78,8 +77,6 @@ def safe_path(path, posix=False): segments = [s.strip() for s in path.split(path_sep) if s.strip()] segments = [portable_filename(s) for s in segments] - # print('safe_path: orig:', orig_path, 'segments:', segments) - if not segments: return '_' @@ -89,7 +86,6 @@ def safe_path(path, posix=False): return as_posixpath(path) - def path_handlers(path, posix=True): """ Return a path module and path separator to use for handling (e.g. split and join) @@ -223,7 +219,6 @@ def portable_filename(filename): if basename.lower() in windows_illegal_names: filename = ''.join([basename, '_', dot, extension]) - # no name made only of dots. if set(filename) == set(['.']): filename = 'dot' * len(filename) @@ -239,6 +234,7 @@ def portable_filename(filename): # paths comparisons, common prefix and suffix extraction # + def common_prefix(s1, s2): """ Return the common leading subsequence of two sequences and its length. diff --git a/src/commoncode/saneyaml.py b/src/commoncode/saneyaml.py index 2de2aa78d74..17634a6f39e 100644 --- a/src/commoncode/saneyaml.py +++ b/src/commoncode/saneyaml.py @@ -22,7 +22,6 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
- from __future__ import absolute_import from __future__ import print_function @@ -38,7 +37,6 @@ from yaml import SafeLoader from yaml import SafeDumper - """ Wrapper around PyYAML to provide sane defaults ensuring that dump/load does not damage content, keeps ordering, use always block-style and use four spaces @@ -57,6 +55,7 @@ # https://pypi.python.org/pypi/ruamel.yaml/0.9.1 # https://pypi.python.org/pypi/yaml2rst/0.2 + def load(s): """ Return an object safely loaded from YAML string `s`. `s` must be unicode @@ -90,6 +89,7 @@ class SaneLoader(SafeLoader): """ A safe loader configured with many sane defaults. """ + def ignore_aliases(self, data): return True @@ -120,6 +120,7 @@ def string_loader(loader, node): # keep boolean conversion # SaneLoader.add_constructor(u'tag:yaml.org,2002:boolean', string_loader) + def ordered_loader(loader, node): """ Ensure that YAML maps ordered is preserved and loaded in an OrderedDict. @@ -143,6 +144,7 @@ def ordered_loader(loader, node): class SaneDumper(SafeDumper): + def increase_indent(self, flow=False, indentless=False): """ Ensure that lists items are always indented. @@ -162,6 +164,7 @@ def ordered_dumper(dumper, data): """ return dumper.represent_mapping(u'tag:yaml.org,2002:map', data.items()) + SaneDumper.add_representer(OrderedDict, ordered_dumper) @@ -171,6 +174,7 @@ def null_dumper(dumper, value): """ return dumper.represent_scalar(u'tag:yaml.org,2002:null', u'') + SafeDumper.add_representer(type(None), null_dumper) @@ -210,4 +214,5 @@ def boolean_dumper(dumper, value): style = None return dumper.represent_scalar(u'tag:yaml.org,2002:bool', value, style=style) + SaneDumper.add_representer(bool, boolean_dumper) diff --git a/src/commoncode/system.py b/src/commoncode/system.py index 4cfc520e726..250d1f4b0e6 100644 --- a/src/commoncode/system.py +++ b/src/commoncode/system.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. 
# http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -50,8 +50,9 @@ def os_arch(): raise Exception('Unsupported OS/platform %r' % sys_platform) return os, arch - # FIXME use these for architectures + + ''' darwin/386 darwin/amd64 @@ -85,12 +86,10 @@ def os_arch(): on_linux = current_os == 'linux' on_posix = not on_windows and (on_mac or on_linux) - current_os_arch = '%(current_os)s-%(current_arch)s' % locals() noarch = 'noarch' current_os_noarch = '%(current_os)s-%(noarch)s' % locals() - # # Shared library file extensions # @@ -101,25 +100,19 @@ def os_arch(): if on_linux: lib_ext = '.so' - # # Python versions # -py27 = (sys.version_info[0] == 2 and sys.version_info[1] == 7) -py34 = (sys.version_info[0] == 3 and sys.version_info[1] == 4) -py35 = (sys.version_info[0] == 3 and sys.version_info[1] == 5) -py35 = (sys.version_info[0] == 3 and sys.version_info[1] == 6) -# -# User related -# -if on_windows: - user_home = os.path.join(os.path.expandvars('$HOMEDRIVE'), - os.path.expandvars('$HOMEPATH')) -else: - user_home = os.path.expanduser('~') - -username = getpass.getuser() - +_sys_v0 = sys.version_info[0] +py2 = _sys_v0 == 2 +py3 = _sys_v0 == 3 + +_sys_v1 = sys.version_info[1] +py27 = py2 and _sys_v1 == 7 +py34 = py3 and _sys_v1 == 4 +py35 = py3 and _sys_v1 == 5 +py36 = py3 and _sys_v1 == 6 +py37 = py3 and _sys_v1 == 7 # Do not let Windows error pop up messages with default SetErrorMode # See http://msdn.microsoft.com/en-us/library/ms680621(VS100).aspx diff --git a/src/commoncode/testcase.py b/src/commoncode/testcase.py index 780fd74a29d..d341e4b7173 100644 --- a/src/commoncode/testcase.py +++ b/src/commoncode/testcase.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. 
# http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -22,7 +22,6 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. - from __future__ import absolute_import from __future__ import print_function from __future__ import division @@ -39,7 +38,7 @@ import zipfile from commoncode import fileutils -from commoncode.fileutils import path_to_bytes +from commoncode.fileutils import fsencode from commoncode import filetype from commoncode.system import on_linux from commoncode.system import on_posix @@ -53,7 +52,6 @@ class EnhancedAssertions(TestCaseClass): # always show full diff maxDiff = None - def failUnlessRaisesInstance(self, excInstance, callableObj, *args, **kwargs): """ @@ -79,11 +77,9 @@ def failUnlessRaisesInstance(self, excInstance, callableObj, # to ensure that multiple tests run can be launched in parallel test_run_temp_dir = None - # set to 1 to see the slow tests timing_threshold = sys.maxint - POSIX_PATH_SEP = b'/' if on_linux else '/' WIN_PATH_SEP = b'\\' if on_linux else '\\' EMPTY_STRING = b'' if on_linux else '' @@ -100,7 +96,7 @@ def to_os_native_path(path): Normalize a path to use the native OS path separator. """ if on_linux: - path = path_to_bytes(path) + path = fsencode(path) path = path.replace(POSIX_PATH_SEP, OS_PATH_SEP) path = path.replace(WIN_PATH_SEP, OS_PATH_SEP) path = path.rstrip(OS_PATH_SEP) @@ -113,8 +109,8 @@ def get_test_loc(test_path, test_data_dir, debug=False, exists=True): location to a test file or directory for this path. No copy is done. 
""" if on_linux: - test_path = path_to_bytes(test_path) - test_data_dir = path_to_bytes(test_data_dir) + test_path = fsencode(test_path) + test_data_dir = fsencode(test_data_dir) if debug: import inspect @@ -154,8 +150,8 @@ def get_test_loc(self, test_path, copy=False, debug=False): """ test_data_dir = self.test_data_dir if on_linux: - test_path = path_to_bytes(test_path) - test_data_dir = path_to_bytes(test_data_dir) + test_path = fsencode(test_path) + test_data_dir = fsencode(test_data_dir) if debug: import inspect @@ -189,9 +185,9 @@ def get_temp_file(self, extension=None, dir_name='td', file_name='tf'): extension = '.txt' if on_linux: - extension = path_to_bytes(extension) - dir_name = path_to_bytes(dir_name) - file_name = path_to_bytes(file_name) + extension = fsencode(extension) + dir_name = fsencode(dir_name) + file_name = fsencode(file_name) if extension and not extension.startswith(DOT): extension = DOT + extension @@ -211,11 +207,12 @@ def get_temp_dir(self, sub_dir_path=None): # ensure that we have a new unique temp directory for each test run global test_run_temp_dir if not test_run_temp_dir: - test_run_temp_dir = fileutils.get_temp_dir(base_dir='tst', prefix=' ') + # not we add a space in the path for testing path with spaces + test_run_temp_dir = fileutils.get_temp_dir(prefix='scancode-tests -') if on_linux: - test_run_temp_dir = path_to_bytes(test_run_temp_dir) + test_run_temp_dir = fsencode(test_run_temp_dir) - new_temp_dir = fileutils.get_temp_dir(base_dir=test_run_temp_dir) + new_temp_dir = fileutils.get_temp_dir(base_dir=test_run_temp_dir, prefix='') if sub_dir_path: # create a sub directory hierarchy if requested @@ -230,8 +227,8 @@ def remove_vcs(self, test_dir): """ vcses = ('CVS', '.svn', '.git', '.hg') if on_linux: - vcses = tuple(path_to_bytes(p) for p in vcses) - test_dir = path_to_bytes(test_dir) + vcses = tuple(fsencode(p) for p in vcses) + test_dir = fsencode(test_dir) for root, dirs, files in os.walk(test_dir): for vcs_dir in vcses: @@ 
-247,7 +244,6 @@ def remove_vcs(self, test_dir): map(os.remove, [os.path.join(root, file_loc) for file_loc in files if file_loc.endswith(tilde)]) - def __extract(self, test_path, extract_func=None, verbatim=False): """ Given an archive file identified by test_path relative @@ -257,14 +253,14 @@ def __extract(self, test_path, extract_func=None, verbatim=False): """ assert test_path and test_path != '' if on_linux: - test_path = path_to_bytes(test_path) + test_path = fsencode(test_path) test_path = to_os_native_path(test_path) target_path = os.path.basename(test_path) target_dir = self.get_temp_dir(target_path) original_archive = self.get_test_loc(test_path) if on_linux: - target_dir = path_to_bytes(target_dir) - original_archive = path_to_bytes(original_archive) + target_dir = fsencode(target_dir) + original_archive = fsencode(original_archive) extract_func(original_archive, target_dir, verbatim=verbatim) return target_dir @@ -272,6 +268,9 @@ def __extract(self, test_path, extract_func=None, verbatim=False): def extract_test_zip(self, test_path, *args, **kwargs): return self.__extract(test_path, extract_zip) + def extract_test_zip_raw(self, test_path, *args, **kwargs): + return self.__extract(test_path, extract_zip_raw) + def extract_test_tar(self, test_path, verbatim=False): return self.__extract(test_path, extract_tar, verbatim) @@ -289,12 +288,13 @@ def _extract_tar_raw(test_path, target_dir, to_bytes, *args, **kwargs): """ if to_bytes: # use bytes for paths on ALL OSes (though this may fail on macOS) - target_dir = path_to_bytes(target_dir) - test_path = path_to_bytes(test_path) + target_dir = fsencode(target_dir) + test_path = fsencode(test_path) tar = tarfile.open(test_path) tar.extractall(path=target_dir) tar.close() + extract_tar_raw = partial(_extract_tar_raw, to_bytes=True) extract_tar_uni = partial(_extract_tar_raw, to_bytes=False) @@ -307,8 +307,8 @@ def extract_tar(location, target_dir, verbatim=False, *args, **kwargs): """ # always for using bytes for 
paths on all OSses... tar seems to use bytes internally # and get confused otherwise - location = path_to_bytes(location) - target_dir = path_to_bytes(target_dir) + location = fsencode(location) + target_dir = fsencode(target_dir) with open(location, 'rb') as input_tar: tar = None @@ -335,8 +335,8 @@ def extract_zip(location, target_dir, *args, **kwargs): raise Exception('Incorrect zip file %(location)r' % locals()) if on_linux: - location = path_to_bytes(location) - target_dir = path_to_bytes(target_dir) + location = fsencode(location) + target_dir = fsencode(target_dir) with zipfile.ZipFile(location) as zipf: for info in zipf.infolist(): @@ -353,6 +353,22 @@ def extract_zip(location, target_dir, *args, **kwargs): f.write(content) +def extract_zip_raw(location, target_dir, *args, **kwargs): + """ + Extract a zip archive file at location in the target_dir directory. + Use the builtin extractall function + """ + if not os.path.isfile(location) and zipfile.is_zipfile(location): + raise Exception('Incorrect zip file %(location)r' % locals()) + + if on_linux: + location = fsencode(location) + target_dir = fsencode(target_dir) + + with zipfile.ZipFile(location) as zipf: + zipf.extractall(path=target_dir) + + def tar_can_extract(tarinfo, verbatim): """ Return True if a tar member can be extracted to handle OS specifics. 
diff --git a/src/commoncode/text.py b/src/commoncode/text.py index 5be67b83fba..b613df485ca 100644 --- a/src/commoncode/text.py +++ b/src/commoncode/text.py @@ -35,15 +35,13 @@ import chardet from text_unidecode import unidecode - # Python 2 and 3 support try: # Python 2 unicode except NameError: # Python 3 - unicode = str - + unicode = str # NOQA """ A text processing module providing functions to process and prepare text @@ -54,7 +52,6 @@ - line separator stripping and conversion """ - LOG = logging.getLogger(__name__) diff --git a/src/commoncode/timeutils.py b/src/commoncode/timeutils.py index 9db6613508d..99cc33db260 100644 --- a/src/commoncode/timeutils.py +++ b/src/commoncode/timeutils.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -24,23 +24,28 @@ from __future__ import absolute_import, print_function - -from datetime import datetime, tzinfo +from datetime import datetime +from datetime import tzinfo +from functools import update_wrapper +from functools import wraps +from time import time """ Time is of the essence: path safe time stamps creation and conversion to datetime objects. """ + class UTC(tzinfo): """UTC timezone""" - def utcoffset(self, dt): # @UnusedVariable + + def utcoffset(self, dt): # NOQA return None - def tzname(self, dt): # @UnusedVariable + def tzname(self, dt): # NOQA return 'UTC' - def dst(self, dt): # @UnusedVariable + def dst(self, dt): # NOQA return None @@ -60,7 +65,8 @@ def time2tstamp(dt=None): For times, the ISO 8601 format specifies either a colon : (extended format) or nothing as a separator (basic format). Here Python defaults to using a - colon. We therefore remove all the colons to be file system safe. + colon. 
We therefore remove all the colons to be safe across filesystems. (a + colon is not a valid path char on Windows) Another character may show up in the ISO representation such as / for time intervals. We could replace the forward slash with a double hyphen (--) as @@ -99,3 +105,22 @@ def tstamp2time(stamp): if 0 <= microsec <= 999999: datim = datim.replace(microsecond=microsec) return datim + + +def timed(fun): + """ + Decorate `fun` callable to return a tuple of (timing, result) where timing + is a function execution time in seconds as a float and result is the value + returned by calling `fun`. + + Note: this decorator will not work as expected for functions that return + generators. + """ + + @wraps(fun) + def _timed(*args, **kwargs): + start = time() + result = fun(*args, **kwargs) + return time() - start, result + + return update_wrapper(_timed, fun) diff --git a/src/commoncode/version.py b/src/commoncode/version.py index c980ee04f87..61323746a8a 100644 --- a/src/commoncode/version.py +++ b/src/commoncode/version.py @@ -24,7 +24,6 @@ from __future__ import absolute_import, print_function - import re from commoncode.system import on_linux diff --git a/src/extractcode/__init__.py b/src/extractcode/__init__.py index 6c6ed472ac9..8b70e72c64c 100644 --- a/src/extractcode/__init__.py +++ b/src/extractcode/__init__.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. 
@@ -33,12 +33,16 @@ import shutil import sys -from commoncode import fileutils +from commoncode.fileutils import as_posixpath +from commoncode.fileutils import create_dir +from commoncode.fileutils import file_name +from commoncode.fileutils import fsencode +from commoncode.fileutils import parent_directory from commoncode.text import toascii from commoncode.system import on_linux -from commoncode.fileutils import path_to_bytes -from commoncode.system import on_linux - +from os.path import dirname +from os.path import join +from os.path import exists logger = logging.getLogger(__name__) DEBUG = False @@ -46,9 +50,7 @@ # logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) # logger.setLevel(logging.DEBUG) - -root_dir = os.path.join(os.path.dirname(__file__), 'bin') - +root_dir = join(dirname(__file__), 'bin') POSIX_PATH_SEP = b'/' if on_linux else '/' WIN_PATH_SEP = b'\\' if on_linux else '\\' @@ -61,7 +63,6 @@ # Suffix added to extracted target_dir paths EXTRACT_SUFFIX = b'-extract' if on_linux else r'-extract' - # high level archive "kinds" docs = 1 regular = 2 @@ -71,7 +72,6 @@ patches = 6 special_package = 7 - kind_labels = { 1: 'docs', 2: 'regular', @@ -103,7 +103,7 @@ def is_extraction_path(path): Return True is the path points to an extraction path. """ if on_linux: - path = path_to_bytes(path) + path = fsencode(path) return path and path.rstrip(PATHS_SEPS).endswith(EXTRACT_SUFFIX) @@ -114,8 +114,8 @@ def is_extracted(location): extraction location. """ if on_linux: - location = path_to_bytes(location) - return location and os.path.exists(get_extraction_path(location)) + location = fsencode(location) + return location and exists(get_extraction_path(location)) def get_extraction_path(path): @@ -123,7 +123,7 @@ def get_extraction_path(path): Return a path where to extract. 
""" if on_linux: - path = path_to_bytes(path) + path = fsencode(path) return path.rstrip(PATHS_SEPS) + EXTRACT_SUFFIX @@ -132,7 +132,7 @@ def remove_archive_suffix(path): Remove all the extracted suffix from a path. """ if on_linux: - path = path_to_bytes(path) + path = fsencode(path) return re.sub(EXTRACT_SUFFIX, EMPTY_STRING, path) @@ -142,25 +142,25 @@ def remove_backslashes_and_dotdots(directory): Return a list of errors if any. """ if on_linux: - directory = path_to_bytes(directory) + directory = fsencode(directory) errors = [] for top, _, files in os.walk(directory): for filename in files: if not (WIN_PATH_SEP in filename or DOTDOT in filename): continue try: - new_path = fileutils.as_posixpath(filename) + new_path = as_posixpath(filename) new_path = new_path.strip(POSIX_PATH_SEP) new_path = posixpath.normpath(new_path) new_path = new_path.replace(DOTDOT, POSIX_PATH_SEP) new_path = new_path.strip(POSIX_PATH_SEP) new_path = posixpath.normpath(new_path) segments = new_path.split(POSIX_PATH_SEP) - directory = os.path.join(top, *segments[:-1]) - fileutils.create_dir(directory) - shutil.move(os.path.join(top, filename), os.path.join(top, *segments)) + directory = join(top, *segments[:-1]) + create_dir(directory) + shutil.move(join(top, filename), join(top, *segments)) except Exception: - errors.append(os.path.join(top, filename)) + errors.append(join(top, filename)) return errors @@ -180,16 +180,16 @@ def new_name(location, is_dir=False): """ assert location if on_linux: - location = path_to_bytes(location) + location = fsencode(location) location = location.rstrip(PATHS_SEPS) assert location - parent = fileutils.parent_directory(location) + parent = parent_directory(location) # all existing files or directory as lower case siblings_lower = set(s.lower() for s in os.listdir(parent)) - filename = fileutils.file_name(location) + filename = file_name(location) # corner case if filename in (DOT, DOT): @@ -197,7 +197,7 @@ def new_name(location, is_dir=False): # if 
unique, return this if filename.lower() not in siblings_lower: - return os.path.join(parent, filename) + return join(parent, filename) # otherwise seek a unique name if is_dir: @@ -219,7 +219,7 @@ def new_name(location, is_dir=False): if filename.lower() not in siblings_lower: break counter += 1 - return os.path.join(parent, filename) + return join(parent, filename) # TODO: use attrs and slots @@ -289,14 +289,18 @@ def to_dict(self): class ExtractError(Exception): pass + class ExtractErrorPasswordProtected(ExtractError): pass + class ExtractErrorFailedToExtract(ExtractError): pass + class ExtractWarningIncorrectEntry(ExtractError): pass + class ExtractWarningTrailingGarbage(ExtractError): pass diff --git a/src/extractcode/archive.py b/src/extractcode/archive.py index 673bcd1199b..555b33b5a0c 100644 --- a/src/extractcode/archive.py +++ b/src/extractcode/archive.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -33,6 +33,7 @@ from commoncode import fileutils from commoncode import filetype +from commoncode.system import on_linux import typecode from extractcode import all_kinds @@ -49,9 +50,6 @@ from extractcode import libarchive2 from extractcode.uncompress import uncompress_gzip from extractcode.uncompress import uncompress_bzip2 -from commoncode.system import on_linux -from commoncode.fileutils import path_to_bytes - logger = logging.getLogger(__name__) TRACE = False @@ -62,8 +60,6 @@ logging.basicConfig(stream=sys.stdout) logger.setLevel(logging.DEBUG) - - """ Archive formats handling. The purpose of this module is to select an extractor suitable for the accurate extraction of a given kind of archive. 
An extractor is @@ -150,7 +146,7 @@ def get_best_handler(location, kinds=all_kinds): Return the best handler of None for the file at location. """ if on_linux: - location = path_to_bytes(location) + location = fileutils.fsencode(location) location = os.path.abspath(os.path.expanduser(location)) if not filetype.is_file(location): return @@ -166,7 +162,7 @@ def get_handlers(location): extension_matched,) for this `location`. """ if on_linux: - location = path_to_bytes(location) + location = fileutils.fsencode(location) if filetype.is_file(location): T = typecode.contenttype.get_type(location) @@ -187,7 +183,7 @@ def get_handlers(location): exts = handler.extensions if exts: if on_linux: - exts = tuple(path_to_bytes(e) for e in exts) + exts = tuple(fileutils.fsencode(e) for e in exts) extension_matched = exts and location.lower().endswith(exts) if TRACE_DEEP: @@ -311,19 +307,19 @@ def extract_twice(location, target_dir, extractor1, extractor2): covers most common cases. """ if on_linux: - location = path_to_bytes(location) - target_dir = path_to_bytes(target_dir) + location = fileutils.fsencode(location) + target_dir = fileutils.fsencode(target_dir) abs_location = os.path.abspath(os.path.expanduser(location)) abs_target_dir = unicode(os.path.abspath(os.path.expanduser(target_dir))) # extract first the intermediate payload to a temp dir - temp_target = unicode(fileutils.get_temp_dir('extract')) + temp_target = unicode(fileutils.get_temp_dir(prefix='scancode-extract-')) warnings = extractor1(abs_location, temp_target) if TRACE: logger.debug('extract_twice: temp_target: %(temp_target)r' % locals()) # extract this intermediate payload to the final target_dir try: - inner_archives = list(fileutils.file_iter(temp_target)) + inner_archives = list(fileutils.resource_iter(temp_target, with_dirs=False)) if not inner_archives: warnings.append(location + ': No files found in archive.') else: @@ -349,7 +345,7 @@ def extract_with_fallback(location, target_dir, extractor1, 
extractor2): abs_location = os.path.abspath(os.path.expanduser(location)) abs_target_dir = unicode(os.path.abspath(os.path.expanduser(target_dir))) # attempt extract first to a temp dir - temp_target1 = unicode(fileutils.get_temp_dir('extract1')) + temp_target1 = unicode(fileutils.get_temp_dir(prefix='scancode-extract1-')) try: warnings = extractor1(abs_location, temp_target1) if TRACE: @@ -357,7 +353,7 @@ def extract_with_fallback(location, target_dir, extractor1, extractor2): fileutils.copytree(temp_target1, abs_target_dir) except: try: - temp_target2 = unicode(fileutils.get_temp_dir('extract2')) + temp_target2 = unicode(fileutils.get_temp_dir(prefix='scancode-extract2-')) warnings = extractor2(abs_location, temp_target2) if TRACE: logger.debug('extract_with_fallback: temp_target2: %(temp_target2)r' % locals()) @@ -379,7 +375,7 @@ def try_to_extract(location, target_dir, extractor): """ abs_location = os.path.abspath(os.path.expanduser(location)) abs_target_dir = unicode(os.path.abspath(os.path.expanduser(target_dir))) - temp_target = unicode(fileutils.get_temp_dir('extract1')) + temp_target = unicode(fileutils.get_temp_dir(prefix='scancode-extract1-')) warnings = [] try: warnings = extractor(abs_location, temp_target) @@ -392,10 +388,10 @@ def try_to_extract(location, target_dir, extractor): fileutils.delete(temp_target) return warnings - # High level aliases to lower level extraction functions ######################################################## + extract_tar = libarchive2.extract extract_patch = patch.extract @@ -412,7 +408,6 @@ def try_to_extract(location, target_dir, extractor): extract_springboot = functools.partial(try_to_extract, extractor=extract_zip) - extract_iso = sevenzip.extract extract_rar = sevenzip.extract extract_rpm = sevenzip.extract @@ -425,7 +420,6 @@ def try_to_extract(location, target_dir, extractor): extract_Z = sevenzip.extract extract_xarpkg = sevenzip.extract - # Archive handlers. 
#################### diff --git a/src/extractcode/extract.py b/src/extractcode/extract.py index 2e3c8103b86..619a71ddb47 100644 --- a/src/extractcode/extract.py +++ b/src/extractcode/extract.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -32,6 +32,7 @@ from os.path import abspath from os.path import expanduser from os.path import join +import traceback from commoncode import fileutils from commoncode import ignore @@ -46,7 +47,6 @@ logging.basicConfig(stream=sys.stdout) logger.setLevel(logging.DEBUG) - """ Extract archives and compressed files recursively to get the file content available for further processing. This the high level extraction entry point. @@ -86,7 +86,6 @@ the original archive. """ - """ An ExtractEvent contains data about an archive extraction progress: - `source` is the location of the archive being extracted @@ -166,7 +165,7 @@ def extract(location, kinds=extractcode.default_kinds, recurse=False): yield xevent -def extract_file(location, target, kinds=extractcode.default_kinds): +def extract_file(location, target, kinds=extractcode.default_kinds, verbose=False): """ Extract a single archive at `location` in the `target` directory if it is of a kind supported in the `kinds` kind tuple. @@ -181,17 +180,21 @@ def extract_file(location, target, kinds=extractcode.default_kinds): if extractor: yield ExtractEvent(location, target, done=False, warnings=[], errors=[]) try: - # extract first to a temp directory. 
- # if there is an error, the extracted files will not be moved - # to target - tmp_tgt = fileutils.get_temp_dir('extract') + # extract first to a temp directory: if there is an error, the + # extracted files will not be moved to target + tmp_tgt = fileutils.get_temp_dir(prefix='scancode-extract-') abs_location = abspath(expanduser(location)) - warnings.extend(extractor(abs_location, tmp_tgt)) + warns = extractor(abs_location, tmp_tgt) or [] + warnings.extend(warns) fileutils.copytree(tmp_tgt, target) fileutils.delete(tmp_tgt) except Exception, e: - if TRACE: - logger.debug('extract_file: ERROR: %(location)r: %(errors)r, %(e)r.\n' % locals()) errors = [str(e).strip(' \'"')] + if verbose: + errors.append(traceback.format_exc()) + if TRACE: + tb = traceback.format_exc() + logger.debug('extract_file: ERROR: %(location)r: %(errors)r\n%(e)r\n%(tb)s' % locals()) + finally: yield ExtractEvent(location, target, done=True, warnings=warnings, errors=errors) diff --git a/src/extractcode/libarchive2.py b/src/extractcode/libarchive2.py index 40e0011e460..aeb480fc7b4 100644 --- a/src/extractcode/libarchive2.py +++ b/src/extractcode/libarchive2.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -22,7 +22,6 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
- from __future__ import absolute_import from __future__ import division from __future__ import print_function @@ -50,19 +49,16 @@ from extractcode import ExtractError from extractcode import ExtractErrorPasswordProtected - # Python 2 and 3 support try: from os import fsencode except ImportError: from backports.os import fsencode - logger = logging.getLogger(__name__) DEBUG = False # logging.basicConfig(level=logging.DEBUG) - """ libarchive2 is a minimal and specialized wrapper around a vendored libarchive archive extraction library. It only deals with archive extraction and does not know how to @@ -142,17 +138,25 @@ def extract(location, target_dir): warnings = [] for entry in list_entries(abs_location): - if not (entry.isdir or entry.isfile): - # skip special files and links - continue - - _target_path = entry.write(abs_target_dir, transform_path=paths.safe_path) - if entry.warnings: - msgs = [w.strip('"\' ') for w in entry.warnings if w and w.strip('"\' ')] - msgs = msgs or ['No message provided'] - formatted = entry.path + ': ' + '\n'.join(msgs) - if formatted not in warnings: - warnings.append(formatted) + + if entry and entry.warnings: + if not entry.is_empty(): + entry_path = entry.path + msgs = ['%(entry_path)r: ' % locals()] + else: + msgs = ['No path available: '] + + msgs.extend([w.strip('"\' ') for w in entry.warnings if w and w.strip('"\' ')]) + msgs = '\n'.join(msgs) or 'No message provided' + + if msgs not in warnings: + warnings.append(msgs) + + if not entry.is_empty(): + if not (entry.isdir or entry.isfile): + # skip special files and links + continue + _target_path = entry.write(abs_target_dir, transform_path=paths.safe_path) return warnings @@ -179,6 +183,7 @@ class Archive(object): for entry in archive: # dome something with entry """ + def __init__(self, location, uncompress=True, extract=True, block_size=10240): """ Build an Archive object from file at `location`. 
@@ -231,23 +236,37 @@ def close(self): free_archive(self.archive_struct) self.archive_struct = None - def iter(self): + def iter(self, verbose=False): """ Yield Entry for this archive. """ assert self.archive_struct, 'Archive must be used as a context manager.' entry_struct = new_entry() try: - while 1: + while True: + entry = None + warnings = [] try: r = next_entry(self.archive_struct, entry_struct) if r == ARCHIVE_EOF: return - e = Entry(self, entry_struct) + entry = Entry(self, entry_struct) except ArchiveWarning, aw: - if aw.msg and aw.msg not in e.warnings: - e.warnings.append(aw.msg) - yield e + if not entry: + entry = Entry(self, entry_struct) + if aw.msg and aw.msg not in entry.warnings: + entry.warnings.append(aw.msg) + +# msg = 'WARNING: ' +# if aw.msg and aw.msg not in entry.warnings: +# msg += repr(aw.msg) + '\n' +# if verbose: +# msg += traceback.format_exc() +# warnings.append(msg % locals()) + finally: + if entry: + entry.warnings.extend(warnings) + yield entry finally: if entry_struct: free_entry(entry_struct) @@ -277,32 +296,55 @@ def __init__(self, archive, entry_struct): self.archive = archive self.entry_struct = entry_struct - self.filetype = entry_type(self.entry_struct) - self.isfile = self.filetype & AE_IFMT == AE_IFREG - self.isdir = self.filetype & AE_IFMT == AE_IFDIR - self.isblk = self.filetype & AE_IFMT == AE_IFBLK - self.ischr = self.filetype & AE_IFMT == AE_IFCHR - self.isfifo = self.filetype & AE_IFMT == AE_IFIFO - self.issock = self.filetype & AE_IFMT == AE_IFSOCK - self.isspecial = self.ischr or self.isblk or self.isfifo or self.issock + self.filetype = None + self.isfile = None + self.isdir = None + self.isblk = None + self.ischr = None + self.isfifo = None + self.issock = None + self.isspecial = None # bytes - self.size = entry_size(self.entry_struct) or 0 + self.size = None # sec since epoch - self.time = entry_time(self.entry_struct) or 0 + self.time = None # all paths are byte strings not unicode - self.path = 
self._path_bytes(entry_path, entry_path_w) - self.issym = self.filetype & AE_IFMT == AE_IFLNK - # FIXME: could there be cases with link path and symlink is False? - if self.issym: - self.symlink_path = self._path_bytes(symlink_path, symlink_path_w) - self.hardlink_path = self._path_bytes(hardlink_path, hardlink_path_w) - # hardlinks do not have a filetype: we test the path instead - self.islnk = bool(self.hardlink_path) + self.path = None + + self.issym = None + self.symlink_path = None + self.islnk = None + self.hardlink_path = None + + # list of strings self.warnings = [] + if self.entry_struct: + self.filetype = entry_type(self.entry_struct) + self.isfile = self.filetype & AE_IFMT == AE_IFREG + self.isdir = self.filetype & AE_IFMT == AE_IFDIR + self.isblk = self.filetype & AE_IFMT == AE_IFBLK + self.ischr = self.filetype & AE_IFMT == AE_IFCHR + self.isfifo = self.filetype & AE_IFMT == AE_IFIFO + self.issock = self.filetype & AE_IFMT == AE_IFSOCK + self.isspecial = self.ischr or self.isblk or self.isfifo or self.issock + self.size = entry_size(self.entry_struct) or 0 + self.time = entry_time(self.entry_struct) or 0 + self.path = self._path_bytes(entry_path, entry_path_w) + self.issym = self.filetype & AE_IFMT == AE_IFLNK + # FIXME: could there be cases with link path and symlink is False? 
+ if self.issym: + self.symlink_path = self._path_bytes(symlink_path, symlink_path_w) + self.hardlink_path = self._path_bytes(hardlink_path, hardlink_path_w) + # hardlinks do not have a filetype: we test the path instead + self.islnk = bool(self.hardlink_path) + + def is_empty(self): + return not self.archive or not self.entry_struct + def _path_bytes(self, func, func_w): """ Return a path as a byte string converted to UTF-8-encoded bytes if this is @@ -381,6 +423,7 @@ def __repr__(self): class ArchiveException(ExtractError): + def __init__(self, rc=None, archive_struct=None, archive_func=None, root_ex=None): self.root_ex = root_ex if root_ex and isinstance(root_ex, ArchiveException): @@ -405,29 +448,35 @@ def __str__(self): class ArchiveWarning(ArchiveException): pass + class ArchiveErrorRetryable(ArchiveException): pass + class ArchiveError(ArchiveException): pass + class ArchiveErrorFatal(ArchiveException): pass + class ArchiveErrorFailedToWriteEntry(ArchiveException): pass + class ArchiveErrorPasswordProtected(ArchiveException, ExtractErrorPasswordProtected): pass + class ArchiveErrorIllegalOperationOnClosedArchive(ArchiveException): pass - ################################################# # ctypes defintion of the interface to libarchive ################################################# + def errcheck(rc, archive_func, args, null=False): """ ctypes error check handler for functions returning int, or null if null is True. 
@@ -455,7 +504,6 @@ def errcheck(rc, archive_func, args, null=False): errcheck_null = partial(errcheck, null=True) - # libarchive return codes ARCHIVE_EOF = 1 ARCHIVE_OK = 0 @@ -464,7 +512,6 @@ def errcheck(rc, archive_func, args, null=False): ARCHIVE_FAILED = -25 ARCHIVE_FATAL = -30 - # libarchive stat/file types AE_IFREG = 0o0100000 # Regular file AE_IFLNK = 0o0120000 # Symbolic link @@ -476,7 +523,6 @@ def errcheck(rc, archive_func, args, null=False): AE_IFMT = 0o0170000 # Format mask - ##################################### # libarchive C functions declarations ##################################### @@ -492,7 +538,6 @@ def errcheck(rc, archive_func, args, null=False): # wide string and then store a narrow string for the same data, the previously-set # wide string will be discarded in favor of the new data. - """ To read an archive, you must first obtain an initialized struct archive object from archive_read_new() @@ -506,7 +551,6 @@ def errcheck(rc, archive_func, args, null=False): archive_reader.restype = c_void_p archive_reader.errcheck = errcheck_null - """ Given a struct archive object, you can enable support for formats and filters. Enables support for all available formats except the "raw" format. @@ -522,7 +566,6 @@ def errcheck(rc, archive_func, args, null=False): use_all_formats.restype = c_int use_all_formats.errcheck = errcheck - """ Given a struct archive object, you can enable support for formats and filters. @@ -539,7 +582,6 @@ def errcheck(rc, archive_func, args, null=False): use_raw_formats.restype = c_int use_raw_formats.errcheck = errcheck - """ Given a struct archive object, you can enable support for formats and filters. @@ -555,7 +597,6 @@ def errcheck(rc, archive_func, args, null=False): use_all_filters.restype = c_int use_all_filters.errcheck = errcheck - """ Once formats and filters have been set, you open an archive filename for actual reading. 
@@ -575,7 +616,6 @@ def errcheck(rc, archive_func, args, null=False): open_file.restype = c_int open_file.errcheck = errcheck - """ Wide char version of archive_read_open_filename. """ @@ -585,7 +625,6 @@ def errcheck(rc, archive_func, args, null=False): open_file_w.restype = c_int open_file_w.errcheck = errcheck - """ When done with reading an archive you must free its resources. @@ -618,7 +657,6 @@ def errcheck(rc, archive_func, args, null=False): new_entry.restype = c_void_p new_entry.errcheck = errcheck_null - """ Given an opened archive struct object, you can iterate through the archive entries. An entry has a header with various data and usually a payload that is @@ -639,7 +677,6 @@ def errcheck(rc, archive_func, args, null=False): next_entry.restype = c_int next_entry.errcheck = errcheck - """ Read data associated with the header just read. Internally, this is a convenience function that calls archive_read_data_block() and fills any gaps @@ -651,7 +688,6 @@ def errcheck(rc, archive_func, args, null=False): read_entry_data.restype = c_ssize_t read_entry_data.errcheck = errcheck - """ Return the next available block of data for this entry. Unlike archive_read_data(), the archive_read_data_block() function avoids copying @@ -667,7 +703,6 @@ def errcheck(rc, archive_func, args, null=False): read_entry_data_block.restype = c_int read_entry_data_block.errcheck = errcheck - """ Releases the struct archive_entry object. The struct entry object must be freed when no longer needed. @@ -677,7 +712,6 @@ def errcheck(rc, archive_func, args, null=False): free_entry.argtypes = [c_void_p] free_entry.restype = None - # # Entry attributes: path, type, size, etc. are collected with these functions: # @@ -704,7 +738,6 @@ def errcheck(rc, archive_func, args, null=False): entry_type.argtypes = [c_void_p] entry_type.restype = c_int - """ This function retrieves the mtime field in an archive_entry. (modification time). 
@@ -718,7 +751,6 @@ def errcheck(rc, archive_func, args, null=False): entry_time.argtypes = [c_void_p] entry_time.restype = c_int - """ Path in the archive. @@ -737,14 +769,12 @@ def errcheck(rc, archive_func, args, null=False): entry_path_w.argtypes = [c_void_p] entry_path_w.restype = c_wchar_p - # int64_t archive_entry_size(struct archive_entry *a); entry_size = libarchive.archive_entry_size entry_size.argtypes = [c_void_p] entry_size.restype = c_longlong entry_size.errcheck = errcheck - """ Destination of the hardlink. """ @@ -753,13 +783,11 @@ def errcheck(rc, archive_func, args, null=False): hardlink_path.argtypes = [c_void_p] hardlink_path.restype = c_char_p - # const wchar_t * archive_entry_hardlink_w(struct archive_entry *a); hardlink_path_w = libarchive.archive_entry_hardlink_w hardlink_path_w.argtypes = [c_void_p] hardlink_path_w.restype = c_wchar_p - """ The number of references (hardlinks) can be obtained by calling archive_entry_nlinks() @@ -769,7 +797,6 @@ def errcheck(rc, archive_func, args, null=False): hardlink_count.argtypes = [c_void_p] hardlink_count.restype = c_int - """ The functions archive_entry_dev() and archive_entry_ino64() are used by ManPageArchiveEntryLinkify3 to find hardlinks. The pair of device and inode is @@ -779,7 +806,6 @@ def errcheck(rc, archive_func, args, null=False): # dev_t archive_entry_dev(struct archive_entry *a); # int archive_entry_dev_is_set(struct archive_entry *a); - """ Destination of the symbolic link. 
""" @@ -789,14 +815,12 @@ def errcheck(rc, archive_func, args, null=False): symlink_path.restype = c_char_p symlink_path.errcheck = errcheck_null - # const wchar_t * archive_entry_symlink_w(struct archive_entry *); symlink_path_w = libarchive.archive_entry_symlink_w symlink_path_w.argtypes = [c_void_p] symlink_path_w.restype = c_wchar_p symlink_path_w.errcheck = errcheck_null - # # Utilities and error handling: not all are defined for now # @@ -812,7 +836,6 @@ def errcheck(rc, archive_func, args, null=False): errno.argtypes = [c_void_p] errno.restype = c_int - """ Returns a textual error message suitable for display. The error message here is usually more specific than that obtained from passing the result of @@ -823,7 +846,6 @@ def errcheck(rc, archive_func, args, null=False): err_msg.argtypes = [c_void_p] err_msg.restype = c_char_p - """ Returns a count of the number of files processed by this archive object. The count is incremented by calls to ManPageArchiveWriteHeader3 or @@ -844,13 +866,11 @@ def errcheck(rc, archive_func, args, null=False): """ # int archive_filter_count(struct archive *, int); - """ Synonym for archive_filter_code(a,(0)). """ # int archive_compression(struct archive *); - """ Returns a textual name identifying the indicated filter. See archive_filter_count() for details of the numbering. diff --git a/src/extractcode/patch.py b/src/extractcode/patch.py index 765295ab731..9882a5bce92 100644 --- a/src/extractcode/patch.py +++ b/src/extractcode/patch.py @@ -48,7 +48,6 @@ more conveniently. """ - LOG = logging.getLogger(__name__) diff --git a/src/extractcode/sevenzip.py b/src/extractcode/sevenzip.py index c626fba2699..c21e26d0c10 100644 --- a/src/extractcode/sevenzip.py +++ b/src/extractcode/sevenzip.py @@ -43,12 +43,10 @@ root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'bin')) - """ Low level support for p/7zip-based archive extraction. 
""" - sevenzip_errors = [ ('unsupported method', 'Unsupported archive or broken archive'), ('wrong password', 'Password protected archive, unable to extract'), @@ -222,7 +220,6 @@ def list_entries(location, arch_type='*'): if rc != 0: # FIXME: this test is useless _error = get_7z_errors(stdout) or UNKNOWN_ERROR - # print(_error) # the listing was produced as UTF on windows to avoid damaging binary # paths in console outputs diff --git a/src/extractcode/tar.py b/src/extractcode/tar.py index f7c5b0a628d..fa3f3e24c6a 100644 --- a/src/extractcode/tar.py +++ b/src/extractcode/tar.py @@ -50,7 +50,6 @@ # # Credits: Gustavo Niemeyer, Niels Gustabel, Richard Townsend. - from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals @@ -71,7 +70,6 @@ logger = logging.getLogger('extractcode') # logging.basicConfig(level=logging.DEBUG) - """ Low level support for tar-based archive extraction using Python built-in tar support. diff --git a/src/extractcode/tarfile_patch/tarfile.py b/src/extractcode/tarfile_patch/tarfile.py index 1b0b2f7d2d3..f826401dd2c 100644 --- a/src/extractcode/tarfile_patch/tarfile.py +++ b/src/extractcode/tarfile_patch/tarfile.py @@ -1,4 +1,5 @@ # -*- coding: iso-8859-1 -*- +# flake8: noqa #------------------------------------------------------------------- # tarfile.py #------------------------------------------------------------------- @@ -2650,7 +2651,7 @@ def writestr(self, zinfo, bytes): from cStringIO import StringIO except ImportError: from StringIO import StringIO - import calendar # @UnresolvedImport + import calendar # NOQA tinfo = TarInfo(zinfo.filename) tinfo.size = len(bytes) tinfo.mtime = calendar.timegm(zinfo.date_time) diff --git a/src/extractcode/uncompress.py b/src/extractcode/uncompress.py index d6469e6906d..df6dfc03d39 100644 --- a/src/extractcode/uncompress.py +++ b/src/extractcode/uncompress.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. 
+# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -79,7 +79,7 @@ def uncompress_file(location, decompressor): warnings = [] base_name = fileutils.file_base_name(location) - target_location = os.path.join(fileutils.get_temp_dir(base_dir='extract'), base_name) + target_location = os.path.join(fileutils.get_temp_dir(prefix='scancode-extract-'), base_name) with decompressor(location, 'rb') as compressed: with open(target_location, 'wb') as uncompressed: buffer_size = 32 * 1024 * 1024 diff --git a/src/formattedcode/format_json.py b/src/formattedcode/format_json.py deleted file mode 100644 index 7eb8d272235..00000000000 --- a/src/formattedcode/format_json.py +++ /dev/null @@ -1,71 +0,0 @@ -# -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. -# http://nexb.com and https://github.com/nexB/scancode-toolkit/ -# The ScanCode software is licensed under the Apache License version 2.0. -# Data generated with ScanCode require an acknowledgment. -# ScanCode is a trademark of nexB Inc. -# -# You may not use this software except in compliance with the License. -# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -# -# When you publish or redistribute any data created with ScanCode or any ScanCode -# derivative work, you must accompany this data with the following acknowledgment: -# -# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES -# OR CONDITIONS OF ANY KIND, either express or implied. 
No content created from -# ScanCode should be considered or used as legal advice. Consult an Attorney -# for any legal advice. -# ScanCode is a free software code scanning tool from nexB Inc. and others. -# Visit https://github.com/nexB/scancode-toolkit/ for support and download. - -from __future__ import absolute_import -from __future__ import unicode_literals - -from collections import OrderedDict - -import simplejson - -from plugincode.output import scan_output_writer - - -""" -Output plugins to write scan results as JSON. -""" - -@scan_output_writer -def write_json_compact(files_count, version, notice, scanned_files, options, output_file, *args, **kwargs): - """ - Write scan output formatted as compact JSON. - """ - _write_json(files_count, version, notice, scanned_files, options, output_file, pretty=False) - - -@scan_output_writer -def write_json_pretty_printed(files_count, version, notice, scanned_files, options, output_file, *args, **kwargs): - """ - Write scan output formatted as pretty-printed JSON. - """ - _write_json(files_count, version, notice, scanned_files, options, output_file, pretty=True) - - -def _write_json(files_count, version, notice, scanned_files, options, output_file, pretty=False): - scan = OrderedDict([ - ('scancode_notice', notice), - ('scancode_version', version), - ('scancode_options', options), - ('files_count', files_count), - ('files', scanned_files), - ]) - kwargs = dict(iterable_as_array=True, encoding='utf-8') - if pretty: - kwargs['indent'] = 2 * ' ' - else: - kwargs['separators'] = (',', ':',) - - # FIXME: Why do we wrap the output in unicode? 
Test output when we do not wrap the output in unicode - output_file.write(unicode(simplejson.dumps(scan, **kwargs))) - output_file.write('\n') diff --git a/src/formattedcode/format_csv.py b/src/formattedcode/output_csv.py similarity index 85% rename from src/formattedcode/format_csv.py rename to src/formattedcode/output_csv.py index a5782919a24..f9d6f4a5aa3 100644 --- a/src/formattedcode/format_csv.py +++ b/src/formattedcode/output_csv.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -31,20 +31,36 @@ import unicodecsv -from plugincode.output import scan_output_writer +from plugincode.output import output_impl +from plugincode.output import OutputPlugin +from scancode import CommandLineOption +from scancode import FileOptionType +from scancode import OUTPUT_GROUP -""" -Output plugin to write scan results as CSV. -""" +@output_impl +class CsvOutput(OutputPlugin): + options = [ + CommandLineOption(('--output-csv',), + type=FileOptionType(mode='wb', lazy=False), + metavar='FILE', + help='Write scan output as CSV to FILE.', + help_group=OUTPUT_GROUP, + sort_order=30), + ] + + def is_enabled(self, output_csv, **kwargs): + return output_csv + + def process_codebase(self, codebase, output_csv, **kwargs): + results = self.get_results(codebase, **kwargs) + write_csv(results, output_csv) -@scan_output_writer -def write_csv(scanned_files, output_file, *args, **kwargs): - """ - Write scan output formatted as CSV. 
- """ - scan_results = list(scanned_files) + +def write_csv(results, output_file): + # FIXMe: this is reading all in memory + results = list(results) headers = OrderedDict([ ('info', []), @@ -56,7 +72,7 @@ def write_csv(scanned_files, output_file, *args, **kwargs): ]) # note: FIXME: headers are collected as a side effect and this is not great - rows = list(flatten_scan(scan_results, headers)) + rows = list(flatten_scan(results, headers)) ordered_headers = [] for key_group in headers.values(): @@ -112,25 +128,28 @@ def collect_keys(mapping, key_group): # do not include matched text for now. if k == 'matched_text': continue + if k == 'matched_rule': + is_choice = val.get('license_choice', False) for mrk, mrv in val.items(): - mrk = 'matched_rule__' + mrk if mrk == 'license_choice': mrv = 'y' if mrv else '' if mrk == 'licenses': - mrv = ' '.join(mrv) + sep = ' OR ' if is_choice else ' AND ' + mrv = sep.join(mrv) if mrk in ('match_coverage', 'rule_relevance'): # normalize the string representation of this number mrv = '{:.2f}'.format(mrv) + mrk = 'matched_rule__' + mrk lic[mrk] = mrv continue if k == 'score': - # normalize the string representation of this number + # normalize score with two decimal values val = '{:.2f}'.format(val) - # lines are present in multiple scans: keep their column name as not scan-specific - # Prefix othe columns with license__ + # lines are present in multiple scans: keep their column name as + # not scan-specific. Prefix othe columns with license__ if k not in ('start_line', 'end_line',): k = 'license__' + k lic[k] = val diff --git a/src/formattedcode/format_templated.py b/src/formattedcode/output_html.py similarity index 54% rename from src/formattedcode/format_templated.py rename to src/formattedcode/output_html.py index 9774dcdacef..5cee9fab016 100644 --- a/src/formattedcode/format_templated.py +++ b/src/formattedcode/output_html.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. 
+# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -23,20 +23,38 @@ # Visit https://github.com/nexB/scancode-toolkit/ for support and download. from __future__ import absolute_import -from __future__ import print_function from __future__ import division +from __future__ import print_function from __future__ import unicode_literals from collections import OrderedDict import codecs from operator import itemgetter -import os - -import simplejson as json - -from commoncode import fileutils -from plugincode.output import scan_output_writer - +from os.path import abspath +from os.path import basename +from os.path import dirname +from os.path import exists +from os.path import expanduser +from os.path import isfile +from os.path import join + +import click +import simplejson + +from commoncode.fileutils import PATH_TYPE +from commoncode.fileutils import as_posixpath +from commoncode.fileutils import copytree +from commoncode.fileutils import delete +from commoncode.fileutils import file_name +from commoncode.fileutils import file_base_name +from commoncode.fileutils import fsencode +from commoncode.fileutils import parent_directory +from commoncode.system import on_linux +from plugincode.output import output_impl +from plugincode.output import OutputPlugin +from scancode import CommandLineOption +from scancode import FileOptionType +from scancode import OUTPUT_GROUP """ Output plugins to write scan results using templates such as HTML. 
@@ -46,152 +64,111 @@ """ -@scan_output_writer -def write_html(scanned_files, output_file, _echo, version, *args, **kwargs): +@output_impl +class HtmlOutput(OutputPlugin): + + options = [ + CommandLineOption(('--output-html',), + type=FileOptionType(mode='wb', lazy=False), + metavar='FILE', + help='Write scan output as HTML to FILE.', + help_group=OUTPUT_GROUP, + sort_order=50), + ] + + def is_enabled(self, output_html, **kwargs): + return output_html + + def process_codebase(self, codebase, output_html, scancode_version, **kwargs): + results = self.get_results(codebase, **kwargs) + write_templated(output_html, results, scancode_version, + template_or_format='html') + + +@output_impl +class CustomTemplateOutput(OutputPlugin): + + options = [ + CommandLineOption(('--output-custom',), + type=FileOptionType(mode='wb', lazy=False), + requires=['custom_template'], + metavar='FILE', + help='Write scan output to FILE formatted with ' + 'the custom Jinja template file.', + help_group=OUTPUT_GROUP, + sort_order=60), + + CommandLineOption(('--custom-template',), + type=click.Path( + exists=True, file_okay=True, dir_okay=False, + readable=True, path_type=PATH_TYPE), + requires=['output_custom'], + metavar='FILE', + help='Use this Jinja template FILE as a custom template.', + help_group=OUTPUT_GROUP, + sort_order=65), + ] + + def is_enabled(self, output_custom, custom_template, **kwargs): + return output_custom and custom_template + + def process_codebase(self, codebase, output_custom, custom_template, + scancode_version, **kwargs): + + results = self.get_results(codebase, **kwargs) + if on_linux: + custom_template = fsencode(custom_template) + write_templated(output_custom, results, scancode_version, + template_or_format=custom_template) + + +@output_impl +class HtmlAppOutput(OutputPlugin): """ - Write scan output formatted as plain HTML page. + Write scan output as a mini HTML application. 
""" - _write_templated(scanned_files, output_file, _echo, version, template_or_format='html', raise_ex=False) + options = [ + CommandLineOption(('--output-html-app',), + type=FileOptionType(mode='wb', lazy=False), + metavar='FILE', + help='Write scan output as a mini HTML application to FILE.', + help_group=OUTPUT_GROUP, + sort_order=70), + ] + def is_enabled(self, output_html_app, **kwargs): + return output_html_app -def write_custom(scanned_files, output_file, _echo, version, template_path): - """ - Write scan output formatted with a custom template. - NOTE: this is NOT a plugin, but a built-in - """ - _write_templated(scanned_files, output_file, _echo, version, template_or_format=template_path, raise_ex=True) + def process_codebase(self, codebase, + input, # NOQA + output_html_app, + scancode_version, **kwargs): + + results = self.get_results(codebase, **kwargs) + output_html_app.write(as_html_app(output_html_app, input, scancode_version)) + create_html_app_assets(results, output_html_app) -def _write_templated(scanned_files, output_file, _echo, version, template_or_format, raise_ex=False): +def write_templated(output_file, results, version, template_or_format): """ Write scan output using a template or a format. Optionally raise an exception on errors. """ - for template_chunk in as_template(scanned_files, version, template=template_or_format): + for template_chunk in as_template(results, version, template_or_format=template_or_format): try: output_file.write(template_chunk) except Exception: import traceback - extra_context = 'ERROR: Failed to write output for: ' + repr(template_chunk) - extra_context += '\n' + traceback.format_exc() - _echo(extra_context, fg='red') - if raise_ex: - # NOTE: this is a tad brutal to raise here, but helps - # the template authors - raise - - -@scan_output_writer -def write_html_app(scanned_files, input, output_file, _echo, version, *args, **kwargs): - """ - Write scan output formatted as a mini HTML application. 
- """ - output_file.write(as_html_app(input, version, output_file)) - try: - create_html_app_assets(scanned_files, output_file) - except HtmlAppAssetCopyWarning: - _echo('\nHTML app creation skipped when printing to stdout.', fg='yellow') - except HtmlAppAssetCopyError: - _echo('\nFailed to create HTML app.', fg='red') - - -def create_html_app_assets(results, output_file): - """ - Given an html-app output_file, create the corresponding `_files` - directory and copy the assets to this directory. The target - directory is deleted if it exists. - - Raise HtmlAppAssetCopyWarning if the output_file is or - HtmlAppAssetCopyError if the copy was not possible. - """ - try: - if is_stdout(output_file): - raise HtmlAppAssetCopyWarning() - assets_dir = os.path.join(get_template_dir('html-app'), 'assets') - - # delete old assets - tgt_dirs = get_html_app_files_dirs(output_file) - target_dir = os.path.join(*tgt_dirs) - if os.path.exists(target_dir): - fileutils.delete(target_dir) - - # copy assets - fileutils.copytree(assets_dir, target_dir) - - # write json data - root_path, assets_dir = get_html_app_files_dirs(output_file) - with codecs.open(os.path.join(root_path, assets_dir, 'data.json'), 'wb', encoding='utf-8') as f: - f.write('data=') - json.dump(results, f, iterable_as_array=True) - - # create help file - with codecs.open(os.path.join(root_path, assets_dir, 'help.html'), 'wb', encoding='utf-8') as f: - f.write(get_html_app_help(os.path.basename(output_file.name))) - except HtmlAppAssetCopyWarning, w: - raise w - except Exception, e: - raise HtmlAppAssetCopyError(e) - - -def as_html_app(scanned_path, version, output_file): - """ - Return an HTML string built from a list of results and the html-app template. 
- """ - template = get_template(get_template_dir('html-app')) - _, assets_dir = get_html_app_files_dirs(output_file) - - return template.render(assets_dir=assets_dir, scanned_path=scanned_path, version=version) - - -def get_html_app_help(output_filename): - """ - Return an HTML string containing the html-app help page with a - reference back to the main app page. - """ - template = get_template(get_template_dir('html-app'), - template_name='help_template.html') - - return template.render(main_app=output_filename) - - -class HtmlAppAssetCopyWarning(Exception): - pass - - -class HtmlAppAssetCopyError(Exception): - pass - - -def is_stdout(output_file): - return output_file.name == '' - - -def get_html_app_files_dirs(output_file): - """ - Return a tuple of (parent_dir, dir_name) directory named after the - `output_file` file object file_base_name (stripped from extension) and a - `_files` suffix Return empty strings if output is to stdout. - """ - if is_stdout(output_file): - return '', '' - - file_name = output_file.name - parent_dir = os.path.dirname(file_name) - dir_name = fileutils.file_base_name(file_name) + '_files' - return parent_dir, dir_name + msg = 'ERROR: Failed to write output for: ' + repr(template_chunk) + msg += '\n' + traceback.format_exc() + raise Exception(msg) -# -# Common utilities for templated scans outputs: html, html-app and -# custom templates. -# - -# FIXME: no HTML default! def get_template(templates_dir, template_name='template.html'): """ - Given a template directory, load and return the template file in the template_name - file found in that directory. + Given a `templates_dir` template directory, load and return the template + file for the `template_name` file found in that directory. 
""" from jinja2 import Environment, FileSystemLoader env = Environment(loader=FileSystemLoader(templates_dir)) @@ -199,20 +176,19 @@ def get_template(templates_dir, template_name='template.html'): return template -def get_template_dir(format): +def get_template_dir(format_code): """ - Given a format string return the corresponding standard template - directory. + Return the template directory of a built-in template for a `format_code` + string. """ - return os.path.join(os.path.dirname(__file__), 'templates', format) + return join(dirname(__file__), 'templates', format_code) -# FIXME: no HTML default! -def as_template(scanned_files, version, template): +def as_template(results, version, template_or_format): """ - Return an string built from a list of `scanned_files` results and - the provided `template` identifier. The template defaults to the standard HTML - template format or can point to the path of a custom template file. + Return an string built from a list of `results` and the provided `template` + identifier. The template_or_format is either a built-in template format code + (e.g. "html") or the path of a custom template file. 
""" # FIXME: This code is highly coupled with actual scans and may not # support adding new scans at all @@ -220,14 +196,14 @@ def as_template(scanned_files, version, template): from licensedcode.cache import get_licenses_db # FIXME: factor out the html vs custom from this function: we should get a template path - if template == 'html': + if template_or_format == 'html': template = get_template(get_template_dir('html')) else: # load a custom template - tpath = fileutils.as_posixpath(os.path.abspath(os.path.expanduser(template))) - assert os.path.isfile(tpath) - tdir = fileutils.parent_directory(tpath) - tfile = fileutils.file_name(tpath) + tpath = as_posixpath(abspath(expanduser(template_or_format))) + assert isfile(tpath) + tdir = parent_directory(tpath) + tfile = file_name(tpath) template = get_template(tdir, tfile) converted = OrderedDict() @@ -242,7 +218,7 @@ def as_template(scanned_files, version, template): EMAILS = 'emails' # Create a flattened data dict keyed by path - for scanned_file in scanned_files: + for scanned_file in results: path = scanned_file['path'] results = [] if COPYRIGHTS in scanned_file: @@ -292,3 +268,91 @@ def as_template(scanned_files, version, template): } return template.generate(files=files, licenses=licenses, version=version) + + +def create_html_app_assets(results, output_file): + """ + Given an html-app output_file, create the corresponding `_files` + directory and copy the assets to this directory. The target + directory is deleted if it exists. + + Raise HtmlAppAssetCopyWarning if the output_file is or + HtmlAppAssetCopyError if the copy was not possible. 
+ """ + try: + if is_stdout(output_file): + raise HtmlAppAssetCopyWarning() + assets_dir = join(get_template_dir('html-app'), 'assets') + + # delete old assets + tgt_dirs = get_html_app_files_dirs(output_file) + target_dir = join(*tgt_dirs) + if exists(target_dir): + delete(target_dir) + + # copy assets + copytree(assets_dir, target_dir) + + # write json data + # FIXME: this should a regular JSON scan format + root_path, assets_dir = get_html_app_files_dirs(output_file) + with codecs.open(join(root_path, assets_dir, 'data.json'), 'wb', encoding='utf-8') as f: + f.write('data=') + simplejson.dump(results, f, iterable_as_array=True) + + # create help file + with codecs.open(join(root_path, assets_dir, 'help.html'), 'wb', encoding='utf-8') as f: + f.write(get_html_app_help(basename(output_file.name))) + except HtmlAppAssetCopyWarning, w: + raise w + except Exception, e: + raise HtmlAppAssetCopyError(e) + + +def as_html_app(output_file, scanned_path, version,): + """ + Return an HTML string built from a list of results and the html-app template. + """ + template = get_template(get_template_dir('html-app')) + _, assets_dir = get_html_app_files_dirs(output_file) + + return template.render(assets_dir=assets_dir, scanned_path=scanned_path, version=version) + + +def get_html_app_help(output_filename): + """ + Return an HTML string containing the html-app help page with a + reference back to the main app page. 
+ """ + template = get_template(get_template_dir('html-app'), + template_name='help_template.html') + + return template.render(main_app=output_filename) + + +class HtmlAppAssetCopyWarning(Exception): + pass + + +class HtmlAppAssetCopyError(Exception): + pass + + +def is_stdout(output_file): + return output_file.name == '' + + +def get_html_app_files_dirs(output_file): + """ + Return a tuple of (parent_dir, dir_name) directory named after the + `output_file` file-like object file_base_name (stripped from extension) and + a `_files` suffix Return empty strings if output is to stdout. + """ + if is_stdout(output_file): + return '', '' + + # FIXME: what if there is no name attribute?? + file_name = output_file.name + parent_dir = dirname(file_name) + dir_name = file_base_name(file_name) + '_files' + return parent_dir, dir_name diff --git a/src/formattedcode/output_json.py b/src/formattedcode/output_json.py new file mode 100644 index 00000000000..23751d107de --- /dev/null +++ b/src/formattedcode/output_json.py @@ -0,0 +1,118 @@ +# +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. +# http://nexb.com and https://github.com/nexB/scancode-toolkit/ +# The ScanCode software is licensed under the Apache License version 2.0. +# Data generated with ScanCode require an acknowledgment. +# ScanCode is a trademark of nexB Inc. +# +# You may not use this software except in compliance with the License. +# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# +# When you publish or redistribute any data created with ScanCode or any ScanCode +# derivative work, you must accompany this data with the following acknowledgment: +# +# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, either express or implied. No content created from +# ScanCode should be considered or used as legal advice. Consult an Attorney +# for any legal advice. +# ScanCode is a free software code scanning tool from nexB Inc. and others. +# Visit https://github.com/nexB/scancode-toolkit/ for support and download. + +from __future__ import absolute_import +from __future__ import unicode_literals + +from collections import OrderedDict + +import simplejson + +from plugincode.output import output_impl +from plugincode.output import OutputPlugin +from scancode import CommandLineOption +from scancode import FileOptionType +from scancode import OUTPUT_GROUP + +""" +Output plugins to write scan results as JSON. +""" + + +@output_impl +class JsonCompactOutput(OutputPlugin): + + options = [ + CommandLineOption(('--json', 'output_json',), + type=FileOptionType(mode='wb', lazy=False), + metavar='FILE', + help='Write scan output as compact JSON to FILE.', + help_group=OUTPUT_GROUP, + sort_order=10), + ] + + def is_enabled(self, output_json, **kwargs): + return output_json + + def process_codebase(self, codebase, output_json, files_count, + scancode_version, scancode_notice, pretty_options, + **kwargs): + + results = self.get_results(codebase, **kwargs) + write_json(results=results, output_file=output_json, + files_count=files_count, + scancode_version=scancode_version, + scancode_notice=scancode_notice, + pretty_options=pretty_options, + pretty=False) + + +@output_impl +class JsonPrettyOutput(OutputPlugin): + + options = [ + CommandLineOption(('--json-pp', 'output_json_pp',), + type=FileOptionType(mode='wb', lazy=False), + metavar='FILE', + help='Write scan output as pretty-printed JSON to FILE.', + 
help_group=OUTPUT_GROUP, + sort_order=10), + ] + + def is_enabled(self, output_json_pp, **kwargs): + return output_json_pp + + def process_codebase(self, codebase, output_json_pp, files_count, + scancode_version, scancode_notice, pretty_options, + **kwargs): + + results = self.get_results(codebase, **kwargs) + write_json(results=results, output_file=output_json_pp, + files_count=files_count, + scancode_version=scancode_version, + scancode_notice=scancode_notice, + pretty_options=pretty_options, + pretty=True) + + +def write_json(results, output_file, files_count, + scancode_version, scancode_notice, + pretty_options, pretty=False): + + scan = OrderedDict([ + ('scancode_notice', scancode_notice), + ('scancode_version', scancode_version), + ('scancode_options', pretty_options), + ('files_count', files_count), + ('files', results), + ]) + + kwargs = dict(iterable_as_array=True, encoding='utf-8') + if pretty: + kwargs.update(dict(indent=2 * b' ')) + else: + kwargs.update(dict(separators=(b',', b':',))) + + output_file.write(simplejson.dumps(scan, **kwargs)) + output_file.write(b'\n') diff --git a/src/formattedcode/output_jsonlines.py b/src/formattedcode/output_jsonlines.py new file mode 100644 index 00000000000..38f15235fb8 --- /dev/null +++ b/src/formattedcode/output_jsonlines.py @@ -0,0 +1,75 @@ +# +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. +# http://nexb.com and https://github.com/nexB/scancode-toolkit/ +# The ScanCode software is licensed under the Apache License version 2.0. +# Data generated with ScanCode require an acknowledgment. +# ScanCode is a trademark of nexB Inc. +# +# You may not use this software except in compliance with the License. +# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations under the License. +# +# When you publish or redistribute any data created with ScanCode or any ScanCode +# derivative work, you must accompany this data with the following acknowledgment: +# +# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, either express or implied. No content created from +# ScanCode should be considered or used as legal advice. Consult an Attorney +# for any legal advice. +# ScanCode is a free software code scanning tool from nexB Inc. and others. +# Visit https://github.com/nexB/scancode-toolkit/ for support and download. + +from __future__ import absolute_import +from __future__ import unicode_literals + +from collections import OrderedDict + +import simplejson + +from plugincode.output import output_impl +from plugincode.output import OutputPlugin +from scancode import CommandLineOption +from scancode import FileOptionType +from scancode import OUTPUT_GROUP + + +@output_impl +class JsonLinesOutput(OutputPlugin): + + options = [ + CommandLineOption(('--json-lines', 'output_json_lines',), + type=FileOptionType(mode='wb', lazy=False), + metavar='FILE', + help='Write scan output as JSON Lines to FILE.', + help_group=OUTPUT_GROUP, + sort_order=15), + ] + + def is_enabled(self, output_json_lines, **kwargs): + return output_json_lines + + def process_codebase(self, codebase, output_json_lines, files_count, + scancode_version, scancode_notice, pretty_options, + **kwargs): + + results = self.get_results(codebase, **kwargs) + + header = dict(header=OrderedDict([ + ('scancode_notice', scancode_notice), + ('scancode_version', scancode_version), + ('scancode_options', pretty_options), + ('files_count', files_count) + ])) + + kwargs = dict( + iterable_as_array=True, encoding='utf-8', separators=(',', ':',)) + output_json_lines.write(simplejson.dumps(header, **kwargs)) + output_json_lines.write('\n') + + for 
scanned_file in results: + scanned_file_line = {'files': [scanned_file]} + output_json_lines.write(simplejson.dumps(scanned_file_line, **kwargs)) + output_json_lines.write('\n') diff --git a/src/formattedcode/format_spdx.py b/src/formattedcode/output_spdx.py similarity index 67% rename from src/formattedcode/format_spdx.py rename to src/formattedcode/output_spdx.py index 622a051f2f9..e910116e17d 100644 --- a/src/formattedcode/format_spdx.py +++ b/src/formattedcode/output_spdx.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -27,8 +27,13 @@ from __future__ import division from __future__ import unicode_literals -import os from os.path import abspath +from os.path import basename +from os.path import dirname +from os.path import isdir +from os.path import isfile +from os.path import join +import sys from spdx.checksum import Algorithm from spdx.creationinfo import Tool @@ -41,48 +46,118 @@ from spdx.utils import SPDXNone from spdx.version import Version -from plugincode.output import scan_output_writer +from plugincode.output import output_impl +from plugincode.output import OutputPlugin +from scancode import CommandLineOption +from scancode import FileOptionType +from scancode import OUTPUT_GROUP +# Python 2 and 3 support +try: + # Python 2 + unicode + str_orig = str + bytes = str # NOQA + str = unicode # NOQA +except NameError: + # Python 3 + unicode = str # NOQA + +# Tracing flags +TRACE = False +TRACE_DEEP = False + + +def logger_debug(*args): + pass + + +if TRACE or TRACE_DEEP: + import logging + + logger = logging.getLogger(__name__) + logging.basicConfig(stream=sys.stdout) + logger.setLevel(logging.DEBUG) + + def logger_debug(*args): + return logger.debug(' 
'.join(isinstance(a, unicode) + and a or repr(a) for a in args)) """ Output plugins to write scan results in SPDX format. """ -@scan_output_writer -def write_spdx_tag_value(files_count, version, notice, scanned_files, input, output_file, *args, **kwargs): - """ - Write scan output formatted as SPDX Tag/Value. - """ - write_spdx(version, notice, scanned_files, input, output_file, as_tagvalue=True) +@output_impl +class SpdxTvOutput(OutputPlugin): -@scan_output_writer -def write_spdx_rdf(files_count, version, notice, scanned_files, input, output_file, *args, **kwargs): - """ - Write scan output formatted as SPDX RDF. - """ - write_spdx(version, notice, scanned_files, input, output_file, as_tagvalue=False) + options = [ + CommandLineOption(('--output-spdx-tv',), + type=FileOptionType(mode='wb', lazy=False), + metavar='FILE', + requires=['info'], + help='Write scan output as SPDX Tag/Value to FILE.', + help_group=OUTPUT_GROUP) + ] + def is_enabled(self, output_spdx_tv, info, **kwargs): + return output_spdx_tv and info -def write_spdx(version, notice, scanned_files, input, output_file, as_tagvalue=True): + def process_codebase(self, codebase, + input, # NOQA + output_spdx_tv, + scancode_version, scancode_notice, **kwargs): + + results = self.get_results(codebase, **kwargs) + write_spdx(output_spdx_tv, results, scancode_version, scancode_notice, + input, as_tagvalue=True) + + +@output_impl +class SpdxRdfOutput(OutputPlugin): + + options = [ + CommandLineOption(('--output-spdx-rdf',), + type=FileOptionType(mode='wb', lazy=False), + metavar='FILE', + requires=['info'], + help='Write scan output as SPDX RDF to FILE.', + help_group=OUTPUT_GROUP) + ] + + def is_enabled(self, output_spdx_rdf, info, **kwargs): + return output_spdx_rdf and info + + def process_codebase(self, codebase, + input, # NOQA + output_spdx_rdf, + scancode_version, scancode_notice, **kwargs): + + results = self.get_results(codebase, **kwargs) + write_spdx(output_spdx_rdf, results, scancode_version, 
scancode_notice, + input, as_tagvalue=False) + + +def write_spdx(output_file, results, scancode_version, scancode_notice, + input_file, as_tagvalue=True): """ - Write scan output formatted as SPDX Tag/value or RDF. + Write scan output as SPDX Tag/value or RDF. """ - absinput = abspath(input) + absinput = abspath(input_file) - if os.path.isdir(absinput): + if isdir(absinput): input_path = absinput else: - input_path = os.path.dirname(absinput) + input_path = dirname(absinput) doc = Document(Version(2, 1), License.from_identifier('CC0-1.0')) - doc.comment = notice + doc.comment = scancode_notice - doc.creation_info.add_creator(Tool('ScanCode ' + version)) + doc.creation_info.add_creator(Tool('ScanCode ' + scancode_version)) doc.creation_info.set_created_now() package = doc.package = Package( - name=os.path.basename(input_path), + name=basename(input_path), download_location=NoAssert() ) @@ -92,14 +167,15 @@ def write_spdx(version, notice, scanned_files, input, output_file, as_tagvalue=T all_files_have_no_license = True all_files_have_no_copyright = True - for file_data in scanned_files: + # FIXME: this should walk the codebase instead!!! + for file_data in results: # Construct the absolute path in case we need to access the file # to calculate its SHA1. - file_entry = File(os.path.join(input_path, file_data.get('path'))) + file_entry = File(join(input_path, file_data.get('path'))) file_sha1 = file_data.get('sha1') if not file_sha1: - if os.path.isfile(file_entry.name): + if isfile(file_entry.name): # Calculate the SHA1 in case it is missing, e.g. for empty files. 
file_sha1 = file_entry.calc_chksum() else: @@ -125,7 +201,8 @@ def write_spdx(version, notice, scanned_files, input, output_file, as_tagvalue=T licenseref_id = 'LicenseRef-' + license_key spdx_license = ExtractedLicense(licenseref_id) spdx_license.name = file_license.get('short_name') - comment = 'See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/%s.yml\n' % license_key + comment = ('See details at https://github.com/nexB/scancode-toolkit' + '/blob/develop/src/licensedcode/data/licenses/%s.yml\n' % license_key) spdx_license.comment = comment text = file_license.get('matched_text') # always set some text, even if we did not extract the matched text @@ -168,7 +245,6 @@ def write_spdx(version, notice, scanned_files, input, output_file, as_tagvalue=T else: file_entry.copyright = SPDXNone() - package.add_file(file_entry) if len(package.files) == 0: @@ -203,9 +279,9 @@ def write_spdx(version, notice, scanned_files, input, output_file, as_tagvalue=T package.conc_lics = NoAssert() if as_tagvalue: - from spdx.writers.tagvalue import write_document + from spdx.writers.tagvalue import write_document # NOQA else: - from spdx.writers.rdf import write_document + from spdx.writers.rdf import write_document # NOQA # The spdx-tools write_document returns either: # - unicode for tag values diff --git a/src/licensedcode/__init__.py b/src/licensedcode/__init__.py index 972c5bde04b..ecfc6d7ffd3 100644 --- a/src/licensedcode/__init__.py +++ b/src/licensedcode/__init__.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -22,31 +22,8 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. 
# Visit https://github.com/nexB/scancode-toolkit/ for support and download. - from __future__ import absolute_import -from os.path import dirname -from os.path import abspath -from os.path import getsize -from os.path import getmtime -from os.path import join -from os.path import exists - -from commoncode import fileutils - - -lic_src_dir = abspath(dirname(__file__)) -src_dir = dirname(lic_src_dir) -data_dir = join(lic_src_dir, 'data') -licenses_data_dir = join(data_dir, 'licenses') -rules_data_dir = join(data_dir, 'rules') -root_dir = dirname(src_dir) -cache_dir = join(root_dir, '.cache') -license_index_cache_dir = join(cache_dir, 'license_index') - -if not exists(license_index_cache_dir): - fileutils.create_dir(license_index_cache_dir) - # minimum number of tokens a match should have to be considered as worthy keeping MIN_MATCH_LENGTH = 4 MIN_MATCH_HIGH_LENGTH = 3 @@ -55,4 +32,3 @@ # eventually this should be skipped early right during the matching too # maximum distance between two matches to merge MAX_DIST = 120 - diff --git a/src/licensedcode/cache.py b/src/licensedcode/cache.py index 6f155e97ec4..873ba344bbd 100644 --- a/src/licensedcode/cache.py +++ b/src/licensedcode/cache.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. 
@@ -26,21 +26,20 @@ from functools import partial from hashlib import md5 -import os from os.path import exists from os.path import getmtime from os.path import getsize from os.path import join -import yg.lockfile # @UnresolvedImport +import yg.lockfile # NOQA -from commoncode.fileutils import file_iter +from commoncode.fileutils import resource_iter +from commoncode.fileutils import create_dir from commoncode import ignore -from licensedcode import root_dir -from licensedcode import src_dir -from licensedcode import license_index_cache_dir - +from scancode_config import scancode_cache_dir +from scancode_config import scancode_src_dir +from scancode_config import SCANCODE_DEV_MODE """ An on-disk persistent cache of LicenseIndex. The index is pickled and invalidated if @@ -48,160 +47,160 @@ cached index is safe to use across multiple processes using lock files. """ -index_lock_file = join(license_index_cache_dir, 'lockfile') -tree_checksum_file = join(license_index_cache_dir, 'tree_checksums') -index_cache_file = join(license_index_cache_dir, 'index_cache') - +LICENSE_INDEX_LOCK_TIMEOUT = 60 * 3 -_ignored_from_hash = partial( - ignore.is_ignored, - ignores={'*.pyc': 'pyc files', '*~': 'temp gedit files', '*.swp': 'vi swap files'}, - unignores={} -) +# global in-memory cache of the main license index instance +_LICENSES_INDEX = None -def tree_checksum(tree_base_dir=src_dir, _ignored=_ignored_from_hash): +def get_index(cache_dir=scancode_cache_dir, check_consistency=SCANCODE_DEV_MODE, + return_value=True): """ - Return a checksum computed from a file tree using the file paths, - size and last modified time stamps. - The purpose is to detect is there has been any modification to - source code or data files and use this as a proxy to verify the - cache consistency. - - NOTE: this is not 100% fool proof but good enough in practice. + Return and eventually cache an index built from an iterable of rules. + Build the index from the built-in rules dataset. 
""" - hashable = (pth + str(getmtime(pth)) + str(getsize(pth)) - for pth in file_iter(tree_base_dir, ignored=_ignored)) - return md5(''.join(sorted(hashable))).hexdigest() - - -LICENSE_INDEX_LOCK_TIMEOUT = 60 * 3 + global _LICENSES_INDEX + if not _LICENSES_INDEX: + _LICENSES_INDEX = get_cached_index(cache_dir, check_consistency) + if return_value: + return _LICENSES_INDEX -# If this file exists at the root, the cache is always checked for consistency -DEV_MODE = os.path.exists(os.path.join(root_dir, 'SCANCODE_DEV_MODE')) +# global in-memory cache of a mapping of key -> license instance +_LICENSES = {} -def get_or_build_index_through_cache( - check_consistency=DEV_MODE, - return_index=True, - # used for testing only - _tree_base_dir=src_dir, - _tree_checksum_file=tree_checksum_file, - _index_lock_file=index_lock_file, - _index_cache_file=index_cache_file, - _licenses_data_dir=None, - _rules_data_dir=None, - _timeout=LICENSE_INDEX_LOCK_TIMEOUT, - ): +def get_licenses_db(licenses_data_dir=None): """ - Check and build or rebuild the LicenseIndex cache. - If the cache does not exist, a new index is built an cached. - Return the LicenseIndex if return_index is True. + Return a mapping of license key -> license object. + """ + global _LICENSES + if not _LICENSES : + from licensedcode.models import load_licenses + if not licenses_data_dir: + from licensedcode.models import licenses_data_dir as ldd + licenses_data_dir = ldd + _LICENSES = load_licenses(licenses_data_dir) + return _LICENSES - If `check_consistency` is True, the cache is checked for consistency - and rebuilt if inconsistent or stale. 
- If `check_consistency` is False, the cache is NOT checked for consistency - If the cache files exist but stale, the cache WILL NOT be rebuilt +def get_cached_index(cache_dir=scancode_cache_dir, + check_consistency=SCANCODE_DEV_MODE, + # used for testing only + timeout=LICENSE_INDEX_LOCK_TIMEOUT, + tree_base_dir=scancode_src_dir, + licenses_data_dir=None, rules_data_dir=None,): + """ + Return a LicenseIndex: either load a cached index or build and cache the + index. + - If the cache does not exist, a new index is built an cached. + - If `check_consistency` is True, the cache is checked for consistency and + rebuilt if inconsistent or stale. + - If `check_consistency` is False, the cache is NOT checked for consistency + If the cache files exist but ARE stale, the cache WILL NOT be rebuilt """ from licensedcode.index import LicenseIndex + from licensedcode.models import licenses_data_dir as ldd + from licensedcode.models import rules_data_dir as rdd from licensedcode.models import get_rules - from licensedcode.models import licenses_data_dir - from licensedcode.models import rules_data_dir - _licenses_data_dir = _licenses_data_dir or licenses_data_dir - _rules_data_dir = _rules_data_dir or rules_data_dir - has_cache = exists(_index_cache_file) - has_tree_checksum = exists(_tree_checksum_file) + licenses_data_dir = licenses_data_dir or ldd + rules_data_dir = rules_data_dir or rdd + + lock_file, checksum_file, cache_file = get_license_cache_paths(cache_dir) + + has_cache = exists(cache_file) + has_tree_checksum = exists(checksum_file) # bypass check if no consistency check is needed if has_cache and has_tree_checksum and not check_consistency: - return return_index and _load_index(_index_cache_file) + return load_index(cache_file) # here, we have no cache or we want a validity check: lock, check # and build or rebuild as needed try: # acquire lock and wait until timeout to get a lock or die - with yg.lockfile.FileLock(_index_lock_file, timeout=_timeout): + with 
yg.lockfile.FileLock(lock_file, timeout=timeout): current_checksum = None # is the current cache consistent or stale? if has_cache and has_tree_checksum: # if we have a saved cached index # load saved tree_checksum and compare with current tree_checksum - with open(_tree_checksum_file, 'rb') as etcs: + with open(checksum_file, 'rb') as etcs: existing_checksum = etcs.read() - current_checksum = tree_checksum(tree_base_dir=_tree_base_dir) + current_checksum = tree_checksum(tree_base_dir=tree_base_dir) if current_checksum == existing_checksum: # The cache is consistent with the latest code and data # load and return - return return_index and _load_index(_index_cache_file) + return load_index(cache_file) # Here, the cache is not consistent with the latest code and # data: It is either stale or non-existing: we need to # rebuild the index and cache it rules = get_rules( - licenses_data_dir=_licenses_data_dir, - rules_data_dir=_rules_data_dir) + licenses_data_dir=licenses_data_dir, + rules_data_dir=rules_data_dir) + idx = LicenseIndex(rules) - with open(_index_cache_file, 'wb') as ifc: + + with open(cache_file, 'wb') as ifc: ifc.write(idx.dumps()) # save the new checksums tree - with open(_tree_checksum_file, 'wb') as ctcs: - ctcs.write(current_checksum or tree_checksum(tree_base_dir=_tree_base_dir)) + with open(checksum_file, 'wb') as ctcs: + ctcs.write(current_checksum + or tree_checksum(tree_base_dir=tree_base_dir)) - return return_index and idx + return idx except yg.lockfile.FileLockTimeout: # TODO: handle unable to lock in a nicer way raise -def _load_index(_index_cache_file=index_cache_file): +def load_index(cache_file): """ Return a LicenseIndex loaded from cache. """ from licensedcode.index import LicenseIndex - - with open(_index_cache_file, 'rb') as ifc: + with open(cache_file, 'rb') as ifc: # Note: weird but read() + loads() is much (twice++???) 
faster than load() - idx = LicenseIndex.loads(ifc.read()) - return idx - - -"""Check the license index and reindex if needed.""" -reindex = partial(get_or_build_index_through_cache, check_consistency=True, return_index=False) + return LicenseIndex.loads(ifc.read()) -# global in-memory cache of the main license index instance -_LICENSES_INDEX = None +_ignored_from_hash = partial( + ignore.is_ignored, + ignores={'*.pyc': 'pyc files', + '*~': 'temp gedit files', + '*.swp': 'vi swap files'}, + unignores={} +) -def get_index(_return_index=True): +def tree_checksum(tree_base_dir=scancode_src_dir, _ignored=_ignored_from_hash): """ - Return and eventually cache an index built from an iterable of rules. - Build the index from the built-in rules dataset. - """ - global _LICENSES_INDEX - if not _LICENSES_INDEX: - _LICENSES_INDEX = get_or_build_index_through_cache() - return _return_index and _LICENSES_INDEX - + Return a checksum computed from a file tree using the file paths, + size and last modified time stamps. + The purpose is to detect is there has been any modification to + source code or data files and use this as a proxy to verify the + cache consistency. -# global in-memory cache of a mapping of key -> license instance -_LICENSES = {} + NOTE: this is not 100% fool proof but good enough in practice. + """ + resources = resource_iter(tree_base_dir, ignored=_ignored, with_dirs=False) + hashable = (pth + str(getmtime(pth)) + str(getsize(pth)) for pth in resources) + return md5(''.join(sorted(hashable))).hexdigest() -def get_licenses_db(licenses_data_dir=None): +def get_license_cache_paths(cache_dir=scancode_cache_dir): """ - Return a mapping of license key -> license object. 
+ Return a tuple of index cache files given a master `cache_dir` """ - global _LICENSES - if not _LICENSES : - from licensedcode.models import load_licenses - if not licenses_data_dir: - from licensedcode.models import licenses_data_dir as ldd - licenses_data_dir = ldd - _LICENSES = load_licenses(licenses_data_dir) - return _LICENSES + idx_cache_dir = join(cache_dir, 'license_index') + create_dir(idx_cache_dir) + + lock_file = join(idx_cache_dir, 'lockfile') + checksum_file = join(idx_cache_dir, 'tree_checksums') + cache_file = join(idx_cache_dir, 'index_cache') + + return lock_file, checksum_file, cache_file diff --git a/src/licensedcode/index.py b/src/licensedcode/index.py index 049f36fe00b..2c298dbc2af 100644 --- a/src/licensedcode/index.py +++ b/src/licensedcode/index.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. 
@@ -34,16 +34,15 @@ from functools import partial from itertools import izip from operator import itemgetter -import os import sys from time import time -from commoncode.dict_utils import sparsify +# import early +from scancode_config import scancode_cache_dir +from commoncode.dict_utils import sparsify from licensedcode import MAX_DIST -from licensedcode.cache import get_index from licensedcode.frequent_tokens import global_tokens_by_ranks - from licensedcode import match from licensedcode import match_aho from licensedcode import match_hash @@ -83,8 +82,8 @@ def logger_debug(*args): pass -if (TRACE or TRACE_INDEXING_PERF or TRACE_QUERY_RUN_SIMPLE - or os.environ.get('SCANCODE_LICENSE_DEBUG') or TRACE_NEGATIVE): + +if TRACE or TRACE_INDEXING_PERF or TRACE_QUERY_RUN_SIMPLE or TRACE_NEGATIVE: import logging logger = logging.getLogger(__name__) @@ -96,7 +95,8 @@ def logger_debug(*args): return logger.debug(' '.join(isinstance(a, basestring) and a or repr(a) for a in args)) -def get_license_matches(location=None, query_string=None, min_score=0): +def get_license_matches(location=None, query_string=None, min_score=0, + cache_dir=scancode_cache_dir): """ Yield detected license matches in the file at `location` or the `query_string` string. @@ -108,7 +108,8 @@ def get_license_matches(location=None, query_string=None, min_score=0): The minimum length for an approximate match is four tokens. Spurrious matched are always filtered. """ - return get_index().match(location=location, query_string=query_string, min_score=min_score) + from licensedcode.cache import get_index + return get_index(cache_dir).match(location=location, query_string=query_string, min_score=min_score) # Feature switch to enable or not ngram fragments detection @@ -565,7 +566,7 @@ def negative_match(self, query_run): from the query run. 
""" matches = match_aho.exact_match(self, query_run, self.negative_automaton) - + if TRACE_NEGATIVE and matches: logger_debug(' ##final _negative_matches:....', len(matches)) return matches diff --git a/src/licensedcode/legal.py b/src/licensedcode/legal.py index 7a7e0933af8..e7af15faa9d 100644 --- a/src/licensedcode/legal.py +++ b/src/licensedcode/legal.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -28,17 +28,14 @@ from commoncode import fileutils - """ Recognition of typical "legal" files such as "LICENSE", "COPYING", etc. """ - special_names = ( 'COPYING', 'COPYRIGHT', 'NOTICE', 'LICENSE', 'LICENCE', 'LEGAL', 'EULA', 'AGREEMENT', 'ABOUT', 'COPYLEFT', 'LICENSING') - special_names_lower = tuple(x.lower() for x in special_names) diff --git a/src/licensedcode/match.py b/src/licensedcode/match.py index 440357e5ff4..d3e35dae75d 100644 --- a/src/licensedcode/match.py +++ b/src/licensedcode/match.py @@ -53,6 +53,7 @@ def logger_debug(*args): pass + if (TRACE or TRACE_FILTER_CONTAINS or TRACE_MERGE or TRACE_REFINE_RULE_MIN_COVERAGE or TRACE_REFINE_SINGLE or TRACE_REFINE_SMALL): @@ -430,7 +431,6 @@ def merge_matches(matches, max_dist=MAX_DIST): returned as-is. For being merged two matches must also be in increasing query and index positions. """ - from licensedcode.match_seq import MATCH_SEQ # shortcut for single matches if len(matches) < 2: @@ -474,7 +474,6 @@ def merge_matches(matches, max_dist=MAX_DIST): if TRACE_MERGE: logger_debug(' ---> ###merge_matches: MAX_DIST reached, breaking') break - # keep one of equal matches # with same qspan: FIXME: is this ever possible? 
if current_match.qspan == next_match.qspan and current_match.ispan == next_match.ispan: @@ -563,10 +562,10 @@ def merge_matches(matches, max_dist=MAX_DIST): merged.extend(rule_matches) return merged - # FIXME we should consider the length and distance between matches to break # early from the loops: trying to check containment on wildly separated matches does not make sense + def filter_contained_matches(matches): """ Return a filtered list of LicenseMatch given a `matches` list of LicenseMatch by @@ -1067,6 +1066,7 @@ def get_full_matched_text( dictionary_get = idx.dictionary.get import attr + @attr.s(slots=True) class Token(object): value = attr.ib() diff --git a/src/licensedcode/match_aho.py b/src/licensedcode/match_aho.py index 8c7b090775f..d5c706877dd 100644 --- a/src/licensedcode/match_aho.py +++ b/src/licensedcode/match_aho.py @@ -51,6 +51,7 @@ def logger_debug(*args): logging.basicConfig(stream=sys.stdout) logger.setLevel(logging.DEBUG) else: + def logger_debug(*args): pass diff --git a/src/licensedcode/match_hash.py b/src/licensedcode/match_hash.py index 76bfc5de15c..512d2a1e2e3 100644 --- a/src/licensedcode/match_hash.py +++ b/src/licensedcode/match_hash.py @@ -30,7 +30,6 @@ from licensedcode.spans import Span from licensedcode.match import LicenseMatch - """ Matching strategy using hashes to match a whole text chunk at once. 
""" @@ -51,10 +50,10 @@ def logger_debug(*args): logging.basicConfig(stream=sys.stdout) logger.setLevel(logging.DEBUG) else: + def logger_debug(*args): pass - MATCH_HASH = '1-hash' diff --git a/src/licensedcode/match_seq.py b/src/licensedcode/match_seq.py index 90d5d870f9e..8f70143555e 100644 --- a/src/licensedcode/match_seq.py +++ b/src/licensedcode/match_seq.py @@ -24,7 +24,6 @@ from __future__ import absolute_import, division, print_function - from licensedcode.match import get_texts from licensedcode.match import LicenseMatch from licensedcode.seq import match_blocks @@ -33,8 +32,10 @@ TRACE = False TRACE2 = False + def logger_debug(*args): pass + if TRACE: import logging import sys @@ -54,6 +55,7 @@ def logger_debug(*args): MATCH_SEQ = '3-seq' + def match_sequence(idx, candidate, query_run, start_offset=0): """ Return a list of LicenseMatch by matching the `query_run` tokens sequence diff --git a/src/licensedcode/match_set.py b/src/licensedcode/match_set.py index b96a95fee44..e7da310e75e 100644 --- a/src/licensedcode/match_set.py +++ b/src/licensedcode/match_set.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -35,7 +35,6 @@ from licensedcode.models import Rule - """ Approximate matching strategies using token sets and multisets. 
@@ -123,6 +122,7 @@ def logger_debug(*args): pass + if TRACE: import logging import sys @@ -134,10 +134,10 @@ def logger_debug(*args): pass def logger_debug(*args): return logger.debug(' '.join(isinstance(a, basestring) and a or repr(a) for a in args)) - # TODO: add bigrams sets and multisets # TODO: see also https://github.com/bolo1729/python-memopt/blob/master/memopt/memopt.py for multisets + def tids_sets_intersector(qset, iset): """ Return the intersection of a query and index token ids sets. @@ -225,6 +225,7 @@ def index_token_sets(token_ids, len_junk, len_good): # would discard when we compute candaites to eventually discard many or all candidates # we compute too many candidates that may waste time in seq matching for no reason + # FIXME: Also we should remove any weak and or small rules from the top candidates # and anything that cannot be seq matched at all. (e.g. no high match) def compute_candidates(query_run, idx, rules_subset, top=30): @@ -270,7 +271,10 @@ def compute_candidates(query_run, idx, rules_subset, top=30): logger_debug('candidate: ihigh:', [(idx.tokens_by_tid[tid], val) for tid, val in enumerate(ihigh, idx.len_junk)]) thresholds = thresholds_getter(rule) - compared = compare_sets(qhigh, qlow, ihigh, ilow, thresholds, intersector, counter) + if TRACE_DEEP: + compared = compare_sets(qhigh, qlow, ihigh, ilow, thresholds, intersector, counter, rule, idx) + else: + compared = compare_sets(qhigh, qlow, ihigh, ilow, thresholds, intersector, counter) if compared: sort_order, intersection = compared sortable_candidates.append((sort_order, rid, rule, intersection)) @@ -309,7 +313,7 @@ def compute_candidates(query_run, idx, rules_subset, top=30): return candidates -def compare_sets(qhigh, qlow, ihigh, ilow, thresholds, intersector, counter): +def compare_sets(qhigh, qlow, ihigh, ilow, thresholds, intersector, counter, _rule=None, _idx=None): """ Compare a query qhigh and qlow sets with an index rule ihigh and ilow sets. 
Return a tuple suitable for sorting and the computed sets intersection or None if @@ -383,4 +387,9 @@ def compare_sets(qhigh, qlow, ihigh, ilow, thresholds, intersector, counter): inter = low_inter low_inter.update(high_inter) + if TRACE_DEEP: + logger_debug('compare_sets: intersected rule:', _rule.identifier) + logger_debug(' compare_sets: thresholds:', thresholds) + logger_debug(' compare_sets: high_inter:', ' '.join(_idx.tokens_by_tid[tid] for tid in high_inter)) + return sort_order, inter diff --git a/src/licensedcode/models.py b/src/licensedcode/models.py index 6057761d43e..99791aa7a52 100644 --- a/src/licensedcode/models.py +++ b/src/licensedcode/models.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -23,9 +23,9 @@ # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
from __future__ import absolute_import -from __future__ import unicode_literals -from __future__ import print_function from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals import codecs from collections import Counter @@ -34,23 +34,27 @@ from collections import OrderedDict from itertools import chain from operator import itemgetter +from os.path import abspath +from os.path import dirname from os.path import exists from os.path import join +from commoncode.fileutils import copyfile from commoncode.fileutils import file_base_name from commoncode.fileutils import file_name -from commoncode.fileutils import file_iter +from commoncode.fileutils import resource_iter from commoncode import saneyaml from textcode.analysis import text_lines from licensedcode import MIN_MATCH_LENGTH from licensedcode import MIN_MATCH_HIGH_LENGTH -from licensedcode import licenses_data_dir -from licensedcode import rules_data_dir from licensedcode.tokenize import rule_tokenizer from licensedcode.tokenize import query_tokenizer -from commoncode import fileutils +# these are globals but always side-by-side with the code so not moving +data_dir = join(abspath(dirname(__file__)), 'data') +licenses_data_dir = join(data_dir, 'licenses') +rules_data_dir = join(data_dir, 'rules') """ Reference License and license Rule structures persisted as a combo of a YAML @@ -180,7 +184,7 @@ def relocate(self, target_dir, new_key=None): # save it all to files if self.text: - fileutils.copyfile(self.text_file, newl.text_file) + copyfile(self.text_file, newl.text_file) newl.dump() return newl @@ -389,7 +393,6 @@ def validate(licenses, verbose=False, no_dupe_urls=False): # for global dedupe by_text[license_qtokens].append(key + ': TEXT') - # SPDX consistency if lic.spdx_license_key: by_spdx_key[lic.spdx_license_key].append(key) @@ -431,7 +434,7 @@ def load_licenses(licenses_data_dir=licenses_data_dir , with_deprecated=False): Return a mapping of key -> 
license objects, loaded from license files. """ licenses = {} - for data_file in file_iter(licenses_data_dir): + for data_file in resource_iter(licenses_data_dir, with_dirs=False): if not data_file.endswith('.yml'): continue key = file_base_name(data_file) @@ -511,7 +514,7 @@ def load_rules(rules_data_dir=rules_data_dir, load_notes=False): processed_files = set() lower_case_files = set() case_problems = set() - for data_file in file_iter(rules_data_dir): + for data_file in resource_iter(rules_data_dir, with_dirs=False): if data_file.endswith('.yml'): base_name = file_base_name(data_file) rule_file = join(rules_data_dir, base_name + '.RULE') @@ -740,30 +743,37 @@ def thresholds(self): Return a Thresholds tuple considering the occurrence of all tokens. """ if not self._thresholds: - min_high = min([self.high_length, MIN_MATCH_HIGH_LENGTH]) - min_len = MIN_MATCH_LENGTH + length = self.length + high_length = self.high_length + if length > 200: + min_high = high_length // 10 + min_len = length // 10 + else: + min_high = min([high_length, MIN_MATCH_HIGH_LENGTH]) + min_len = MIN_MATCH_LENGTH # note: we cascade ifs from largest to smallest lengths # FIXME: this is not efficient + if self.length < 30: - min_len = self.length // 2 + min_len = length // 2 if self.length < 10: - min_high = self.high_length - min_len = self.length + min_high = high_length + min_len = length self.minimum_coverage = 80 if self.length < 3: - min_high = self.high_length - min_len = self.length + min_high = high_length + min_len = length self.minimum_coverage = 100 if self.minimum_coverage == 100: - min_high = self.high_length - min_len = self.length + min_high = high_length + min_len = length self._thresholds = Thresholds( - self.high_length, self.low_length, self.length, + high_length, self.low_length, length, self.small(), min_high, min_len ) return self._thresholds @@ -773,31 +783,40 @@ def thresholds_unique(self): Return a Thresholds tuple considering the occurrence of only unique tokens. 
""" if not self._thresholds_unique: - highu = (int(self.high_unique // 2)) or self.high_unique - min_high = min([highu, MIN_MATCH_HIGH_LENGTH]) - min_len = MIN_MATCH_LENGTH + length = self.length + high_unique = self.high_unique + length_unique = self.length_unique + + if length > 200: + min_high = high_unique // 10 + min_len = length // 10 + else: + highu = (int(high_unique // 2)) or high_unique + min_high = min([highu, MIN_MATCH_HIGH_LENGTH]) + min_len = MIN_MATCH_LENGTH + # note: we cascade IFs from largest to smallest lengths - if self.length < 20: - min_high = self.high_unique + if length < 20: + min_high = high_unique min_len = min_high - if self.length < 10: - min_high = self.high_unique - if self.length_unique < 2: - min_len = self.length_unique + if length < 10: + min_high = high_unique + if length_unique < 2: + min_len = length_unique else: - min_len = self.length_unique - 1 + min_len = length_unique - 1 - if self.length < 5: - min_high = self.high_unique - min_len = self.length_unique + if length < 5: + min_high = high_unique + min_len = length_unique if self.minimum_coverage == 100: - min_high = self.high_unique - min_len = self.length_unique + min_high = high_unique + min_len = length_unique self._thresholds_unique = Thresholds( - self.high_unique, self.low_unique, self.length_unique, + high_unique, self.low_unique, length_unique, self.small(), min_high, min_len) return self._thresholds_unique diff --git a/src/licensedcode/query.py b/src/licensedcode/query.py index baa94c827a1..2f76bd16226 100644 --- a/src/licensedcode/query.py +++ b/src/licensedcode/query.py @@ -23,7 +23,8 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
-from __future__ import print_function, absolute_import +from __future__ import absolute_import +from __future__ import print_function from collections import defaultdict @@ -35,7 +36,6 @@ from licensedcode.tokenize import query_lines from licensedcode.tokenize import query_tokenizer - """ Build license queries from scanned files to feed the detection pipeline. @@ -84,6 +84,7 @@ def logger_debug(*args): pass + if TRACE: import logging import sys diff --git a/src/licensedcode/seq.py b/src/licensedcode/seq.py index cf23d0dd9ea..68555c4f58e 100644 --- a/src/licensedcode/seq.py +++ b/src/licensedcode/seq.py @@ -3,7 +3,6 @@ from collections import namedtuple as _namedtuple - """ Token sequences alignement and diffing based on the longest common substrings of "high tokens". This essentially a non-optimal and reasonably fast single local @@ -15,7 +14,6 @@ license: PSF. See seq.ABOUT file for details. """ - Match = _namedtuple('Match', 'a b size') diff --git a/src/licensedcode/spans.py b/src/licensedcode/spans.py index bb258ad36f2..4b60d0cf155 100644 --- a/src/licensedcode/spans.py +++ b/src/licensedcode/spans.py @@ -37,7 +37,6 @@ from intbitset import intbitset - """ Ranges and intervals of integers using bitmaps. Used as a compact and faster data structure for token and position sets. @@ -51,6 +50,7 @@ class Span(Set): It is equivalent to a sparse closed interval. Originally derived and heavily modified from Whoosh Span. """ + def __init__(self, *args): """ Create a new Span from a start and end ints or an iterable of ints. diff --git a/src/licensedcode/tokenize.py b/src/licensedcode/tokenize.py index ef53e9094ca..4549621bc7e 100644 --- a/src/licensedcode/tokenize.py +++ b/src/licensedcode/tokenize.py @@ -34,12 +34,12 @@ from textcode.analysis import text_lines - """ Utilities to break texts in lines and tokens (aka. words) with specialized version for queries and rules texts. 
""" + def query_lines(location=None, query_string=None, strip=True): """ Return an iterable of text lines given a file at `location` or a @@ -70,6 +70,7 @@ def query_lines(location=None, query_string=None, strip=True): query_pattern = '[^\W]+\+?[^\W]*' word_splitter = re.compile(query_pattern, re.UNICODE).findall + def query_tokenizer(text, lower=True): """ Return an iterable of tokens from a unicode query text. @@ -84,11 +85,11 @@ def query_tokenizer(text, lower=True): # matched text collection not_query_pattern = '[\W\s\+]+[\W\s]?' - # collect tokens and non-token texts in two different groups _text_capture_pattern = '(?P' + query_pattern + ')' + '|' + '(?P' + not_query_pattern + ')' tokens_and_non_tokens = re.compile(_text_capture_pattern, re.UNICODE).finditer + def matched_query_text_tokenizer(text): """ Return an iterable of tokens and non-tokens from a unicode query text keeping @@ -118,6 +119,7 @@ def matched_query_text_tokenizer(text): rule_pattern = '%s|%s+' % (query_pattern, template_pattern,) template_splitter = re.compile(rule_pattern , re.UNICODE).findall + def rule_tokenizer(text, lower=True): """ Return an iterable of tokens from a unicode rule text, skipping templated diff --git a/src/packagedcode/__init__.py b/src/packagedcode/__init__.py index 47aab4d7a92..68fd453243e 100644 --- a/src/packagedcode/__init__.py +++ b/src/packagedcode/__init__.py @@ -22,6 +22,8 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
+from __future__ import absolute_import + from packagedcode import models from packagedcode import maven from packagedcode import npm @@ -29,7 +31,6 @@ from packagedcode import phpcomposer from packagedcode import rpm - # Note: the order matters: from the most to the least specific # Package classes MUST be added to this list to be active PACKAGE_TYPES = [ diff --git a/src/packagedcode/maven.py b/src/packagedcode/maven.py index 4ceb12fe7dd..cb8e5fe9832 100644 --- a/src/packagedcode/maven.py +++ b/src/packagedcode/maven.py @@ -45,7 +45,6 @@ from typecode import contenttype from textcode import analysis - logger = logging.getLogger(__name__) TRACE = False @@ -54,12 +53,12 @@ logging.basicConfig(stream=sys.stdout) logger.setLevel(logging.DEBUG) - """ Support Maven2 POMs. Attempts to resolve Maven properties when possible. """ + class MavenPomPackage(models.Package): metafiles = ('.pom', 'pom.xml',) extensions = ('.pom', '.xml',) @@ -118,6 +117,7 @@ def to_dict(self): class MavenPom(pom.Pom): + def __init__(self, location=None, text=None): """ Build a POM from a location or unicode text. @@ -217,6 +217,7 @@ def _extra_properties(self): def _replace_props(cls, text, properties): if not text: return text + def subfunc(matchobj): """Return the replacement value for a matched property key.""" key = matchobj.group(1) @@ -775,6 +776,7 @@ class MavenRecognizer(object): """ A package recognizer for Maven-based packages. """ + def __init__(self): return NotImplementedError() diff --git a/src/packagedcode/models.py b/src/packagedcode/models.py index ed1f0b07368..ce6e039b779 100644 --- a/src/packagedcode/models.py +++ b/src/packagedcode/models.py @@ -54,7 +54,6 @@ from schematics.types.compound import ModelType from schematics.transforms import blacklist - """ Common data model for package information and dependencies, abstracting the many small differences existing between package management formats and tools. 
@@ -127,6 +126,7 @@ class BaseListType(ListType): """ ListType with a default of an empty list. """ + def __init__(self, field, **kwargs): super(BaseListType, self).__init__(field=field, default=[], **kwargs) @@ -138,6 +138,7 @@ class PackageIndentifierType(BaseType): """ Global identifier for a package """ + def __init__(self, **kwargs): super(PackageIndentifierType, self).__init__(**kwargs) @@ -298,6 +299,7 @@ class BaseModel(Model): """ Base class for all schematics models. """ + def __init__(self, **kwargs): super(BaseModel, self).__init__(raw_data=kwargs) @@ -514,7 +516,6 @@ def resolve(self): payload_doc = 'doc' PAYLOADS = (payload_src, payload_bin, payload_doc) - # Packaging types ################################# as_archive = 'archive' @@ -946,7 +947,6 @@ def identifier(self): """ return PackageId(self.type, self.name, self.version) - # # Package sub types # NOTE: this is somewhat redundant with extractcode archive handlers @@ -1212,7 +1212,6 @@ class SquashfsPackage(Package): type = StringType(default='squashfs image') packaging = StringType(default=as_archive) - # # these very generic archive packages must come last in recogniztion order # diff --git a/src/packagedcode/nevra.py b/src/packagedcode/nevra.py index a840ff9f651..8d6718a72ca 100644 --- a/src/packagedcode/nevra.py +++ b/src/packagedcode/nevra.py @@ -22,13 +22,13 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
-from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function import re from commoncode import fileutils - """ Utilities to handle RPM NEVRA (name, epoch, version, release, architecture) """ @@ -50,6 +50,7 @@ # modified and originally from: # https://raw.githubusercontent.com/sassoftware/conary/c26507001b62b0839539908cc5bf28893c45c0b4/conary/rpmhelper.py + def from_name(filename): """ Return an (E, N, V, R, A) tuple given a file name, by splitting diff --git a/src/packagedcode/npm.py b/src/packagedcode/npm.py index 756d1806222..655ded0b885 100644 --- a/src/packagedcode/npm.py +++ b/src/packagedcode/npm.py @@ -48,7 +48,6 @@ https://github.com/pombredanne/normalize-package-data """ - logger = logging.getLogger(__name__) # import sys # logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) @@ -129,7 +128,6 @@ def build_package(package_data, base_dir=None, metafile_name='package.json'): ('repository', repository_mapper), ]) - if not package_data.get('name') or not package_data.get('version'): # a package.json without name and version is not a usable NPM package return @@ -404,7 +402,6 @@ def deps_mapper(deps, package, field_name): peer_dependencies_mapper = partial(deps_mapper, field_name='peerDependencies') optional_dependencies_mapper = partial(deps_mapper, field_name='optionalDependencies') - person_parser = re.compile( r'^(?P[^\(<]+)' r'\s?' 
diff --git a/src/packagedcode/phpcomposer.py b/src/packagedcode/phpcomposer.py index 7877cc58048..909cedb2c81 100644 --- a/src/packagedcode/phpcomposer.py +++ b/src/packagedcode/phpcomposer.py @@ -31,7 +31,6 @@ from collections import OrderedDict from functools import partial - from commoncode import filetype from commoncode import fileutils @@ -42,7 +41,6 @@ Handle PHP composer packages, refer to https://getcomposer.org/ """ - logger = logging.getLogger(__name__) # import sys # logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) @@ -84,7 +82,7 @@ def parse(location): return build_package(package_data, base_dir, metafile_name) -def build_package(package_data, base_dir =None, metafile_name='composer.json'): +def build_package(package_data, base_dir=None, metafile_name='composer.json'): """ Return a composer Package object from a package data mapping or None. @@ -112,11 +110,10 @@ def build_package(package_data, base_dir =None, metafile_name='composer.json'): ('support', support_mapper), ]) - # A composer.json without name and description is not a usable PHP # composer package. Name and description fields are required but # only for published packages: - # https://getcomposer.org/doc/04-schema.md#name + # https://getcomposer.org/doc/04-schema.md#name # We want to catch both published and non-published packages here. package = PHPComposerPackage() @@ -141,7 +138,7 @@ def build_package(package_data, base_dir =None, metafile_name='composer.json'): if value: func(value, package) # Parse vendor from name value - vendor_mapper(package) + vendor_mapper(package) return package diff --git a/src/packagedcode/pypi.py b/src/packagedcode/pypi.py index c99368fcb16..8b76246122a 100644 --- a/src/packagedcode/pypi.py +++ b/src/packagedcode/pypi.py @@ -34,12 +34,10 @@ from packagedcode.models import PythonPackage from packagedcode import models - """ Detect and collect Python packages information. 
""" - PKG_INFO_ATTRIBUTES = [ 'Name', 'Version', @@ -129,8 +127,8 @@ def parse_metadata(location): for fname in ('METADATA', 'DESCRIPTION.rst')): return # FIXME: wrap in a with statement + # FIXME: use ordereddict infos = json.loads(open(location, 'rb').read()) - print(infos) homepage_url = None authors = [] if infos['extensions']: diff --git a/src/packagedcode/pyrpm/rpm.py b/src/packagedcode/pyrpm/rpm.py index 9502c270821..371f69db558 100644 --- a/src/packagedcode/pyrpm/rpm.py +++ b/src/packagedcode/pyrpm/rpm.py @@ -35,7 +35,6 @@ from __future__ import absolute_import - from StringIO import StringIO import struct import re @@ -63,6 +62,7 @@ def find_magic_number(regexp, data): class Entry(object): ''' RPM Header Entry ''' + def __init__(self, entry, store): self.entry = entry self.store = store @@ -155,6 +155,7 @@ def __readbin(self): class Header(object): ''' RPM Header Structure ''' + def __init__(self, header, entries, store): self.header = header self.entries = entries diff --git a/src/packagedcode/pyrpm/rpmdefs.py b/src/packagedcode/pyrpm/rpmdefs.py index bd416ad68b0..f1077874503 100644 --- a/src/packagedcode/pyrpm/rpmdefs.py +++ b/src/packagedcode/pyrpm/rpmdefs.py @@ -27,9 +27,10 @@ ''' rpm definitions - ''' +from __future__ import absolute_import + RPM_LEAD_MAGIC_NUMBER = '\xed\xab\xee\xdb' RPM_HEADER_MAGIC_NUMBER = '\x8e\xad\xe8' @@ -45,11 +46,9 @@ RPMSIGTAG_GPG = 1005 RPMSIGTAG_PGP5 = 1006 - MD5_SIZE = 16 # 16 bytes long PGP_SIZE = 152 # 152 bytes long - # data types definition RPM_DATA_TYPE_NULL = 0 RPM_DATA_TYPE_CHAR = 1 @@ -102,7 +101,6 @@ RPMTAG_SOURCEPACKAGE = 1106 RPMTAG_DISTURL = 1123 - RPMTAGS = { RPMTAG_NAME: 'name', RPMTAG_EPOCH: 'epoch', @@ -124,7 +122,6 @@ RPMTAG_DISTURL: 'dist_url', } - """ from rpm.org lib/rpmtag.h See also: http://refspecs.linuxfoundation.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/pkgformat.html diff --git a/src/packagedcode/recognize.py b/src/packagedcode/recognize.py index a8a78f265e2..2a6fe4260db 100644 --- 
a/src/packagedcode/recognize.py +++ b/src/packagedcode/recognize.py @@ -29,17 +29,18 @@ import sys from commoncode import filetype +from commoncode.fileutils import fsencode from commoncode.system import on_linux -from commoncode.fileutils import path_to_bytes from packagedcode import PACKAGE_TYPES from typecode import contenttype - TRACE = False + def logger_debug(*args): pass + logger = logging.getLogger(__name__) if TRACE: @@ -49,7 +50,6 @@ def logger_debug(*args): def logger_debug(*args): return logger.debug(' '.join(isinstance(a, basestring) and a or repr(a) for a in args)) - """ Recognize packages in files or directories. """ @@ -67,12 +67,11 @@ def recognize_package(location): ftype = T.filetype_file.lower() mtype = T.mimetype_file - for package_type in PACKAGE_TYPES: # Note: default to True if there is nothing to match against metafiles = package_type.metafiles if on_linux: - metafiles = (path_to_bytes(m) for m in metafiles) + metafiles = (fsencode(m) for m in metafiles) if location.endswith(tuple(metafiles)): logger_debug('metafile matching: package_type is of type:', package_type) return package_type.recognize(location) @@ -89,7 +88,7 @@ def recognize_package(location): extensions = package_type.extensions if extensions: if on_linux: - extensions = tuple(path_to_bytes(e) for e in extensions) + extensions = tuple(fsencode(e) for e in extensions) extension_matched = location.lower().endswith(extensions) else: extension_matched = False diff --git a/src/packagedcode/rpm.py b/src/packagedcode/rpm.py index b715bb16952..f0c9ee3d7bb 100644 --- a/src/packagedcode/rpm.py +++ b/src/packagedcode/rpm.py @@ -30,19 +30,19 @@ import string import sys - from packagedcode import models from packagedcode import nevra from packagedcode.pyrpm.rpm import RPM import typecode.contenttype - TRACE = False + def logger_debug(*args): pass + logger = logging.getLogger(__name__) if TRACE: @@ -52,7 +52,6 @@ def logger_debug(*args): def logger_debug(*args): return logger.debug(' 
'.join(isinstance(a, basestring) and a or repr(a) for a in args)) - # TODO: retrieve dependencies # TODO: parse spec files see: @@ -82,7 +81,6 @@ def logger_debug(*args): 'bin_or_src', ) - RPMInfo = namedtuple('RPMInfo', list(RPM_TAGS)) @@ -118,7 +116,6 @@ def info(location, include_desc=False): the long RPM description value if include_desc is True. """ tgs = tags(location, include_desc) - print(tgs) return tgs and RPMInfo(**tgs) or None @@ -126,6 +123,7 @@ class EVR(namedtuple('EVR', 'epoch version release')): """ The RPM Epoch, Version, Release tuple. """ + # note: the order of the named tuple is the sort order. # But for creation we put the rarely used epoch last def __new__(self, version, release, epoch=None): diff --git a/src/packagedcode/utils.py b/src/packagedcode/utils.py index 423033c739f..69ce4aeb8a2 100644 --- a/src/packagedcode/utils.py +++ b/src/packagedcode/utils.py @@ -22,7 +22,8 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. -from __future__ import print_function, absolute_import +from __future__ import absolute_import +from __future__ import print_function VCS_URLS = ( diff --git a/src/packagedcode/xmlutils.py b/src/packagedcode/xmlutils.py index 75b3c5af730..3cd018103f0 100644 --- a/src/packagedcode/xmlutils.py +++ b/src/packagedcode/xmlutils.py @@ -26,13 +26,11 @@ from __future__ import print_function from __future__ import unicode_literals - import chardet from lxml import etree from textcode import analysis - """ Utility functions for dealing with XML. 
""" @@ -61,7 +59,7 @@ def parse(location, handler): except: parser = etree.XMLParser(recover=True, remove_blank_text=True, resolve_entities=False) text = analysis.unicode_text(location) - xdoc= etree.fromstring(_as_unicode_bytes(text), parser) + xdoc = etree.fromstring(_as_unicode_bytes(text), parser) return handler(xdoc) diff --git a/src/plugincode/__init__.py b/src/plugincode/__init__.py index 7ebe6b01cb0..1ef5f285eb2 100644 --- a/src/plugincode/__init__.py +++ b/src/plugincode/__init__.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -22,5 +22,209 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. - from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals + +from collections import OrderedDict +import sys + +from pluggy import HookimplMarker +from pluggy import HookspecMarker +from pluggy import PluginManager as PluggyPluginManager +from scancode import CommandLineOption + + +class BasePlugin(object): + """ + A base class for all ScanCode plugins. + """ + # List of CommandLineOption CLI options for this plugin. + # Subclasses should set this as needed + options = [] + + # flag set to True once this plugin class has been initialized by calling it + # setup() class method. + # This is set automatically when a plugin class is loaded in its manager. + # Subclasses must not set this. + initialized = False + + # stage string for this plugin. + # This is set automatically when a plugin class is loaded in its manager. + # Subclasses must not set this. 
+ stage = None
+
+ # name string under which this plugin is registered.
+ # This is set automatically when a plugin class is loaded in its manager.
+ # Subclasses must not set this.
+ name = None
+
+ # An ordered mapping of attr attributes that specifies the data returned by
+ # this plugin. These attributes will be added to a Resource subclass. The
+ # position of these attributes in the returned serialized data is determined
+ # by the sort_order then the plugin name
+ attributes = OrderedDict()
+
+ # a relative sort order number (integer or float). In scan results, results
+ # from scanners are sorted by this sort_order then by "keys".
+ # This is also used in the CLI UI to sort the SCAN_GROUP option help group.
+ sort_order = 100
+
+ def __init__(self, *args, **kwargs):
+ """
+ Initialize a new plugin with user kwargs.
+ Plugins can override as needed (still calling super).
+ """
+ self.options_by_name = {o.name: o for o in self.options}
+
+ self.kwargs = kwargs
+
+ # mapping of scan summary data and statistics.
+ # This is populated automatically on the plugin instance.
+ # Subclasses must not set this.
+ self.summary = OrderedDict()
+
+ # TODO: pass own command options name/values as concrete kwargs
+ def is_enabled(self, **kwargs):
+ """
+ Return True if this plugin is enabled by user-selected options.
+ Subclasses must override.
+ This receives all the ScanCode call arguments as kwargs.
+ """
+ raise NotImplementedError
+
+ # TODO: pass own command options name/values as concrete kwargs
+ def setup(self, **kwargs):
+ """
+ Execute some setup for this plugin. This is guaranteed to be called
+ exactly one time at initialization if this plugin is enabled.
+ Must raise an Exception on failure.
+ Subclasses can override as needed.
+ This receives all the ScanCode call arguments as kwargs.
+ """
+ pass
+
+ # NOTE: Other methods below should NOT be overridden.
+
+ @property
+ def qname(self):
+ """
+ Return the qualified name of this plugin. 
+ """ + return '{self.stage}:{self.name}'.format(self=self) + + def get_option(self, name): + """ + Return the CommandLineOption of this plugin with `name` or None. + """ + return self.options_by_name.get(name) + + +class CodebasePlugin(BasePlugin): + """ + Base class for plugins that process a whole codebase at once. + """ + + def process_codebase(self, codebase, **kwargs): + """ + Process a `codebase` Codebase object updating its Reousrce as needed. + Subclasses should override. + This receives all the ScanCode call arguments as kwargs. + """ + raise NotImplementedError + + +class PluginManager(object): + """ + A PluginManager class for plugins. + """ + + # a global managers cache as a mapping of {stage: manager instance} + managers = {} + + def __init__(self, stage, module_qname, entrypoint, plugin_base_class): + """ + Initialize this plugin manager for the `stage` specified in the fully + qualified Python module name `module_qname` with plugins loaded from the + setuptools `entrypoint` that must subclass `plugin_base_class`. + """ + self.manager = PluggyPluginManager(project_name=stage) + self.managers[stage] = self + + self.stage = stage + self.entrypoint = entrypoint + self.plugin_base_class = plugin_base_class + self.manager.add_hookspecs(sys.modules[module_qname]) + + # set to True once this manager is initialized by running its setup() + self.initialized = False + + # mapping of {plugin.name: plugin_class} for all the plugins of this + # manager + self.plugin_classes = OrderedDict() + + @classmethod + def load_plugins(cls): + """ + Setup the plugins enviroment. + Must be called once to initialize all the plugins of all managers. 
+ """ + plugin_classes = [] + plugin_options = [] + for stage, manager in cls.managers.items(): + mgr_setup = manager.setup() + if not mgr_setup: + msg = 'Cannot load ScanCode plugins for stage: %(stage)s' % locals() + raise Exception(msg) + mplugin_classes, mplugin_options = mgr_setup + plugin_classes.extend(mplugin_classes) + plugin_options.extend(mplugin_options) + return plugin_classes, plugin_options + + def setup(self): + """ + Return a tuple of (list of all plugin classes, list of all options of + all plugin classes). + + Load and validate available plugins for this PluginManager from its + assigned `entrypoint`. Raise an Exception if a plugin is not valid such + that when it does not subcclass the manager `plugin_base_class`. + Must be called once to setup the plugins if this manager. + """ + if self.initialized: + return + + entrypoint = self.entrypoint + try: + self.manager.load_setuptools_entrypoints(entrypoint) + except ImportError, e: + raise e + stage = self.stage + + plugin_options = [] + for name, plugin_class in self.manager.list_name_plugin(): + + if not issubclass(plugin_class, self.plugin_base_class): + qname = '%(stage)s:%(name)s' % locals() + raise Exception( + 'Invalid plugin: %(qname)r: %(plugin_class)r ' + 'must extend %(plugin_base_class)r.' % locals()) + + for option in plugin_class.options: + if not isinstance(option, CommandLineOption): + qname = '%(stage)s:%(name)s' % locals() + oname = option.name + clin = CommandLineOption + raise Exception( + 'Invalid plugin: %(qname)r: option %(oname)r ' + 'must extend %(clin)r.' 
% locals()) + plugin_options.append(option) + + plugin_class.stage = stage + plugin_class.name = name + + self.plugin_classes[name] = plugin_class + + self.initialized = True + return self.plugin_classes.values(), plugin_options + diff --git a/src/plugincode/output.py b/src/plugincode/output.py index 824911e406a..987e6b04774 100644 --- a/src/plugincode/output.py +++ b/src/plugincode/output.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -23,56 +23,85 @@ # Visit https://github.com/nexB/scancode-toolkit/ for support and download. from __future__ import absolute_import +from __future__ import division from __future__ import print_function from __future__ import unicode_literals -from collections import OrderedDict -import sys +from functools import partial +from itertools import imap -from pluggy import HookimplMarker -from pluggy import HookspecMarker -from pluggy import PluginManager +from plugincode import CodebasePlugin +from plugincode import PluginManager +from plugincode import HookimplMarker +from plugincode import HookspecMarker +from scancode.resource import Resource +# Python 2 and 3 support +try: + # Python 2 + unicode + str_orig = str + bytes = str # NOQA + str = unicode # NOQA +except NameError: + # Python 3 + unicode = str # NOQA -scan_output_spec = HookspecMarker('scan_output_writer') -scan_output_writer = HookimplMarker('scan_output_writer') +# Tracing flags +TRACE = False +TRACE_DEEP = False -# FIXME: simplify the hooskpec -@scan_output_spec -def write_output(files_count, version, notice, scanned_files, options, input, output_file, _echo): - """ - Write the `scanned_files` scan results in the format supplied by - the --format command line option. 
- Parameters: - - `file_count`: the number of files and directories scanned. - - `version`: ScanCode version - - `notice`: ScanCode notice - - `scanned_files`: an iterable of scan results for each file - - `options`: a mapping of key by command line option to a flag True - if this option was enabled. - - `input`: the original input path scanned. - - `output_file`: an opened, file-like object to write the output to. - - `_echo`: a funtion to echo strings to stderr. This will be removedd in the future. - """ +def logger_debug(*args): pass -output_plugins = PluginManager('scan_output_writer') -output_plugins.add_hookspecs(sys.modules[__name__]) +if TRACE or TRACE_DEEP: + import logging + import sys + logger = logging.getLogger(__name__) + logging.basicConfig(stream=sys.stdout) + logger.setLevel(logging.DEBUG) -def initialize(): - """ - NOTE: this defines the entry points for use in setup.py - """ - output_plugins.load_setuptools_entrypoints('scancode_output_writers') + def logger_debug(*args): + return logger.debug(' '.join(isinstance(a, unicode) + and a or repr(a) for a in args)) + +stage = 'output' +entrypoint = 'scancode_output' +output_spec = HookspecMarker(project_name=stage) +output_impl = HookimplMarker(project_name=stage) -def get_format_plugins(): + +@output_spec +class OutputPlugin(CodebasePlugin): """ - Return an ordered mapping of format name --> plugin callable for all - the output plugins. The mapping is ordered by sorted key. - This is the main API for other code to access format plugins. + Base plugin class for scan output formatters all output plugins must extend. """ - return OrderedDict(sorted(output_plugins.list_name_plugin())) + + def process_codebase(self, codebase, **kwargs): + """ + Write scan output for the `codebase`. + """ + raise NotImplementedError + + @classmethod + def get_results(cls, codebase, info, full_root, strip_root, timing, **kwargs): + """ + Return an iterable of serialized scan results from a codebase. 
+ """ + # FIXME: serialization SHOULD NOT be needed: only some format need it + # (e.g. JSON) and only these should serialize + serializer = partial(Resource.to_dict, with_info=info, with_timing=timing) + resources = codebase.walk_filtered(topdown=True, skip_root=strip_root) + return imap(serializer, resources) + + +output_plugins = PluginManager( + stage=stage, + module_qname=__name__, + entrypoint=entrypoint, + plugin_base_class=OutputPlugin +) diff --git a/src/plugincode/output_filter.py b/src/plugincode/output_filter.py new file mode 100644 index 00000000000..9c3f4a2e768 --- /dev/null +++ b/src/plugincode/output_filter.py @@ -0,0 +1,59 @@ +# +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. +# http://nexb.com and https://github.com/nexB/scancode-toolkit/ +# The ScanCode software is licensed under the Apache License version 2.0. +# Data generated with ScanCode require an acknowledgment. +# ScanCode is a trademark of nexB Inc. +# +# You may not use this software except in compliance with the License. +# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# +# When you publish or redistribute any data created with ScanCode or any ScanCode +# derivative work, you must accompany this data with the following acknowledgment: +# +# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, either express or implied. No content created from +# ScanCode should be considered or used as legal advice. Consult an Attorney +# for any legal advice. +# ScanCode is a free software code scanning tool from nexB Inc. and others. 
+# Visit https://github.com/nexB/scancode-toolkit/ for support and download. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from plugincode import CodebasePlugin +from plugincode import PluginManager +from plugincode import HookimplMarker +from plugincode import HookspecMarker + +stage = 'output_filter' +entrypoint = 'scancode_output_filter' + +output_filter_spec = HookspecMarker(project_name=stage) +output_filter_impl = HookimplMarker(project_name=stage) + + +@output_filter_spec +class OutputFilterPlugin(CodebasePlugin): + """ + Base plugin class for Resource output filter plugins that all output filter + plugins must extend. + + Filter plugins MUST NOT modify the codebase beyond setting the + Resource.is_filtered flag on resources. + """ + pass + + +output_filter_plugins = PluginManager( + stage=stage, + module_qname=__name__, + entrypoint=entrypoint, + plugin_base_class=OutputFilterPlugin +) diff --git a/src/plugincode/post_scan.py b/src/plugincode/post_scan.py index 4f1aee9ce79..2281f759fb8 100644 --- a/src/plugincode/post_scan.py +++ b/src/plugincode/post_scan.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. 
@@ -25,45 +25,29 @@ from __future__ import absolute_import from __future__ import unicode_literals -from collections import OrderedDict -import sys +from plugincode import CodebasePlugin +from plugincode import PluginManager +from plugincode import HookimplMarker +from plugincode import HookspecMarker -from pluggy import HookimplMarker -from pluggy import HookspecMarker -from pluggy import PluginManager +stage = 'post_scan' +entrypoint = 'scancode_post_scan' - -post_scan_spec = HookspecMarker('post_scan') -post_scan_impl = HookimplMarker('post_scan') +post_scan_spec = HookspecMarker(project_name=stage) +post_scan_impl = HookimplMarker(project_name=stage) @post_scan_spec -def post_scan_handler(active_scans, results): +class PostScanPlugin(CodebasePlugin): """ - Process the scanned files and yield the modified results. - Parameters: - - `active_scans`: a list of scanners names requested in the current run. - - `results`: an iterable of scan results for each file or directory. + A post-scan plugin base class that all post-scan plugins must extend. """ pass -post_scan_plugins = PluginManager('post_scan') -post_scan_plugins.add_hookspecs(sys.modules[__name__]) - - -def initialize(): - """ - NOTE: this defines the entry points for use in setup.py - """ - post_scan_plugins.load_setuptools_entrypoints('scancode_post_scan') - - -def get_post_scan_plugins(): - """ - Return an ordered mapping of - "command line option name" --> "plugin callable" - for all the post_scan plugins. The mapping is sorted by option name. - This is the main API for other code to access post_scan plugins. 
- """ - return OrderedDict(sorted(post_scan_plugins.list_name_plugin())) +post_scan_plugins = PluginManager( + stage=stage, + module_qname=__name__, + entrypoint=entrypoint, + plugin_base_class=PostScanPlugin +) diff --git a/src/plugincode/pre_scan.py b/src/plugincode/pre_scan.py index c9ba789bdad..a44026c7135 100644 --- a/src/plugincode/pre_scan.py +++ b/src/plugincode/pre_scan.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -25,60 +25,83 @@ from __future__ import absolute_import from __future__ import unicode_literals -from collections import OrderedDict -import sys +from plugincode import CodebasePlugin +from plugincode import PluginManager +from plugincode import HookimplMarker +from plugincode import HookspecMarker -from pluggy import HookimplMarker -from pluggy import HookspecMarker -from pluggy import PluginManager +stage = 'pre_scan' +entrypoint = 'scancode_pre_scan' +pre_scan_spec = HookspecMarker(stage) +pre_scan_impl = HookimplMarker(stage) -pre_scan_spec = HookspecMarker('pre_scan') -pre_scan_impl = HookimplMarker('pre_scan') @pre_scan_spec -class PreScanPlugin(object): +class PreScanPlugin(CodebasePlugin): """ - A pre-scan plugin layout class to be extended by the pre_scan plugins. - Docstring of a plugin class will be used as the plugin option's help text + A pre-scan plugin base class that all pre-scan plugins must extend. """ - # attributes to be used while creating the option for this plugin. - option_attrs = {} + # List of scanner name strings that this plugin requires to run first + # before this pres-scan plugin runs. 
+ # Subclasses should set this as needed + requires = [] - def __init__(self, user_input): - self.user_input = user_input - - def process_resource(self, resource): - """ - Process a resource prior to scan. - :param resource: instance of Resource to process - :return: resource or None to ignore the resource + def get_required(self, scanner_plugins): """ - return resource + Return a list of required scanner plugin instances that are direct + requirements of self. - def get_ignores(self): - """ - Return a dict of ignores to be used when processing resources + `scanner_plugins` is a {name: plugin} mapping of enabled scanner + plugins. """ - return {} + required = [] + for name in self.requires: + required_plugin = scanner_plugins.get(name) -pre_scan_plugins = PluginManager('pre_scan') -pre_scan_plugins.add_hookspecs(sys.modules[__name__]) + if not required_plugin: + qname = self.qname + raise Exception( + 'Missing required scan plugin: %(name)r ' + 'for plugin: %(qname)r.' % locals()) + required.append(required_plugin) -def initialize(): - # NOTE: this defines the entry points for use in setup.py - pre_scan_plugins.load_setuptools_entrypoints('scancode_pre_scan') - for name, plugin in get_pre_scan_plugins().items(): - if not issubclass(plugin, PreScanPlugin): - raise Exception('Invalid pre-scan plugin "%(name)s": does not extend "plugincode.pre_scan.PreScanPlugin".' % locals()) + return unique(required) -def get_pre_scan_plugins(): + @classmethod + def get_all_required(self, prescan_plugins, scanner_plugins): + """ + Return a list of unique required scanner plugin instances that are direct + requirements of any of the `prescan_plugins` pre-scan plugin instances. + `prescan_plugins` is a list of enabled pre-scan plugins. + `scanner_plugins` is a {name: plugin} mapping of enabled scanner + plugins. 
+ """ + required = [] + for plugin in prescan_plugins: + required.extend(plugin.get_required(scanner_plugins)) + return unique(required) + + +def unique(iterable): """ - Return an ordered mapping of CLI option name --> plugin callable - for all the pre_scan plugins. The mapping is ordered by sorted key. - This is the main API for other code to access pre_scan plugins. + Return a sequence of unique items in `iterable` keeping their + original order. + Note: this can be very slow for large sequences as this is using lists. """ - return OrderedDict(sorted(pre_scan_plugins.list_name_plugin())) + uniques = [] + for item in iterable: + if item not in uniques: + uniques.append(item) + return uniques + + +pre_scan_plugins = PluginManager( + stage=stage, + module_qname=__name__, + entrypoint=entrypoint, + plugin_base_class=PreScanPlugin +) diff --git a/src/plugincode/scan.py b/src/plugincode/scan.py new file mode 100644 index 00000000000..77f12ac57e4 --- /dev/null +++ b/src/plugincode/scan.py @@ -0,0 +1,91 @@ +# +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. +# http://nexb.com and https://github.com/nexB/scancode-toolkit/ +# The ScanCode software is licensed under the Apache License version 2.0. +# Data generated with ScanCode require an acknowledgment. +# ScanCode is a trademark of nexB Inc. +# +# You may not use this software except in compliance with the License. +# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
from __future__ import absolute_import
from __future__ import unicode_literals

from plugincode import BasePlugin
from plugincode import PluginManager
from plugincode import HookimplMarker
from plugincode import HookspecMarker

stage = 'scan'
entrypoint = 'scancode_scan'

scan_spec = HookspecMarker(stage)
scan_impl = HookimplMarker(stage)


@scan_spec
class ScanPlugin(BasePlugin):
    """
    A scan plugin base class that all scan plugins must extend. A scan plugin
    provides a single `get_scanner()` method that returns a scanner function.
    The key under which scan results are returned for a scanner is the plugin
    "name" attribute. This attribute is set automatically as the "entrypoint"
    name used for this plugin.
    """

    def get_scanner(self, **kwargs):
        """
        Return a scanner callable, receiving all the scancode call arguments
        as kwargs.

        The returned callable MUST be a top-level module importable function
        (e.g. that is picklable and it can be possibly closed on arguments
        with functools.partial) and accept these arguments:

        - a first `location` argument that is always an absolute path string
          to a file. This string is using the filesystem encoding (e.g.
          bytes on Linux and Unicode elsewhere).

        - other **kwargs that will be all the scancode call arguments.

        The returned callable MUST RETURN an ordered mapping of key/values
        that must be serializable to JSON.

        All mapping keys must be strings, including for any nested mappings.

        Any value must be one of:
        - None, unicode or str, int, float, long.
          str if not unicode WILL be converted to unicode with UTF-8.
        - iterable/list/tuple/generator or dict/mapping, preferably ordered.
        - any object beyond these above that has an asdict() or to_dict()
          method that returns an ordered mapping of key/values of the same
          style as the top-level mapping defined here.

        This callable (typically a bare function) should carry as little
        state as possible as it may be executed through multiprocessing.

        Subclasses must override.
        """
        raise NotImplementedError


scan_plugins = PluginManager(
    stage=stage,
    module_qname=__name__,
    entrypoint=entrypoint,
    plugin_base_class=ScanPlugin,
)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from collections import namedtuple
from itertools import chain
from os.path import dirname
from os.path import abspath
from os.path import getsize
from os.path import getmtime
from os.path import join
from os.path import exists
from types import BooleanType

import click
from click.types import BoolParamType

from commoncode import fileutils

# Python 2 and 3 support
try:
    # Python 2
    unicode
    str_orig = str
    bytes = str  # NOQA
    str = unicode  # NOQA
except NameError:
    # Python 3
    unicode = str  # NOQA

# Tracing flag
TRACE = False


def logger_debug(*args):
    pass


if TRACE:
    import logging
    import sys
    logger = logging.getLogger(__name__)
    logging.basicConfig(stream=sys.stdout)
    logger.setLevel(logging.DEBUG)

    def logger_debug(*args):
        return logger.debug(' '.join(isinstance(a, (unicode, str))
                                     and a or repr(a) for a in args))

# CLI help groups
SCAN_GROUP = 'primary scans'
SCAN_OPTIONS_GROUP = 'scan options'
OTHER_SCAN_GROUP = 'other scans'
OUTPUT_GROUP = 'output formats'
OUTPUT_FILTER_GROUP = 'output filters'
OUTPUT_CONTROL_GROUP = 'output control'
PRE_SCAN_GROUP = 'pre-scan'
POST_SCAN_GROUP = 'post-scan'
MISC_GROUP = 'miscellaneous'
DOC_GROUP = 'documentation'
CORE_GROUP = 'core'

# Holds a scan plugin result "key" and the corresponding scanner function.
Scanner = namedtuple('Scanner', 'name function')


class CommandLineOption(click.Option):
    """
    An option with extra args and attributes to control CLI help options
    grouping, co-required and conflicting options (e.g. mutually exclusive).
    """

    # explicit args are from Click 6.7
    def __init__(self, param_decls=None, show_default=False,
                 prompt=False, confirmation_prompt=False,
                 hide_input=False, is_flag=None, flag_value=None,
                 multiple=False, count=False, allow_from_autoenv=True,
                 type=None, help=None,  # NOQA
                 # custom additions #
                 # a string that sets the CLI help group for this option
                 help_group=MISC_GROUP,
                 # a relative sort order number (integer or float) for this
                 # option within a help group: the sort is by increasing
                 # sort_order then by option declaration.
                 sort_order=100,
                 # a sequence of other option name strings that this option
                 # requires to be set
                 requires=(),
                 # a sequence of other option name strings that this option
                 # conflicts with if they are set
                 conflicts=(),
                 # a flag set to True if this option should be hidden from
                 # the CLI help
                 hidden=False,
                 **attrs):

        super(CommandLineOption, self).__init__(
            param_decls, show_default,
            prompt, confirmation_prompt,
            hide_input, is_flag, flag_value,
            multiple, count, allow_from_autoenv,
            type, help, **attrs)

        self.help_group = help_group
        self.sort_order = sort_order
        self.requires = requires
        self.conflicts = conflicts
        self.hidden = hidden

    def __repr__(self, *args, **kwargs):
        name = self.name
        opt = self.opts[-1]
        help_group = self.help_group
        requires = self.requires
        conflicts = self.conflicts

        # Interpolate the collected attributes: the previous bare
        # 'CommandLineOption' % locals() form had no placeholders and
        # never displayed them.
        return ('CommandLineOption<name=%(name)r, opt=%(opt)r, '
                'help_group=%(help_group)r, requires=%(requires)r, '
                'conflicts=%(conflicts)r>' % locals())

    def validate_dependencies(self, ctx, value):
        """
        Validate `value` against declared `requires` or `conflicts`
        dependencies.
        """
        _validate_option_dependencies(ctx, self, value, self.requires,
                                      required=True)
        _validate_option_dependencies(ctx, self, value, self.conflicts,
                                      required=False)


def validate_option_dependencies(ctx):
    """
    Validate all CommandLineOption dependencies in the `ctx` Click context.
    Ignore eager flags.
    """
    values = ctx.params
    if TRACE:
        logger_debug('validate_option_dependencies: values:')
        for va in sorted(values.items()):
            logger_debug('  ', va)

    for param in ctx.command.params:
        if param.is_eager:
            continue
        if not isinstance(param, CommandLineOption):
            if TRACE:
                logger_debug('  validate_option_dependencies: skip param:',
                             param)
            continue
        value = values.get(param.name)
        if TRACE:
            logger_debug('  validate_option_dependencies: param:', param,
                         'value:', value)
        param.validate_dependencies(ctx, value)
+ """ + if not other_option_names: + return + + def _is_set(_value, _default, typ): + if type in (BooleanType, BoolParamType): + return _value + return bool(_value is not None and _value != _default) + + is_set = _is_set(value, param.default, param.type) + + if TRACE: + logger_debug() + logger_debug('Checking param:', param) + logger_debug(' value:', value, 'is_set:' , is_set) + + if not is_set: + return + + oparams_by_name = {oparam.name: oparam for oparam in ctx.command.params} + oparams = [] + missing_onames = [] + + for oname in other_option_names: + oparam = oparams_by_name.get(oname) + if not oparam: + missing_onames.append(oparam) + else: + oparams.append(oparam) + + if TRACE: + logger_debug() + logger_debug(' Available other params:') + for oparam in oparams: + logger_debug(' other param:', oparam) + logger_debug(' value:', ctx.params.get(oparam.name)) + if required: + logger_debug(' missing names:', missing_onames) + + if required and missing_onames: + opt = param.opts[-1] + oopts = [oparam.opts[-1] for oparam in oparams] + omopts = ['--' + oname.replace('_', '-') for oname in missing_onames] + oopts.extend(omopts) + oopts = ', '.join(oopts) + msg = ('The option %(opt)s requires the option(s) %(all_opts)s.' + 'and is missing %(omopts)s. ' + 'You must set all of these options if you use this option.' 
% locals()) + raise click.UsageError(msg) + + if TRACE: + logger_debug() + logger_debug(' Checking other params:') + + opt = param.opts[-1] + + for oparam in oparams: + ovalue = ctx.params.get(oparam.name) + ois_set = _is_set(ovalue, oparam.default, oparam.type) + + if TRACE: + logger_debug(' Checking oparam:', oparam) + logger_debug(' value:', ovalue, 'ois_set:' , ois_set) + + # by convention the last opt is the long form + oopt = oparam.opts[-1] + oopts = ', '.join(oparam.opts[-1] for oparam in oparams) + all_opts = '%(opt)s and %(oopts)s' % locals() + if required and not ois_set: + msg = ('The option %(opt)s requires the option(s) %(oopts)s ' + 'and is missing %(oopt)s. ' + 'You must set all of these options if you use this option.' % locals()) + raise click.UsageError(msg) + + if not required and ois_set: + msg = ('The option %(opt)s cannot be used together with the %(oopts)s option(s) ' + 'and %(oopt)s is used. ' + 'You can set only one of these options at a time.' % locals()) + raise click.UsageError(msg) + + +class FileOptionType(click.File): + """ + A click.File subclass that ensures that a file name is not set to an + existing option parameter to avoid mistakes. + """ + + def convert(self, value, param, ctx): + known_opts = set(chain.from_iterable(p.opts for p in ctx.command.params + if isinstance(p, click.Option))) + if value in known_opts: + self.fail('Illegal file name conflicting with an option name: %s. ' + 'Use the special "-" file name to print results on screen/stdout.' + % (click.types.filename_to_ui(value), + ), param, ctx) + return click.File.convert(self, value, param, ctx) + + +info_text = ''' +ScanCode scans code and other files for origin and license. +Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
info_text = '''
ScanCode scans code and other files for origin and license.
Visit https://github.com/nexB/scancode-toolkit/ for support and download.

'''

notice_path = join(abspath(dirname(__file__)), 'NOTICE')
# Read the NOTICE file once at import time; use a context manager so the
# file handle is not leaked.
with open(notice_path) as notice_file:
    notice_text = notice_file.read()

# NOTE(review): the exact whitespace in these delimiter/replace literals may
# have been mangled in transit; verify against the NOTICE file layout.
delimiter = '\n\n\n'
[notice_text, extra_notice_text] = notice_text.split(delimiter, 1)
extra_notice_text = delimiter + extra_notice_text

delimiter = '\n\n '
[notice_text, acknowledgment_text] = notice_text.split(delimiter, 1)
acknowledgment_text = delimiter + acknowledgment_text

notice = acknowledgment_text.strip().replace(' ', '')


def print_about(ctx, param, value):
    """
    Click callback to print the full notice text and exit.
    """
    if not value or ctx.resilient_parsing:
        return
    click.echo(info_text + notice_text + acknowledgment_text
               + extra_notice_text)
    ctx.exit()
-from __future__ import print_function from __future__ import absolute_import +from __future__ import division +from __future__ import print_function from __future__ import unicode_literals from collections import OrderedDict +from os.path import getsize -from commoncode.fileutils import as_posixpath -from commoncode.fileutils import path_to_bytes -from commoncode.fileutils import path_to_unicode -from commoncode.system import on_linux -from scancode.utils import get_relative_path - +from commoncode.filetype import get_last_modified_date +from commoncode.hash import multi_checksums +from typecode.contenttype import get_type """ Main scanning functions. -Note: this API is unstable and still evolving. -""" - -class Resource(object): - """ - Store scanned details for a single resource (file or a directory) - such as infos and path - """ - - def __init__(self, scan_cache_class, abs_path, base_is_dir, len_base_path): - self.scan_cache_class = scan_cache_class() - self.is_cached = False - self.abs_path = abs_path - self.base_is_dir = base_is_dir - posix_path = as_posixpath(abs_path) - # fix paths: keep the path as relative to the original - # base_path. This is always Unicode - self.rel_path = get_relative_path(posix_path, len_base_path, base_is_dir) - self.infos = OrderedDict() - self.infos['path'] = self.rel_path - - def put_info(self, infos): - """ - Cache file info and set `is_cached` to True if already cached or false otherwise. - """ - self.infos.update(infos) - self.is_cached = self.scan_cache_class.put_info(self.rel_path, self.infos) - - def get_info(self): - """ - Retrieve info from cache. - """ - return self.scan_cache_class.get_info(self.rel_path) +Each scanner is a function that accepts a location and returns a sequence of +mappings as results. -def extract_archives(location, recurse=True): - """ - Extract any archives found at `location` and yield ExtractEvents. If - `recurse` is True, extracts nested archives-in- archives - recursively. 
- """ - from extractcode.extract import extract - from extractcode import default_kinds - for xevent in extract(location, kinds=default_kinds, recurse=recurse): - yield xevent +Note: this API is unstable and still evolving. +""" -def get_copyrights(location): +def get_copyrights(location, **kwargs): """ - Yield mappings of copyright data detected in the file at `location`. + Return a mapping with a single 'copyrights' key with a value that is a list + of mappings for copyright detected in the file at `location`. """ from cluecode.copyrights import detect_copyrights - + results = [] for copyrights, authors, _years, holders, start_line, end_line in detect_copyrights(location): result = OrderedDict() + results.append(result) # FIXME: we should call this copyright instead, and yield one item per statement result['statements'] = copyrights result['holders'] = holders result['authors'] = authors result['start_line'] = start_line result['end_line'] = end_line - yield result + return dict(copyrights=results) -def get_emails(location): +def get_emails(location, **kwargs): """ - Yield mappings of emails detected in the file at `location`. + Return a mapping with a single 'emails' key with a value that is a list of + mappings for emails detected in the file at `location`. """ from cluecode.finder import find_emails + results = [] for email, line_num in find_emails(location): if not email: continue - misc = OrderedDict() - misc['email'] = email - misc['start_line'] = line_num - misc['end_line'] = line_num - yield misc + result = OrderedDict() + results.append(result) + result['email'] = email + result['start_line'] = line_num + result['end_line'] = line_num + return dict(emails=results) -def get_urls(location): +def get_urls(location, **kwargs): """ - Yield mappings of urls detected in the file at `location`. + Return a mapping with a single 'urls' key with a value that is a list of + mappings for urls detected in the file at `location`. 
""" from cluecode.finder import find_urls + results = [] for urls, line_num in find_urls(location): if not urls: continue - misc = OrderedDict() - misc['url'] = urls - misc['start_line'] = line_num - misc['end_line'] = line_num - yield misc + result = OrderedDict() + results.append(result) + result['url'] = urls + result['start_line'] = line_num + result['end_line'] = line_num + return dict(urls=results) DEJACODE_LICENSE_URL = 'https://enterprise.dejacode.com/urn/urn:dje:license:{}' SPDX_LICENSE_URL = 'https://spdx.org/licenses/{}' -def get_licenses(location, min_score=0, include_text=False, diag=False, license_url_template=DEJACODE_LICENSE_URL): +def get_licenses(location, min_score=0, include_text=False, diag=False, + license_url_template=DEJACODE_LICENSE_URL, + cache_dir=None, + **kwargs): """ - Yield mappings of license data detected in the file at `location`. + Return a mapping with a single 'licenses' key with a value that is list of + mappings for licenses detected in the file at `location`. - `minimum_score` is a minimum score threshold from 0 to 100. The - default is 0 means that all license matches will be returned. With - any other value matches that have a score below minimum score with - not be returned. + `minimum_score` is a minimum score threshold from 0 to 100. The default is 0 + means that all license matches are returned. Otherwise, matches with a score + below `minimum_score` are returned. - if `include_text` is True, the matched text is included in the - returned data. + if `include_text` is True, matched text is included in the returned data. - If `diag` is True, additional match details are returned with the + If `diag` is True, additional license match details are returned with the matched_rule key of the returned mapping. 
""" + from scancode_config import SCANCODE_DEV_MODE + if not cache_dir: + from scancode_config import scancode_cache_dir as cache_dir + from licensedcode.cache import get_index from licensedcode.cache import get_licenses_db - idx = get_index() + idx = get_index(cache_dir, SCANCODE_DEV_MODE) licenses = get_licenses_db() + results = [] for match in idx.match(location=location, min_score=min_score): if include_text: matched_text = match.matched_text(whole_lines=False) + for license_key in match.rule.licenses: lic = licenses.get(license_key) result = OrderedDict() + results.append(result) result['key'] = lic.key result['score'] = match.score() result['short_name'] = lic.short_name @@ -194,97 +171,58 @@ def get_licenses(location, min_score=0, include_text=False, diag=False, license_ # FIXME: for sanity this should always be included????? if include_text: result['matched_text'] = matched_text - yield result + return dict(licenses=results) -def get_file_infos(location): + +def get_package_info(location, **kwargs): """ - Return a mapping of file information collected from the file or - directory at `location`. + mappings for package information detected in the file at `location`. 
""" - from commoncode import fileutils - from commoncode import filetype - from commoncode.hash import multi_checksums - from typecode import contenttype - - if on_linux: - location = path_to_bytes(location) - else: - location = path_to_unicode(location) - - infos = OrderedDict() - is_file = filetype.is_file(location) - is_dir = filetype.is_dir(location) - - T = contenttype.get_type(location) - - infos['type'] = filetype.get_type(location, short=False) - name = fileutils.file_name(location) - if is_file: - base_name, extension = fileutils.splitext(location) - else: - base_name = name - extension = '' - - if on_linux: - infos['name'] = path_to_unicode(name) - infos['base_name'] = path_to_unicode(base_name) - infos['extension'] = path_to_unicode(extension) - else: - infos['name'] = name - infos['base_name'] = base_name - infos['extension'] = extension - - infos['date'] = is_file and filetype.get_last_modified_date(location) or None - infos['size'] = T.size - infos.update(multi_checksums(location, ('sha1', 'md5',))) - infos['files_count'] = is_dir and filetype.get_file_count(location) or None - infos['mime_type'] = is_file and T.mimetype_file or None - infos['file_type'] = is_file and T.filetype_file or None - infos['programming_language'] = is_file and T.programming_language or None - infos['is_binary'] = bool(is_file and T.is_binary) - infos['is_text'] = bool(is_file and T.is_text) - infos['is_archive'] = bool(is_file and T.is_archive) - infos['is_media'] = bool(is_file and T.is_media) - infos['is_source'] = bool(is_file and T.is_source) - infos['is_script'] = bool(is_file and T.is_script) - - return infos - - -# FIXME: this smells bad -def _empty_file_infos(): + from packagedcode.recognize import recognize_package + package = recognize_package(location) + if package: + return dict(packages=[package.to_dict()]) + return dict(packages=[]) + + +def get_file_info(location, **kwargs): """ - Return an empty mapping of file info, used in case of failure. 
+ Return a mappings of file information collected for the file at `location`. """ - infos = OrderedDict() - infos['type'] = None - infos['name'] = None - infos['extension'] = None - infos['date'] = None - infos['size'] = None - infos['sha1'] = None - infos['md5'] = None - infos['files_count'] = None - infos['mime_type'] = None - infos['file_type'] = None - infos['programming_language'] = None - infos['is_binary'] = False - infos['is_text'] = False - infos['is_archive'] = False - infos['is_media'] = False - infos['is_source'] = False - infos['is_script'] = False - return infos - - -def get_package_infos(location): + result = OrderedDict() + + # TODO: move date and size these to the inventory collection step??? + result['date'] = get_last_modified_date(location) or None + result['size'] = getsize(location) or 0 + + sha1, md5 = multi_checksums(location, ('sha1', 'md5',)).values() + result['sha1'] = sha1 + result['md5'] = md5 + + collector = get_type(location) + result['mime_type'] = collector.mimetype_file or None + result['file_type'] = collector.filetype_file or None + result['programming_language'] = collector.programming_language or None + result['is_binary'] = bool(collector.is_binary) + result['is_text'] = bool(collector.is_text) + result['is_archive'] = bool(collector.is_archive) + result['is_media'] = bool(collector.is_media) + result['is_source'] = bool(collector.is_source) + result['is_script'] = bool(collector.is_script) + return result + + +def extract_archives(location, recurse=True): """ - Return a list of mappings of package information collected from the - `location` or an empty list. + Yield ExtractEvent while extracting archive(s) and compressed files at + `location`. If `recurse` is True, extract nested archives-in-archives + recursively. + Archives and compressed files are extracted in a directory named + "-extract" created in the same directory as the archive. + Note: this API is returning an iterable and NOT a sequence. 
""" - from packagedcode.recognize import recognize_package - package = recognize_package(location) - if not package: - return [] - return [package.to_dict()] + from extractcode.extract import extract + from extractcode import default_kinds + for xevent in extract(location, kinds=default_kinds, recurse=recurse): + yield xevent diff --git a/src/scancode/cache.py b/src/scancode/cache.py deleted file mode 100644 index 1621dc42798..00000000000 --- a/src/scancode/cache.py +++ /dev/null @@ -1,374 +0,0 @@ -# -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. -# http://nexb.com and https://github.com/nexB/scancode-toolkit/ -# The ScanCode software is licensed under the Apache License version 2.0. -# Data generated with ScanCode require an acknowledgment. -# ScanCode is a trademark of nexB Inc. -# -# You may not use this software except in compliance with the License. -# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -# -# When you publish or redistribute any data created with ScanCode or any ScanCode -# derivative work, you must accompany this data with the following acknowledgment: -# -# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES -# OR CONDITIONS OF ANY KIND, either express or implied. No content created from -# ScanCode should be considered or used as legal advice. Consult an Attorney -# for any legal advice. -# ScanCode is a free software code scanning tool from nexB Inc. and others. -# Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
- -from __future__ import absolute_import -from __future__ import print_function -from __future__ import unicode_literals - -import codecs -from collections import OrderedDict -from functools import partial -import json -from hashlib import sha1 -import os -import posixpath -import sys - -from commoncode import fileutils -from commoncode.fileutils import as_posixpath -from commoncode.fileutils import path_to_bytes -from commoncode.fileutils import path_to_unicode -from commoncode.system import on_linux -from commoncode import timeutils - -from scancode import scans_cache_dir - - -""" -Cache scan results for a file or directory disk using a file-based cache. - -The approach is to cache the scan of a file using these files: - - one "global" file contains a log of all the paths scanned. - - for each file being scanned, we store a file that contains the corresponding file - info data as JSON. This file is named after the hash of the path of a scanned file. - - for each unique file being scanned (e.g. based on its content SHA1), we store a - another JSON file that contains the corresponding scan data. This file is named - after the hash of the scanned file content. - -Once a scan is completed, we iterate the cache to output the final scan results: -First iterate the global log file to get the paths, from there collect the cached -file info for that file and from the path and file info collect the cached scanned -result. This iterator is then streamed to the final JSON output. - -Finally once a scan is completed the cache is destroyed to free up disk space. - -Internally the cache is organized as a tree of directories named after the first few -characters or a path hash or file hash. This is to avoid having having too many files -per directory that can make some filesystems choke as well as having directories that -are too deep or having file paths that are too long which problematic on some OS. 
-""" - -# Tracing flags -TRACE = False - -def logger_debug(*args): - pass - -if TRACE: - import logging - - logger = logging.getLogger(__name__) - # logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) - logging.basicConfig(stream=sys.stdout) - logger.setLevel(logging.DEBUG) - - def logger_debug(*args): - return logger.debug(' '.join(isinstance(a, unicode) and a or repr(a) for a in args)) - - -def get_scans_cache_class(cache_dir=scans_cache_dir): - """ - Return a new persistent cache class configured with a unique storage directory. - """ - # create a unique temp directory in cache_dir - fileutils.create_dir(cache_dir) - prefix = timeutils.time2tstamp() + u'-' - cache_dir = fileutils.get_temp_dir(cache_dir, prefix=prefix) - if on_linux: - cache_dir = path_to_bytes(cache_dir) - sc = ScanFileCache(cache_dir) - sc.setup() - return partial(ScanFileCache, cache_dir) - - -def info_keys(path, seed=None): - """ - Return a file info cache "keys" tripple for a path composed of three - paths segments derived from a checksum. - - For example: - >>> expected = 'fb87db2bb28e9501ac7fdc4812782118f4c94a0f' - >>> assert expected == sha1('/w421/scancode-toolkit2').hexdigest() - >>> expected = ('f', 'b', '87db2bb28e9501ac7fdc4812782118f4c94a0f') - >>> assert expected == info_keys('/w421/scancode-toolkit2') - """ - # ensure that we always pass bytes to the hash function - if isinstance(path, unicode): - path = path_to_bytes(path) - if seed: - if isinstance(seed, unicode): - seed = path_to_bytes(seed) - path = seed + path - return keys_from_hash(sha1(path).hexdigest()) - - -def scan_keys(path, file_info): - """ - Return a scan cache keys tripple for a path and file_info. If the file_info - sha1 is empty (e.g. such as a directory), return a key based on the path instead. 
- """ - # we "get" because in some off cases getting file info may have failed - # or there may be none for a directory - sha1_digest = file_info.get('sha1') - if sha1_digest: - return keys_from_hash(sha1_digest) - else: - # we may eventually store directories, in which case we use the - # path as a key with some extra seed - return info_keys(path, seed=b'empty hash') - - -def keys_from_hash(hexdigest): - """ - Return a cache keys triple for a hash hexdigest string. - - NOTE: since we use the first character and next two characters as directories, we - create at most 16 dir at the first level and 16 dir at the second level for each - first level directory for a maximum total of 16*16 = 256 directories. For a - million files we would have about 4000 files per directory on average with this - scheme which should keep most file systems happy and avoid some performance - issues when there are too many files in a single directory. - - For example: - >>> expected = ('f', 'b', '87db2bb28e9501ac7fdc4812782118f4c94a0f') - >>> assert expected == keys_from_hash('fb87db2bb28e9501ac7fdc4812782118f4c94a0f') - """ - if on_linux: - hexdigest = bytes(hexdigest) - return hexdigest[0], hexdigest[1], hexdigest[2:] - - -def paths_from_keys(base_path, keys): - """ - Return a tuple of (parent dir path, filename) for a cache entry built from a cache - keys triple and a base_directory. Ensure that the parent directory exist. - """ - if on_linux: - keys = [path_to_bytes(k) for k in keys] - base_path = path_to_bytes(base_path) - else: - keys = [path_to_unicode(k) for k in keys] - base_path = path_to_unicode(base_path) - - dir1, dir2, file_name = keys - parent = os.path.join(base_path, dir1, dir2) - fileutils.create_dir(parent) - return parent, file_name - - -class ScanFileCache(object): - """ - A file-based cache for scan results saving results in files and using no locking. 
- This is NOT thread-safe and NOT multi-process safe but works OK in our context: - we cache the scan for a given file once and read it only a few times. - """ - def __init__(self, cache_dir): - # subdirs for info and scans_dir caches - if on_linux: - infos_dir = b'infos_dir/' - scans_dir = b'scans_dir/' - files_log = b'files_log' - self.cache_base_dir = path_to_bytes(cache_dir) - - else: - infos_dir = u'infos_dir/' - scans_dir = u'scans_dir/' - files_log = u'files_log' - self.cache_base_dir = cache_dir - - self.cache_infos_dir = as_posixpath(os.path.join(self.cache_base_dir, infos_dir)) - self.cache_scans_dir = as_posixpath(os.path.join(self.cache_base_dir, scans_dir)) - self.cache_files_log = as_posixpath(os.path.join(self.cache_base_dir, files_log)) - - def setup(self): - """ - Setup the cache: must be called at least once globally after cache - initialization. - """ - fileutils.create_dir(self.cache_infos_dir) - fileutils.create_dir(self.cache_scans_dir) - - @classmethod - def log_file_path(cls, logfile_fd, path): - """ - Log file path in the cache logfile_fd **opened** file descriptor. - """ - # we dump one path per line written as bytes or unicode - if on_linux: - path = path_to_bytes(path) + b'\n' - else: - path = path_to_unicode(path) + '\n' - logfile_fd.write(path) - - def get_cached_info_path(self, path): - """ - Return the path where to store a file info in the cache given a path. - """ - keys = info_keys(path) - paths = paths_from_keys(self.cache_infos_dir, keys) - return posixpath.join(*paths) - - def put_info(self, path, file_info): - """ - Put file_info for path in the cache and return True if the file referenced - in file_info has already been scanned or False otherwise. 
- """ - info_path = self.get_cached_info_path(path) - with codecs.open(info_path, 'wb', encoding='utf-8') as cached_infos: - json.dump(file_info, cached_infos, check_circular=False) - scan_path = self.get_cached_scan_path(path, file_info) - is_scan_cached = os.path.exists(scan_path) - if TRACE: - logger_debug('put_infos:', 'path:', path, 'is_scan_cached:', is_scan_cached, 'file_info:', file_info, '\n') - return is_scan_cached - - def get_info(self, path): - """ - Return file info from the cache for a path. - Return None on failure to find the info in the cache. - """ - info_path = self.get_cached_info_path(path) - if os.path.exists(info_path): - with codecs.open(info_path, 'r', encoding='utf-8') as ci: - return json.load(ci, object_pairs_hook=OrderedDict) - - def get_cached_scan_path(self, path, file_info): - """ - Return the path where to store a scan in the cache given a path and file_info. - """ - keys = scan_keys(path, file_info) - paths = paths_from_keys(self.cache_scans_dir, keys) - return posixpath.join(*paths) - - def put_scan(self, path, file_info, scan_result): - """ - Put scan_result in the cache if not already cached. - """ - scan_path = self.get_cached_scan_path(path, file_info) - if not os.path.exists(scan_path): - with codecs.open(scan_path, 'wb', encoding='utf-8') as cached_scan: - json.dump(scan_result, cached_scan, check_circular=False) - if TRACE: - logger_debug('put_scan:', 'scan_path:', scan_path, 'file_info:', file_info, 'scan_result:', scan_result, '\n') - - def get_scan(self, path, file_info): - """ - Return scan results from the cache for a path and file_info. - Return None on failure to find the scan results in the cache. 
- """ - scan_path = self.get_cached_scan_path(path, file_info) - if os.path.exists(scan_path): - with codecs.open(scan_path, 'r', encoding='utf-8') as cached_scan: - return json.load(cached_scan, object_pairs_hook=OrderedDict) - - def iterate(self, scan_names, root_dir=None, paths_subset=tuple()): - """ - Yield scan data for all cached scans e.g. the whole cache given - a list of scan names. - If a `paths_subset` sequence of paths is provided, then only - these paths are iterated. - - The logfile MUST have been closed before calling this method. - """ - if on_linux: - paths_subset = set(path_to_bytes(p) for p in paths_subset) - else: - paths_subset = set(path_to_unicode(p) for p in paths_subset) - - if on_linux: - log_opener = partial(open, self.cache_files_log, 'rb') - else: - log_opener = partial(codecs.open, self.cache_files_log, 'rb', encoding='utf-8') - EOL = b'\n' if on_linux else '\n' - - with log_opener() as cached_files: - # iterate paths, one by line - for file_log in cached_files: - # must be unicode - path = file_log.rstrip(EOL) - if paths_subset and path not in paths_subset: - continue - file_info = self.get_info(path) - - if on_linux: - unicode_path = path_to_unicode(path) - else: - unicode_path = path - - if root_dir: - # must be unicode - if on_linux: - root_dir = path_to_unicode(root_dir) - rooted_path = posixpath.join(root_dir, unicode_path) - else: - rooted_path = unicode_path - rooted_path = fileutils.as_posixpath(rooted_path) - logger_debug('iterate:', 'rooted_path:', rooted_path) - - # rare but possible corner case - if file_info is None: - no_info = ('ERROR: file info unavailable in cache: ' - 'This is either a bug or processing was aborted with CTRL-C.') - scan_result = OrderedDict(path=rooted_path) - scan_result['scan_errors'] = [no_info] - if TRACE: - logger_debug('iterate:', 'scan_result:', scan_result, 'for path:', rooted_path, '\n') - yield scan_result - continue - - _unicode_path_from_file_info = file_info.pop('path') - scan_result = 
OrderedDict(path=rooted_path) - - if 'infos' in scan_names: - # info are always collected but only returned if requested - # we flatten these as direct attributes of a file object - scan_result.update(file_info.items()) - - if not scan_result.get('scan_errors'): - scan_result['scan_errors'] = [] - - # check if we have more than just infos - if ['infos'] != scan_names: - errors = scan_result['scan_errors'] - scan_details = self.get_scan(path, file_info) - if scan_details is None: - no_scan_details = ( - 'ERROR: scan details unavailable in cache: ' - 'This is either a bug or processing was aborted with CTRL-C.') - errors.append(no_scan_details) - else: - # append errors to other top level errors if any - scan_errors = scan_details.pop('scan_errors', []) - errors.extend(scan_errors) - scan_result.update(scan_details) - - if TRACE: - logger_debug('iterate:', 'scan_result:', scan_result, 'for path:', rooted_path, '\n') - yield scan_result - - def clear(self, *args): - """ - Purge the cache by deleting the corresponding cached data files. - """ - fileutils.delete(self.cache_base_dir) diff --git a/src/scancode/cli.py b/src/scancode/cli.py index 304bf2f7a16..161f15f92cd 100644 --- a/src/scancode/cli.py +++ b/src/scancode/cli.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -23,181 +23,164 @@ # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
from __future__ import absolute_import -from __future__ import print_function from __future__ import division +from __future__ import print_function from __future__ import unicode_literals -# Import early because this import has monkey-patching side effects +# Import first because this import has monkey-patching side effects from scancode.pool import get_pool -import codecs from collections import OrderedDict from functools import partial from itertools import imap -import os -from os.path import expanduser -from os.path import abspath import sys from time import time import traceback -from types import GeneratorType +import attr import click click.disable_unicode_literals_warning = True -from click.termui import style - -from commoncode import filetype -from commoncode import fileutils -from commoncode.fileutils import path_to_bytes -from commoncode.fileutils import path_to_unicode -from commoncode import ignore -from commoncode.system import on_linux -from commoncode.text import toascii - -import plugincode.output -import plugincode.post_scan -import plugincode.pre_scan - -from scancode import __version__ as version - -from scancode.api import DEJACODE_LICENSE_URL -from scancode.api import _empty_file_infos -from scancode.api import get_copyrights -from scancode.api import get_emails -from scancode.api import get_file_infos -from scancode.api import get_licenses -from scancode.api import get_package_infos -from scancode.api import get_urls -from scancode.api import Resource - -from scancode.cache import get_scans_cache_class -from scancode.cache import ScanFileCache +# import early +from scancode_config import __version__ as scancode_version +from scancode_config import scancode_cache_dir +from scancode_config import scancode_temp_dir + +from commoncode.fileutils import PATH_TYPE +from commoncode.timeutils import time2tstamp + +from plugincode import CommandLineOption +from plugincode import PluginManager + +# these are important to register plugin managers +from 
plugincode import pre_scan +from plugincode import scan +from plugincode import post_scan +from plugincode import output_filter +from plugincode import output + +from scancode import CORE_GROUP +from scancode import DOC_GROUP +from scancode import MISC_GROUP +from scancode import OTHER_SCAN_GROUP +from scancode import OUTPUT_GROUP +from scancode import OUTPUT_FILTER_GROUP +from scancode import OUTPUT_CONTROL_GROUP +from scancode import POST_SCAN_GROUP +from scancode import PRE_SCAN_GROUP +from scancode import SCAN_GROUP +from scancode import SCAN_OPTIONS_GROUP +from scancode import notice +from scancode import print_about +from scancode import Scanner +from scancode import validate_option_dependencies from scancode.interrupt import DEFAULT_TIMEOUT from scancode.interrupt import fake_interruptible from scancode.interrupt import interruptible -from scancode.interrupt import TimeoutError - +from scancode.resource import Codebase +from scancode.resource import Resource from scancode.utils import BaseCommand -from scancode.utils import compute_fn_max_len -from scancode.utils import fixed_width_file_name +from scancode.utils import path_progress_message from scancode.utils import progressmanager - -echo_stderr = partial(click.secho, err=True) - - # Python 2 and 3 support try: # Python 2 unicode str_orig = str - bytes = str - str = unicode + bytes = str # NOQA + str = unicode # NOQA except NameError: # Python 3 - unicode = str - - -# this will init the plugins -plugincode.pre_scan.initialize() -plugincode.output.initialize() -plugincode.post_scan.initialize() - - -info_text = ''' -ScanCode scans code and other files for origin and license. -Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
- -''' + unicode = str # NOQA -notice_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'NOTICE') -notice_text = open(notice_path).read() +# Tracing flags +TRACE = False +TRACE_DEEP = False -delimiter = '\n\n\n' -[notice_text, extra_notice_text] = notice_text.split(delimiter, 1) -extra_notice_text = delimiter + extra_notice_text -delimiter = '\n\n ' -[notice_text, acknowledgment_text] = notice_text.split(delimiter, 1) -acknowledgment_text = delimiter + acknowledgment_text +def logger_debug(*args): + pass -notice = acknowledgment_text.strip().replace(' ', '') -# CLI help groups -SCANS = 'scans' -OUTPUT = 'output' -PRE_SCAN = 'pre-scan' -POST_SCAN = 'post-scan' -MISC = 'misc' -CORE = 'core' +if TRACE or TRACE_DEEP: + import logging + logger = logging.getLogger(__name__) + logging.basicConfig(stream=sys.stdout) + logger.setLevel(logging.DEBUG) -def print_about(ctx, param, value): - if not value or ctx.resilient_parsing: - return - click.echo(info_text + notice_text + acknowledgment_text + extra_notice_text) - ctx.exit() + def logger_debug(*args): + return logger.debug(' '.join(isinstance(a, unicode) + and a or repr(a) for a in args)) +echo_stderr = partial(click.secho, err=True) +# FIXME: this should be pushed out in some external help or pushed down in plugins. +# FIXME: the glob story is very weird!!! examples_text = ''' Scancode command lines examples: (Note for Windows: use '\\' back slash instead of '/' forward slash for paths.) -Scan the 'samples' directory for licenses and copyrights. Save scan results to -an HTML app file for interactive scan results navigation. When the scan is done, -open 'scancode_result.html' in your web browser. Note that additional app files -are saved in a directory named 'scancode_result_files': - - scancode --format html-app samples/ scancode_result.html - -Scan a directory for licenses and copyrights. 
Save scan results to an -HTML file: - - scancode --format html samples/zlib scancode_result.html - Scan a single file for copyrights. Print scan results to stdout as JSON: - scancode --copyright samples/zlib/zlib.h + scancode --copyright samples/zlib/zlib.h --json Scan a single file for licenses, print verbose progress to stderr as each file is scanned. Save scan to a JSON file: - scancode --license --verbose samples/zlib/zlib.h licenses.json + scancode --license --verbose samples/zlib/zlib.h --json licenses.json Scan a directory explicitly for licenses and copyrights. Redirect JSON scan results to a file: - scancode -f json -l -c samples/zlib/ > scan.json + scancode --json -l -c samples/zlib/ > scan.json -Scan a directory while ignoring a single file. Print scan results to stdout as JSON: +Scan a directory while ignoring a single file. +Print scan results to stdout as JSON: - scancode --ignore README samples/ + scancode --json --ignore README samples/ -Scan a directory while ignoring all files with txt extension. Print scan results to -stdout as JSON (It is recommended to use quoted glob patterns to prevent pattern -expansion by the shell): +Scan a directory while ignoring all files with .txt extension. +Print scan results to stdout as JSON. +It is recommended to use quotes around glob patterns to prevent pattern +expansion by the shell: - scancode --ignore "*.txt" samples/ + scancode --json --ignore "*.txt" samples/ Special characters supported in GLOB pattern: -* matches everything -? matches any single character -[seq] matches any character in seq -[!seq] matches any character not in seq +- * matches everything +- ? matches any single character +- [seq] matches any character in seq +- [!seq] matches any character not in seq + +For a literal match, wrap the meta-characters in brackets. +For example, '[?]' matches the character '?'. +For details on GLOB patterns see https://en.wikipedia.org/wiki/Glob_(programming). 
+ +Note: Glob patterns cannot be applied to path as strings. +For example, this will not ignore "samples/JGroups/licenses". + + scancode --json --ignore "samples*licenses" samples/ -For a literal match, wrap the meta-characters in brackets. For example, '[?]' matches the character '?'. -For glob see https://en.wikipedia.org/wiki/Glob_(programming). -Note: Glob patterns cannot be applied to path as strings, for e.g. - scancode --ignore "samples*licenses" samples/ -will not ignore "samples/JGroups/licenses". +Scan a directory while ignoring multiple files (or glob patterns). +Print the scan results to stdout as JSON: + + scancode --json --ignore README --ignore "*.txt" samples/ + +Scan the 'samples' directory for licenses and copyrights. Save scan results to +an HTML app file for interactive scan results navigation. When the scan is done, +open 'scancode_result.html' in your web browser. Note that additional app files +are saved in a directory named 'scancode_result_files': + + scancode --output-html-app scancode_result.html samples/ -Scan a directory while ignoring multiple files (or glob patterns). Print the scan -results to stdout as JSON: +Scan a directory for licenses and copyrights. Save scan results to an +HTML file: - scancode --ignore README --ignore "*.txt" samples/ + scancode --output-html scancode_result.html samples/zlib To extract archives, see the 'extractcode' command instead. 
''' @@ -213,728 +196,1128 @@ def print_examples(ctx, param, value): def print_version(ctx, param, value): if not value or ctx.resilient_parsing: return - click.echo('ScanCode version ' + version) - ctx.exit() - - -def reindex_licenses(ctx, param, value): - if not value or ctx.resilient_parsing: - return - from licensedcode import cache - click.echo('Checking and rebuilding the license index...') - cache.reindex() - click.echo('Done.') + click.echo('ScanCode version ' + scancode_version) ctx.exit() +# FIXME: this should be pushed out in some external help or pushed down in plugins. epilog_text = '''Examples (use --examples for more): \b Scan the 'samples' directory for licenses and copyrights. -Save scan results to a JSON file: +Save scan results to the 'scancode_result.json' JSON file: - scancode --format json samples scancode_result.json + scancode --license --copyright --json=scancode_result.json samples \b -Scan the 'samples' directory for licenses and copyrights. Save scan results to -an HTML app file for interactive web browser results navigation. Additional app -files are saved to the 'myscan_files' directory: +Scan the 'samples' directory for licenses and package manifests. Print scan +results on screen as pretty-formatted JSON (using the special '-' FILE to print +to on screen/to stdout): - scancode --format html-app samples myscan.html + scancode --json-pp - --license --package samples Note: when you run scancode, a progress bar is displayed with a counter of the number of files processed. Use --verbose to display file-by-file progress. ''' + class ScanCommand(BaseCommand): + """ + A command class that is aware of ScanCode options that provides enhanced + help where each option is grouped by group. 
+ """ + short_usage_help = ''' Try 'scancode --help' for help on options and arguments.''' - def __init__(self, name, context_settings=None, callback=None, - params=None, help=None, epilog=None, short_help=None, - options_metavar='[OPTIONS]', add_help_option=True): + def __init__(self, name, context_settings=None, callback=None, params=None, + help=None, # NOQA + epilog=None, short_help=None, + options_metavar='[OPTIONS]', add_help_option=True, + plugin_options=()): + """ + Create a new ScanCommand using the `plugin_options` list of + CommandLineOption instances. + """ + super(ScanCommand, self).__init__(name, context_settings, callback, - params, help, epilog, short_help, options_metavar, add_help_option) - - for name, callback in plugincode.post_scan.get_post_scan_plugins().items(): - # normalize white spaces in help. - help_text = ' '.join(callback.__doc__.split()) - option = ScanOption(('--' + name,), is_flag=True, help=help_text, group=POST_SCAN) - self.params.append(option) - for name, plugin in plugincode.pre_scan.get_pre_scan_plugins().items(): - attrs = plugin.option_attrs - attrs['default'] = None - attrs['group'] = PRE_SCAN - attrs['help'] = ' '.join(plugin.__doc__.split()) - option = ScanOption(('--' + name,), **attrs) - self.params.append(option) + params, help, epilog, short_help, options_metavar, add_help_option) + + # this makes the options "known" to the command + self.params.extend(plugin_options) def format_options(self, ctx, formatter): """ - Overridden from click.Command to write all options into the formatter in groups - they belong to. If a group is not specified, add the option to MISC group. + Overridden from click.Command to write all options into the formatter in + help_groups they belong to. If a group is not specified, add the option + to MISC_GROUP group. 
""" - groups = OrderedDict([ - (SCANS, []), - (OUTPUT, []), - (PRE_SCAN, []), - (POST_SCAN, []), - (MISC, []), - (CORE, []), + # this mapping defines the CLI help presentation order + help_groups = OrderedDict([ + (SCAN_GROUP, []), + (OTHER_SCAN_GROUP, []), + (SCAN_OPTIONS_GROUP, []), + (OUTPUT_GROUP, []), + (OUTPUT_FILTER_GROUP, []), + (OUTPUT_CONTROL_GROUP, []), + (PRE_SCAN_GROUP, []), + (POST_SCAN_GROUP, []), + (CORE_GROUP, []), + (MISC_GROUP, []), + (DOC_GROUP, []), ]) for param in self.get_params(ctx): # Get the list of option's name and help text help_record = param.get_help_record(ctx) - if help_record: - if getattr(param, 'group', None): - groups[param.group].append(help_record) - else: - groups['misc'].append(help_record) + if not help_record: + continue + # organize options by group + help_group = getattr(param, 'help_group', MISC_GROUP) + sort_order = getattr(param, 'sort_order', 100) + help_groups[help_group].append((sort_order, help_record)) with formatter.section('Options'): - for group, option in groups.items(): - if option: - with formatter.section(group): - formatter.write_dl(option) + for group, help_records in help_groups.items(): + if not help_records: + continue + with formatter.section(group): + sorted_records = [help_record for _, help_record in sorted(help_records)] + formatter.write_dl(sorted_records) + + +try: + # IMPORTANT: this discovers, loads and validates all available plugins + plugin_classes, plugin_options = PluginManager.load_plugins() +except ImportError, e: + echo_stderr('========================================================================') + echo_stderr('ERROR: Unable to import ScanCode plugins.'.upper()) + echo_stderr('Check your installation configuration (setup.py) or re-install/re-configure ScanCode.') + echo_stderr('The following plugin(s) are referenced and cannot be loaded/imported:') + echo_stderr(str(e), color='red') + echo_stderr('========================================================================') + raise 
e + + +def print_plugins(ctx, param, value): + if not value or ctx.resilient_parsing: + return + for plugin_cls in sorted(plugin_classes, key=lambda pc: (pc.stage, pc.name)): + click.echo('--------------------------------------------') + click.echo('Plugin: scancode_{self.stage}:{self.name}'.format(self=plugin_cls), nl=False) + click.echo(' class: {self.__module__}:{self.__name__}'.format(self=plugin_cls)) + if hasattr(plugin_cls, 'requires'): + requires = ', '.join(plugin_cls.requires) + click.echo(' requires: {}'.format(requires), nl=False) + click.echo(' doc: {self.__doc__}'.format(self=plugin_cls)) + click.echo(' options:'.format(self=plugin_cls)) + for option in plugin_cls.options: + name = option.name + opts = ', '.join(option.opts) + help_group = option.help_group + help_txt = option.help # noqa + click.echo(' help_group: {help_group!s}, name: {name!s}: {opts}\n help: {help_txt!s}'.format(**locals())) + click.echo('') + ctx.exit() -class ScanOption(click.Option): - """ - Allow an extra param `group` to be set which can be used - to determine to which group the option belongs. 
- """ - def __init__(self, param_decls=None, show_default=False, - prompt=False, confirmation_prompt=False, - hide_input=False, is_flag=None, flag_value=None, - multiple=False, count=False, allow_from_autoenv=True, - type=None, help=None, group=None, **attrs): - super(ScanOption, self).__init__(param_decls, show_default, - prompt, confirmation_prompt, - hide_input, is_flag, flag_value, - multiple, count, allow_from_autoenv, type, help, **attrs) - self.group = group +@click.command(name='scancode', + epilog=epilog_text, + cls=ScanCommand, + plugin_options=plugin_options) +@click.pass_context -def validate_formats(ctx, param, value): +# ensure that the input path is bytes on Linux, unicode elsewhere +@click.argument('input', metavar=' ', + type=click.Path(exists=True, readable=True, path_type=PATH_TYPE)) + +@click.option('--strip-root', + is_flag=True, + conflicts=['full_root'], + help='Strip the root directory segment of all paths. The default is to ' + 'always include the last directory segment of the scanned path such ' + 'that all paths have a common root directory.', + help_group=OUTPUT_CONTROL_GROUP, cls=CommandLineOption) + +@click.option('--full-root', + is_flag=True, + conflicts=['strip_root'], + help='Report full, absolute paths. The default is to always ' + 'include the last directory segment of the scanned path such that all ' + 'paths have a common root directory.', + help_group=OUTPUT_CONTROL_GROUP, cls=CommandLineOption) + +@click.option('-n', '--processes', + type=int, default=1, + metavar='INT', + help='Set the number of parallel processes to use. ' + 'Disable parallel processing if 0. Also disable threading if -1. [default: 1]', + help_group=CORE_GROUP, sort_order=10, cls=CommandLineOption) + +@click.option('--timeout', + type=float, default=DEFAULT_TIMEOUT, + metavar='', + help='Stop an unfinished file scan after a timeout in seconds. 
' + '[default: %d seconds]' % DEFAULT_TIMEOUT, + help_group=CORE_GROUP, sort_order=10, cls=CommandLineOption) + +@click.option('--quiet', + is_flag=True, + conflicts=['verbose'], + help='Do not print summary or progress.', + help_group=CORE_GROUP, sort_order=20, cls=CommandLineOption) + +@click.option('--verbose', + is_flag=True, + conflicts=['quiet'], + help='Print progress as file-by-file path instead of a progress bar. ' + 'Print a verbose scan summary.', + help_group=CORE_GROUP, sort_order=20, cls=CommandLineOption) + +@click.option('--cache-dir', + type=click.Path( + exists=True, file_okay=False, dir_okay=True, + readable=True, path_type=PATH_TYPE), + default=scancode_cache_dir, + metavar='DIR', + sort_order=210, + + help='Set the path to an existing directory where ScanCode can cache ' + 'files available across runs.' + + 'If not set, the value of the `SCANCODE_CACHE` environment variable is ' + 'used if available. If `SCANCODE_CACHE` is not set, a default ' + 'sub-directory in the user home directory is used instead. ' + '[default: ~/.cache/scancode-tk/version]', + help_group=CORE_GROUP, + cls=CommandLineOption) + +@click.option('--temp-dir', + type=click.Path( + exists=True, file_okay=False, dir_okay=True, + readable=True, path_type=PATH_TYPE), + default=scancode_temp_dir, + show_default=False, + metavar='DIR', + sort_order=210, + help='Set the path to an existing directory where ScanCode can create ' + 'temporary files. ' + 'If not set, the value of the `SCANCODE_TMP` environment variable is ' + 'used if available. If `SCANCODE_TMP` is not set, a default ' + 'sub-directory in the system temp directory is used instead. 
' + '[default: TMP/scancode-tk-]', + help_group=CORE_GROUP, + cls=CommandLineOption) + +@click.option('--timing', + is_flag=True, + help='Collect scan timing for each scan/scanned file.', + help_group=CORE_GROUP, sort_order=250, cls=CommandLineOption) + +@click.option('--max-in-memory', + type=int, default=10000, + show_default=True, + help= + 'Maximum number of files and directories scan details kept in memory ' + 'during a scan. Additional files and directories scan details above this ' + 'number are cached on-disk rather than in memory. ' + 'Use 0 to use unlimited memory and disable on-disk caching. ' + 'Use -1 to use only on-disk caching.', + help_group=CORE_GROUP, sort_order=300, cls=CommandLineOption) + +@click.help_option('-h', '--help', + help_group=DOC_GROUP, sort_order=10, cls=CommandLineOption) + +@click.option('--about', + is_flag=True, is_eager=True, expose_value=False, + callback=print_about, + help='Show information about ScanCode and licensing and exit.', + help_group=DOC_GROUP, sort_order=20, cls=CommandLineOption) + +@click.option('--version', + is_flag=True, is_eager=True, expose_value=False, + callback=print_version, + help='Show the version and exit.', + help_group=DOC_GROUP, sort_order=20, cls=CommandLineOption) + +@click.option('--examples', + is_flag=True, is_eager=True, expose_value=False, + callback=print_examples, + help=('Show command examples and exit.'), + help_group=DOC_GROUP, sort_order=50, cls=CommandLineOption) + +@click.option('--plugins', + is_flag=True, is_eager=True, expose_value=False, + callback=print_plugins, + help='Show the list of available ScanCode plugins and exit.', + help_group=DOC_GROUP, cls=CommandLineOption) + +@click.option('--test-mode', + is_flag=True, default=False, + # not yet supported in Click 6.7 but added in CommandLineOption + hidden=True, + help='Run ScanCode in a special "test mode". 
Only for testing.', + help_group=MISC_GROUP, sort_order=1000, cls=CommandLineOption) +def scancode(ctx, input, # NOQA + strip_root, full_root, + processes, timeout, + quiet, verbose, + cache_dir, temp_dir, + timing, + max_in_memory, + test_mode, + *args, **kwargs): + """scan the file or directory for license, origin and packages and save results to FILE(s) using one or more output format options. + + Error and progress are printed to stderr. """ - Validate formats and template files. Raise a BadParameter on errors. + + # notes: the above docstring of this function is used in the CLI help. Here is + its actual docstring: """ - value_lower = value.lower() - if value_lower in plugincode.output.get_format_plugins(): - return value_lower - # render using a user-provided custom format template - if not os.path.isfile(value): - raise click.BadParameter('Unknwow or invalid template file path: "%(value)s" does not exist or is not readable.' % locals()) - return value + This function is the main ScanCode CLI entry point. + Return a return code of 0 on success or a positive integer on error from + running all the scanning "stages" with the `input` file or + directory. -def validate_exclusive(ctx, exclusive_options): - """ - Validate mutually exclusive options. - Raise a UsageError with on errors. - """ - ctx_params = ctx.params - selected_options = [ctx_params[eop] for eop in exclusive_options if ctx_params[eop]] - if len(selected_options) > 1: - msg = ' and '.join('`' + eo.replace('_', '-') + '`' for eo in exclusive_options) - msg += ' are mutually exclusion options. You can use only one of them.' - raise click.UsageError(msg) + The scanning stages are: + - `inventory`: collect the codebase inventory resources tree for the + `input`. This is a built-in stage that does not accept plugins. 
-@click.command(name='scancode', epilog=epilog_text, cls=ScanCommand) -@click.pass_context + - `setup`: as part of the plugins system, each plugin is loaded and + its `setup` method is called if it is enabled. -# ensure that the input path is bytes on Linux, unicode elsewhere -@click.argument('input', metavar='', type=click.Path(exists=True, readable=True, path_type=fileutils.PATH_TYPE)) -@click.argument('output_file', default='-', metavar='', type=click.File(mode='wb', lazy=False)) - -# Note that click's 'default' option is set to 'false' here despite these being documented to be enabled by default in -# order to more elegantly enable all of these (see code below) if *none* of the command line options are specified. -@click.option('-c', '--copyright', is_flag=True, default=False, help='Scan for copyrights. [default]', group=SCANS, cls=ScanOption) -@click.option('-l', '--license', is_flag=True, default=False, help='Scan for licenses. [default]', group=SCANS, cls=ScanOption) -@click.option('-p', '--package', is_flag=True, default=False, help='Scan for packages. [default]', group=SCANS, cls=ScanOption) - -@click.option('-e', '--email', is_flag=True, default=False, help='Scan for emails.', group=SCANS, cls=ScanOption) -@click.option('-u', '--url', is_flag=True, default=False, help='Scan for urls.', group=SCANS, cls=ScanOption) -@click.option('-i', '--info', is_flag=True, default=False, help='Include information such as size, type, etc.', group=SCANS, cls=ScanOption) - -@click.option('--license-score', is_flag=False, default=0, type=int, show_default=True, - help='Do not return license matches with scores lower than this score. A number between 0 and 100.', group=SCANS, cls=ScanOption) -@click.option('--license-text', is_flag=True, default=False, - help='Include the detected licenses matched text. 
Has no effect unless --license is requested.', group=SCANS, cls=ScanOption) -@click.option('--license-url-template', is_flag=False, default=DEJACODE_LICENSE_URL, show_default=True, - help='Set the template URL used for the license reference URLs. In a template URL, curly braces ({}) are replaced by the license key.', group=SCANS, cls=ScanOption) -@click.option('--strip-root', is_flag=True, default=False, - help='Strip the root directory segment of all paths. The default is to always ' - 'include the last directory segment of the scanned path such that all paths have a common root directory. ' - 'This cannot be combined with `--full-root` option.', group=OUTPUT, cls=ScanOption) -@click.option('--full-root', is_flag=True, default=False, - help='Report full, absolute paths. The default is to always ' - 'include the last directory segment of the scanned path such that all paths have a common root directory. ' - 'This cannot be combined with the `--strip-root` option.', group=OUTPUT, cls=ScanOption) - -@click.option('-f', '--format', is_flag=False, default='json', show_default=True, metavar='', - help=('Set format to one of: %s or use ' - 'as the path to a custom template file' % ', '.join(plugincode.output.get_format_plugins())), - callback=validate_formats, group=OUTPUT, cls=ScanOption) - -@click.option('--verbose', is_flag=True, default=False, help='Print verbose file-by-file progress messages.', group=OUTPUT, cls=ScanOption) -@click.option('--quiet', is_flag=True, default=False, help='Do not print summary or progress messages.', group=OUTPUT, cls=ScanOption) - -@click.help_option('-h', '--help', group=CORE, cls=ScanOption) -@click.option('-n', '--processes', is_flag=False, default=1, type=int, show_default=True, help='Scan using n parallel processes.', group=CORE, cls=ScanOption) -@click.option('--examples', is_flag=True, is_eager=True, callback=print_examples, help=('Show command examples and exit.'), group=CORE, cls=ScanOption) -@click.option('--about', 
is_flag=True, is_eager=True, callback=print_about, help='Show information about ScanCode and licensing and exit.', group=CORE, cls=ScanOption) -@click.option('--version', is_flag=True, is_eager=True, callback=print_version, help='Show the version and exit.', group=CORE, cls=ScanOption) - -@click.option('--diag', is_flag=True, default=False, help='Include additional diagnostic information such as error messages or result details.', group=CORE, cls=ScanOption) -@click.option('--timeout', is_flag=False, default=DEFAULT_TIMEOUT, type=float, show_default=True, help='Stop scanning a file if scanning takes longer than a timeout in seconds.', group=CORE, cls=ScanOption) -@click.option('--reindex-licenses', is_flag=True, default=False, is_eager=True, callback=reindex_licenses, help='Force a check and possible reindexing of the cached license index.', group=MISC, cls=ScanOption) - -def scancode(ctx, - input, output_file, - copyright, license, package, - email, url, info, - license_score, license_text, license_url_template, - strip_root, full_root, - format, verbose, quiet, processes, - diag, timeout, *args, **kwargs): - """scan the file or directory for origin clues and license and save results to the . + - `pre-scan`: each enabled pre-scan plugin `process_codebase(codebase)` + method is called to update/transforme the whole codebase. + + - `scan`: the codebase is walked and each enabled scan plugin + `get_scanner()(resource.location)` scanner function is called once for + each codebase resource. - The scan results are printed to stdout if is not provided. - Error and progress is printed to stderr. + - `post-scan`: each enabled post-scan plugin `process_codebase(codebase)` + method is called to update/transforme the whole codebase. + + - `output_filter`: the `process_resource` method of each enabled + output_filter plugin is called on each resource to determine if the + resource should be kept or not in the output stage. 
+
+    - `output`: each enabled output plugin `process_codebase(codebase)`
+      method is called to create an output for the codebase filtered resources.
+
+    Besides `input`, the other arguments are:
+
+    - `strip_root` and `full_root`: boolean flags: In the outputs, strip the
+      first path segment of a file if `strip_root` is True unless the `input` is
+      a single file. If `full_root` is True report the path as an absolute path.
+      These options are mutually exclusive.
+
+    - `processes`: int: run the scan using up to this number of processes in
+      parallel. If 0, disable the multiprocessing machinery. If -1 also
+      disable the multithreading machinery.
+
+    - `timeout`: float: interrupt the scan of a file if it does not finish within
+      `timeout` seconds. This applies to each file and scan individually (e.g.
+      if the license scan is interrupted the other scans may complete, each
+      within the timeout)
+
+    - `quiet` and `verbose`: boolean flags: Do not display any message if
+      `quiet` is True. Otherwise, display extra verbose messages if `quiet` is
+      False and `verbose` is True. These two options are mutually exclusive.
+
+    - `cache_dir` and `temp_dir`: paths to alternative directories for caching
+      and temporary files.
+
+    - `timing`: boolean flag: collect per-scan and per-file scan timings if
+      True.
+
+    - `on_disk_results`: boolean flag: defaults to True to enable on-disk saving
+      of intermediate scan results.
+
+    - `temp_dir`: path to a non-default temporary directory for caching and other
+      temporary files. If not provided, the default is used.
+
+    Other **kwargs are passed down to plugins as CommandOption indirectly
+    through Click context machinery.
""" - validate_exclusive(ctx, ['strip_root', 'full_root']) - - possible_scans = OrderedDict([ - ('infos', info), - ('licenses', license), - ('copyrights', copyright), - ('packages', package), - ('emails', email), - ('urls', url) - ]) - - options = OrderedDict([ - ('--copyright', copyright), - ('--license', license), - ('--package', package), - ('--email', email), - ('--url', url), - ('--info', info), - ('--license-score', license_score), - ('--license-text', license_text), - ('--strip-root', strip_root), - ('--full-root', full_root), - ('--format', format), - ('--diag', diag), - ]) - - # Use default scan options when no options are provided on the command line. - if not any(possible_scans.values()): - possible_scans['copyrights'] = True - possible_scans['licenses'] = True - possible_scans['packages'] = True - options['--copyright'] = True - options['--license'] = True - options['--package'] = True - - # A hack to force info being exposed for SPDX output in order to reuse calculated file SHA1s. - if format in ('spdx-tv', 'spdx-rdf'): - possible_scans['infos'] = True - - # FIXME: pombredanne: what is this? I cannot understand what this does - for key in options: - if key == "--license-score": - continue - if options[key] == False: - del options[key] - - get_licenses_with_score = partial(get_licenses, min_score=license_score, include_text=license_text, diag=diag, license_url_template=license_url_template) - - # List of scan functions in the same order as "possible_scans". - scan_functions = [ - None, # For "infos" there is no separate scan function, they are always gathered, though not always exposed. 
- get_licenses_with_score, - get_copyrights, - get_package_infos, - get_emails, - get_urls - ] - - # FIXME: this is does not make sense to use tuple and positional values - scanners = OrderedDict(zip(possible_scans.keys(), zip(possible_scans.values(), scan_functions))) - - scans_cache_class = get_scans_cache_class() - pre_scan_plugins = [] - for name, plugin in plugincode.pre_scan.get_pre_scan_plugins().items(): - user_input = kwargs[name.replace('-', '_')] - if user_input: - options['--' + name] = user_input - pre_scan_plugins.append(plugin(user_input)) + # build mappings of all kwargs to pass down to plugins + standard_kwargs = dict( + input=input, + strip_root=strip_root, + full_root=full_root, + processes=processes, + timeout=timeout, + quiet=quiet, + verbose=verbose, + cache_dir=cache_dir, + temp_dir=temp_dir, + timing=timing, + max_in_memory=max_in_memory, + test_mode=test_mode + ) + kwargs.update(standard_kwargs) + + success = True + codebase = None + processing_start = time() + + # UTC start timestamp + scan_start = time2tstamp() + + if not quiet: + if not processes: + echo_stderr('Disabling multi-processing for debugging.', fg='yellow') + elif processes == -1: + echo_stderr('Disabling multi-processing ' + 'and multi-threading for debugging.', fg='yellow') try: - files_count, results, success = scan( - input_path=input, - scanners=scanners, - verbose=verbose, - quiet=quiet, - processes=processes, - timeout=timeout, - diag=diag, - scans_cache_class=scans_cache_class, - strip_root=strip_root, - full_root=full_root, - pre_scan_plugins=pre_scan_plugins) - - # Find all scans that are both enabled and have a valid function - # reference. This deliberately filters out the "info" scan - # (which always has a "None" function reference) as there is no - # dedicated "infos" key in the results that "plugin_only_findings.has_findings()" - # could check. 
- # FIXME: we should not use positional tings tuples for v[0], v[1] that are mysterious values for now - active_scans = [k for k, v in scanners.items() if v[0] and v[1]] - - has_requested_post_scan_plugins = False - - for option, post_scan_handler in plugincode.post_scan.get_post_scan_plugins().items(): - is_requested = kwargs[option.replace('-', '_')] - if is_requested: - options['--' + option] = True - if not quiet: - echo_stderr('Running post-scan plugin: %(option)s...' % locals(), fg='green') - results = post_scan_handler(active_scans, results) - has_requested_post_scan_plugins = True - - if has_requested_post_scan_plugins: - # FIXME: computing len needs a list and therefore needs loading it all ahead of time - results = list(results) - files_count = len(results) + ######################################################################## + # 1. create all plugin instances + ######################################################################## + # FIXME: + validate_option_dependencies(ctx) + + if TRACE_DEEP: + ctx_params = sorted(ctx.params.items()) + logger_debug('scancode: ctx.params:') + for co in ctx.params: + logger_debug(' scancode: ctx.params:', co) + + # NOTE and FIXME: this is a two level nested mapping, which is TOO + # complicated + enabled_plugins = OrderedDict() + + for stage, manager in PluginManager.managers.items(): + enabled_plugins[stage] = stage_plugins = OrderedDict() + for name, plugin_cls in manager.plugin_classes.items(): + try: + plugin = plugin_cls(**kwargs) + if plugin.is_enabled(**kwargs): + stage_plugins[name] = plugin + except: + msg = 'ERROR: failed to load plugin: %(stage)s:%(name)s:' % locals() + echo_stderr(msg, fg='red') + echo_stderr(traceback.format_exc()) + ctx.exit(2) + + # NOTE: these are mappings of plugin instances, not classes! 
+ pre_scan_plugins = enabled_plugins[pre_scan.stage] + scanner_plugins = enabled_plugins[scan.stage] + post_scan_plugins = enabled_plugins[post_scan.stage] + output_filter_plugins = enabled_plugins[output_filter.stage] + output_plugins = enabled_plugins[output.stage] + + if not scanner_plugins: + msg = ('Missing scan option(s): at least one scan ' + 'option is required.') + raise click.UsageError(msg) + + if not output_plugins: + msg = ('Missing output option(s): at least one output ' + 'option is required to save scan results.') + raise click.UsageError(msg) + + # TODO: check for plugin dependencies and if a plugin is ACTIVE!!! + + ######################################################################## + # 2. setup enabled plugins + ######################################################################## + + setup_timings = OrderedDict() + plugins_setup_start = time() + + if not quiet and not verbose: + echo_stderr('Setup plugins...', fg='green') + + # TODO: add progress indicator + for stage, stage_plugins in enabled_plugins.items(): + for name, plugin in stage_plugins.items(): + plugin_setup_start = time() + if verbose: + echo_stderr(' Setup plugin: %(stage)s:%(name)s...' % locals(), + fg='green') + try: + plugin.setup(**kwargs) + except: + msg = 'ERROR: failed to setup plugin: %(stage)s:%(name)s:' % locals() + echo_stderr(msg, fg='red') + echo_stderr(traceback.format_exc()) + ctx.exit(2) + + timing_key = 'setup_%(stage)s:%(name)s' % locals() + setup_timings[timing_key] = time() - plugin_setup_start + + setup_timings['setup'] = time() - plugins_setup_start + + ######################################################################## + # 2.5. 
Create a new Resource subclass for this scan + ######################################################################## + # Craft a new Resource class with the attributes contributed by plugins + sortable_attributes = [] + + # mapping of {"plugin stage:name": [list of attribute keys]} + # also available as a kwarg entry for plugin + kwargs['attributes_by_plugin'] = attributes_by_plugin = {} + for stage, stage_plugins in enabled_plugins.items(): + for name, plugin in stage_plugins.items(): + try: + sortable_attributes.append( + (plugin.sort_order, name, plugin.attributes,) + ) + attributes_by_plugin[plugin.qname] = plugin.attributes.keys() + except: + msg = ('ERROR: failed to collect attributes for plugin: ' + '%(stage)s:%(name)s:' % locals()) + echo_stderr(msg, fg='red') + echo_stderr(traceback.format_exc()) + ctx.exit(2) + + attributes = OrderedDict() + for _, name, attribs in sorted(sortable_attributes): + attributes.update(attribs) + + # FIXME: workaround for https://github.com/python-attrs/attrs/issues/339 + # we reset the _CountingAttribute internal .counter to a proper value + # that matches our ordering + for order, attrib in enumerate(attributes.values(), 100): + attrib.counter = order + + if TRACE_DEEP: + logger_debug('scancode:attributes') + for a in attributes.items(): + logger_debug(a) + + resource_class = attr.make_class( + name=b'ScannedResource', attrs=attributes, bases=(Resource,)) + + ######################################################################## + # 3. 
collect codebase inventory + ######################################################################## + + inventory_start = time() if not quiet: - echo_stderr('Saving results.', fg='green') - - # FIXME: we should have simpler args: a scan "header" and scan results - save_results(scanners, files_count, results, format, options, input, output_file) + echo_stderr('Collect file inventory...', fg='green') + # TODO: add progress indicator + # note: inventory timing collection is built in Codebase initialization + # TODO: this should also collect the basic size/dates + try: + codebase = Codebase( + location=input, + resource_class=resource_class, + full_root=full_root, + strip_root=strip_root, + temp_dir=temp_dir, + max_in_memory=max_in_memory + ) + except: + msg = 'ERROR: failed to collect codebase at: %(input)r' % locals() + echo_stderr(msg, fg='red') + echo_stderr(traceback.format_exc()) + ctx.exit(2) + + # TODO: this is weird: may be the timings should NOt be stored on the + # codebase, since they exist in abstract of it?? + codebase.timings.update(setup_timings) + + codebase.timings['inventory'] = time() - inventory_start + files_count, dirs_count, size_count = codebase.compute_counts() + codebase.summary['initial:files_count'] = files_count + codebase.summary['initial:dirs_count'] = dirs_count + codebase.summary['initial:size_count'] = size_count + + ######################################################################## + # 4. prescan scans: run the early scans required by prescan plugins + ######################################################################## + # FIXME: this stage is extremely convoluted and needs cleaning! 
+ + # resolve pre-scan plugin requirements that require a scan first + early_scan_plugins = pre_scan.PreScanPlugin.get_all_required( + pre_scan_plugins.values(), scanner_plugins) + + success = success and run_scanners(early_scan_plugins , codebase, + processes, timeout, timing, + quiet, verbose, + stage='pre-scan-scan', kwargs=kwargs) + + ######################################################################## + # 5. run prescans + ######################################################################## + + # TODO: add progress indicator + run_plugins(ctx, plugins=pre_scan_plugins, stage='pre-scan', + codebase=codebase, kwargs=kwargs, + quiet=quiet, verbose=verbose, + stage_msg='Run %(stage)ss...', + plugin_msg=' Run %(stage)s: %(name)s...') + + ######################################################################## + # 6. run scans. + ######################################################################## + + # do not rerun scans already done in prescan-scan + scan_plugins = [p for p in scanner_plugins.values() + if p not in early_scan_plugins] + + success = success and run_scanners(scan_plugins, codebase, + processes, timeout, timing, + quiet, verbose, + stage='scan', kwargs=kwargs) + + ######################################################################## + # 7. run postscans + ######################################################################## + + # TODO: add progress indicator + run_plugins(ctx, plugins=post_scan_plugins, stage='post-scan', + codebase=codebase, kwargs=kwargs, + quiet=quiet, verbose=verbose, + stage_msg='Run %(stage)ss...', + plugin_msg=' Run %(stage)s: %(name)s...') + + ######################################################################## + # 8. 
apply output filters + ######################################################################## + + # TODO: add progress indicator + run_plugins(ctx, plugins=output_filter_plugins, stage='output-filter', + codebase=codebase, kwargs=kwargs, + quiet=quiet, verbose=verbose, + stage_msg='Apply %(stage)ss...', + plugin_msg=' Apply %(stage)s: %(name)s...') + + ######################################################################## + # 9. save outputs + ######################################################################## + + counts = codebase.compute_counts(skip_root=strip_root, skip_filtered=True) + files_count, dirs_count, size_count = counts + + # TODO: cleanup kwargs vs. codebase attrs + codebase.summary['final:files_count'] = files_count + codebase.summary['final:dirs_count'] = dirs_count + codebase.summary['final:size_count'] = size_count + + # WHY this count here? + kwargs['files_count'] = files_count + kwargs['pretty_options'] = get_pretty_params(ctx, generic_paths=test_mode) + kwargs['scancode_notice'] = notice + kwargs['scancode_version'] = scancode_version + + # TODO: add progress indicator + run_plugins(ctx, plugins=output_plugins, stage='output', + codebase=codebase, kwargs=kwargs, + quiet=quiet, verbose=verbose, + stage_msg='Save scan results...', + plugin_msg=' Save scan results as: %(name)s...', + exit_on_fail=False) + + ######################################################################## + # 9. display summary + ######################################################################## + codebase.timings['total'] = time() - processing_start + + # TODO: compute summary for output plugins too?? 
+ if not quiet: + scan_names = ', '.join(s.name for s in scan_plugins) + echo_stderr('Scanning done.', fg='green' if success else 'red') + display_summary(codebase, scan_names, processes, verbose=verbose) finally: - # cleanup - cache = scans_cache_class() - cache.clear() + # cleanup including cache cleanup + if codebase: + codebase.clear() rc = 0 if success else 1 ctx.exit(rc) -def scan(input_path, - scanners, - verbose=False, quiet=False, - processes=1, timeout=DEFAULT_TIMEOUT, - diag=False, - scans_cache_class=None, - strip_root=False, - full_root=False, - pre_scan_plugins=None): +def run_plugins(ctx, stage, plugins, codebase, kwargs, quiet, verbose, + stage_msg='', plugin_msg='', exit_on_fail=True): """ - Return a tuple of (files_count, scan_results, success) where - scan_results is an iterable and success is a boolean. - - Run each requested scan proper: each individual file scan is cached - on disk to free memory. Then the whole set of scans is loaded from - the cache and streamed at the end. + Run the `stage` `plugins` (a mapping of {name: plugin} on `codebase`. + Display errors. + Exit the CLI on failure if `exit_on_fail` is True. """ - assert scans_cache_class - scan_summary = OrderedDict() - scan_summary['scanned_path'] = input_path - scan_summary['processes'] = processes - - # Display scan start details - ############################ - # FIXME: it does not make sense to use tuple and positional values - scans = [k for k, v in scanners.items() if v[0]] - _scans = ', '.join(scans) - if not quiet: - echo_stderr('Scanning files for: %(_scans)s with %(processes)d process(es)...' 
% locals()) + stage_start = time() + if verbose and plugins: + echo_stderr(stage_msg % locals(), fg='green') - scan_summary['scans'] = scans[:] - scan_start = time() - indexing_time = 0 - # FIXME: It does not make sense to use tuple and positional values - with_licenses, _ = scanners.get('licenses', (False, '')) - if with_licenses: - # build index outside of the main loop for speed - # this also ensures that forked processes will get the index on POSIX naturally - if not quiet: - echo_stderr('Building license detection index...', fg='green', nl=False) - from licensedcode.cache import get_index - get_index(False) - indexing_time = time() - scan_start - if not quiet: - echo_stderr('Done.', fg='green', nl=True) + # TODO: add progress indicator + for name, plugin in plugins.items(): + plugin_start = time() - scan_summary['indexing_time'] = indexing_time + if verbose: + echo_stderr(plugin_msg % locals(), fg='green') - pool = None + try: + if TRACE_DEEP: + from pprint import pformat + logger_debug('run_plugins: kwargs passed to %(stage)s:%(name)s' % locals()) + logger_debug(pformat(sorted(kwargs.items()))) + logger_debug() - resources = resource_paths(input_path, diag, scans_cache_class, pre_scan_plugins=pre_scan_plugins) - paths_with_error = [] - files_count = 0 + plugin.process_codebase(codebase, **kwargs) - logfile_path = scans_cache_class().cache_files_log - if on_linux: - file_logger = partial(open, logfile_path, 'wb') - else: - file_logger = partial(codecs.open, logfile_path, 'w', encoding='utf-8') + except: + msg = 'ERROR: failed to run %(stage)s plugin: %(name)s:' % locals() + echo_stderr(msg, fg='red') + echo_stderr(traceback.format_exc()) + if exit_on_fail: + ctx.exit(2) - with file_logger() as logfile_fd: + timing_key = '%(stage)s:%(name)s' % locals() + codebase.timings[timing_key] = time() - plugin_start - logged_resources = _resource_logger(logfile_fd, resources) + codebase.timings[stage] = time() - stage_start - scanit = partial(_scanit, scanners=scanners, 
scans_cache_class=scans_cache_class, - diag=diag, timeout=timeout, processes=processes) - max_file_name_len = compute_fn_max_len() - # do not display a file name in progress bar if there is less than 5 chars available. - display_fn = bool(max_file_name_len > 10) - try: - if processes: - # maxtasksperchild helps with recycling processes in case of leaks - pool = get_pool(processes=processes, maxtasksperchild=1000) - # Using chunksize is documented as much more efficient in the Python doc. - # Yet "1" still provides a better and more progressive feedback. - # With imap_unordered, results are returned as soon as ready and out of order. - scanned_files = pool.imap_unordered(scanit, logged_resources, chunksize=1) - pool.close() - else: - # no multiprocessing with processes=0 - scanned_files = imap(scanit, logged_resources) - if not quiet: - echo_stderr('Disabling multi-processing and multi-threading...', fg='yellow') - - if not quiet: - echo_stderr('Scanning files...', fg='green') - - def scan_event(item): - """Progress event displayed each time a file is scanned""" - if quiet or not item or not display_fn: - return '' - _scan_success, _scanned_path = item - _scanned_path = unicode(toascii(_scanned_path)) - if verbose: - _progress_line = _scanned_path - else: - _progress_line = fixed_width_file_name(_scanned_path, max_file_name_len) - return style('Scanned: ') + style(_progress_line, fg=_scan_success and 'green' or 'red') - - scanning_errors = [] - files_count = 0 - with progressmanager( - scanned_files, item_show_func=scan_event, show_pos=True, - verbose=verbose, quiet=quiet, file=sys.stderr) as scanned: - while True: - try: - result = scanned.next() - scan_success, scanned_rel_path = result - if not scan_success: - paths_with_error.append(scanned_rel_path) - files_count += 1 - except StopIteration: - break - except KeyboardInterrupt: - print('\nAborted with Ctrl+C!') - if pool: - pool.terminate() - break - finally: - if pool: - # ensure the pool is really dead to work 
around a Python 2.7.3 bug: - # http://bugs.python.org/issue15101 - pool.terminate() +def run_scanners(scan_plugins, codebase, processes, timeout, timing, + quiet, verbose, stage, kwargs): + """ + Run the `scan_plugins` list of ScanPlugin on the `codebase`. Return True on + success or False otherwise. + + Display progress and update the codebase with computed counts and scan + results. + """ - # TODO: add stats to results somehow + scan_start = time() + + scanners = [] + scan_sorter = lambda s: (s.sort_order, s.name) + for scanner in sorted(scan_plugins, key=scan_sorter): + func = scanner.get_scanner(**kwargs) + scanners.append(Scanner(name=scanner.name, function=func)) - # Compute stats - ########################## - scan_summary['files_count'] = files_count - scan_summary['files_with_errors'] = paths_with_error - total_time = time() - scan_start - scanning_time = total_time - indexing_time - scan_summary['total_time'] = total_time - scan_summary['scanning_time'] = scanning_time + if TRACE_DEEP: logger_debug('run_scanners: scanners:', scanners) + if not scanners: + return True - files_scanned_per_second = round(float(files_count) / scanning_time , 2) - scan_summary['files_scanned_per_second'] = files_scanned_per_second + scan_names = ', '.join(s.name for s in scanners) + progress_manager = None if not quiet: - # Display stats - ########################## - echo_stderr('Scanning done.', fg=paths_with_error and 'red' or 'green') - if paths_with_error: - if diag: - echo_stderr('Some files failed to scan properly:', fg='red') - # iterate cached results to collect all scan errors - cached_scan = scans_cache_class() - root_dir = _get_root_dir(input_path, strip_root, full_root) - scan_results = cached_scan.iterate(scans, root_dir, paths_subset=paths_with_error) - for scan_result in scan_results: - errored_path = scan_result.get('path', '') - echo_stderr('Path: ' + errored_path, fg='red') - for error in scan_result.get('scan_errors', []): - for emsg in 
error.splitlines(False): - echo_stderr(' ' + emsg) - echo_stderr('') - else: - echo_stderr('Some files failed to scan properly. Use the --diag option for additional details:', fg='red') - for errored_path in paths_with_error: - echo_stderr(' ' + errored_path, fg='red') + echo_stderr('Scan files for: %(scan_names)s ' + 'with %(processes)d process(es)...' % locals()) + item_show_func = partial(path_progress_message, verbose=verbose) + progress_manager = partial(progressmanager, + item_show_func=item_show_func, + verbose=verbose, file=sys.stderr) - echo_stderr('Scan statistics: %(files_count)d files scanned in %(total_time)ds.' % locals()) - echo_stderr('Scan options: %(_scans)s with %(processes)d process(es).' % locals()) - echo_stderr('Scanning speed: %(files_scanned_per_second)s files per sec.' % locals()) - echo_stderr('Scanning time: %(scanning_time)ds.' % locals()) - echo_stderr('Indexing time: %(indexing_time)ds.' % locals(), reset=True) + # TODO: add CLI option to bypass cache entirely? + scan_success = scan_codebase( + codebase, scanners, processes, timeout, + with_timing=timing, progress_manager=progress_manager) - success = not paths_with_error - # finally return an iterator on cached results - cached_scan = scans_cache_class() - root_dir = _get_root_dir(input_path, strip_root, full_root) - return files_count, cached_scan.iterate(scans, root_dir), success + codebase.timings[stage] = time() - scan_start + scanned_fc, scanned_dc, scanned_sc = codebase.compute_counts() + codebase.summary[stage + ':scanners'] = scan_names + codebase.summary[stage + ':files_count'] = scanned_fc + codebase.summary[stage + ':dirs_count'] = scanned_dc + codebase.summary[stage + ':size_count'] = scanned_sc -def _get_root_dir(input_path, strip_root=False, full_root=False): - """ - Return a root dir name or None. - On Windows, the path uses POSIX (forward slash) separators. 
+ return scan_success + + +def scan_codebase(codebase, scanners, processes=1, timeout=DEFAULT_TIMEOUT, + with_timing=False, progress_manager=None): """ - if strip_root: - return + Run the `scanners` Scanner objects on the `codebase` Codebase. Return True + on success or False otherwise. - scanned_path = os.path.abspath(os.path.normpath(os.path.expanduser(input_path))) - scanned_path = fileutils.as_posixpath(scanned_path) - if filetype.is_dir(scanned_path): - root_dir = scanned_path - else: - root_dir = fileutils.parent_directory(scanned_path) - root_dir = fileutils.as_posixpath(root_dir) + Use multiprocessing with `processes` number of processes. Disable + multiprocessing is processes <=0. Disable threading is processes is < 0 - if full_root: - return root_dir - else: - return fileutils.file_name(root_dir) + Run each scanner function for up to `timeout` seconds and fail it otherwise. + If `with_timing` is True, each Resource is updated with per-scanner + execution time (as a float in seconds). This is added to the `scan_timings` + mapping of each Resource as {scanner.name: execution time}. -def _resource_logger(logfile_fd, resources): + Provide optional progress feedback in the UI using the `progress_manager` + callable that accepts an iterable of tuple of (location, rid, scan_errors, + scan_result ) as argument. """ - Log file path to the logfile_fd opened file descriptor for each resource and - yield back the resources. - """ - file_logger = ScanFileCache.log_file_path - for resource in resources: - file_logger(logfile_fd, resource.rel_path) - yield resource + # FIXME: this path computation is super inefficient tuples of (absolute + # location, resource id) -def _scanit(resource, scanners, scans_cache_class, diag, timeout=DEFAULT_TIMEOUT, processes=1): - """ - Run scans and cache results on disk. Return a tuple of (success, scanned relative - path) where sucess is True on success, False on error. 
Note that this is really - only a wrapper function used as an execution unit for parallel processing. - """ - success = True - scans_cache = scans_cache_class() + # NOTE: we never scan directories + resources = ((r.location, r.rid) for r in codebase.walk() if r.is_file) - # note: "flag and function" expressions return the function if flag is True - # note: the order of the scans matters to show things in logical order - scanner_functions = map(lambda t : t[0] and t[1], scanners.values()) - scanners = OrderedDict(zip(scanners.keys(), scanner_functions)) + runner = partial(scan_resource, scanners=scanners, + timeout=timeout, with_timing=with_timing, + with_threading=processes >= 0) - if processes: - interrupter = interruptible - else: - # fake, non inteerrupting used for debugging when processes=0 - interrupter = fake_interruptible - - if any(scanner_functions): - # Skip other scans if already cached - # FIXME: ENSURE we only do this for files not directories - if not resource.is_cached: - # run the scan as an interruptiple task - scans_runner = partial(scan_one, resource.abs_path, scanners, diag) - success, scan_result = interrupter(scans_runner, timeout=timeout) - if not success: - # Use scan errors as the scan result for that file on failure this is - # a top-level error not attachedd to a specific scanner, hence the - # "scan" key is used for these errors - scan_result = {'scan_errors': [scan_result]} - - scans_cache.put_scan(resource.rel_path, resource.get_info(), scan_result) - - # do not report success if some other errors happened - if scan_result.get('scan_errors'): + if TRACE: + logger_debug('scan_codebase: scanners:', ', '.join(s.name for s in scanners)) + + get_resource = codebase.get_resource + + success = True + pool = None + scans = None + try: + if processes >= 1: + # maxtasksperchild helps with recycling processes in case of leaks + pool = get_pool(processes=processes, maxtasksperchild=1000) + # Using chunksize is documented as much more efficient in 
the Python doc. + # Yet "1" still provides a better and more progressive feedback. + # With imap_unordered, results are returned as soon as ready and out of order. + scans = pool.imap_unordered(runner, resources, chunksize=1) + pool.close() + else: + # no multiprocessing with processes=0 or -1 + scans = imap(runner, resources) + + if progress_manager: + scans = progress_manager(scans) + # hack to avoid using a context manager + if hasattr(scans, '__enter__'): + scans.__enter__() + + while True: + try: + location, rid, scan_errors, scan_time, scan_result, scan_timings = scans.next() + + if TRACE_DEEP: + logger_debug( + 'scan_codebase: location:', location, 'results:', scan_result) + + resource = get_resource(rid) + + if not resource: + # this should never happen + msg = ('ERROR: Internal error in scan_codebase: Resource ' + 'at %(rid)r is missing from codebase.\n' + 'Scan result not saved:\n%(scan_result)r.' % locals()) + codebase.errors.append(msg) + success = False + continue + + if scan_errors: + success = False + resource.scan_errors.extend(scan_errors) + + if TRACE: logger_debug('scan_codebase: scan_timings:', scan_timings) + if with_timing and scan_timings: + if scan_timings: + resource.scan_timings.update(scan_timings) + + # NOTE: here we effectively single threaded the saving a + # Resource to the cache! .... not sure this is a good or bad + # thing for scale. 
Likely not + for key, value in scan_result.items(): + setattr(resource, key, value) + codebase.save_resource(resource) + + except StopIteration: + break + except KeyboardInterrupt: + echo_stderr('\nAborted with Ctrl+C!', fg='red') success = False + if pool: + pool.terminate() + break - return success, resource.rel_path + finally: + if pool: + # ensure the pool is really dead to work around a Python 2.7.3 bug: + # http://bugs.python.org/issue15101 + pool.terminate() + if scans and hasattr(scans, 'render_finish'): + # hack to avoid using a context manager + scans.render_finish() + return success -def build_ignorer(ignores, unignores): + +def scan_resource(location_rid, scanners, timeout=DEFAULT_TIMEOUT, + with_timing=False, with_threading=True): """ - Return a callable suitable for path ignores with OS-specific encoding - preset. + Return a tuple of (location, rid, scan_errors, scan_time, scan_results, timings) + by running the `scanners` Scanner objects for the file or directory resource + with id `rid` at `location` provided as a `location_rid` tuple of (location, + rid) for up to `timeout` seconds. + If `with_threading` is False, threading is disabled. + + The returned tuple has these values (: + - `location` and `rid` are the orginal arguments. + - `scan_errors` is a list of error strings. + - `scan_results` is a mapping of scan results from all scanners. + - `scan_time` is the duration in seconds to run all scans for this resource. + - `timings` is a mapping of scan {scanner.name: execution time in seconds} + tracking the execution duration each each scan individually. + `timings` is empty unless `with_timing` is True. + + All these values MUST be serializable/pickable because of the way multi- + processing/threading works. 
""" - ignores = ignores or {} - unignores = unignores or {} - if on_linux: - ignores = {path_to_bytes(k): v for k, v in ignores.items()} - unignores = {path_to_bytes(k): v for k, v in unignores.items()} + scan_time = time() + location, rid = location_rid + results = OrderedDict() + scan_errors = [] + timings = OrderedDict() if with_timing else None + + if not with_threading: + interruptor = fake_interruptible else: - ignores = {path_to_unicode(k): v for k, v in ignores.items()} - unignores = {path_to_unicode(k): v for k, v in unignores.items()} - return partial(ignore.is_ignored, ignores=ignores, unignores=unignores) + interruptor = interruptible + # run each scanner in sequence in its own interruptible + for scanner in scanners: + if with_timing: + start = time() + + try: + runner = partial(scanner.function, location) + error, values_mapping = interruptor(runner, timeout=timeout) + if error: + msg = 'ERROR: for scanner: ' + scanner.name + ':\n' + error + scan_errors.append(msg) + # the return value of a scanner fun MUST be a mapping + if values_mapping: + results.update(values_mapping) + + except Exception: + msg = 'ERROR: for scanner: ' + scanner.name + ':\n' + traceback.format_exc() + scan_errors.append(msg) + finally: + if with_timing: + timings[scanner.name] = time() - start -def resource_paths(base_path, diag, scans_cache_class, pre_scan_plugins=None): + scan_time = time() - scan_time + + return location, rid, scan_errors, scan_time, results, timings + + +def display_summary(codebase, scan_names, processes, verbose): """ - Yield `Resource` objects for all the files found at base_path - (either a directory or file) given an absolute base_path. Only yield - Files, not directories. - absolute path is a native OS path. - base_path-relative path is a POSIX path. - - The relative path is guaranted to be unicode and may be URL-encoded and may not - be suitable to address an actual file. + Display a scan summary. 
""" - if base_path: - if on_linux: - base_path = path_to_bytes(base_path) + initial_files_count = codebase.summary.get('initial:files_count', 0) + initial_dirs_count = codebase.summary.get('initial:dirs_count', 0) + initial_res_count = initial_files_count + initial_dirs_count + initial_size_count = codebase.summary.get('initial:size_count', 0) + if initial_size_count: + initial_size_count = format_size(initial_size_count) + initial_size_count = 'for %(initial:size_count)s' % locals() + else: + initial_size_count = '' + + ###################################################################### + prescan_scan_time = codebase.timings.get('pre-scan-scan', 0.) + + if prescan_scan_time: + prescan_scan_files_count = codebase.summary.get('pre-scan-scan:files_count', 0) + prescan_scan_file_speed = round(float(prescan_scan_files_count) / prescan_scan_time , 2) + + prescan_scan_size_count = codebase.summary.get('pre-scan-scan:size_count', 0) + + if prescan_scan_size_count: + prescan_scan_size_speed = format_size(prescan_scan_size_count / prescan_scan_time) + prescan_scan_size_speed = '%(prescan_scan_size_speed)s/sec.' % locals() + + prescan_scan_size_count = format_size(prescan_scan_size_count) + prescan_scan_size_count = 'for %(prescan_scan_size_count)s' % locals() else: - base_path = path_to_unicode(base_path) - - base_path = os.path.abspath(os.path.normpath(os.path.expanduser(base_path))) - base_is_dir = filetype.is_dir(base_path) - len_base_path = len(base_path) - ignores = {} - if pre_scan_plugins: - for plugin in pre_scan_plugins: - ignores.update(plugin.get_ignores()) - ignores.update(ignore.ignores_VCS) - - ignorer = build_ignorer(ignores, unignores={}) - resources = fileutils.resource_iter(base_path, ignored=ignorer) - - for abs_path in resources: - resource = Resource(scans_cache_class, abs_path, base_is_dir, len_base_path) - # always fetch infos and cache. 
- resource.put_info(scan_infos(abs_path, diag=diag)) - if pre_scan_plugins: - for plugin in pre_scan_plugins: - resource = plugin.process_resource(resource) - if resource: - yield resource - - -def scan_infos(input_file, diag=False): + prescan_scan_size_count = '' + prescan_scan_size_speed = '' + + ###################################################################### + scan_time = codebase.timings.get('scan', 0.) + + scan_files_count = codebase.summary.get('scan:files_count', 0) + scan_file_speed = round(float(scan_files_count) / scan_time , 2) + + scan_size_count = codebase.summary.get('scan:size_count', 0) + + if scan_size_count: + scan_size_speed = format_size(scan_size_count / scan_time) + scan_size_speed = '%(scan_size_speed)s/sec.' % locals() + + scan_size_count = format_size(scan_size_count) + scan_size_count = 'for %(scan_size_count)s' % locals() + else: + scan_size_count = '' + scan_size_speed = '' + + ###################################################################### + final_files_count = codebase.summary.get('final:files_count', 0) + final_dirs_count = codebase.summary.get('final:dirs_count', 0) + final_res_count = final_files_count + final_dirs_count + final_size_count = codebase.summary.get('final:size_count', 0) + if final_size_count: + final_size_count = format_size(final_size_count) + final_size_count = 'for %(final_size_count)s' % locals() + else: + final_size_count = '' + ###################################################################### + + top_errors = codebase.errors + path_and_errors = [(r.path, r.scan_errors) + for r in codebase.walk() if r.scan_errors] + + has_errors = top_errors or path_and_errors + + errors_count = 0 + if has_errors: + echo_stderr('Some files failed to scan properly:', fg='red') + for error in top_errors: + echo_stderr(error) + errors_count += 1 + + for errored_path, errors in path_and_errors: + echo_stderr('Path: ' + errored_path, fg='red') + if not verbose: + continue + + for error in errors: + for emsg in 
error.splitlines(False): + echo_stderr(' ' + emsg, fg='red') + errors_count += 1 + + ###################################################################### + + echo_stderr('Summary: %(scan_names)s with %(processes)d process(es)' % locals()) + echo_stderr('Errors count: %(errors_count)d' % locals()) + echo_stderr('Scan Speed: %(scan_file_speed).2f files/sec. %(scan_size_speed)s' % locals()) + if prescan_scan_time: + echo_stderr('Early Scanners Speed: %(prescan_scan_file_speed).2f files/sec. %(prescan_scan_size_speed)s' % locals()) + + echo_stderr('Initial counts: %(initial_res_count)d resource(s): ' + '%(initial_files_count)d file(s) ' + 'and %(initial_dirs_count)d directorie(s) ' + '%(initial_size_count)s' % locals()) + + echo_stderr('Final counts: %(final_res_count)d resource(s): ' + '%(final_files_count)d file(s) ' + 'and %(final_dirs_count)d directorie(s) ' + '%(final_size_count)s' % locals()) + + echo_stderr('Timings:') + for name, value, in codebase.timings.items(): + if value > 0.1: + echo_stderr(' %(name)s: %(value).2fs' % locals()) + + # TODO: if timing was requested display top per-scan/per-file stats? + + +def format_size(size): """ - Scan one file or directory and return file_infos data. This always - contains an extra 'errors' key with a list of error messages, - possibly empty. If `diag` is True, additional diagnostic messages - are included. + Return a human-readable value for the `size` int or float. 
+ + For example: + >>> format_size(0) + u'0 Byte' + >>> format_size(1) + u'1 Byte' + >>> format_size(0.123) + u'0.1 Byte' + >>> format_size(123) + u'123 Bytes' + >>> format_size(1023) + u'1023 Bytes' + >>> format_size(1024) + u'1 KB' + >>> format_size(2567) + u'2.51 KB' + >>> format_size(2567000) + u'2.45 MB' + >>> format_size(1024*1024) + u'1 MB' + >>> format_size(1024*1024*1024) + u'1 GB' + >>> format_size(1024*1024*1024*12.3) + u'12.30 GB' """ - errors = [] - try: - infos = get_file_infos(input_file) - except Exception as e: - # never fail but instead add an error message. - infos = _empty_file_infos() - errors = ['ERROR: infos: ' + e.message] - if diag: - errors.append('ERROR: infos: ' + traceback.format_exc()) - # put errors last - infos['scan_errors'] = errors - return infos - - -def scan_one(location, scanners, diag=False): + if not size: + return '0 Byte' + if size < 1: + return '%(size).1f Byte' % locals() + if size == 1: + return '%(size)d Byte' % locals() + size = float(size) + for symbol in ('Bytes', 'KB', 'MB', 'GB', 'TB'): + if size < 1024: + if int(size) == float(size): + return '%(size)d %(symbol)s' % locals() + return '%(size).2f %(symbol)s' % locals() + size = size / 1024. + return '%(size).2f %(symbol)s' % locals() + + +def get_pretty_params(ctx, generic_paths=False): """ - Scan one file or directory at `location` and return a scan result - mapping, calling every scanner callable in the `scanners` mapping of - (scan name -> scan function). - - The scan result mapping contain a 'scan_errors' key with a list of - error messages. If `diag` is True, 'scan_errors' error messages also - contain detailed diagnostic information such as a traceback if - available. + Return a sorted mapping of {CLI option: pretty value string} for the + `ctx` Click.context, putting arguments first then options: + + {"input": ~/some/path, "--license": True} + + Skip options that are not set or hidden. 
+ + If `generic_paths` is True, click.File and click.Path parameters are made + "generic" replacing their value with a placeholder. This is used mostly for + testing. """ - if on_linux: - location = path_to_bytes(location) - else: - location = path_to_unicode(location) - scan_result = OrderedDict() - scan_errors = [] - for scan_name, scanner in scanners.items(): - if not scanner: + if TRACE: + logger_debug('get_pretty_params: generic_paths', generic_paths) + args = [] + options = [] + + param_values = ctx.params + for param in ctx.command.params: + name = param.name + value = param_values.get(name) + + if param.is_eager: + continue + # This attribute is not yet in Click 6.7 but in head + if getattr(param, 'hidden', False): continue - try: - scan_details = scanner(location) - # consume generators - if isinstance(scan_details, GeneratorType): - scan_details = list(scan_details) - scan_result[scan_name] = scan_details - except TimeoutError: - raise - except Exception as e: - # never fail but instead add an error message and keep an empty scan: - scan_result[scan_name] = [] - messages = ['ERROR: ' + scan_name + ': ' + e.message] - if diag: - messages.append('ERROR: ' + scan_name + ': ' + traceback.format_exc()) - scan_errors.extend(messages) - - # put errors last, after scans proper - scan_result['scan_errors'] = scan_errors - return scan_result - - -def save_results(scanners, files_count, results, format, options, input, output_file): - """ - Save scan results to file or screen. - """ - # note: in tests, sys.stdout is not used, but is instead some io - # wrapper with no name attributes. We use this to check if this is a - # real filesystem file or not. - # note: sys.stdout.name == '' so it has a name. - is_real_file = hasattr(output_file, 'name') - - if output_file != sys.stdout and is_real_file: - # we are writing to a real filesystem file: create directories! 
- parent_dir = os.path.dirname(output_file.name) - if parent_dir: - fileutils.create_dir(abspath(expanduser(parent_dir))) - - # Write scan results to file or screen as a formatted output ... - # ... using a user-provided custom format template - format_plugins = plugincode.output.get_format_plugins() - if format not in format_plugins: - # format may be a custom template file path - if not os.path.isfile(format): - # this check was done before in the CLI validation, but this - # is done again if the function is used directly - echo_stderr('\nInvalid template: must be a file.', fg='red') + if value == param.default: + continue + if value is None: + continue + if value in (tuple(), [],): + # option with multiple values, the value is a tuple + continue + + if isinstance(param.type, click.Path) and generic_paths: + value = '' + + if isinstance(param.type, click.File): + if generic_paths: + value = '' + else: + # the value cannot be displayed as-is as this may be an opened file- + # like object + vname = getattr(value, 'name', None) + if vname: + value = vname + else: + value = '' + + # coerce to string for non-basic supported types + if not (value in (True, False, None) + or isinstance(value, (str, unicode, bytes, tuple, list, dict, OrderedDict))): + value = repr(value) + + # opts is a list of CLI options as in "--strip-root": the last opt is + # the CLI option long form by convention + cli_opt = param.opts[-1] + + if isinstance(param, click.Argument): + args.append((cli_opt, value)) else: - from formattedcode import format_templated - # FIXME: carrying an echo function does not make sense - format_templated.write_custom( - results, output_file, _echo=echo_stderr, version=version, template_path=format) + options.append((cli_opt, value)) - # ... 
or using the selected format plugin - else: - writer = format_plugins[format] - # FIXME: carrying an echo function does not make sense - # FIXME: do not use input as a variable name - writer(files_count=files_count, version=version, notice=notice, - scanned_files=results, - options=options, - input=input, output_file=output_file, _echo=echo_stderr) + return OrderedDict(sorted(args) + sorted(options)) diff --git a/src/scancode/cli_test_utils.py b/src/scancode/cli_test_utils.py index ddac798bf6d..11137b01aa3 100644 --- a/src/scancode/cli_test_utils.py +++ b/src/scancode/cli_test_utils.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -31,80 +31,156 @@ from collections import OrderedDict import json import os + from commoncode.system import on_linux +from scancode_config import scancode_root_dir -def remove_dates(scan_result): +def run_scan_plain(options, cwd=None, test_mode=True, expected_rc=0): """ - Remove date fields from scan. + Run a scan as a plain subprocess. Return rc, stdout, stderr. 
""" - for scanned_file in scan_result['files']: - if 'date' in scanned_file: - del scanned_file['date'] + from commoncode.command import execute + + if test_mode and '--test-mode' not in options: + options.append('--test-mode') + scmd = b'scancode' if on_linux else 'scancode' + scan_cmd = os.path.join(scancode_root_dir, scmd) + rc, stdout, stderr = execute(scan_cmd, options, cwd=cwd) + + if rc != expected_rc: + opts = get_opts(options) + error = ''' +Failure to run: scancode %(opts)s +stdout: +%(stdout)s + +stderr: +%(stderr)s +''' % locals() + assert rc == expected_rc, error + + return rc, stdout, stderr -def check_json_scan(expected_file, result_file, regen=False, strip_dates=False): + +def run_scan_click(options, monkeypatch=None, test_mode=True, expected_rc=0): """ - Check the scan result_file JSON results against the expected_file expected JSON - results. Removes references to test_dir for the comparison. If regen is True the - expected_file WILL BE overwritten with the results. This is convenient for - updating tests expectations. But use with caution. + Run a scan as a Click-controlled subprocess + If monkeypatch is provided, a tty with a size (80, 43) is mocked. + Return a click.testing.Result object. 
""" - result = _load_json_result(result_file) - if strip_dates: - remove_dates(result) + import click + from click.testing import CliRunner + from scancode import cli + + if test_mode and '--test-mode' not in options: + options.append('--test-mode') + + if monkeypatch: + monkeypatch.setattr(click._termui_impl, 'isatty', lambda _: True) + monkeypatch.setattr(click , 'get_terminal_size', lambda : (80, 43,)) + runner = CliRunner() + + result = runner.invoke(cli.scancode, options, catch_exceptions=False) + + output = result.output + if result.exit_code != expected_rc: + opts = get_opts(options) + error = ''' +Failure to run: scancode %(opts)s +output: +%(output)s +''' % locals() + assert result.exit_code == expected_rc, error + return result + + +def get_opts(options): + try: + return ' '.join(options) + except: + try: + return b' '.join(options) + except: + return b' '.join(map(repr, options)) + + +def check_json_scan(expected_file, result_file, regen=False, + strip_dates=False, clean_errs=True): + """ + Check the scan result_file JSON results against the expected_file expected + JSON results. Removes references to test_dir for the comparison. If regen is + True the expected_file WILL BE overwritten with the results. This is + convenient for updating tests expectations. But use with caution. 
+ """ + scan_results = load_json_result(result_file, strip_dates, clean_errs) + if regen: with open(expected_file, 'wb') as reg: - json.dump(result, reg, indent=2, separators=(',', ': ')) - expected = _load_json_result(expected_file) - if strip_dates: - remove_dates(expected) + json.dump(scan_results, reg, indent=2, separators=(',', ': ')) + + expected = load_json_result(expected_file, strip_dates, clean_errs) # NOTE we redump the JSON as a string for a more efficient comparison of # failures + # TODO: remove sort, this should no longer be needed expected = json.dumps(expected, indent=2, sort_keys=True, separators=(',', ': ')) - result = json.dumps(result, indent=2, sort_keys=True, separators=(',', ': ')) - assert expected == result + scan_results = json.dumps(scan_results, indent=2, sort_keys=True, separators=(',', ': ')) + assert expected == scan_results -def _load_json_result(result_file): +def load_json_result(result_file, strip_dates=False, clean_errs=True): """ Load the result file as utf-8 JSON Sort the results by location. """ with codecs.open(result_file, encoding='utf-8') as res: - scan_result = json.load(res, object_pairs_hook=OrderedDict) + scan_results = json.load(res, object_pairs_hook=OrderedDict) + + if strip_dates: + remove_dates(scan_results) + + if clean_errs: + clean_errors(scan_results) - if scan_result.get('scancode_version'): - del scan_result['scancode_version'] + if scan_results.get('scancode_version'): + del scan_results['scancode_version'] - scan_result['files'].sort(key=lambda x: x['path']) - return scan_result + # TODO: remove sort, this should no longer be needed + scan_results['files'].sort(key=lambda x: x['path']) + return scan_results -def run_scan_plain(options, cwd=None): +def remove_dates(scan_result): """ - Run a scan as a plain subprocess. Return rc, stdout, stderr. + Remove date fields from scan. 
""" - import scancode - scmd = b'scancode' if on_linux else 'scancode' - from commoncode.command import execute - scan_cmd = os.path.join(scancode.root_dir, scmd) - return execute(scan_cmd, options, cwd=cwd) + for scanned_file in scan_result['files']: + scanned_file.pop('date', None) -def run_scan_click(options, monkeypatch=None, catch_exceptions=False): +def clean_errors(scan_results): """ - Run a scan as a Click-controlled subprocess - If monkeypatch is provided, a tty with a size (80, 43) is mocked. - Return a click.testing.Result object. + Clean error fields from scan by keeping only the first and last line + (removing the stack traces). """ - import click - from click.testing import CliRunner - from scancode import cli - if monkeypatch: - monkeypatch.setattr(click._termui_impl, 'isatty', lambda _: True) - monkeypatch.setattr(click , 'get_terminal_size', lambda : (80, 43,)) - runner = CliRunner() - return runner.invoke(cli.scancode, options, catch_exceptions=catch_exceptions) + def clean(_errors): + """Modify the __errors list in place""" + for _i, _error in enumerate(_errors[:]): + _error_split = _error.splitlines(True) + if len(_error_split) <= 1: + continue + # keep first and last line + _clean_error = ''.join([_error_split[0] + _error_split[-1]]) + _errors[_i] = _clean_error + + top_level = scan_results.get('scan_errors') + if top_level: + clean(top_level) + + for result in scan_results['files']: + file_level = result.get('scan_errors') + if file_level: + clean(file_level) diff --git a/src/scancode/extract_cli.py b/src/scancode/extract_cli.py index d0dc23edccf..92887218e0e 100644 --- a/src/scancode/extract_cli.py +++ b/src/scancode/extract_cli.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. 
# Data generated with ScanCode require an acknowledgment. @@ -33,27 +33,22 @@ click.disable_unicode_literals_warning = True from commoncode import fileutils -from commoncode.fileutils import path_to_unicode from commoncode import filetype -from commoncode.system import on_linux from commoncode.text import toascii +from scancode_config import __version__ from scancode.api import extract_archives -from scancode.cli import print_about -from scancode.cli import version +from scancode import print_about from scancode import utils - # Python 2 and 3 support try: # Python 2 unicode - str = unicode + str = unicode # NOQA except NameError: # Python 3 - unicode = str - - + unicode = str # NOQA echo_stderr = partial(click.secho, err=True) @@ -61,7 +56,7 @@ def print_version(ctx, param, value): if not value or ctx.resilient_parsing: return - echo_stderr('ScanCode extractcode version ' + version) + echo_stderr('ScanCode extractcode version ' + __version__) ctx.exit() @@ -105,8 +100,7 @@ class ExtractCommand(utils.BaseCommand): @click.help_option('-h', '--help') @click.option('--about', is_flag=True, is_eager=True, callback=print_about, help='Show information about ScanCode and licensing and exit.') @click.option('--version', is_flag=True, is_eager=True, callback=print_version, help='Show the version and exit.') - -def extractcode(ctx, input, verbose, quiet, shallow, *args, **kwargs): # @ReservedAssignment +def extractcode(ctx, input, verbose, quiet, shallow, *args, **kwargs): # NOQA """extract archives and compressed files found in the file or directory tree. Use this command before scanning proper as an preparation step. 
@@ -130,7 +124,7 @@ def extract_event(item): if verbose: if item.done: return '' - line = source and utils.get_relative_path(path=source, len_base_path=len_base_path, base_is_dir=base_is_dir) or '' + line = source and get_relative_path(path=source, len_base_path=len_base_path, base_is_dir=base_is_dir) or '' else: line = source and fileutils.file_name(source) or '' if not isinstance(line, unicode): @@ -150,7 +144,7 @@ def display_extract_summary(): source = fileutils.as_posixpath(xev.source) if not isinstance(source, unicode): source = toascii(source, translit=True).decode('utf-8', 'replace') - source = utils.get_relative_path(path=source, len_base_path=len_base_path, base_is_dir=base_is_dir) + source = get_relative_path(path=source, len_base_path=len_base_path, base_is_dir=base_is_dir) for e in xev.errors: echo_stderr('ERROR extracting: %(source)s: %(e)s' % locals(), fg='red') for warn in xev.warnings: @@ -164,25 +158,45 @@ def display_extract_summary(): echo_stderr('Extracting done.', fg=summary_color, reset=True) - # use for relative paths computation len_base_path = len(abs_location) base_is_dir = filetype.is_dir(abs_location) extract_results = [] has_extract_errors = False + extractibles = extract_archives(abs_location, recurse=not shallow) + if not quiet: echo_stderr('Extracting archives...', fg='green') + with utils.progressmanager(extractibles, + item_show_func=extract_event, verbose=verbose) as extraction_events: - with utils.progressmanager(extract_archives(abs_location, recurse=not shallow), item_show_func=extract_event, - verbose=verbose, quiet=quiet) as extraction_events: - for xev in extraction_events: + for xev in extraction_events: + if xev.done and (xev.warnings or xev.errors): + has_extract_errors = has_extract_errors or xev.errors + extract_results.append(xev) + + display_extract_summary() + else: + for xev in extractibles: if xev.done and (xev.warnings or xev.errors): has_extract_errors = has_extract_errors or xev.errors 
extract_results.append(xev) - if not quiet: - display_extract_summary() - rc = 1 if has_extract_errors else 0 ctx.exit(rc) + + +def get_relative_path(path, len_base_path, base_is_dir): + """ + Return a posix relative path from the posix 'path' relative to a + base path of `len_base_path` length where the base is a directory if + `base_is_dir` True or a file otherwise. + """ + path = fileutils.fsdecode(path) + if base_is_dir: + rel_path = path[len_base_path:] + else: + rel_path = fileutils.file_name(path) + + return rel_path.lstrip('/') diff --git a/src/scancode/interrupt.py b/src/scancode/interrupt.py index c67b0c2d30e..913d0859e76 100644 --- a/src/scancode/interrupt.py +++ b/src/scancode/interrupt.py @@ -11,42 +11,46 @@ # specific language governing permissions and limitations under the License. # -from __future__ import print_function from __future__ import absolute_import +from __future__ import print_function from __future__ import unicode_literals -from commoncode.system import on_windows - -DEFAULT_TIMEOUT = 120 # seconds +from traceback import format_exc as traceback_format_exc +from commoncode.system import on_windows """ -This modules povides an interruptible() function to run a callable and -stop it after a timeout with a windows and POSIX implementation. +This modules povides an interruptible() function to run a callable and stop it +after a timeout with a windows and POSIX implementation. -Call `func` function with `args` and `kwargs` arguments and return a -tuple of (success, return value). `func` is invoked through an OS- -specific wrapper and will be interrupted if it does not return within -`timeout` seconds. +interruptible() calls the `func` function with `args` and `kwargs` arguments and +return a tuple of (error, value). `func` is invoked through an OS- specific +wrapper and will be interrupted if it does not return within `timeout` seconds. `func` returned results must be pickable. `timeout` in seconds defaults to DEFAULT_TIMEOUT. 
- `args` and `kwargs` are passed to `func` as *args and **kwargs. -In the returned tuple of (success, value), success is True or False. If -success is True, the call was successful and the second item in the -tuple is the returned value of `func`. +In the returned tuple of (`error`, `value`), `error` is an error string or None. +The error message is verbose with a full traceback. +`value` is the returned value of `func` or None. -If success is False, the call did not complete within `timeout` -seconds and was interrupted. In this case, the second item in the -tuple is an error message string. +If `error` is not None, the call did not complete within `timeout` +seconds and was interrupted. In this case, the returned `value` is None. """ + class TimeoutError(Exception): pass +DEFAULT_TIMEOUT = 120 # seconds + +TIMEOUT_MSG = 'ERROR: Processing interrupted: timeout after %(timeout)d seconds.' +ERROR_MSG = 'ERROR: Unknown error:\n' +NO_ERROR = None +NO_VALUE = None + if not on_windows: """ Some code based in part and inspired from the RobotFramework and @@ -68,7 +72,10 @@ class TimeoutError(Exception): permissions and limitations under the License. """ - import signal + from signal import ITIMER_REAL + from signal import SIGALRM + from signal import setitimer + from signal import signal as create_signal def interruptible(func, args=None, kwargs=None, timeout=DEFAULT_TIMEOUT): """ @@ -79,19 +86,18 @@ def handler(signum, frame): raise TimeoutError try: - signal.signal(signal.SIGALRM, handler) - signal.setitimer(signal.ITIMER_REAL, timeout) - return True, func(*(args or ()), **(kwargs or {})) + create_signal(SIGALRM, handler) + setitimer(ITIMER_REAL, timeout) + return NO_ERROR, func(*(args or ()), **(kwargs or {})) + except TimeoutError: - return False, ('ERROR: Processing interrupted: timeout after ' - '%(timeout)d seconds.' 
% locals()) + return TIMEOUT_MSG % locals(), NO_VALUE except Exception: - import traceback - return False, ('ERROR: Unknown error:\n' + traceback.format_exc()) + return ERROR_MSG + traceback_format_exc(), NO_VALUE finally: - signal.setitimer(signal.ITIMER_REAL, 0) + setitimer(ITIMER_REAL, 0) else: """ @@ -101,44 +107,54 @@ def handler(signum, frame): But not code has been reused from this post. """ - import ctypes - import multiprocessing - import Queue + from ctypes import c_long + from ctypes import py_object + from ctypes import pythonapi + from multiprocessing import TimeoutError as MpTimeoutError + from Queue import Empty as Queue_Empty + from Queue import Queue try: - import thread + from thread import start_new_thread except ImportError: - import _thread as thread - + from _thread import start_new_thread def interruptible(func, args=None, kwargs=None, timeout=DEFAULT_TIMEOUT): """ Windows, threads-based interruptible runner. It can work also on POSIX, but is not reliable and works only if everything is pickable. """ - # We run `func` in a thread and run a loop until timeout - results = Queue.Queue() + # We run `func` in a thread and block on a queue until timeout + results = Queue() def runner(): - results.put(func(*(args or ()), **(kwargs or {}))) + try: + _res = func(*(args or ()), **(kwargs or {})) + results.put((NO_ERROR, _res,)) + except Exception: + results.put((ERROR_MSG + traceback_format_exc(), NO_VALUE,)) - tid = thread.start_new_thread(runner, ()) + tid = start_new_thread(runner, ()) try: - res = results.get(timeout=timeout) - return True, res - except (Queue.Empty, multiprocessing.TimeoutError): - return False, ('ERROR: Processing interrupted: timeout after ' - '%(timeout)d seconds.' 
% locals()) + err_res = results.get(timeout=timeout) + + if not err_res: + return ERROR_MSG, NO_VALUE + + return err_res + + except (Queue_Empty, MpTimeoutError): + return TIMEOUT_MSG % locals(), NO_VALUE + except Exception: - import traceback - return False, ('ERROR: Unknown error:\n' + traceback.format_exc()) + return ERROR_MSG + traceback_format_exc(), NO_VALUE + finally: try: async_raise(tid, Exception) except (SystemExit, ValueError): pass - def async_raise(tid, exctype=Exception): """ Raise an Exception in the Thread with id `tid`. Perform cleanup if @@ -150,15 +166,15 @@ def async_raise(tid, exctype=Exception): """ assert isinstance(tid, int), 'Invalid thread id: must an integer' - tid = ctypes.c_long(tid) - exception = ctypes.py_object(Exception) - res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, exception) + tid = c_long(tid) + exception = py_object(Exception) + res = pythonapi.PyThreadState_SetAsyncExc(tid, exception) if res == 0: raise ValueError('Invalid thread id.') elif res != 1: # if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect - ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0) + pythonapi.PyThreadState_SetAsyncExc(tid, 0) raise SystemError('PyThreadState_SetAsyncExc failed.') @@ -170,7 +186,6 @@ def fake_interruptible(func, args=None, kwargs=None, timeout=DEFAULT_TIMEOUT): """ try: - return True, func(*(args or ()), **(kwargs or {})) + return NO_ERROR, func(*(args or ()), **(kwargs or {})) except Exception: - import traceback - return False, ('ERROR: Unknown error:\n' + traceback.format_exc()) + return ERROR_MSG + traceback_format_exc(), NO_VALUE diff --git a/src/formattedcode/format_jsonlines.py b/src/scancode/plugin_copyright.py similarity index 59% rename from src/formattedcode/format_jsonlines.py rename to src/scancode/plugin_copyright.py index e79b572acdc..d69edccc9c1 100644 --- a/src/formattedcode/format_jsonlines.py +++ b/src/scancode/plugin_copyright.py @@ 
-1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -23,38 +23,39 @@ # Visit https://github.com/nexB/scancode-toolkit/ for support and download. from __future__ import absolute_import +from __future__ import division +from __future__ import print_function from __future__ import unicode_literals -from collections import OrderedDict +import attr -import simplejson +from plugincode.scan import ScanPlugin +from plugincode.scan import scan_impl +from scancode import CommandLineOption +from scancode import SCAN_GROUP -from plugincode.output import scan_output_writer +@scan_impl +class CopyrightScanner(ScanPlugin): + """ + Scan a Resource for copyrights. + """ -""" -Output plugins to write scan results as JSON Lines. -""" + attributes = dict(copyrights=attr.ib(default=attr.Factory(list))) + sort_order = 4 -@scan_output_writer -def write_jsonlines(files_count, version, notice, scanned_files, options, output_file, *args, **kwargs): - """ - Write scan output formatted as JSON Lines. 
- """ - header = dict(header=OrderedDict([ - ('scancode_notice', notice), - ('scancode_version', version), - ('scancode_options', options), - ('files_count', files_count) - ])) - - kwargs = dict(iterable_as_array=True, encoding='utf-8', separators=(',', ':',)) - - output_file.write(simplejson.dumps(header, **kwargs)) - output_file.write('\n') - - for scanned_file in scanned_files: - scanned_file_line = {'files': [scanned_file]} - output_file.write(simplejson.dumps(scanned_file_line, **kwargs)) - output_file.write('\n') + options = [ + CommandLineOption(('-c', '--copyright',), + is_flag=True, default=False, + help='Scan for copyrights.', + help_group=SCAN_GROUP, + sort_order=50), + ] + + def is_enabled(self, copyright, **kwargs): # NOQA + return copyright + + def get_scanner(self, **kwargs): + from scancode.api import get_copyrights + return get_copyrights diff --git a/tests/scancode/test_scan_cache.py b/src/scancode/plugin_email.py similarity index 63% rename from tests/scancode/test_scan_cache.py rename to src/scancode/plugin_email.py index f934d550914..9dfea3c8ec0 100644 --- a/tests/scancode/test_scan_cache.py +++ b/src/scancode/plugin_email.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -23,29 +23,37 @@ # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
from __future__ import absolute_import -from __future__ import print_function from __future__ import division +from __future__ import print_function from __future__ import unicode_literals -import os +import attr + +from plugincode.scan import ScanPlugin +from plugincode.scan import scan_impl +from scancode import CommandLineOption +from scancode import OTHER_SCAN_GROUP -from commoncode.testcase import FileBasedTesting -from scancode.cache import ScanFileCache +@scan_impl +class EmailScanner(ScanPlugin): + """ + Scan a Resource for emails. + """ + attributes = dict(emails=attr.ib(default=attr.Factory(list))) + sort_order = 8 -class TestCache(FileBasedTesting): - test_data_dir = os.path.join(os.path.dirname(__file__), 'data') + options = [ + CommandLineOption(('-e', '--email',), + is_flag=True, default=False, + help='Scan for emails.', + help_group=OTHER_SCAN_GROUP) + ] - def test_can_cache(self): - test_file = self.get_test_loc('cache/package/package.json') - from scancode import api - package = api.get_package_infos(test_file) - file_info = dict(sha1='def') + def is_enabled(self, email, **kwargs): + return email - test_dir = self.get_temp_dir() - cache = ScanFileCache(test_dir) - cache.put_info(path='abc', file_info=file_info) - cache.put_scan(path='abc', file_info=file_info, scan_result=package) - assert file_info == cache.get_info(path='abc') - assert package == cache.get_scan(path='abc', file_info=file_info) + def get_scanner(self, **kwargs): + from scancode.api import get_emails + return get_emails diff --git a/src/scancode/plugin_ignore.py b/src/scancode/plugin_ignore.py index fec8de91704..ecc7126a8de 100644 --- a/src/scancode/plugin_ignore.py +++ b/src/scancode/plugin_ignore.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. 
# Data generated with ScanCode require an acknowledgment. @@ -25,19 +25,58 @@ from __future__ import absolute_import from __future__ import unicode_literals +from functools import partial + +from commoncode.fileset import match from plugincode.pre_scan import PreScanPlugin from plugincode.pre_scan import pre_scan_impl +from scancode import CommandLineOption +from scancode import PRE_SCAN_GROUP @pre_scan_impl class ProcessIgnore(PreScanPlugin): """ - Ignore files matching . + Ignore files matching the supplied pattern. """ - option_attrs = dict(multiple=True, metavar='') - def __init__(self, user_input): - super(ProcessIgnore, self).__init__(user_input) + options = [ + CommandLineOption(('--ignore',), + multiple=True, + metavar='', + help='Ignore files matching .', + help_group=PRE_SCAN_GROUP) + ] + + def is_enabled(self, ignore, **kwargs): + return ignore + + def process_codebase(self, codebase, ignore=(), **kwargs): + """ + Remove ignored Resources from the resource tree. + """ + + if not ignore: + return + + ignores = { + pattern: 'User ignore: Supplied by --ignore' for pattern in ignore + } - def get_ignores(self): - return {pattern: 'User ignore: Supplied by --ignore' for pattern in self.user_input} + ignorable = partial(is_ignored, ignores=ignores) + + remove_resource = codebase.remove_resource + # first walk top down the codebase and collect ignored resource ids + for resource in codebase.walk(topdown=True): + if ignorable(resource.path): + for child in resource.children(codebase): + remove_resource(child) + remove_resource(resource) + + +def is_ignored(location, ignores): + """ + Return a tuple of (pattern , message) if a file at location is ignored or + False otherwise. `ignores` is a mappings of patterns to a reason. 
+ """ + return match(location, includes=ignores, excludes={}) diff --git a/src/scancode/plugin_info.py b/src/scancode/plugin_info.py new file mode 100644 index 00000000000..ce461d04ddb --- /dev/null +++ b/src/scancode/plugin_info.py @@ -0,0 +1,76 @@ +# +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. +# http://nexb.com and https://github.com/nexB/scancode-toolkit/ +# The ScanCode software is licensed under the Apache License version 2.0. +# Data generated with ScanCode require an acknowledgment. +# ScanCode is a trademark of nexB Inc. +# +# You may not use this software except in compliance with the License. +# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# +# When you publish or redistribute any data created with ScanCode or any ScanCode +# derivative work, you must accompany this data with the following acknowledgment: +# +# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, either express or implied. No content created from +# ScanCode should be considered or used as legal advice. Consult an Attorney +# for any legal advice. +# ScanCode is a free software code scanning tool from nexB Inc. and others. +# Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from collections import OrderedDict + +import attr + +from plugincode.scan import ScanPlugin +from plugincode.scan import scan_impl +from scancode import CommandLineOption +from scancode import OTHER_SCAN_GROUP + + +@scan_impl +class InfoScanner(ScanPlugin): + """ + Scan a file Resource for miscellaneous information such as mime/filetype and + basic checksums. + """ + attributes = OrderedDict([ + ('date', attr.ib(default=None)), + ('sha1', attr.ib(default=None)), + ('md5', attr.ib(default=None)), + ('mime_type', attr.ib(default=None)), + ('file_type', attr.ib(default=None)), + ('programming_language', attr.ib(default=None)), + ('is_binary', attr.ib(default=False, type=bool)), + ('is_text', attr.ib(default=False, type=bool)), + ('is_archive', attr.ib(default=False, type=bool)), + ('is_media', attr.ib(default=False, type=bool)), + ('is_source', attr.ib(default=False, type=bool)), + ('is_script', attr.ib(default=False, type=bool)), + ]) + + sort_order = 0 + + options = [ + CommandLineOption(('-i', '--info'), + is_flag=True, default=False, + help='Scan for file information (size, type, checksums, etc).', + help_group=OTHER_SCAN_GROUP, sort_order=10 + ) + ] + + def is_enabled(self, info, **kwargs): + return info + + def get_scanner(self, **kwargs): + from scancode.api import get_file_info + return get_file_info diff --git a/src/scancode/plugin_license.py b/src/scancode/plugin_license.py new file mode 100644 index 00000000000..83da619e1a8 --- /dev/null +++ b/src/scancode/plugin_license.py @@ -0,0 +1,127 @@ +# +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. +# http://nexb.com and https://github.com/nexB/scancode-toolkit/ +# The ScanCode software is licensed under the Apache License version 2.0. +# Data generated with ScanCode require an acknowledgment. +# ScanCode is a trademark of nexB Inc. 
+# +# You may not use this software except in compliance with the License. +# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# +# When you publish or redistribute any data created with ScanCode or any ScanCode +# derivative work, you must accompany this data with the following acknowledgment: +# +# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, either express or implied. No content created from +# ScanCode should be considered or used as legal advice. Consult an Attorney +# for any legal advice. +# ScanCode is a free software code scanning tool from nexB Inc. and others. +# Visit https://github.com/nexB/scancode-toolkit/ for support and download. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from functools import partial + +import attr + +from plugincode.scan import ScanPlugin +from plugincode.scan import scan_impl +from scancode import CommandLineOption +from scancode import MISC_GROUP +from scancode import SCAN_OPTIONS_GROUP +from scancode import SCAN_GROUP +from scancode.api import DEJACODE_LICENSE_URL + + +def reindex_licenses(ctx, param, value): + if not value or ctx.resilient_parsing: + return + + # TODO: check for temp file configuration and use that for the cache!!! 
+ from licensedcode.cache import get_cached_index + import click + click.echo('Checking and rebuilding the license index...') + get_cached_index(check_consistency=True,) + click.echo('Done.') + ctx.exit(0) + + +@scan_impl +class LicenseScanner(ScanPlugin): + """ + Scan a Resource for licenses. + """ + + attributes = dict(licenses=attr.ib(default=attr.Factory(list))) + + sort_order = 2 + + options = [ + CommandLineOption(('-l', '--license'), + is_flag=True, + help='Scan for licenses.', + help_group=SCAN_GROUP, + sort_order=10), + + CommandLineOption(('--license-score',), + type=int, default=0, show_default=True, + requires=['license'], + help='Do not return license matches with a score lower than this score. ' + 'A number between 0 and 100.', + help_group=SCAN_OPTIONS_GROUP), + + CommandLineOption(('--license-text',), + is_flag=True, + requires=['license'], + help='Include the detected licenses matched text.', + help_group=SCAN_OPTIONS_GROUP), + + CommandLineOption(('--license-url-template',), + default=DEJACODE_LICENSE_URL, show_default=True, + requires=['license'], + help='Set the template URL used for the license reference URLs. ' + 'Curly braces ({}) are replaced by the license key.', + help_group=SCAN_OPTIONS_GROUP), + + CommandLineOption(('--license-diag',), + is_flag=True, + requires=['license'], + help='Include diagnostic information in license scan results.', + help_group=SCAN_OPTIONS_GROUP), + + CommandLineOption( + ('--reindex-licenses',), + is_flag=True, is_eager=True, + callback=reindex_licenses, + help='Check the license index cache and reindex if needed and exit.', + help_group=MISC_GROUP) + ] + + def is_enabled(self, license, **kwargs): # NOQA + return license + + def setup(self, cache_dir, **kwargs): + """ + This is a cache warmup such that child process inherit from this. 
+ """ + from scancode_config import SCANCODE_DEV_MODE + from licensedcode.cache import get_index + get_index(cache_dir, check_consistency=SCANCODE_DEV_MODE, + return_value=False) + + def get_scanner(self, license_score=0, license_text=False, + license_url_template=DEJACODE_LICENSE_URL, + license_diag=False, cache_dir=None, **kwargs): + + from scancode.api import get_licenses + return partial(get_licenses, min_score=license_score, + include_text=license_text, diag=license_diag, + license_url_template=license_url_template, + cache_dir=cache_dir) diff --git a/src/scancode/plugin_mark_source.py b/src/scancode/plugin_mark_source.py index 6fefc52012b..f1810793ea8 100644 --- a/src/scancode/plugin_mark_source.py +++ b/src/scancode/plugin_mark_source.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -24,53 +24,75 @@ from __future__ import absolute_import from __future__ import division +from __future__ import print_function from __future__ import unicode_literals -from os import path +import attr +from plugincode.post_scan import PostScanPlugin from plugincode.post_scan import post_scan_impl +from scancode import CommandLineOption +from scancode import POST_SCAN_GROUP @post_scan_impl -def process_mark_source(active_scans, results): +class MarkSource(PostScanPlugin): """ Set the "is_source" flag to true for directories that contain over 90% of source files as direct children. Has no effect unless the --info scan is requested. """ - # FIXME: this is forcing all the scan results to be loaded in memory - # and defeats lazy loading from cache - results = list(results) - - # FIXME: we should test for active scans instead, but "info" may not - # be present for now. 
check if the first item has a file info. - has_file_info = 'type' in results[0] - - if not has_file_info: - # just yield results untouched - for scanned_file in results: - yield scanned_file - return - - # FIXME: this is an nested loop, looping twice on results - # TODO: this may not recusrively roll up the is_source flag, as we - # may not iterate bottom up. - for scanned_file in results: - if scanned_file['type'] == 'directory' and scanned_file['files_count'] > 0: - source_files_count = 0 - for scanned_file2 in results: - if path.dirname(scanned_file2['path']) == scanned_file['path']: - if scanned_file2['is_source']: - source_files_count += 1 - mark_source(source_files_count, scanned_file) - yield scanned_file - - -def mark_source(source_files_count, scanned_file): + attributes = dict(source_count=attr.ib(default=0, type=int)) + + sort_order = 8 + + options = [ + CommandLineOption(('--mark-source',), + is_flag=True, default=False, + requires=['info'], + help='Set the "is_source" to true for directories that contain ' + 'over 90% of source files as children and descendants. ' + 'Count the number of source files in a directory as a new source_file_counts attribute', + help_group=POST_SCAN_GROUP) + ] + + def is_enabled(self, mark_source, info, **kwargs): + return mark_source and info + + def process_codebase(self, codebase, mark_source, **kwargs): + """ + Set the `is_source` to True in directories if they contain over 90% of + source code files at full depth. 
+ """ + if not mark_source: + return + + for resource in codebase.walk(topdown=False): + if resource.is_file: + continue + + children = resource.children(codebase) + if not children: + continue + + src_count = sum(1 for c in children + if c.is_file and c.is_source) + + src_count += sum(c.source_count for c in children + if not c.is_file) + + is_source = is_source_directory(src_count, resource.files_count) + + if src_count and is_source: + resource.is_source = is_source + resource.source_count = src_count + codebase.save_resource(resource) + + +def is_source_directory(src_count, files_count): """ - Set `is_source` to True for a `scanned_file` directory if - `source_files_count` is >=90% of files_count for this directory. + Return True is this resource is a source directory with at least over 90% of + source code files at full depth. """ - if source_files_count / scanned_file['files_count'] >= 0.9: - scanned_file['is_source'] = True + return src_count / files_count >= 0.9 diff --git a/src/scancode/plugin_only_findings.py b/src/scancode/plugin_only_findings.py index db155543e1d..9e8d2233b8c 100644 --- a/src/scancode/plugin_only_findings.py +++ b/src/scancode/plugin_only_findings.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. 
@@ -25,32 +25,52 @@ from __future__ import absolute_import from __future__ import unicode_literals -from plugincode.post_scan import post_scan_impl +from plugincode.output_filter import OutputFilterPlugin +from plugincode.output_filter import output_filter_impl +from scancode import CommandLineOption +from scancode import OUTPUT_FILTER_GROUP -@post_scan_impl -def process_only_findings(active_scans, results): +@output_filter_impl +class OnlyFindings(OutputFilterPlugin): """ - Only return files or directories with findings for the requested - scans. Files and directories without findings are omitted (not - considering basic file information as findings). + Filter files or directories without scan findings for the requested scans. """ - # FIXME: this is forcing all the scan results to be loaded in memory - # and defeats lazy loading from cache. Only a different caching - # (e.g. DB) could work here. - # FIXME: We should instead use a generator or use a filter function - # that pass to the scan results loader iterator - for scanned_file in results: - if has_findings(active_scans, scanned_file): - yield scanned_file + options = [ + CommandLineOption(('--only-findings',), is_flag=True, + help='Only return files or directories with findings for the ' + 'requested scans. Files and directories without findings are ' + 'omitted (file information is not treated as findings).', + help_group=OUTPUT_FILTER_GROUP) + ] + def is_enabled(self, only_findings, **kwargs): + return only_findings -def has_findings(active_scans, scanned_file): + def process_codebase(self, codebase, attributes_by_plugin, **kwargs): + """ + Set Resource.is_filtered to True for resources from the codebase that do + not have findings e.g. if they have no scan data (cinfo) and no + errors. 
+ """ + attributes_with_findings = set(['scan_errors']) + for plugin_qname, keys in attributes_by_plugin.items(): + if plugin_qname == 'scan:info': + # skip info attributes + continue + attributes_with_findings.update(keys) + + for resource in codebase.walk(): + if has_findings(resource, attributes_with_findings): + continue + resource.is_filtered = True + codebase.save_resource(resource) + + +def has_findings(resource, attributes_with_findings): """ - Return True if the `scanned_file` has findings for any of the - `active_scans` names list (excluding basic file information) - or any errors occured when scanning the file. + Return True if this resource has findings. """ - findings = active_scans + ['scan_errors'] - return any(scanned_file.get(scan_name) for scan_name in findings) + attribs = (getattr(resource, key, None) for key in attributes_with_findings) + return bool(any(attribs)) diff --git a/tests/scancode/test_mark_source.py b/src/scancode/plugin_package.py similarity index 60% rename from tests/scancode/test_mark_source.py rename to src/scancode/plugin_package.py index 03dff2460ca..2784ccca956 100644 --- a/tests/scancode/test_mark_source.py +++ b/src/scancode/plugin_package.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -23,22 +23,39 @@ # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
from __future__ import absolute_import +from __future__ import division +from __future__ import print_function from __future__ import unicode_literals -from unittest import TestCase -from scancode.plugin_mark_source import mark_source +import attr +from plugincode.scan import ScanPlugin +from plugincode.scan import scan_impl +from scancode import CommandLineOption +from scancode import SCAN_GROUP -class TestMarkSource(TestCase): - def test_mark_source_above_threshold(self): - test_dict = dict(files_count=10, is_source=False) - test_source_file_count = 9 - mark_source(test_source_file_count, test_dict) - assert test_dict['is_source'] +@scan_impl +class PackageScanner(ScanPlugin): + """ + Scan a Resource for Package manifests. + """ - def test_mark_source_below_threshold(self): - test_dict = dict(files_count=10, is_source=False) - test_source_file_count = 5 - mark_source(test_source_file_count, test_dict) - assert not test_dict['is_source'] + attributes = dict(packages=attr.ib(default=attr.Factory(list))) + + sort_order = 6 + + options = [ + CommandLineOption(('-p', '--package',), + is_flag=True, default=False, + help='Scan for packages.', + help_group=SCAN_GROUP, + sort_order=20), + ] + + def is_enabled(self, package, **kwargs): + return package + + def get_scanner(self, **kwargs): + from scancode.api import get_package_info + return get_package_info diff --git a/tests/scancode/test_has_findings.py b/src/scancode/plugin_url.py similarity index 62% rename from tests/scancode/test_has_findings.py rename to src/scancode/plugin_url.py index be0bd00646f..7e18ef9e687 100644 --- a/tests/scancode/test_has_findings.py +++ b/src/scancode/plugin_url.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. 
# Data generated with ScanCode require an acknowledgment. @@ -23,27 +23,38 @@ # Visit https://github.com/nexB/scancode-toolkit/ for support and download. from __future__ import absolute_import +from __future__ import division +from __future__ import print_function from __future__ import unicode_literals -from unittest import TestCase -from scancode.plugin_only_findings import has_findings +import attr +from plugincode.scan import ScanPlugin +from plugincode.scan import scan_impl +from scancode import CommandLineOption +from scancode import OTHER_SCAN_GROUP -class TestHasFindings(TestCase): - def test_has_findings(self): - scanned_file = {'licenses': ['MIT']} - active_scans = ['licenses'] +@scan_impl +class UrlScanner(ScanPlugin): + """ + Scan a Resource for URLs. + """ - assert has_findings(active_scans, scanned_file) + attributes = dict(urls=attr.ib(default=attr.Factory(list))) - def test_has_findings_includes_errors(self): - active_scans = [] - scanned_file = { - 'scan_errors': [ - 'ERROR: Processing interrupted: timeout after 10 seconds.' - ] - } + sort_order = 10 - assert has_findings(active_scans, scanned_file) + options = [ + CommandLineOption(('-u', '--url',), + is_flag=True, default=False, + help='Scan for urls.', + help_group=OTHER_SCAN_GROUP) + ] + def is_enabled(self, url, **kwargs): + return url + + def get_scanner(self, **kwargs): + from scancode.api import get_urls + return get_urls diff --git a/src/scancode/pool.py b/src/scancode/pool.py index 6fdc4f71d0a..ba3875e3f92 100644 --- a/src/scancode/pool.py +++ b/src/scancode/pool.py @@ -9,7 +9,6 @@ Apply proper monkeypatch to work around some bugs or limitations. 
""" - """ Monkeypatch Pool iterators so that Ctrl-C interrupts everything properly derived from https://gist.github.com/aljungberg/626518 @@ -39,15 +38,19 @@ from multiprocessing import pool + def wrapped(func): # ensure that we do not double wrap if func.func_name != 'wrap': + def wrap(self, timeout=None): return func(self, timeout=timeout or 1e10) + return wrap else: return func + pool.IMapIterator.next = wrapped(pool.IMapIterator.next) pool.IMapIterator.__next__ = pool.IMapIterator.next pool.IMapUnorderedIterator.next = wrapped(pool.IMapUnorderedIterator.next) diff --git a/src/scancode/resource.py b/src/scancode/resource.py new file mode 100644 index 00000000000..68be015faa9 --- /dev/null +++ b/src/scancode/resource.py @@ -0,0 +1,1017 @@ +# +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. +# http://nexb.com and https://github.com/nexB/scancode-toolkit/ +# The ScanCode software is licensed under the Apache License version 2.0. +# Data generated with ScanCode require an acknowledgment. +# ScanCode is a trademark of nexB Inc. +# +# You may not use this software except in compliance with the License. +# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# +# When you publish or redistribute any data created with ScanCode or any ScanCode +# derivative work, you must accompany this data with the following acknowledgment: +# +# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, either express or implied. No content created from +# ScanCode should be considered or used as legal advice. Consult an Attorney +# for any legal advice. 
+# ScanCode is a free software code scanning tool from nexB Inc. and others. +# Visit https://github.com/nexB/scancode-toolkit/ for support and download. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import codecs +from collections import deque +from collections import OrderedDict +from functools import partial +import json +import os +from os import walk as os_walk +from os.path import abspath +from os.path import exists +from os.path import expanduser +from os.path import join +from os.path import normpath +import posixpath +import traceback +import sys + +import attr +from intbitset import intbitset + +from scancode_config import scancode_temp_dir + +from commoncode.filetype import is_file as filetype_is_file +from commoncode.filetype import is_special +from commoncode.fileutils import POSIX_PATH_SEP +from commoncode.fileutils import WIN_PATH_SEP +from commoncode.fileutils import as_posixpath +from commoncode.fileutils import create_dir +from commoncode.fileutils import delete +from commoncode.fileutils import file_name +from commoncode.fileutils import fsdecode +from commoncode.fileutils import fsencode +from commoncode.fileutils import parent_directory +from commoncode.fileutils import splitext_name +from commoncode import ignore +from commoncode.system import on_linux + +# Python 2 and 3 support +try: + # Python 2 + unicode + str_orig = str + bytes = str # NOQA + str = unicode # NOQA +except NameError: + # Python 3 + unicode = str # NOQA + +""" +This module provides Codebase and Resource objects as an abstraction for files +and directories used throughout ScanCode. ScanCode deals with a lot of these as +they are the basic unit of processing. + +A Codebase is a tree of Resource. A Resource represents a file or directory and +holds essential file information as attributes. At runtime, scan data is added +as attributes to a Resource. 
Resource are kept in memory or saved on disk. + +This module handles all the details of walking files, path handling and caching. +""" + +# Tracing flags +TRACE = False +TRACE_DEEP = False + + +def logger_debug(*args): + pass + + +if TRACE or TRACE_DEEP: + import logging + + logger = logging.getLogger(__name__) + # logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) + logging.basicConfig(stream=sys.stdout) + logger.setLevel(logging.DEBUG) + + def logger_debug(*args): + return logger.debug( + ' '.join(isinstance(a, unicode) and a or repr(a) for a in args)) + + +class ResourceNotInCache(Exception): + pass + + +class UnknownResource(Exception): + pass + + +class Codebase(object): + """ + Represent a codebase being scanned. A Codebase is a tree of Resources. + """ + + def __init__(self, location, resource_class=None, + full_root=False, strip_root=False, + temp_dir=scancode_temp_dir, + max_in_memory=10000): + """ + Initialize a new codebase rooted at the `location` existing file or + directory. + + `resource_class` is a Resource sub-class configured to accept plugin- + provided scan attributes. + + `strip_root` and `full_root`: boolean flags: these controls the values + of the path attribute of the codebase Resources. These are mutually + exclusive. + If `strip_root` is True, strip the first `path` segment of a Resource + unless the codebase contains a single root Resource. + If `full_root` is True the path is an an absolute path. + + `temp_dir` is the base temporary directory to use to cache resources on + disk and other temporary files. + + `max_in_memory` is the maximum number of Resource instances to keep in + memory. Beyond this number, Resource are saved on disk instead. -1 means + no memory is used and 0 means unlimited memory is used. + """ + self.original_location = location + self.full_root = full_root + self.strip_root = strip_root + + # Resourse sub-class to use. Configured with plugin attributes attached. 
+ self.resource_class = resource_class or Resource + + # dir used for caching and other temp files + self.temp_dir = temp_dir + + # maximmum number of Resource objects kept in memory cached in this + # Codebase. When the number of in-memory Resources exceed this number, + # the next Resource instances are saved to disk instead and re-loaded + # from disk when used/needed. + self.max_in_memory = max_in_memory + + # setup location + ######################################################################## + if on_linux: + location = fsencode(location) + else: + location = fsdecode(location) + + location = abspath(normpath(expanduser(location))) + location = location.rstrip(POSIX_PATH_SEP).rstrip(WIN_PATH_SEP) + + # TODO: we should also accept to create "virtual" codebase without a + # backing filesystem location + assert exists(location) + # FIXME: what if is_special(location)??? + self.location = location + self.is_file = filetype_is_file(location) + + # setup Resources + ######################################################################## + # root resource, never cached on disk + self.root = None + + # set index of existing resource ids ints, initially allocated with + # 10000 positions (this will grow as needed) + self.resource_ids = intbitset(10000) + + # True if this codebase root is a file or an empty directory. 
+ self.has_single_resource = bool(self.is_file or not os.listdir(location)) + + # setup caching + ######################################################################## + # map of {rid: resource} for resources that are kept in memory + self.resources = {} + # use only memory + self.all_in_memory = max_in_memory == 0 + # use only disk + self.all_on_disk = max_in_memory == -1 + # dir where the on-disk cache is stored + self.cache_dir = None + if not self.all_in_memory: + # this is unique to this codebase instance + self.cache_dir = get_codebase_cache_dir(temp_dir=temp_dir) + + # setup extra misc attributes + ######################################################################## + # mapping of scan summary data and statistics at the codebase level such + # as ScanCode version, notice, command options, etc. + # This is populated automatically. + self.summary = OrderedDict() + + # mapping of timings for scan stage as {stage: time in seconds as float} + # This is populated automatically. + self.timings = OrderedDict() + + # list of errors from collecting the codebase details (such as + # unreadable file, etc). + self.errors = [] + + # finally walk the location and populate + ######################################################################## + self._populate() + + def _get_next_rid(self): + """ + Return the next available resource id. + """ + return len(self.resource_ids) + + def _get_resource_cache_location(self, rid, create=False): + """ + Return the location where to get/put a Resource in the cache given a + Resource `rid`. Create the directories if requested. + """ + if not self.cache_dir: + return + resid = (b'%08x'if on_linux else '%08x') % rid + cache_sub_dir, cache_file_name = resid[-2:], resid + parent = join(self.cache_dir, cache_sub_dir) + if create and not exists(parent): + create_dir(parent) + return join(parent, cache_file_name) + + # TODO: add populate progress manager!!! + def _populate(self): + """ + Populate this codebase with Resource objects. 
+
+ Population is done by walking its `location` topdown, breadth-first,
+ first creating first file then directory Resources both sorted in case-
+ insensitive name order.
+
+ Special files, links and VCS files are ignored.
+ """
+
+ def err(_error):
+ """os.walk error handler"""
+ self.errors.append(
+ ('ERROR: cannot populate codebase: %(_error)r\n' % _error)
+ + traceback.format_exc())
+
+ def skip_ignored(_loc):
+ """Always ignore VCS and some special filetypes."""
+ ignored = partial(ignore.is_ignored, ignores=ignore.ignores_VCS)
+
+ if TRACE_DEEP:
+ logger_debug()
+ logger_debug('Codebase.populate: walk: ignored loc:', _loc,
+ 'ignored:', ignored(_loc),
+ 'is_special:', is_special(_loc))
+
+ return is_special(_loc) or ignored(_loc)
+
+ def create_resources(_seq, _top, _parent, _is_file):
+ """Create Resources of parent from a seq of files or directories."""
+ _seq.sort(key=lambda p: (p.lower(), p))
+ for name in _seq:
+ location = join(_top, name)
+ if skip_ignored(location):
+ continue
+ res = self.create_resource(name, parent=_parent, is_file=_is_file)
+ if not _is_file:
+ # on the plain, bare FS, files cannot be parents
+ parent_by_loc[location] = res
+ if TRACE: logger_debug('Codebase.populate:', res)
+
+ root = self.create_root_resource()
+ if TRACE: logger_debug('Codebase.populate: root:', root)
+
+ if self.has_single_resource:
+ # there is nothing else to do for a single file or a single
+ # childless directory
+ return
+
+ # track resources parents by location during construction.
+ # NOTE: this cannot exhaust memory on a large codebase, because we do
+ # not keep parents already walked and we walk topdown.
+ parent_by_loc = {root.location: root}
+
+ # walk proper
+ for top, dirs, files in os_walk(root.location, topdown=True, onerror=err):
+ if skip_ignored(top):
+ continue
+ # the parent reference is needed only once in a top-down walk, hence
+ # the pop
+ parent = parent_by_loc.pop(top)
+ create_resources(files, top, parent, _is_file=True)
+ create_resources(dirs, top, parent, _is_file=False)
+
+ def create_root_resource(self):
+ """
+ Create and return the root Resource of this codebase.
+ """
+ # we cannot recreate a root if it exists!!
+ if self.root:
+ raise TypeError('Root resource already exists and cannot be recreated')
+
+ location = self.location
+ name = file_name(location)
+
+ # do not strip root for codebase with a single Resource.
+ if self.strip_root:
+ if self.has_single_resource:
+ path = fsdecode(name)
+ else:
+ # NOTE: this may seem weird but the root path will be an empty
+ # string for a codebase root with strip_root=True if not
+ # single_resource
+ path = ''
+ else:
+ path = get_path(location, location, full_root=self.full_root,
+ strip_root=self.strip_root)
+ if TRACE:
+ logger_debug(' Codebase.create_root_resource:', path)
+ logger_debug()
+
+ root = self.resource_class(name=name, location=location, path=path,
+ rid=0, pid=None, is_file=self.is_file)
+
+ self.resource_ids.add(0)
+ self.resources[0] = root
+ self.root = root
+ return root
+
+ def create_resource(self, name, parent, is_file=False):
+ """
+ Create and return a new Resource in this codebase with `name` as a child
+ of the `parent` Resource.
+ `name` is always in native OS-preferred encoding (e.g. byte on Linux,
+ unicode elsewhere).
+ """ + if parent is None: + raise TypeError('Cannot create resource without parent.') + + rid = self._get_next_rid() + + if self._use_disk_cache_for_resource(rid): + cache_location = self._get_resource_cache_location(rid, create=True) + else: + cache_location = None + + location = join(parent.location, name) + path = posixpath.join(parent.path, fsdecode(name)) + if TRACE: + logger_debug(' Codebase.create_resource: parent.path:', parent.path, 'path:', path) + + child = self.resource_class( + name=name, + location=location, + path=path, + cache_location=cache_location, + rid=rid, + pid=parent.rid, + is_file=is_file + ) + + self.resource_ids.add(rid) + parent.children_rids.append(rid) + # TODO: fixme, this is not great to save also the parent :| + self.save_resource(parent) + self.save_resource(child) + return child + + def exists(self, resource): + """ + Return True if the Resource with `rid` exists in the codebase. + """ + return resource.rid in self.resource_ids + + def _use_disk_cache_for_resource(self, rid): + """ + Return True if Resource `rid` should be cached on-disk or False if it + should be cached in-memory. + """ + if TRACE: + msg = [' Codebase._use_disk_cache_for_resource:, rid:', rid, 'mode:'] + if rid == 0: + msg.append('root') + elif self.all_on_disk: + msg.append('all_on_disk') + elif self.all_in_memory: + msg.append('all_in_memory') + else: + msg.extend(['mixed:', 'self.max_in_memory:', self.max_in_memory]) + if rid < self.max_in_memory: + msg.append('from memory') + else: + msg.append('from disk') + logger_debug(*msg) + + if rid == 0: + return False + elif self.all_on_disk: + return True + elif self.all_in_memory: + return False + # mixed case where some are in memory and some on disk + elif rid < self.max_in_memory: + return False + else: + return True + + def _exists_in_memory(self, rid): + """ + Return True if Resource `rid` exists in the codebase memory cache. 
+ """ + return rid in self.resources + + def _exists_on_disk(self, rid): + """ + Return True if Resource `rid` exists in the codebase disk cache. + """ + cache_location = self._get_resource_cache_location(rid) + if cache_location: + return exists(cache_location) + + def get_resource(self, rid): + """ + Return the Resource with `rid` or None if it does not exists. + """ + if TRACE: + msg = [' Codebase.get_resource:', 'rid:', rid] + if rid == 0: + msg.append('root') + elif not rid or rid not in self.resource_ids: + msg.append('not in resources!') + elif self._use_disk_cache_for_resource(rid): + msg.extend(['from disk', 'exists:', self._exists_on_disk(rid)]) + else: + msg.extend(['from memory', 'exists:', self._exists_in_memory(rid)]) + logger_debug(*msg) + + if rid == 0: + res = self.root + elif not rid or rid not in self.resource_ids: + res = None + if self._use_disk_cache_for_resource(rid): + res = self._load_resource(rid) + else: + res = self.resources.get(rid) + + if TRACE: + logger_debug(' Resource:', res) + return res + + def save_resource(self, resource): + """ + Save the `resource` Resource to cache (in memory or disk). 
+ """
+ if TRACE:
+ msg = [' Codebase.save_resource:', resource]
+ rid = resource.rid
+ if resource.is_root:
+ msg.append('root')
+ elif rid not in self.resource_ids:
+ msg.append('missing resource')
+ elif self._use_disk_cache_for_resource(rid):
+ msg.extend(['to disk:', 'exists:', self._exists_on_disk(rid)])
+ else:
+ msg.extend(['to memory:', 'exists:', self._exists_in_memory(rid)])
+ logger_debug(*msg)
+
+ if not resource:
+ return
+
+ rid = resource.rid
+ if rid not in self.resource_ids:
+ raise UnknownResource('Not part of codebase: %(resource)r' % resource)
+
+ if resource.is_root:
+ # this can possibly damage things badly
+ self.root = resource
+
+ if self._use_disk_cache_for_resource(rid):
+ self._dump_resource(resource)
+ else:
+ self.resources[rid] = resource
+
+ def _dump_resource(self, resource):
+ """
+ Dump a Resource to the disk cache.
+ """
+ cache_location = resource.cache_location
+
+ if not cache_location:
+ raise TypeError('Resource cannot be dumped to disk and is used only'
+ 'in memory: %(resource)r' % resource)
+
+ # TODO: consider messagepack or protobuf for compact/faster processing?
+ with codecs.open(cache_location , 'wb', encoding='utf-8') as cached:
+ json.dump(resource.serialize(), cached, check_circular=False)
+
+ # TODO: consider adding a small LRU cache in front of this for perf?
+ def _load_resource(self, rid):
+ """
+ Return a Resource with `rid` loaded from the disk cache.
+ """ + cache_location = self._get_resource_cache_location(rid, create=False) + + if TRACE: + logger_debug(' Codebase._load_resource: exists:', exists(cache_location), 'cache_location:', cache_location) + + if not exists(cache_location): + raise ResourceNotInCache( + 'Failed to load Resource: %(rid)d from %(cache_location)r' % locals()) + + # TODO: consider messagepack or protobuf for compact/faster processing + with codecs.open(cache_location, 'r', encoding='utf-8') as cached: + data = json.load(cached, object_pairs_hook=OrderedDict) + return self.resource_class(**data) + + def _remove_resource(self, resource): + """ + Remove the `resource` Resource object from the resource tree. + Does not remove children. + """ + if resource.is_root: + raise TypeError('Cannot remove the root resource from ' + 'codebase:', repr(resource)) + rid = resource.rid + # remove from index. + self.resource_ids.discard(rid) + # remove from in-memory cache. The disk cache is cleared on exit. + self.resources.pop(rid, None) + if TRACE: + logger_debug('Codebase._remove_resource:', resource) + + def remove_resource(self, resource): + """ + Remove the `resource` Resource object and all its children from the + resource tree. Return a set of removed Resource ids. 
+ """
+ if TRACE:
+ logger_debug('Codebase.remove_resource')
+ logger_debug(' resource', resource)
+
+ if resource.is_root:
+ raise TypeError('Cannot remove the root resource from '
+ 'codebase:', repr(resource))
+
+ removed_rids = set ()
+
+ # remove all descendants bottom up to avoid out-of-order access to
+ # removed resources
+ for descendant in resource.walk(self, topdown=False):
+ self._remove_resource(descendant)
+ removed_rids.add(descendant.rid)
+
+ # remove resource from parent
+ parent = resource.parent(self)
+ if TRACE: logger_debug(' parent', parent)
+ parent.children_rids.remove(resource.rid)
+
+ # remove resource proper
+ self._remove_resource(resource)
+ removed_rids.add(resource.rid)
+
+ return removed_rids
+
+ def walk(self, topdown=True, skip_root=False):
+ """
+ Yield all resources for this Codebase walking its resource tree.
+ Walk the tree top-down, depth-first if `topdown` is True, otherwise walk
+ bottom-up.
+
+ Each level is sorted by children sort order (e.g. without-children, then
+ with-children and each group by case-insensitive name)
+
+ If `skip_root` is True, the root resource is not returned unless this is
+ a codebase with a single resource.
+ """
+ root = self.root
+ # include root if no children (e.g. codebase with a single resource)
+ if skip_root and not root.has_children():
+ skip_root = False
+
+ if topdown and not skip_root:
+ yield root
+
+ for res in root.walk(self, topdown):
+ yield res
+
+ if not topdown and not skip_root:
+ yield root
+
+ def walk_filtered(self, topdown=True, skip_root=False):
+ """
+ Walk this Codebase as with walk() but does not return Resources with
+ `is_filtered` flag set to True.
+ """
+ for resource in self.walk(topdown, skip_root):
+ if resource.is_filtered:
+ continue
+ yield resource
+
+ def compute_counts(self, skip_root=False, skip_filtered=False):
+ """
+ Compute and update the counts of every resource.
+ Return a tuple of top level counters (files_count, dirs_count,
+ size_count) for this codebase.
+
+ The counts are computed differently based on these flags:
+ - If `skip_root` is True, the root resource is not included in counts.
+ - If `skip_filtered` is True, resources with `is_filtered` set to True
+ are not included in counts.
+ """
+ self.update_counts(skip_filtered=skip_filtered)
+
+ root = self.root
+ files_count = root.files_count
+ dirs_count = root.dirs_count
+ size_count = root.size_count
+
+ if (skip_root and not root.is_file) or (skip_filtered and root.is_filtered):
+ return files_count, dirs_count, size_count
+
+ if root.is_file:
+ files_count += 1
+ else:
+ dirs_count += 1
+ size_count += root.size
+
+ return files_count, dirs_count, size_count
+
+ def update_counts(self, skip_filtered=False):
+ """
+ Update files_count, dirs_count and size_count attributes of each
+ Resource in this codebase based on the current state.
+
+ If `skip_filtered` is True, resources with `is_filtered` set to True are
+ not included in counts.
+ """
+ # note: we walk bottom up to update things in the proper order
+ # and the walk MUST NOT skip filtered, only the compute
+ for resource in self.walk(topdown=False):
+ resource._compute_children_counts(self, skip_filtered)
+
+ def clear(self):
+ """
+ Purge the codebase cache(s).
+ """
+ delete(self.cache_dir)
+
+
+def to_native_path(path):
+ """
+ Return `path` using the preferred OS encoding (bytes on Linux,
+ Unicode elsewhere) given a unicode or bytes path string.
+ """
+ if not path:
+ return path
+ if on_linux:
+ return fsencode(path)
+ else:
+ return fsdecode(path)
+
+
+def to_decoded_posix_path(path):
+ """
+ Return `path` as a Unicode POSIX path given a unicode or bytes path string.
+ """
+ return fsdecode(as_posixpath(path))
+
+
+@attr.attributes
+class Resource(object):
+ """
+ A resource represents a file or directory with essential "file information"
+ and the scanned data details.
+
+ A Resource is a tree that models the filesystem tree structure.
+
+ In order to support lightweight and smaller objects that can be serialized
+ and deserialized (such as pickled in multiprocessing) without pulling in a
+ whole object tree, a Resource does not store its related objects directly:
+ the Codebase it belongs to, its parent Resource and its Resource children
+ objects are stored only as integer ids. Querying the Resource relationships
+ and walking the Resources tree requires looking up the corresponding object
+ by id in the codebase object.
+ """
+ # the file or directory name in the OS preferred representation (either
+ # bytes on Linux and Unicode elsewhere)
+ name = attr.attrib(converter=to_native_path)
+
+ # the file or directory absolute location in the OS preferred representation
+ # (either bytes on Linux and Unicode elsewhere) using the OS native path
+ # separators.
+ location = attr.attrib(converter=to_native_path, repr=False)
+
+ # the file or directory POSIX path decoded as unicode using the filesystem
+ # encoding. This is the path that will be reported in output and can be
+ # either one of these:
+ # - if the codebase was created with strip_root==True, this is a path
+ # relative to the root, stripped from its root segment unless the codebase
+ # contains a single file.
+ # - if the codebase was created with full_root==True, this is an absolute
+ # path
+ path = attr.attrib(converter=to_decoded_posix_path)
+
+ # resource id as an integer
+ # the root of a Resource tree has a rid==0 by convention
+ rid = attr.ib()
+
+ # parent resource id of this resource as an integer
+ # the root of a Resource tree has a pid==None by convention
+ pid = attr.ib()
+
+ # location of the file where this resource can be cached on disk in the OS
+ # preferred representation (either bytes on Linux and Unicode elsewhere)
+ cache_location = attr.attrib(default=None, converter=to_native_path, repr=False)
+
+ # True for file, False for directory
+ is_file = attr.ib(default=False)
+
+ # True if this Resource should be filtered out, e.g. skipped from the
+ # returned list of resources
+ is_filtered = attr.ib(default=False)
+
+ # a list of rids
+ children_rids = attr.ib(default=attr.Factory(list), repr=TRACE)
+
+ # external data to serialize
+ size = attr.ib(default=0, type=int, repr=TRACE)
+
+ # These attributes are re/computed for directories and files with children
+ # they represent counts for the full descendants of a Resource
+ size_count = attr.ib(default=0, type=int, repr=False)
+ files_count = attr.ib(default=0, type=int, repr=False)
+ dirs_count = attr.ib(default=0, type=int, repr=False)
+
+ # list of scan error strings
+ scan_errors = attr.ib(default=attr.Factory(list), repr=False)
+
+ # Duration in seconds as float to run all scans for this resource
+ scan_time = attr.ib(default=0, repr=False)
+
+ # mapping of timings for each scan as {scan_key: duration in seconds as a float}
+ scan_timings = attr.ib(default=attr.Factory(OrderedDict), repr=False)
+
+ @property
+ def is_root(self):
+ return self.rid == 0
+
+ @property
+ def type(self):
+ return 'file' if self.is_file else 'directory'
+
+ @property
+ def base_name(self):
+ base_name, _extension = splitext_name(self.name, is_file=self.is_file)
+ return base_name
+
+ @property
+ def extension(self):
+ _base_name, extension = splitext_name(self.name, is_file=self.is_file) + return extension + + @classmethod + def get(cls, codebase, rid): + """ + Return the Resource with `rid` in `codebase` or None if it does not + exists. + """ + return codebase.get_resource(rid) + + def save(self, codebase): + """ + Save this resource in `codebase` (in memory or disk). + """ + return codebase.save_resource(self) + + def remove(self, codebase): + """ + Remove this resource and all its children from the codebase. + Return a set of removed Resource ids. + """ + return codebase.remove_resource(self) + + def create_child(self, codebase, name, is_file=False): + """ + Create and return a new child Resource of this resource in `codebase` + with `name`. `name` is always in native OS-preferred encoding (e.g. byte + on Linux, unicode elsewhere). + """ + return codebase.create_resource(name, self, is_file) + + def _compute_children_counts(self, codebase, skip_filtered=False): + """ + Compute counts and update self with these counts from direct children. + Return a tuple of counters (files_count, dirs_count, size_count) for the + direct children of this Resource. + + If `skip_filtered` is True, skip resources with the `is_filtered` flag + set to True. + + Note: because certain files such as archives can have children, they may + have a files and dirs counts. The size of a directory is aggregated size + of its files (including the count of files inside archives). 
+ """
+ files_count = dirs_count = size_count = 0
+ for child in self.children(codebase):
+ files_count += child.files_count
+ dirs_count += child.dirs_count
+ size_count += child.size_count
+
+ if skip_filtered and child.is_filtered:
+ continue
+
+ if child.is_file:
+ files_count += 1
+ else:
+ dirs_count += 1
+ size_count += child.size
+
+ self.files_count = files_count
+ self.dirs_count = dirs_count
+ self.size_count = size_count
+ return files_count, dirs_count, size_count
+
+ def walk(self, codebase, topdown=True,):
+ """
+ Yield all descendant Resources of this Resource. Does not include self.
+
+ Walk the tree top-down, depth-first if `topdown` is True, otherwise walk
+ bottom-up.
+
+ Each level is sorted by children sort order (e.g. without-children, then
+ with-children and each group by case-insensitive name)
+ """
+
+ for child in self.children(codebase):
+ if topdown:
+ yield child
+ for subchild in child.walk(codebase, topdown):
+ yield subchild
+ if not topdown:
+ yield child
+
+ def has_children(self):
+ """
+ Return True if this Resource has children.
+ """
+ return bool(self.children_rids)
+
+ def children(self, codebase):
+ """
+ Return a sorted sequence of direct children Resource objects for this Resource
+ or an empty sequence.
+ Sorting is by resources without children, then resource with children
+ (e.g. directories or files with children), then case-insensitive name.
+ """
+ _sorter = lambda r: (r.has_children(), r.name.lower(), r.name)
+ get_resource = codebase.get_resource
+ return sorted((get_resource(rid) for rid in self.children_rids), key=_sorter)
+
+ def has_parent(self):
+ """
+ Return True if this Resource has a parent.
+ """
+ return not self.is_root
+
+ def parent(self, codebase):
+ """
+ Return the parent Resource object for this Resource or None.
+ """
+ return codebase.get_resource(self.pid)
+
+ def has_siblings(self, codebase):
+ """
+ Return True if this Resource has siblings.
+ """ + return self.has_parent() and self.parent(codebase).has_children() + + def siblings(self, codebase): + """ + Return a sequence of sibling Resource objects for this Resource + or an empty sequence. + """ + if self.has_parent(): + return self.parent(codebase).children(codebase) + return [] + + def ancestors(self, codebase): + """ + Return a sequence of ancestor Resource objects from self to root + (includes self). + """ + if self.is_root: + return [self] + + ancestors = deque() + current = self + # walk up the tree parent tree up to the root + while not current.is_root: + ancestors.appendleft(current) + current = codebase.get_resource(current.pid) + # append root too + ancestors.appendleft(current) + return list(ancestors) + + def to_dict(self, with_timing=False, with_info=False): + """ + Return a mapping of representing this Resource and its scans. + """ + res = OrderedDict() + res['path'] = self.path + + if with_info: + res['type'] = self.type + res['name'] = fsdecode(self.name) + res['base_name'] = fsdecode(self.base_name) + res['extension'] = fsdecode(self.extension) + res['size'] = self.size + + self_fields_filter = attr.filters.exclude(*attr.fields(Resource)) + + other_data = attr.asdict( + self, filter=self_fields_filter, dict_factory=OrderedDict) + + res.update(other_data) + + if with_timing: + res['scan_time'] = self.scan_time or 0 + res['scan_timings'] = self.scan_timings or {} + + if with_info: + res['files_count'] = self.files_count + res['dirs_count'] = self.dirs_count + res['size_count'] = self.size_count + + res['scan_errors'] = self.scan_errors + if TRACE: + logger_debug('Resource.to_dict:', res) + return res + + def serialize(self): + """ + Return a mapping of representing this Resource and its scans in a form + that is fully serializable and can be used to reconstruct a Resource. + All path-derived OS-native strings are decoded to Unicode for JSON + serialization. 
+ """ + saveable = attr.asdict(self, dict_factory=OrderedDict) + saveable['name'] = fsdecode(self.name) + saveable['location'] = fsdecode(self.location) + if self.cache_location: + saveable['cache_location'] = fsdecode(self.cache_location) + return saveable + + +def get_path(root_location, location, full_root=False, strip_root=False): + """ + Return a Unicode POSIX `path` (using "/" separators) derived from + `root_location` and `location` (both absolute native locations + respectively the root location of the codebase and to the Resource). + + - If `full_root` is True, return an absolute path. Otherwise return a + relative path where the first segment is the root name. + + - If `strip_root` is True, return a relative path without the first root + segment. Ignored if `full_root` is True. + """ + + posix_loc = fsdecode(as_posixpath(location)) + if full_root: + return posix_loc + + if not strip_root: + # keep the root directory name by default + root_loc = parent_directory(root_location) + else: + root_loc = root_location + + posix_root_loc = fsdecode(as_posixpath(root_loc)).rstrip('/') + '/' + + return posix_loc.replace(posix_root_loc, '', 1) + + +def get_codebase_cache_dir(temp_dir=scancode_temp_dir): + """ + Return a new, created and unique per-run cache storage directory path rooted + at the `temp_dir` base temp directory in the OS-preferred representation + (either bytes on Linux and Unicode elsewhere). + """ + from commoncode.fileutils import get_temp_dir + from commoncode.timeutils import time2tstamp + + prefix = 'scancode-scans-' + time2tstamp() + '-' + cache_dir = get_temp_dir(base_dir=temp_dir, prefix=prefix) + if on_linux: + cache_dir = fsencode(cache_dir) + else: + cache_dir = fsdecode(cache_dir) + return cache_dir diff --git a/src/scancode/utils.py b/src/scancode/utils.py index 4af4170c473..c513ef5e3c6 100644 --- a/src/scancode/utils.py +++ b/src/scancode/utils.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. 
+# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -23,29 +23,34 @@ # Visit https://github.com/nexB/scancode-toolkit/ for support and download. from __future__ import absolute_import -from __future__ import print_function from __future__ import division +from __future__ import print_function from __future__ import unicode_literals import click +click.disable_unicode_literals_warning = True from click.utils import echo +from click.termui import style from click._termui_impl import ProgressBar -from commoncode import fileutils -from commoncode.fileutils import path_to_unicode - - -""" -Various CLI UI utilities, many related to Click and progress reporting. -""" +from commoncode.fileutils import file_name +from commoncode.fileutils import splitext +from commoncode.text import toascii # Python 2 and 3 support try: # Python 2 unicode + str_orig = str + bytes = str # NOQA + str = unicode # NOQA except NameError: # Python 3 - unicode = str + unicode = str # NOQA + +""" +Command line UI utilities for help and and progress reporting. +""" class BaseCommand(click.Command): @@ -78,20 +83,12 @@ class EnhancedProgressBar(ProgressBar): """ Enhanced progressbar ensuring that nothing is displayed when the bar is hidden. """ + def render_progress(self): if not self.is_hidden: return super(EnhancedProgressBar, self).render_progress() -class NoOpProgressBar(EnhancedProgressBar): - """ - A ProgressBar-like object that does not show any progress. 
- """ - def __init__(self, *args, **kwargs): - super(NoOpProgressBar, self).__init__(*args, **kwargs) - self.is_hidden = True - - class ProgressLogger(ProgressBar): """ A subclass of Click ProgressBar providing a verbose line-by-line progress @@ -106,6 +103,7 @@ class ProgressLogger(ProgressBar): If no item_show_func is provided a simple dot is printed for each event. """ + def __init__(self, *args, **kwargs): super(ProgressLogger, self).__init__(*args, **kwargs) self.is_hidden = False @@ -134,89 +132,76 @@ def render_finish(self): BAR_SEP = ' ' BAR_SEP_LEN = len(BAR_SEP) + def progressmanager(iterable=None, length=None, label=None, show_eta=True, - show_percent=None, show_pos=False, item_show_func=None, + show_percent=None, show_pos=True, item_show_func=None, fill_char='#', empty_char='-', bar_template=None, - info_sep=BAR_SEP, width=BAR_WIDTH, file=None, color=None, - verbose=False, quiet=False): + info_sep=BAR_SEP, width=BAR_WIDTH, file=None, color=None, # NOQA + verbose=False): - """This function creates an iterable context manager showing progress as a - bar (default) or line-by-line log (if verbose is True) while iterating. + """ + Return an iterable context manager showing progress as a progress bar + (default) or item-by-item log (if verbose is True) while iterating. - Its arguments are similar to Click.termui.progressbar with - these new arguments added at the end of the signature: + Its arguments are similar to Click.termui.progressbar with these new + arguments added at the end of the signature: - :param verbose: if False, display a progress bar, otherwise a progress log - :param quiet: If True, do not display any progress message. + :param verbose: if True, display a progress log. Otherwise, a progress bar. 
""" - if quiet: - progress_class = NoOpProgressBar - elif verbose: + if verbose: progress_class = ProgressLogger else: progress_class = EnhancedProgressBar bar_template = ('[%(bar)s]' + BAR_SEP + '%(info)s' if bar_template is None else bar_template) - return progress_class(iterable=iterable, length=length, show_eta=show_eta, - show_percent=show_percent, show_pos=show_pos, - item_show_func=item_show_func, fill_char=fill_char, - empty_char=empty_char, bar_template=bar_template, - info_sep=info_sep, file=file, label=label, - width=width, color=color) - - -def get_relative_path(path, len_base_path, base_is_dir): - """ - Return a posix relative path from the posix 'path' relative to a - base path of `len_base_path` length where the base is a directory if - `base_is_dir` True or a file otherwise. - """ - path = path_to_unicode(path) - if base_is_dir: - rel_path = path[len_base_path:] - else: - rel_path = fileutils.file_name(path) - - return rel_path.lstrip('/') + return progress_class(iterable=iterable, length=length, + show_eta=show_eta, show_percent=show_percent, show_pos=show_pos, + item_show_func=item_show_func, fill_char=fill_char, + empty_char=empty_char, bar_template=bar_template, info_sep=info_sep, + file=file, label=label, width=width, color=color) def fixed_width_file_name(path, max_length=25): """ - Return a fixed width file name of at most `max_length` characters - extracted from the `path` string and usable for fixed width display. - If the file_name is longer than `max_length`, it is truncated in the - middle with using three dots "..." as an ellipsis and the extension - is kept. + Return a fixed width file name of at most `max_length` characters computed + from the `path` string and usable for fixed width display. If the `path` + file name is longer than `max_length`, the file name is truncated in the + middle using three dots "..." as an ellipsis and the ext is kept. 
For example: - >>> short = fixed_width_file_name('0123456789012345678901234.c') - >>> assert '0123456789...5678901234.c' == short + >>> fwfn = fixed_width_file_name('0123456789012345678901234.c') + >>> assert '0123456789...5678901234.c' == fwfn + >>> fwfn = fixed_width_file_name('some/path/0123456789012345678901234.c') + >>> assert '0123456789...5678901234.c' == fwfn + >>> fwfn = fixed_width_file_name('some/sort.c') + >>> assert 'sort.c' == fwfn + >>> fwfn = fixed_width_file_name('some/123456', max_length=5) + >>> assert '' == fwfn """ if not path: return '' # get the path as unicode for display! - path = path_to_unicode(path) - filename = fileutils.file_name(path) + filename = file_name(path) if len(filename) <= max_length: return filename - base_name, extension = fileutils.splitext(filename) - number_of_dots = 3 - len_extension = len(extension) - remaining_length = max_length - len_extension - number_of_dots + base_name, ext = splitext(filename) + dots = 3 + len_ext = len(ext) + remaining_length = max_length - len_ext - dots - if remaining_length < (len_extension + number_of_dots) or remaining_length < 5: + if remaining_length < 5 or remaining_length < (len_ext + dots): return '' prefix_and_suffix_length = abs(remaining_length // 2) prefix = base_name[:prefix_and_suffix_length] - ellipsis = number_of_dots * '.' + ellipsis = dots * '.' suffix = base_name[-prefix_and_suffix_length:] - return '{prefix}{ellipsis}{suffix}{extension}'.format(**locals()) + return '{prefix}{ellipsis}{suffix}{ext}'.format(**locals()) -def compute_fn_max_len(used_width=BAR_WIDTH + BAR_SEP_LEN + 7 + BAR_SEP_LEN + 8 + BAR_SEP_LEN): +def file_name_max_len(used_width=BAR_WIDTH + BAR_SEP_LEN + 7 + BAR_SEP_LEN + 8 + BAR_SEP_LEN): """ Return the max length of a path given the current terminal width. 
@@ -229,12 +214,32 @@ def compute_fn_max_len(used_width=BAR_WIDTH + BAR_SEP_LEN + 7 + BAR_SEP_LEN + 8 - the word Scanned: 8 chars - one BAR_SEP - the file name proper - The space usage is therefore: BAR_WIDTH + BAR_SEP_LEN + 7 + BAR_SEP_LEN + 8 + BAR_SEP_LEN + the file name length + The space usage is therefore: + BAR_WIDTH + BAR_SEP_LEN + 7 + BAR_SEP_LEN + 8 + BAR_SEP_LEN + + the file name length """ term_width, _height = click.get_terminal_size() max_filename_length = term_width - used_width -# if term_width < 70: -# # if we have a small term width that is less than 70 column, we -# # may spill over and damage the progress bar... -# max_filename_length = 10 return max_filename_length + + +def path_progress_message(item, verbose=False, prefix='Scanned: '): + """ + Return a styled message suitable for progress display when processing a path + for an `item` tuple of (location, rid, scan_errors, *other items) + """ + if not item: + return '' + location = item[0] + errors = item[2] + location = unicode(toascii(location)) + progress_line = location + if not verbose: + max_file_name_len = file_name_max_len() + # do not display a file name in progress bar if there is no space available + if max_file_name_len <= 10: + return '' + progress_line = fixed_width_file_name(location, max_file_name_len) + + color = 'red' if errors else 'green' + return style(prefix) + style(progress_line, fg=color) diff --git a/src/scancode_config.py b/src/scancode_config.py new file mode 100644 index 00000000000..b6ca551d832 --- /dev/null +++ b/src/scancode_config.py @@ -0,0 +1,156 @@ +# +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. +# http://nexb.com and https://github.com/nexB/scancode-toolkit/ +# The ScanCode software is licensed under the Apache License version 2.0. +# Data generated with ScanCode require an acknowledgment. +# ScanCode is a trademark of nexB Inc. +# +# You may not use this software except in compliance with the License. 
+# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# +# When you publish or redistribute any data created with ScanCode or any ScanCode +# derivative work, you must accompany this data with the following acknowledgment: +# +# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, either express or implied. No content created from +# ScanCode should be considered or used as legal advice. Consult an Attorney +# for any legal advice. +# ScanCode is a free software code scanning tool from nexB Inc. and others. +# Visit https://github.com/nexB/scancode-toolkit/ for support and download. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import errno +import os +from os.path import abspath +from os.path import dirname +from os.path import expanduser +from os.path import join +from os.path import exists +import tempfile + +""" +Core configuration globals. + +Note: this module MUST import ONLY from the standard library. +""" + +# this exception is not available on posix +try: + WindowsError # noqa +except NameError: + WindowsError = None # NOQA + + +def _create_dir(location): + """ + Create directory and all sub-directories recursively at `location`. + Raise Exceptions if it fails to create the directory. 
+ NOTE: this is essentially a copy of commoncode.fileutils.create_dir() + """ + + if exists(location): + if not os.path.isdir(location): + err = ('Cannot create directory: existing file ' + 'in the way ''%(location)s.') + raise OSError(err % locals()) + return + + # may fail on win if the path is too long + # FIXME: consider using UNC ?\\ paths + try: + os.makedirs(location) + + # avoid multi-process TOCTOU conditions when creating dirs + # the directory may have been created since the exist check + except WindowsError, e: + # [Error 183] Cannot create a file when that file already exists + if e and e.winerror == 183: + if not os.path.isdir(location): + raise + else: + raise + except (IOError, OSError), o: + if o.errno == errno.EEXIST: + if not os.path.isdir(location): + raise + else: + raise + +################################################################################ +# INVARIABLE INSTALLATION-SPECIFIC, BUILT-IN LOCATIONS AND FLAGS +################################################################################ +# these are guaranteed to be there and are entirely based on and relative to the +# current installation location. This is where the source code and static data +# lives. + + +from pkg_resources import get_distribution, DistributionNotFound +try: + __version__ = get_distribution('scancode-toolkit').version +except DistributionNotFound: + # package is not installed ??
+ __version__ = '2.2.1' + +system_temp_dir = tempfile.gettempdir() +scancode_src_dir = dirname(__file__) +scancode_root_dir = dirname(scancode_src_dir) + +################################################################################ +# USAGE MODE FLAGS +################################################################################ + +# tag file or env var to determine if we are in dev mode +SCANCODE_DEV_MODE = (os.getenv('SCANCODE_DEV_MODE') + or exists(join(scancode_root_dir, 'SCANCODE_DEV_MODE'))) + +################################################################################ +# USAGE MODE-, INSTALLATION- and IMPORT- and RUN-SPECIFIC DIRECTORIES +################################################################################ +# These vary based on the usage mode: dev or not: we define two locations: + +# - scancode_cache_dir: for long-lived caches which are installation-specific: +# this is for cached data which are infrequently written to and mostly read, +# such as the license index cache. The same location is used across runs of +# a given version of ScanCode + +# - scancode_temp_dir: for short-lived temporary files which are import- or run- +# specific that may live for the duration of a function call or for the duration +# of a whole scancode run, such as any temp file and the per-run scan results +# cache. A new unique location is used for each run of ScanCode (e.g.
for the +# lifetime of the Python interpreter process) + +if SCANCODE_DEV_MODE: + # in dev mode the cache and temp files are stored exclusively under the + # scancode_root_dir + scancode_cache_dir = join(scancode_root_dir, '.cache') + scancode_temp_dir = join(scancode_root_dir, 'tmp') + +else: + # in other usage modes (as a CLI or as a library, regardless of how + # installed) we use sensible defaults in the user home directory + # these are version specific + + # WARNING: do not change this code without changing + # commoncode.fileutils.get_temp_dir too + + user_home = abspath(expanduser('~')) + scancode_cache_dir = os.getenv('SCANCODE_CACHE') + if not scancode_cache_dir: + scancode_cache_dir = join(user_home, '.cache', 'scancode-tk', __version__) + + scancode_temp_dir = os.getenv('SCANCODE_TMP') + if not scancode_temp_dir: + _prefix = 'scancode-tk-' + __version__ + '-' + # NOTE: for now this is in the system_temp_dir + scancode_temp_dir = tempfile.mkdtemp(prefix=_prefix, dir=system_temp_dir) + +_create_dir(scancode_cache_dir) +_create_dir(scancode_temp_dir) diff --git a/src/textcode/markup.py b/src/textcode/markup.py index 672c74ec56d..c32e51c79bb 100644 --- a/src/textcode/markup.py +++ b/src/textcode/markup.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment.
@@ -46,7 +46,6 @@ bin_dir = os.path.join(os.path.dirname(__file__), 'bin') - extensions = ('.html', '.htm', '.php', '.phps', '.jsp', '.jspx' , '.xml', '.pom',) @@ -146,7 +145,7 @@ def convert_to_utf8(location): if encoding: encoding = encoding.get('encoding', None) if encoding: - target = os.path.join(fileutils.get_temp_dir('markup'), + target = os.path.join(fileutils.get_temp_dir(prefix='scancode-markup-'), fileutils.file_name(location)) with codecs.open(location, 'rb', encoding=encoding, errors='replace', buffering=16384) as inf: @@ -166,7 +165,7 @@ def convert_to_text(location, _retrying=False): if not is_markup(location): return - temp_file = os.path.join(fileutils.get_temp_dir('markup'), 'text') + temp_file = os.path.join(fileutils.get_temp_dir(prefix='scancode-markup-'), 'text') from bs4 import BeautifulSoup with open(location, 'rb') as input_text: soup = BeautifulSoup(input_text.read(), 'html5lib') diff --git a/src/textcode/pdf.py b/src/textcode/pdf.py index afb677f10b8..53cac168139 100644 --- a/src/textcode/pdf.py +++ b/src/textcode/pdf.py @@ -43,7 +43,6 @@ def get_text_lines(location): `location`. May raise exceptions. """ extracted_text = BytesIO() - lines = [] laparams = LAParams() with open(location, 'rb') as pdf_file: with contextlib.closing(PDFParser(pdf_file)) as parser: diff --git a/src/textcode/strings.py b/src/textcode/strings.py index 9876e4bf235..6f411402979 100644 --- a/src/textcode/strings.py +++ b/src/textcode/strings.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -22,14 +22,14 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
-from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function -import string import re +import string from commoncode.text import toascii - """ Extract raw ASCII strings from (possibly) binary strings. Both plain ASCII and UTF-16-LE-encoded (aka. wide) strings are extracted. @@ -78,7 +78,6 @@ def strings_from_file(location, buff_size=1024 * 1024, ascii=False, clean=True, printable = 'A-Za-z0-9' + whitespaces + punctuation null_byte = '\x00' - ascii_strings = re.compile( # plain ASCII is a sequence of printable of a minimum length '(' @@ -145,6 +144,7 @@ def clean_string(s, min_len=MIN_LEN, * not made of only of digits, punctuations and whitespaces """ s = s.strip() + def valid(st): st = remove_junk('', st) return (st and len(st) >= min_len @@ -156,7 +156,6 @@ def valid(st): if valid(s): yield s.strip() - ##################################################################################### # TODO: Strings classification # Classify strings, detect junk, detect paths, symbols, demangle symbols, unescape diff --git a/src/textcode/strings2.py b/src/textcode/strings2.py index 64bb20a7a45..7bddb04cb89 100644 --- a/src/textcode/strings2.py +++ b/src/textcode/strings2.py @@ -23,12 +23,11 @@ # - removed main() # - do not cache compiled patterns. re does cache patterns alright. - -from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function import re - ASCII_BYTE = ( " !\"#\$%&\'\(\)\*\+,-\./0123456789:;<=>\?@ABCDEFGHIJKLMNOPQRSTUVWXYZ" "\[\]\^_`abcdefghijklmnopqrstuvwxyz\{\|\}\\\~\t" diff --git a/src/typecode/contenttype.py b/src/typecode/contenttype.py index 73052500fff..3731ac5ac6d 100644 --- a/src/typecode/contenttype.py +++ b/src/typecode/contenttype.py @@ -22,7 +22,8 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
-from __future__ import print_function, absolute_import +from __future__ import absolute_import +from __future__ import print_function import contextlib import os @@ -52,13 +53,11 @@ extension and mostly its content. """ - LOG = logging.getLogger(__name__) data_dir = os.path.join(os.path.dirname(__file__), 'data') bin_dir = os.path.join(os.path.dirname(__file__), 'bin') - # Python mimetypes path setup using Apache mimetypes DB os.environ['XDG_DATA_DIRS'] = os.path.join(data_dir, 'apache') os.environ['XDG_DATA_HOME'] = os.environ['XDG_DATA_DIRS'] @@ -67,31 +66,26 @@ # Ensure that all dates are UTC, especially for fine free file. os.environ['TZ'] = 'UTC' - PLAIN_TEXT_EXTENSIONS = ('.rst', '.rest', '.txt', '.md', # This one is actually not handled by Pygments. There # are probably more. '.log') - C_EXTENSIONS = set(['.c', '.cc', '.cp', '.cpp', '.cxx', '.c++', '.h', '.hh', '.s', '.asm', '.hpp', '.hxx', '.h++', '.i', '.ii', '.m']) - ELF_EXE = 'executable' ELF_SHARED = 'shared object' ELF_RELOC = 'relocatable' ELF_UNKNOWN = 'unknown' elf_types = (ELF_EXE, ELF_SHARED, ELF_RELOC,) - # TODO: # http://svn.zope.org/z3c.mimetype/trunk/?pathrev=103648 # http://svn.zope.org/z3c.sharedmimeinfo/trunk/TODO.txt?revision=103668&view=markup # https://pypi.python.org/pypi/z3c.sharedmimeinfo/0.1.0 # https://github.com/plone/Products.MimetypesRegistry/ - # Global registry of Type objects, keyed by location # TODO: can this be a memroy hog for very large scans? _registry = {} @@ -109,9 +103,9 @@ def get_type(location): _registry[abs_loc] = t return t - # TODO: simplify code using a cached property decorator + class Type(object): """ Content, media and mime type information about a file. diff --git a/src/typecode/entropy.py b/src/typecode/entropy.py index 142b46fae2b..9b8d9175d79 100644 --- a/src/typecode/entropy.py +++ b/src/typecode/entropy.py @@ -22,7 +22,6 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. 
# Visit https://github.com/nexB/scancode-toolkit/ for support and download. - from __future__ import division from __future__ import absolute_import diff --git a/src/typecode/magic2.py b/src/typecode/magic2.py index 870a285b5d0..36e2c92ddd7 100644 --- a/src/typecode/magic2.py +++ b/src/typecode/magic2.py @@ -45,6 +45,8 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +from __future__ import absolute_import +from __future__ import print_function import os.path import ctypes @@ -58,18 +60,15 @@ except ImportError: from backports.os import fsencode - """ magic2 is minimal and specialized wrapper around a vendored libmagic file identification library. This is NOT thread-safe. It is based on python-magic by Adam Hup and adapted to the specific needs of ScanCode. """ - data_dir = os.path.join(os.path.dirname(__file__), 'data') bin_dir = os.path.join(os.path.dirname(__file__), 'bin') - # path to vendored magic DB, possibly OS-specific basemag = os.path.join(data_dir, 'magic') # keep the first which is the most specific directory @@ -81,7 +80,6 @@ # detectors = {} - # libmagic flags MAGIC_NONE = 0 MAGIC_MIME = 16 @@ -90,7 +88,6 @@ MAGIC_NO_CHECK_TEXT = 131072 MAGIC_NO_CHECK_CDF = 262144 - DETECT_TYPE = MAGIC_NONE DETECT_MIME = MAGIC_NONE | MAGIC_MIME DETECT_ENC = MAGIC_NONE | MAGIC_MIME | MAGIC_MIME_ENCODING @@ -149,6 +146,7 @@ class MagicException(Exception): class Detector(object): + def __init__(self, flags, magic_file=magic_db): """ Create a new libmagic detector. @@ -221,7 +219,7 @@ def load_lib(): libmagic = load_lib() -def check_error(result, func, args): # @UnusedVariable +def check_error(result, func, args): # NOQA """ ctypes error handler/checker: Check for errors and raise an exception or return the result otherwise. 
diff --git a/tests/cluecode/cluecode_assert_utils.py b/tests/cluecode/cluecode_assert_utils.py index 2030a185cf7..a85955dc3ba 100644 --- a/tests/cluecode/cluecode_assert_utils.py +++ b/tests/cluecode/cluecode_assert_utils.py @@ -22,27 +22,32 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. -from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals import cluecode.copyrights -def check_detection(expected, test_file, +def check_detection(expected, test_file_or_iterable, expected_in_results=True, results_in_expected=True, what='copyrights'): """ - Run detection of copyright on the test_file, checking the results - match the expected list of values. + Run detection of copyright on the `test_file_or_iterable`, checking the + results match the expected list of values. - If expected_in_results and results_in_expected are True (the default), - then expected and test results are tested for equality. To accommodate - for some level of approximate testing, the check can test only if an - expected result in a test result, or the opposite. If - expected_in_results and results_in_expected are both False an + `test_file_or_iterable` is either a path string or an iterable of text lines. + + If `expected_in_results` and `results_in_expected` are True (the default), + then expected and test results are tested for equality. To accommodate for + some level of approximate testing, the check can test only if an expected + result is in a test result, or the opposite. + + If `expected_in_results` and `results_in_expected` are both False an + exception is raised as this is not a case that makes sense.
""" - copyrights, authors, years, holders = cluecode.copyrights.detect(test_file) + copyrights, authors, years, holders = cluecode.copyrights.detect(test_file_or_iterable) results = { 'copyrights': copyrights, 'authors': authors, @@ -60,10 +65,10 @@ def check_detection(expected, test_file, elif expected_in_results: for i, expect in enumerate(expected): - msg = repr(expect) + ' not in ' + repr(result[i]) + ' for test file:' + test_file + msg = repr(expect) + ' not in ' + repr(result[i]) + ' for test file:' + test_file_or_iterable assert expect in result[i], msg elif results_in_expected: for i, res in enumerate(result): - msg = repr(expected[i]) + ' does not contain ' + repr(res) + ' for test file:' + test_file + msg = repr(expected[i]) + ' does not contain ' + repr(res) + ' for test file:' + test_file_or_iterable assert res in expected[i], msg diff --git a/tests/cluecode/data/copyrights/copyright_03e16f6c_0-e_f_c.0 b/tests/cluecode/data/copyright_lines/03e16f6c_0-e_f_c.0 similarity index 100% rename from tests/cluecode/data/copyrights/copyright_03e16f6c_0-e_f_c.0 rename to tests/cluecode/data/copyright_lines/03e16f6c_0-e_f_c.0 diff --git a/tests/cluecode/data/copyrights/copyright_3a3b02ce_0-a_b_ce.0 b/tests/cluecode/data/copyright_lines/3a3b02ce_0-a_b_ce.0 similarity index 100% rename from tests/cluecode/data/copyrights/copyright_3a3b02ce_0-a_b_ce.0 rename to tests/cluecode/data/copyright_lines/3a3b02ce_0-a_b_ce.0 diff --git a/tests/cluecode/data/copyrights/copyright_ABC_cpp-Case_cpp.cpp b/tests/cluecode/data/copyright_lines/ABC_cpp-Case_cpp.cpp similarity index 100% rename from tests/cluecode/data/copyrights/copyright_ABC_cpp-Case_cpp.cpp rename to tests/cluecode/data/copyright_lines/ABC_cpp-Case_cpp.cpp diff --git a/tests/cluecode/data/copyrights/copyright_ABC_file_cpp-File_cpp.cpp b/tests/cluecode/data/copyright_lines/ABC_file_cpp-File_cpp.cpp similarity index 100% rename from tests/cluecode/data/copyrights/copyright_ABC_file_cpp-File_cpp.cpp rename to 
tests/cluecode/data/copyright_lines/ABC_file_cpp-File_cpp.cpp diff --git a/tests/cluecode/data/copyrights/copyright_abc b/tests/cluecode/data/copyright_lines/abc similarity index 100% rename from tests/cluecode/data/copyrights/copyright_abc rename to tests/cluecode/data/copyright_lines/abc diff --git a/tests/cluecode/data/copyrights/copyright_abc_loss_of_holder_c-c.c b/tests/cluecode/data/copyright_lines/abc_loss_of_holder_c-c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_abc_loss_of_holder_c-c.c rename to tests/cluecode/data/copyright_lines/abc_loss_of_holder_c-c.c diff --git a/tests/cluecode/data/copyrights/copyright_abiword_common_copyright-abiword_common_copyright.copyright b/tests/cluecode/data/copyright_lines/abiword_common.copyright similarity index 100% rename from tests/cluecode/data/copyrights/copyright_abiword_common_copyright-abiword_common_copyright.copyright rename to tests/cluecode/data/copyright_lines/abiword_common.copyright diff --git a/tests/cluecode/data/copyrights/copyright_acme_c-c.c b/tests/cluecode/data/copyright_lines/acme_c-c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_acme_c-c.c rename to tests/cluecode/data/copyright_lines/acme_c-c.c diff --git a/tests/cluecode/data/copyrights/copyright_activefieldattribute_cs-ActiveFieldAttribute_cs.cs b/tests/cluecode/data/copyright_lines/activefieldattribute_cs-ActiveFieldAttribute_cs.cs similarity index 100% rename from tests/cluecode/data/copyrights/copyright_activefieldattribute_cs-ActiveFieldAttribute_cs.cs rename to tests/cluecode/data/copyright_lines/activefieldattribute_cs-ActiveFieldAttribute_cs.cs diff --git a/tests/cluecode/data/copyrights/copyright_addr_c-addr_c.c b/tests/cluecode/data/copyright_lines/addr_c-addr_c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_addr_c-addr_c.c rename to tests/cluecode/data/copyright_lines/addr_c-addr_c.c diff --git 
a/tests/cluecode/data/copyrights/copyright_adler_inflate_c-inflate_c.c b/tests/cluecode/data/copyright_lines/adler_inflate_c-inflate_c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_adler_inflate_c-inflate_c.c rename to tests/cluecode/data/copyright_lines/adler_inflate_c-inflate_c.c diff --git a/tests/cluecode/data/copyrights/copyright_aleal-c.c b/tests/cluecode/data/copyright_lines/aleal-c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_aleal-c.c rename to tests/cluecode/data/copyright_lines/aleal-c.c diff --git a/tests/cluecode/data/copyrights/copyright_andre_darcy-c.c b/tests/cluecode/data/copyright_lines/andre_darcy-c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_andre_darcy-c.c rename to tests/cluecode/data/copyright_lines/andre_darcy-c.c diff --git a/tests/cluecode/data/copyrights/copyright_android_c-c.c b/tests/cluecode/data/copyright_lines/android_c-c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_android_c-c.c rename to tests/cluecode/data/copyright_lines/android_c-c.c diff --git a/tests/cluecode/data/copyrights/copyright_apache_notice-NOTICE b/tests/cluecode/data/copyright_lines/apache_notice-NOTICE similarity index 100% rename from tests/cluecode/data/copyrights/copyright_apache_notice-NOTICE rename to tests/cluecode/data/copyright_lines/apache_notice-NOTICE diff --git a/tests/cluecode/data/copyrights/copyright_aptitude_copyright_label-aptitude_copyright_label.label b/tests/cluecode/data/copyright_lines/aptitude-aptitude.label similarity index 100% rename from tests/cluecode/data/copyrights/copyright_aptitude_copyright_label-aptitude_copyright_label.label rename to tests/cluecode/data/copyright_lines/aptitude-aptitude.label diff --git a/tests/cluecode/data/copyrights/copyright_atheros_spanning_lines-py.py b/tests/cluecode/data/copyright_lines/atheros_spanning_lines-py.py similarity index 100% rename from 
tests/cluecode/data/copyrights/copyright_atheros_spanning_lines-py.py rename to tests/cluecode/data/copyright_lines/atheros_spanning_lines-py.py diff --git a/tests/cluecode/data/copyrights/copyright_att_in_c-9_c.c b/tests/cluecode/data/copyright_lines/att_in_c-9_c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_att_in_c-9_c.c rename to tests/cluecode/data/copyright_lines/att_in_c-9_c.c diff --git a/tests/cluecode/data/copyrights/copyright_audio_c-c.c b/tests/cluecode/data/copyright_lines/audio_c-c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_audio_c-c.c rename to tests/cluecode/data/copyright_lines/audio_c-c.c diff --git a/tests/cluecode/data/copyrights/copyright_babkin_txt.txt b/tests/cluecode/data/copyright_lines/babkin_txt.txt similarity index 100% rename from tests/cluecode/data/copyrights/copyright_babkin_txt.txt rename to tests/cluecode/data/copyright_lines/babkin_txt.txt diff --git a/tests/cluecode/data/copyrights/copyright_blender_debian-blender_copyright.copyright b/tests/cluecode/data/copyright_lines/blender_debian-blender.copyright similarity index 100% rename from tests/cluecode/data/copyrights/copyright_blender_debian-blender_copyright.copyright rename to tests/cluecode/data/copyright_lines/blender_debian-blender.copyright diff --git a/tests/cluecode/data/copyright_lines/company_name_in_java-9_java.java b/tests/cluecode/data/copyright_lines/company_name_in_java-9_java.java new file mode 100644 index 00000000000..778a496994f --- /dev/null +++ b/tests/cluecode/data/copyright_lines/company_name_in_java-9_java.java @@ -0,0 +1,20 @@ +/* +Copyright (c) 2008-2011 Company Name Incorporated +All rights Reserved. 
+Company Name Proprietary + */ +import some.java.package; +import some.java.package2; +import some.java.package3; +import some.java.package4; +import some.java.package4; +import some.java.package5; + +import some.proprietary.pkg; +import some.proprietary.pkg2; +import some.proprietary.pkg3; +import some.proprietary.pkg4; + +public class SomeClass extends SomeOtherClass implements WhateverListener { + private static final String JB = "JB"; // John Blah? Japan Beach? : -) + \ No newline at end of file diff --git a/tests/cluecode/data/copyrights/copyright_essential_smoke-ibm_c.c b/tests/cluecode/data/copyright_lines/essential_smoke-ibm_c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_essential_smoke-ibm_c.c rename to tests/cluecode/data/copyright_lines/essential_smoke-ibm_c.c diff --git a/tests/cluecode/data/copyrights/copyright_heunrich_c-c.c b/tests/cluecode/data/copyright_lines/heunrich_c-c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_heunrich_c-c.c rename to tests/cluecode/data/copyright_lines/heunrich_c-c.c diff --git a/tests/cluecode/data/copyrights/copyright_isc-c.c b/tests/cluecode/data/copyright_lines/isc-c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_isc-c.c rename to tests/cluecode/data/copyright_lines/isc-c.c diff --git a/tests/cluecode/data/copyrights/copyright_sample_py-py.py b/tests/cluecode/data/copyright_lines/sample_py-py.py similarity index 100% rename from tests/cluecode/data/copyrights/copyright_sample_py-py.py rename to tests/cluecode/data/copyright_lines/sample_py-py.py diff --git a/tests/cluecode/data/copyright_lines/vector50.hpp b/tests/cluecode/data/copyright_lines/vector50.hpp new file mode 100644 index 00000000000..fe53f22bf6b --- /dev/null +++ b/tests/cluecode/data/copyright_lines/vector50.hpp @@ -0,0 +1,174 @@ + +// Copyright (C) 2005 Arkadiy Vertleyb +// Copyright (C) 2005 Peder Holt +// +// Use modification and distribution are subject to the 
boost Software License, +// Version 1.0. (See http://www.boost.org/LICENSE_1_0.txt). + +// Preprocessed code, do not edit manually ! + + +namespace boost { namespace type_of { + template struct v_iter; + template struct v_iter > { typedef typename V::item0 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item1 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item2 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item3 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item4 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item5 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item6 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item7 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item8 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item9 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item10 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item11 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item12 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item13 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item14 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item15 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item16 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item17 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item18 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item19 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename 
V::item20 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item21 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item69 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item22 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item23 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item24 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item25 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item26 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item27 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item28 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item29 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item30 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item31 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item32 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item33 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item34 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item35 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item36 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item37 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item38 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item39 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item40 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item41 type; typedef v_iter > next; }; + template struct 
v_iter > { typedef typename V::item42 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item43 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item44 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item45 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item46 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item47 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item48 type; typedef v_iter > next; }; + template struct v_iter > { typedef typename V::item49 type; typedef v_iter > next; }; +}} +namespace boost { namespace type_of { + template< class T = void> struct vector0 { typedef v_iter, boost::mpl::int_<0> > begin; typedef mpl::int_<1> item0; typedef mpl::int_<1> item1; typedef mpl::int_<1> item2; typedef mpl::int_<1> item3; typedef mpl::int_<1> item4; typedef mpl::int_<1> item5; typedef mpl::int_<1> item6; typedef mpl::int_<1> item7; typedef mpl::int_<1> item8; typedef mpl::int_<1> item9; typedef mpl::int_<1> item10; typedef mpl::int_<1> item11; typedef mpl::int_<1> item12; typedef mpl::int_<1> item13; typedef mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; 
typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 > struct vector1 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef mpl::int_<1> item1; typedef mpl::int_<1> item2; typedef mpl::int_<1> item3; typedef mpl::int_<1> item4; typedef mpl::int_<1> item5; typedef mpl::int_<1> item6; typedef 
mpl::int_<1> item7; typedef mpl::int_<1> item8; typedef mpl::int_<1> item9; typedef mpl::int_<1> item10; typedef mpl::int_<1> item11; typedef mpl::int_<1> item12; typedef mpl::int_<1> item13; typedef mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef 
mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 > struct vector2 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef mpl::int_<1> item2; typedef mpl::int_<1> item3; typedef mpl::int_<1> item4; typedef mpl::int_<1> item5; typedef mpl::int_<1> item6; typedef mpl::int_<1> item7; typedef mpl::int_<1> item8; typedef mpl::int_<1> item9; typedef mpl::int_<1> item10; typedef mpl::int_<1> item11; typedef mpl::int_<1> item12; typedef mpl::int_<1> item13; typedef mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> 
item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 > struct vector3 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef mpl::int_<1> item3; typedef mpl::int_<1> item4; typedef mpl::int_<1> item5; typedef mpl::int_<1> item6; typedef mpl::int_<1> item7; typedef mpl::int_<1> item8; 
typedef mpl::int_<1> item9; typedef mpl::int_<1> item10; typedef mpl::int_<1> item11; typedef mpl::int_<1> item12; typedef mpl::int_<1> item13; typedef mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; 
typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 > struct vector4 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef mpl::int_<1> item4; typedef mpl::int_<1> item5; typedef mpl::int_<1> item6; typedef mpl::int_<1> item7; typedef mpl::int_<1> item8; typedef mpl::int_<1> item9; typedef mpl::int_<1> item10; typedef mpl::int_<1> item11; typedef mpl::int_<1> item12; typedef mpl::int_<1> item13; typedef mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef 
mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 > struct vector5 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef mpl::int_<1> item5; typedef mpl::int_<1> item6; typedef mpl::int_<1> item7; typedef mpl::int_<1> item8; typedef mpl::int_<1> item9; typedef mpl::int_<1> 
item10; typedef mpl::int_<1> item11; typedef mpl::int_<1> item12; typedef mpl::int_<1> item13; typedef mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef 
mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 > struct vector6 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef mpl::int_<1> item6; typedef mpl::int_<1> item7; typedef mpl::int_<1> item8; typedef mpl::int_<1> item9; typedef mpl::int_<1> item10; typedef mpl::int_<1> item11; typedef mpl::int_<1> item12; typedef mpl::int_<1> item13; typedef mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> 
item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 > struct vector7 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef mpl::int_<1> item7; typedef mpl::int_<1> item8; typedef mpl::int_<1> item9; typedef mpl::int_<1> item10; typedef mpl::int_<1> item11; 
typedef mpl::int_<1> item12; typedef mpl::int_<1> item13; typedef mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> 
item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 > struct vector8 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef mpl::int_<1> item8; typedef mpl::int_<1> item9; typedef mpl::int_<1> item10; typedef mpl::int_<1> item11; typedef mpl::int_<1> item12; typedef mpl::int_<1> item13; typedef mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; 
typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 > struct vector9 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef mpl::int_<1> item9; typedef mpl::int_<1> item10; typedef mpl::int_<1> item11; typedef mpl::int_<1> item12; typedef 
mpl::int_<1> item13; typedef mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; 
typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 > struct vector10 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef mpl::int_<1> item10; typedef mpl::int_<1> item11; typedef mpl::int_<1> item12; typedef mpl::int_<1> item13; typedef mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef 
mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 > struct vector11 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef mpl::int_<1> item11; typedef mpl::int_<1> item12; typedef mpl::int_<1> item13; typedef 
mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; 
typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 > struct vector12 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef mpl::int_<1> item12; typedef mpl::int_<1> item13; typedef mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; 
typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 > struct vector13 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef mpl::int_<1> item13; typedef mpl::int_<1> 
item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef 
mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 > struct vector14 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef mpl::int_<1> item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; 
typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 > struct vector15 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 
item14; typedef mpl::int_<1> item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef 
mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 > struct vector16 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef mpl::int_<1> item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> 
item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 > struct vector17 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; 
typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef mpl::int_<1> item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; 
typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 > struct vector18 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef mpl::int_<1> item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef 
mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 > struct vector19 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; 
typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef mpl::int_<1> item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef 
mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 > struct vector20 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef mpl::int_<1> item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef 
mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 > struct vector21 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; 
typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef mpl::int_<1> item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; 
typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 > struct vector22 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef mpl::int_<1> item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; 
typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 > struct vector23 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; 
typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef mpl::int_<1> item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> 
item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 > struct vector24 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef mpl::int_<1> item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef 
mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 > struct vector25 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 
item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef mpl::int_<1> item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> 
item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 > struct vector26 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef mpl::int_<1> item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef 
mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class 
P26 > struct vector27 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef mpl::int_<1> item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef 
mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 > struct vector28 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef mpl::int_<1> item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef 
mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class 
P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 > struct vector29 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef mpl::int_<1> item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef 
mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 > struct vector30 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef mpl::int_<1> item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef 
mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , 
class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 > struct vector31 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef mpl::int_<1> item31; typedef mpl::int_<1> item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; 
typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 > struct vector32 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef mpl::int_<1> item32; 
typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 
, class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 > struct vector33 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef mpl::int_<1> item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> 
item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 > struct vector34 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef 
P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef mpl::int_<1> item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef 
mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 > struct vector35 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef mpl::int_<1> item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> 
item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 > struct vector36 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef 
P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef mpl::int_<1> item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; 
typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 > struct vector37 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef mpl::int_<1> item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef 
mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 > struct vector38 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 
item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef P37 item37; typedef mpl::int_<1> item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef 
mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 > struct vector39 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef P37 item37; typedef P38 item38; typedef mpl::int_<1> item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef 
mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 > struct vector40 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 
item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef P37 item37; typedef P38 item38; typedef P39 item39; typedef mpl::int_<1> item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef 
mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 > struct vector41 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef P37 item37; typedef P38 item38; typedef P39 item39; typedef P40 item40; typedef mpl::int_<1> item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef 
mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 
, class P39 , class P40 , class P41 > struct vector42 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef P37 item37; typedef P38 item38; typedef P39 item39; typedef P40 item40; typedef P41 item41; typedef mpl::int_<1> item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; 
typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 > struct vector43 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef P37 item37; typedef P38 item38; typedef P39 item39; typedef P40 item40; 
typedef P41 item41; typedef P42 item42; typedef mpl::int_<1> item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , 
class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , class P43 > struct vector44 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef P37 item37; typedef P38 item38; typedef P39 item39; typedef P40 item40; typedef P41 item41; typedef P42 item42; typedef P43 item43; typedef mpl::int_<1> item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef 
mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , class P43 , class P44 > struct vector45 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 
item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef P37 item37; typedef P38 item38; typedef P39 item39; typedef P40 item40; typedef P41 item41; typedef P42 item42; typedef P43 item43; typedef P44 item44; typedef mpl::int_<1> item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , 
class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , class P43 , class P44 , class P45 > struct vector46 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef P37 item37; typedef P38 item38; typedef P39 item39; typedef P40 item40; typedef P41 item41; typedef P42 item42; typedef P43 item43; typedef P44 item44; typedef P45 item45; typedef mpl::int_<1> item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef 
mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , class P43 , class P44 , class P45 , class P46 > struct vector47 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 
item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef P37 item37; typedef P38 item38; typedef P39 item39; typedef P40 item40; typedef P41 item41; typedef P42 item42; typedef P43 item43; typedef P44 item44; typedef P45 item45; typedef P46 item46; typedef mpl::int_<1> item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef 
mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , class P43 , class P44 , class P45 , class P46 , class P47 > struct vector48 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef P37 item37; typedef P38 item38; typedef P39 item39; typedef P40 item40; typedef P41 item41; typedef P42 item42; typedef P43 item43; typedef P44 item44; typedef P45 item45; typedef P46 item46; typedef P47 item47; typedef mpl::int_<1> item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef 
mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , class P43 , class P44 , class P45 , class P46 , class P47 , class P48 > struct vector49 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 
item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef P37 item37; typedef P38 item38; typedef P39 item39; typedef P40 item40; typedef P41 item41; typedef P42 item42; typedef P43 item43; typedef P44 item44; typedef P45 item45; typedef P46 item46; typedef P47 item47; typedef P48 item48; typedef mpl::int_<1> item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef 
mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , class P43 , class P44 , class P45 , class P46 , class P47 , class P48 , class P49 > struct vector50 { typedef v_iter, boost::mpl::int_<0> > begin; typedef P0 item0; typedef P1 item1; typedef P2 item2; typedef P3 item3; typedef P4 item4; typedef P5 item5; typedef P6 item6; typedef P7 item7; typedef P8 item8; typedef P9 item9; typedef P10 item10; typedef P11 item11; typedef P12 item12; typedef P13 item13; typedef P14 item14; typedef P15 item15; typedef P16 item16; typedef P17 item17; typedef P18 item18; typedef P19 item19; typedef P20 item20; typedef P21 item21; typedef P22 item22; typedef P23 item23; typedef P24 item24; typedef P25 item25; typedef P26 item26; typedef P27 item27; typedef P28 item28; typedef P29 item29; typedef P30 item30; typedef P31 item31; typedef P32 item32; typedef P33 item33; typedef P34 item34; typedef P35 item35; typedef P36 item36; typedef P37 item37; typedef P38 item38; typedef P39 item39; typedef P40 item40; typedef P41 item41; typedef P42 item42; typedef P43 item43; typedef P44 item44; typedef P45 item45; typedef P46 item46; typedef P47 item47; typedef P48 item48; typedef P49 item49; typedef mpl::int_<1> item50; typedef mpl::int_<1> item51; typedef mpl::int_<1> item52; typedef 
mpl::int_<1> item53; typedef mpl::int_<1> item54; typedef mpl::int_<1> item55; typedef mpl::int_<1> item56; typedef mpl::int_<1> item57; typedef mpl::int_<1> item58; typedef mpl::int_<1> item59; typedef mpl::int_<1> item60; typedef mpl::int_<1> item61; typedef mpl::int_<1> item62; typedef mpl::int_<1> item63; typedef mpl::int_<1> item64; typedef mpl::int_<1> item65; typedef mpl::int_<1> item66; typedef mpl::int_<1> item67; typedef mpl::int_<1> item68; typedef mpl::int_<1> item69; typedef mpl::int_<1> item70; typedef mpl::int_<1> item71; typedef mpl::int_<1> item72; typedef mpl::int_<1> item73; typedef mpl::int_<1> item74; typedef mpl::int_<1> item75; typedef mpl::int_<1> item76; typedef mpl::int_<1> item77; typedef mpl::int_<1> item78; typedef mpl::int_<1> item79; typedef mpl::int_<1> item80; typedef mpl::int_<1> item81; typedef mpl::int_<1> item82; typedef mpl::int_<1> item83; typedef mpl::int_<1> item84; typedef mpl::int_<1> item85; typedef mpl::int_<1> item86; typedef mpl::int_<1> item87; typedef mpl::int_<1> item88; typedef mpl::int_<1> item89; typedef mpl::int_<1> item90; typedef mpl::int_<1> item91; typedef mpl::int_<1> item92; typedef mpl::int_<1> item93; typedef mpl::int_<1> item94; typedef mpl::int_<1> item95; typedef mpl::int_<1> item96; typedef mpl::int_<1> item97; typedef mpl::int_<1> item98; typedef mpl::int_<1> item99; }; +}} +namespace boost { namespace type_of { + template struct push_back { + typedef V type; + }; + template< class T> struct push_back, T> { typedef boost::type_of::vector1< T > type; }; + template< class P0 , class T> struct push_back, T> { typedef boost::type_of::vector2< P0 , T > type; }; + template< class P0 , class P1 , class T> struct push_back, T> { typedef boost::type_of::vector3< P0 , P1 , T > type; }; + template< class P0 , class P1 , class P2 , class T> struct push_back, T> { typedef boost::type_of::vector4< P0 , P1 , P2 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class T> struct push_back, T> { 
typedef boost::type_of::vector5< P0 , P1 , P2 , P3 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class T> struct push_back, T> { typedef boost::type_of::vector6< P0 , P1 , P2 , P3 , P4 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class T> struct push_back, T> { typedef boost::type_of::vector7< P0 , P1 , P2 , P3 , P4 , P5 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class T> struct push_back, T> { typedef boost::type_of::vector8< P0 , P1 , P2 , P3 , P4 , P5 , P6 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class T> struct push_back, T> { typedef boost::type_of::vector9< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class T> struct push_back, T> { typedef boost::type_of::vector10< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class T> struct push_back, T> { typedef boost::type_of::vector11< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class T> struct push_back, T> { typedef boost::type_of::vector12< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class T> struct push_back, T> { typedef boost::type_of::vector13< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , 
class P9 , class P10 , class P11 , class P12 , class T> struct push_back, T> { typedef boost::type_of::vector14< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class T> struct push_back, T> { typedef boost::type_of::vector15< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class T> struct push_back, T> { typedef boost::type_of::vector16< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class T> struct push_back, T> { typedef boost::type_of::vector17< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class T> struct push_back, T> { typedef boost::type_of::vector18< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class T> struct push_back, T> { typedef boost::type_of::vector19< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , T > type; }; + 
template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class T> struct push_back, T> { typedef boost::type_of::vector20< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class T> struct push_back, T> { typedef boost::type_of::vector21< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class T> struct push_back, T> { typedef boost::type_of::vector22< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class T> struct push_back, T> { typedef boost::type_of::vector23< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , 
class P20 , class P21 , class P22 , class T> struct push_back, T> { typedef boost::type_of::vector24< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class T> struct push_back, T> { typedef boost::type_of::vector25< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class T> struct push_back, T> { typedef boost::type_of::vector26< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class T> struct push_back, T> { typedef boost::type_of::vector27< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , 
class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class T> struct push_back, T> { typedef boost::type_of::vector28< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class T> struct push_back, T> { typedef boost::type_of::vector29< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class T> struct push_back, T> { typedef boost::type_of::vector30< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class T> struct push_back, T> { typedef boost::type_of::vector31< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , 
P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class T> struct push_back, T> { typedef boost::type_of::vector32< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class T> struct push_back, T> { typedef boost::type_of::vector33< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class T> struct push_back, T> { typedef boost::type_of::vector34< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , 
P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class T> struct push_back, T> { typedef boost::type_of::vector35< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class T> struct push_back, T> { typedef boost::type_of::vector36< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class T> struct push_back, T> { typedef boost::type_of::vector37< P0 , P1 , P2 , P3 , P4 , P5 , 
P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , P35 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class T> struct push_back, T> { typedef boost::type_of::vector38< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , P35 , P36 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class T> struct push_back, T> { typedef boost::type_of::vector39< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , P35 , P36 , P37 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , 
class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class T> struct push_back, T> { typedef boost::type_of::vector40< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , P35 , P36 , P37 , P38 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class T> struct push_back, T> { typedef boost::type_of::vector41< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , P35 , P36 , P37 , P38 , P39 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class T> struct push_back, T> { typedef boost::type_of::vector42< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , 
P33 , P34 , P35 , P36 , P37 , P38 , P39 , P40 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class T> struct push_back, T> { typedef boost::type_of::vector43< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , P35 , P36 , P37 , P38 , P39 , P40 , P41 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , class T> struct push_back, T> { typedef boost::type_of::vector44< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , P35 , P36 , P37 , P38 , P39 , P40 , P41 , P42 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , 
class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , class P43 , class T> struct push_back, T> { typedef boost::type_of::vector45< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , P35 , P36 , P37 , P38 , P39 , P40 , P41 , P42 , P43 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , class P43 , class P44 , class T> struct push_back, T> { typedef boost::type_of::vector46< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , P35 , P36 , P37 , P38 , P39 , P40 , P41 , P42 , P43 , P44 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , 
class P43 , class P44 , class P45 , class T> struct push_back, T> { typedef boost::type_of::vector47< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , P35 , P36 , P37 , P38 , P39 , P40 , P41 , P42 , P43 , P44 , P45 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , class P43 , class P44 , class P45 , class P46 , class T> struct push_back, T> { typedef boost::type_of::vector48< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , P35 , P36 , P37 , P38 , P39 , P40 , P41 , P42 , P43 , P44 , P45 , P46 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , class P43 , class P44 , class P45 , class P46 , class P47 , class T> struct push_back, T> { typedef boost::type_of::vector49< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , 
P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , P35 , P36 , P37 , P38 , P39 , P40 , P41 , P42 , P43 , P44 , P45 , P46 , P47 , T > type; }; + template< class P0 , class P1 , class P2 , class P3 , class P4 , class P5 , class P6 , class P7 , class P8 , class P9 , class P10 , class P11 , class P12 , class P13 , class P14 , class P15 , class P16 , class P17 , class P18 , class P19 , class P20 , class P21 , class P22 , class P23 , class P24 , class P25 , class P26 , class P27 , class P28 , class P29 , class P30 , class P31 , class P32 , class P33 , class P34 , class P35 , class P36 , class P37 , class P38 , class P39 , class P40 , class P41 , class P42 , class P43 , class P44 , class P45 , class P46 , class P47 , class P48 , class T> struct push_back, T> { typedef boost::type_of::vector50< P0 , P1 , P2 , P3 , P4 , P5 , P6 , P7 , P8 , P9 , P10 , P11 , P12 , P13 , P14 , P15 , P16 , P17 , P18 , P19 , P20 , P21 , P22 , P23 , P24 , P25 , P26 , P27 , P28 , P29 , P30 , P31 , P32 , P33 , P34 , P35 , P36 , P37 , P38 , P39 , P40 , P41 , P42 , P43 , P44 , P45 , P46 , P47 , P48 , T > type; }; + template struct v_iter > { typedef typename V::item69 type; typedef v_iter > next; }; + +}} diff --git a/tests/cluecode/data/copyrights/03e16f6c_0-e_f_c.0 b/tests/cluecode/data/copyrights/03e16f6c_0-e_f_c.0 new file mode 100644 index 00000000000..4cdf2749e0f --- /dev/null +++ b/tests/cluecode/data/copyrights/03e16f6c_0-e_f_c.0 @@ -0,0 +1,78 @@ +-----BEGIN CERTIFICATE----- +MIIEEjCCAvqgAwIBAgIPAMEAizw8iBHRPvZj7N9AMA0GCSqGSIb3DQEBBAUAMHAx +KzApBgNVBAsTIkNvcHlyaWdodCAoYykgMTk5NyBNaWNyb3NvZnQgQ29ycC4xHjAc +BgNVBAsTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEhMB8GA1UEAxMYTWljcm9zb2Z0 +IFJvb3QgQXV0aG9yaXR5MB4XDTk3MDExMDA3MDAwMFoXDTIwMTIzMTA3MDAwMFow +cDErMCkGA1UECxMiQ29weXJpZ2h0IChjKSAxOTk3IE1pY3Jvc29mdCBDb3JwLjEe +MBwGA1UECxMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSEwHwYDVQQDExhNaWNyb3Nv 
+ZnQgUm9vdCBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCpAr3BcOY78k4bKJ+XeF4w6qKpjSVf+P6VTKO3/p2iID58UaKboo9gMmvRQmR5 +7qx2yVTa8uuchhyPn4Rms8VremIj1h083g8BkuiWxL8tZpqaaCaZ0Dosvwy1WCbB +RucKPjiWLKkoOajsSYNC44QPu5psVWGsgnyhYC13TOmZtGQ7mlAcMQgkFJ+p55Er +GOY9mGMUYFgFZZ8dN1KH96fvlALGG9O/VUWziYC/OuxUlE6u/ad6bXROrxjMlgko +IQBXkGBpN7tLEgc8Vv9b+6RmCgim0oFWV++2O14WgXcE2va+roCV/rDNf9anGnJc +PMq88AijIjCzBoXJsyB3E4XfAgMBAAGjgagwgaUwgaIGA1UdAQSBmjCBl4AQW9Bw +72lyniNRfhSyTY7/y6FyMHAxKzApBgNVBAsTIkNvcHlyaWdodCAoYykgMTk5NyBN +aWNyb3NvZnQgQ29ycC4xHjAcBgNVBAsTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEh +MB8GA1UEAxMYTWljcm9zb2Z0IFJvb3QgQXV0aG9yaXR5gg8AwQCLPDyIEdE+9mPs +30AwDQYJKoZIhvcNAQEEBQADggEBAJXoC8CN85cYNe24ASTYdxHzXGAyn54Lyz4F +kYiPyTrmIfLwV5MstaBHyGLv/NfMOztaqTZUaf4kbT/JzKreBXzdMY09nxBwarv+ +Ek8YacD80EPjEVogT+pie6+qGcgrNyUtvmWhEoolD2Oj91Qc+SHJ1hXzUqxuQzIH +/YIX+OVnbA1R9r3xUse958Qw/CAxCYgdlSkaTdUdAqXxgOADtFv0sd3IV+5lScdS +VLa0AygS/5DW8AiPfriXxas3LOR65Kh343agANBqP8HSNorgQRKoNWobats14dQc +BOSoRQTIWjM4bk0cDWK3CqKM09VUP0bNHFWmcNsSOoeTdZ+n0qA= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + c1:00:8b:3c:3c:88:11:d1:3e:f6:63:ec:df:40 + Signature Algorithm: md5WithRSAEncryption + Issuer: OU=Copyright (c) 1997 Microsoft Corp., OU=Microsoft Corporation, CN=Microsoft Root Authority + Validity + Not Before: Jan 10 07:00:00 1997 GMT + Not After : Dec 31 07:00:00 2020 GMT + Subject: OU=Copyright (c) 1997 Microsoft Corp., OU=Microsoft Corporation, CN=Microsoft Root Authority + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public Key: (2048 bit) + Modulus (2048 bit): + 00:a9:02:bd:c1:70:e6:3b:f2:4e:1b:28:9f:97:78: + 5e:30:ea:a2:a9:8d:25:5f:f8:fe:95:4c:a3:b7:fe: + 9d:a2:20:3e:7c:51:a2:9b:a2:8f:60:32:6b:d1:42: + 64:79:ee:ac:76:c9:54:da:f2:eb:9c:86:1c:8f:9f: + 84:66:b3:c5:6b:7a:62:23:d6:1d:3c:de:0f:01:92: + e8:96:c4:bf:2d:66:9a:9a:68:26:99:d0:3a:2c:bf: + 0c:b5:58:26:c1:46:e7:0a:3e:38:96:2c:a9:28:39: + 
a8:ec:49:83:42:e3:84:0f:bb:9a:6c:55:61:ac:82: + 7c:a1:60:2d:77:4c:e9:99:b4:64:3b:9a:50:1c:31: + 08:24:14:9f:a9:e7:91:2b:18:e6:3d:98:63:14:60: + 58:05:65:9f:1d:37:52:87:f7:a7:ef:94:02:c6:1b: + d3:bf:55:45:b3:89:80:bf:3a:ec:54:94:4e:ae:fd: + a7:7a:6d:74:4e:af:18:cc:96:09:28:21:00:57:90: + 60:69:37:bb:4b:12:07:3c:56:ff:5b:fb:a4:66:0a: + 08:a6:d2:81:56:57:ef:b6:3b:5e:16:81:77:04:da: + f6:be:ae:80:95:fe:b0:cd:7f:d6:a7:1a:72:5c:3c: + ca:bc:f0:08:a3:22:30:b3:06:85:c9:b3:20:77:13: + 85:df + Exponent: 65537 (0x10001) + X509v3 extensions: + 2.5.29.1: + 0....[.p.ir.#Q~..M....r0p1+0)..U..."Copyright (c) 1997 Microsoft Corp.1.0...U....Microsoft Corporation1!0...U....Microsoft Root Authority......<<...>.c..@ + Signature Algorithm: md5WithRSAEncryption + 95:e8:0b:c0:8d:f3:97:18:35:ed:b8:01:24:d8:77:11:f3:5c: + 60:32:9f:9e:0b:cb:3e:05:91:88:8f:c9:3a:e6:21:f2:f0:57: + 93:2c:b5:a0:47:c8:62:ef:fc:d7:cc:3b:3b:5a:a9:36:54:69: + fe:24:6d:3f:c9:cc:aa:de:05:7c:dd:31:8d:3d:9f:10:70:6a: + bb:fe:12:4f:18:69:c0:fc:d0:43:e3:11:5a:20:4f:ea:62:7b: + af:aa:19:c8:2b:37:25:2d:be:65:a1:12:8a:25:0f:63:a3:f7: + 54:1c:f9:21:c9:d6:15:f3:52:ac:6e:43:32:07:fd:82:17:f8: + e5:67:6c:0d:51:f6:bd:f1:52:c7:bd:e7:c4:30:fc:20:31:09: + 88:1d:95:29:1a:4d:d5:1d:02:a5:f1:80:e0:03:b4:5b:f4:b1: + dd:c8:57:ee:65:49:c7:52:54:b6:b4:03:28:12:ff:90:d6:f0: + 08:8f:7e:b8:97:c5:ab:37:2c:e4:7a:e4:a8:77:e3:76:a0:00: + d0:6a:3f:c1:d2:36:8a:e0:41:12:a8:35:6a:1b:6a:db:35:e1: + d4:1c:04:e4:a8:45:04:c8:5a:33:38:6e:4d:1c:0d:62:b7:0a: + a2:8c:d3:d5:54:3f:46:cd:1c:55:a6:70:db:12:3a:87:93:75: + 9f:a7:d2:a0 +SHA1 Fingerprint=A4:34:89:15:9A:52:0F:0D:93:D0:32:CC:AF:37:E7:FE:20:A8:B4:19 diff --git a/tests/cluecode/data/copyrights/3a3b02ce_0-a_b_ce.0 b/tests/cluecode/data/copyrights/3a3b02ce_0-a_b_ce.0 new file mode 100644 index 00000000000..8b744f6c64d --- /dev/null +++ b/tests/cluecode/data/copyrights/3a3b02ce_0-a_b_ce.0 @@ -0,0 +1,84 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB 
+ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly +aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl +ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w +NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G +A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD +VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX +SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR +VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2 +w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF +mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg +4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9 +4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw +EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx +SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2 +ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8 +vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi +Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ +/L7fCg0= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 41:3d:72:c7:f4:6b:1f:81:43:7d:f1:d2:28:54:df:9a + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=CH, O=WISeKey, OU=Copyright (c) 2005, OU=OISTE Foundation Endorsed, CN=OISTE WISeKey Global Root GA CA + Validity + Not Before: Dec 11 16:03:44 2005 GMT + Not After : Dec 11 16:09:51 2037 GMT + Subject: C=CH, O=WISeKey, OU=Copyright (c) 2005, OU=OISTE Foundation Endorsed, CN=OISTE WISeKey Global Root GA CA + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public Key: (2048 bit) + Modulus (2048 bit): + 00:cb:4f:b3:00:9b:3d:36:dd:f9:d1:49:6a:6b:10: + 
49:1f:ec:d8:2b:b2:c6:f8:32:81:29:43:95:4c:9a: + 19:23:21:15:45:de:e3:c8:1c:51:55:5b:ae:93:e8: + 37:ff:2b:6b:e9:d4:ea:be:2a:dd:a8:51:2b:d7:66: + c3:61:5c:60:02:c8:f5:ce:72:7b:3b:b8:f2:4e:65: + 08:9a:cd:a4:6a:19:c1:01:bb:73:a6:d7:f6:c3:dd: + cd:bc:a4:8b:b5:99:61:b8:01:a2:a3:d4:4d:d4:05: + 3d:91:ad:f8:b4:08:71:64:af:70:f1:1c:6b:7e:f6: + c3:77:9d:24:73:7b:e4:0c:8c:e1:d9:36:e1:99:8b: + 05:99:0b:ed:45:31:09:ca:c2:00:db:f7:72:a0:96: + aa:95:87:d0:8e:c7:b6:61:73:0d:76:66:8c:dc:1b: + b4:63:a2:9f:7f:93:13:30:f1:a1:27:db:d9:ff:2c: + 55:88:91:a0:e0:4f:07:b0:28:56:8c:18:1b:97:44: + 8e:89:dd:e0:17:6e:e7:2a:ef:8f:39:0a:31:84:82: + d8:40:14:49:2e:7a:41:e4:a7:fe:e3:64:cc:c1:59: + 71:4b:2c:21:a7:5b:7d:e0:1d:d1:2e:81:9b:c3:d8: + 68:f7:bd:96:1b:ac:70:b1:16:14:0b:db:60:b9:26: + 01:05 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Key Usage: + Digital Signature, Certificate Sign, CRL Sign + X509v3 Basic Constraints: critical + CA:TRUE + X509v3 Subject Key Identifier: + B3:03:7E:AE:36:BC:B0:79:D1:DC:94:26:B6:11:BE:21:B2:69:86:94 + 1.3.6.1.4.1.311.21.1: + ... 
+ Signature Algorithm: sha1WithRSAEncryption + 4b:a1:ff:0b:87:6e:b3:f9:c1:43:b1:48:f3:28:c0:1d:2e:c9: + 09:41:fa:94:00:1c:a4:a4:ab:49:4f:8f:3d:1e:ef:4d:6f:bd: + bc:a4:f6:f2:26:30:c9:10:ca:1d:88:fb:74:19:1f:85:45:bd: + b0:6c:51:f9:36:7e:db:f5:4c:32:3a:41:4f:5b:47:cf:e8:0b: + 2d:b6:c4:19:9d:74:c5:47:c6:3b:6a:0f:ac:14:db:3c:f4:73: + 9c:a9:05:df:00:dc:74:78:fa:f8:35:60:59:02:13:18:7c:bc: + fb:4d:b0:20:6d:43:bb:60:30:7a:67:33:5c:c5:99:d1:f8:2d: + 39:52:73:fb:8c:aa:97:25:5c:72:d9:08:1e:ab:4e:3c:e3:81: + 31:9f:03:a6:fb:c0:fe:29:88:55:da:84:d5:50:03:b6:e2:84: + a3:a6:36:aa:11:3a:01:e1:18:4b:d6:44:68:b3:3d:f9:53:74: + 84:b3:46:91:46:96:00:b7:80:2c:b6:e1:e3:10:e2:db:a2:e7: + 28:8f:01:96:62:16:3e:00:e3:1c:a5:36:81:18:a2:4c:52:76: + c0:11:a3:6e:e6:1d:ba:e3:5a:be:36:53:c5:3e:75:8f:86:69: + 29:58:53:b5:9c:bb:6f:9f:5c:c5:18:ec:dd:2f:e1:98:c9:fc: + be:df:0a:0d +SHA1 Fingerprint=59:22:A1:E1:5A:EA:16:35:21:F8:98:39:6A:46:46:B0:44:1B:0F:A9 diff --git a/tests/cluecode/data/copyrights/ABC_cpp-Case_cpp.cpp b/tests/cluecode/data/copyrights/ABC_cpp-Case_cpp.cpp new file mode 100644 index 00000000000..ce313fa7155 --- /dev/null +++ b/tests/cluecode/data/copyrights/ABC_cpp-Case_cpp.cpp @@ -0,0 +1,14 @@ +/****************************** + ABC DEF + + ABC Company + + www.abcCompany.com + + ----------------------- + File: testCase.cpp + Project: testProject + ----------------------- + Copyright (C) ABC Company + + **********************************/ \ No newline at end of file diff --git a/tests/cluecode/data/copyrights/ABC_file_cpp-File_cpp.cpp b/tests/cluecode/data/copyrights/ABC_file_cpp-File_cpp.cpp new file mode 100644 index 00000000000..f3046f0ea99 --- /dev/null +++ b/tests/cluecode/data/copyrights/ABC_file_cpp-File_cpp.cpp @@ -0,0 +1,14 @@ +/****************************** + ABC DEF + + ABC Company + + www.abcCompany.com + + ----------------------- + File: testCase.cpp + Project: testProject + ----------------------- + Copyright (C) ABC Company + +**********************************/ \ No 
newline at end of file diff --git a/tests/cluecode/data/copyrights/copyright_in_COPYING_gpl-COPYING_gpl.gpl b/tests/cluecode/data/copyrights/COPYING_gpl-COPYING_gpl.gpl similarity index 100% rename from tests/cluecode/data/copyrights/copyright_in_COPYING_gpl-COPYING_gpl.gpl rename to tests/cluecode/data/copyrights/COPYING_gpl-COPYING_gpl.gpl diff --git a/tests/cluecode/data/copyrights/copyright_in_COPYRIGHT_madwifi-COPYRIGHT_madwifi.madwifi b/tests/cluecode/data/copyrights/COPYRIGHT_madwifi-COPYRIGHT_madwifi.madwifi similarity index 100% rename from tests/cluecode/data/copyrights/copyright_in_COPYRIGHT_madwifi-COPYRIGHT_madwifi.madwifi rename to tests/cluecode/data/copyrights/COPYRIGHT_madwifi-COPYRIGHT_madwifi.madwifi diff --git a/tests/cluecode/data/copyrights/copyright_in_README-README b/tests/cluecode/data/copyrights/README-README similarity index 100% rename from tests/cluecode/data/copyrights/copyright_in_README-README rename to tests/cluecode/data/copyrights/README-README diff --git a/tests/cluecode/data/copyrights/copyright_Yocto-SPDX.pdf b/tests/cluecode/data/copyrights/Yocto-SPDX.pdf similarity index 100% rename from tests/cluecode/data/copyrights/copyright_Yocto-SPDX.pdf rename to tests/cluecode/data/copyrights/Yocto-SPDX.pdf diff --git a/tests/cluecode/data/copyrights/abc b/tests/cluecode/data/copyrights/abc new file mode 100644 index 00000000000..e31f8194906 --- /dev/null +++ b/tests/cluecode/data/copyrights/abc @@ -0,0 +1,2 @@ +#!/bin/sh +# Copyright (C) 2006 abc.org diff --git a/tests/cluecode/data/copyrights/abc_loss_of_holder_c-c.c b/tests/cluecode/data/copyrights/abc_loss_of_holder_c-c.c new file mode 100644 index 00000000000..949051f3f20 --- /dev/null +++ b/tests/cluecode/data/copyrights/abc_loss_of_holder_c-c.c @@ -0,0 +1,2 @@ +//copyright abc 2001 +//all rights reserved diff --git a/tests/cluecode/data/copyrights/abiword_common.copyright b/tests/cluecode/data/copyrights/abiword_common.copyright new file mode 100644 index 
00000000000..69c723fb618 --- /dev/null +++ b/tests/cluecode/data/copyrights/abiword_common.copyright @@ -0,0 +1,152 @@ +This package was debianized by: + + Masayuki Hatta (mhatta) on Sun, 22 Mar 2009 18:42:01 +0900 + +It was downloaded from: + + http://www.abisource.com/download/ + +Upstream Authors: + + AbiSource, Inc, along with many volunteers + + See AUTHORS for (almost) complete list of contributors. + +Copyright: + + Copyright (C) 1998- AbiSource, Inc. & Co. + +License: + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + This package is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +On Debian systems, the complete text of the GNU General Public License +version 2 can be found in `/usr/share/common-licenses/GPL-2'. the +complete text of the GNU General Public License version 3 can be found +in `/usr/share/common-licenses/GPL-3'. + +The Debian packaging is: + + Copyright (C) 2009 Masayuki Hatta (mhatta) + Copyright (C) 2009 Patrik Fimml + +and is licensed under the GPL version 3, see above. + + +o About Trademark + +(See also http://www.abisource.com/information/license/tm_guide.phtml) + +Trademark Usage Guidelines + +AbiSource Trademarks + +AbiSource, AbiWord, AbiCalc, AbiFile, AbiSuite, AbiShow and other +AbiSource graphics, logos and service names are trademarks of Dom +Lachowicz. 
These trademarks may not be used in connection with any +product or service that is not AbiSource's, in any manner that is +likely to cause confusion among customers, or in any manner that +disparages or discredits AbiSource. + +Trademarks and the GPL + +AbiSource software products, such as AbiWord, are copyrighted works +released under the terms of the GNU General Public License +(GPL). Verbatim copies of such works may be made and distributed, by +anyone, in accordance with the terms of the GPL without violating the +AbiSource trademarks. The GPL also grants you certain rights to make +and distribute derivative works based on the source code to AbiSource +products. + +The GPL does not grant you any right to use AbiSource trademarks in +connection with these derivative works. AbiSource trademarks may not +be used in connection with any such derivative works unless that usage +is explicitly and specifically licensed, in writing, from Dom +Lachowicz. + +Personal exemption + +As a specific exception, AbiSource freely licenses the use of certain +of its trademarks solely in combination with the suffix "Personal" +when applied to derivative works based on an AbiSource GPL +product. Thus, for example, you are free to use the mark "AbiWord +Personal" in connection with derivative works that are based on +"AbiWord". To help maintain this distinction, AbiSource releases the +sources to its GPL products with Personal-based trademarks. + +We are not lawyers + +Trademark and copyright issues are, at heart, legal matters. We've +tried to keep this explanation as simple and common-sense as possible, +but if you have any questions about when and how to use AbiSource +trademarks, your best bet is to ask a lawyer. We are not lawyers. + +We are not evil + +Our goal is very simple. We want to make sure our software stays Open +Source, no matter what. That's why we chose the GPL. We also want +everyone to know which products are ours. That's why we are so picky +about our trademarks. 
+ +o "AbiWord" vs. "AbiWord Personal" for Debian? + +Maybe this is informative for now. + +> From: Dom Lachowicz +> Date: 20 July 2004 22:08:34 BST +> To: Andy Korvemaker, abiword-dev@abisource.com +> Subject: Re: Abiword being removed from Debian/unstable? +> +> +> I'm not sure if this is the reason or not, but please +> see: +> +> http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=258918 +> +> For the record, I've recently acquired the AbiWord +> trademarks and whatnot. I haven't had a chance to +> update the TM information on the website. +> +> To be expressly clear here for any Debian guys that +> read this message: +> +> Within reason, I don't care if you use "AbiWord" vs. +> "AbiWord Personal." In fact, I'd prefer it if you used +> "AbiWord." +> +> Within reason, I don't care if you use the "official" +> artwork or the "personal" artwork. In fact, I'd prefer +> it if you used the "official" artwork. +> +> I do begin to care if you use my trademarks to promote +> other products, or in ways that disparage my +> trademarks or products. If you "forked" AbiWord, you +> couldn't use the trademarks. But you're clearly not +> going to do that. The USPTO has more info and case law +> on this sort of thing. +> +> Debian and the other distros are clearly distributing +> AbiWord, and providing a beneficial service to the +> community. Even though Debian's version might have a +> few patches against our "mainline" branch, I don't +> believe it constitutes a "fork." As such, I think that +> it is fine (if not preferable) for you guys to use the +> official name and artwork in your distribution. +> +> So, you have my blessing to call your AbiWord + +> patches "AbiWord". You can use the official artwork +> too. 
+> +> Dom +> diff --git a/tests/cluecode/data/copyrights/acme_c-c.c b/tests/cluecode/data/copyrights/acme_c-c.c new file mode 100644 index 00000000000..23dbd722d5d --- /dev/null +++ b/tests/cluecode/data/copyrights/acme_c-c.c @@ -0,0 +1 @@ +/* Copyright © 2000 ACME, Inc., All Rights Reserved */ \ No newline at end of file diff --git a/tests/cluecode/data/copyrights/activefieldattribute_cs-ActiveFieldAttribute_cs.cs b/tests/cluecode/data/copyrights/activefieldattribute_cs-ActiveFieldAttribute_cs.cs new file mode 100644 index 00000000000..bcad9a64c59 --- /dev/null +++ b/tests/cluecode/data/copyrights/activefieldattribute_cs-ActiveFieldAttribute_cs.cs @@ -0,0 +1,40 @@ +/* + * Ra-Brix - A Modular-based Framework for building + * Web Applications Copyright 2009 - Thomas Hansen + * thomas@ra-ajax.org. Unless permission is + * explicitly given this code is licensed under the + * GNU Affero GPL version 3 which can be found in the + * license.txt file on disc. + * + */ + +using System; + +namespace Ra.Brix.Data +{ + /** + * Used to mark entity objects as serializable. If a property is + * marked with this attribute then it will be possible to serialise + * that property. Notice that you still need to mark you classes with the + * ActiveRecordAttribute. Also only properties, and not fields and such + * can be marked as serializable with this attribute. + */ + [AttributeUsage(AttributeTargets.Property, AllowMultiple=false)] + public class ActiveFieldAttribute : Attribute + { + /** + * If true then this is a one-to-x relationship which + * means that the type owns this instance and will also delete + * the instance if the object itself is deleted. If it is false + * then this indicate a many-to-x relationship + * and means that the object does NOT own this property and the + * property will NOT be deleted when the object is deleted. + * If it is false then the property will also NOT be saved whenever + * the not owning object is being saved. 
+ * Default value is true - which means that the object will + * be saved when parent object is saved, and also deleted when + * the parent object is being deleted. + */ + public bool IsOwner = true; + } +} diff --git a/tests/cluecode/data/copyrights/copyright_license_text_adaptive_v1_0-Adaptive v.0 b/tests/cluecode/data/copyrights/adaptive_v1_0-Adaptive v.0 similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_adaptive_v1_0-Adaptive v.0 rename to tests/cluecode/data/copyrights/adaptive_v1_0-Adaptive v.0 diff --git a/tests/cluecode/data/copyrights/addr_c-addr_c.c b/tests/cluecode/data/copyrights/addr_c-addr_c.c new file mode 100644 index 00000000000..d3bb4b93fb9 --- /dev/null +++ b/tests/cluecode/data/copyrights/addr_c-addr_c.c @@ -0,0 +1,23 @@ +/**************************************************************/ +/* ADDR.C */ +/* Author: John Doe, 7/2000 */ +/* Copyright 1999 Cornell University. All rights reserved. */ +/* Copyright 2000 Jon Doe. All rights reserved. */ +/* See license.txt for further information. */ +/**************************************************************/ + +#include "string.h" +#include "sys.h" + +tst_id tst_put(tst_id *id) { + tst_id id ; + memcpy(&id, *tst_id,sizeof(id)); + return id ; +} + +tst_id tst_get() { + tst_id id ; + memset(&id, 0, sizeof(id)) ; + return id ; +} + diff --git a/tests/cluecode/data/copyrights/adler_inflate_c-inflate_c.c b/tests/cluecode/data/copyrights/adler_inflate_c-inflate_c.c new file mode 100644 index 00000000000..a12a78b33e0 --- /dev/null +++ b/tests/cluecode/data/copyrights/adler_inflate_c-inflate_c.c @@ -0,0 +1,952 @@ +/* inflate.c -- Not copyrighted 1992 by Mark Adler + version c10p1, 10 January 1993 */ + +/* You can do whatever you like with this source file, though I would + prefer that if you modify it and redistribute it that you include + comments to that effect with your name and the date. Thank you. + [The history has been moved to the file ChangeLog.] 
+ */ + +/* + Inflate deflated (PKZIP's method 8 compressed) data. The compression + method searches for as much of the current string of bytes (up to a + length of 258) in the previous 32K bytes. If it doesn't find any + matches (of at least length 3), it codes the next byte. Otherwise, it + codes the length of the matched string and its distance backwards from + the current position. There is a single Huffman code that codes both + single bytes (called "literals") and match lengths. A second Huffman + code codes the distance information, which follows a length code. Each + length or distance code actually represents a base value and a number + of "extra" (sometimes zero) bits to get to add to the base value. At + the end of each deflated block is a special end-of-block (EOB) literal/ + length code. The decoding process is basically: get a literal/length + code; if EOB then done; if a literal, emit the decoded byte; if a + length then get the distance and emit the referred-to bytes from the + sliding window of previously emitted data. + + There are (currently) three kinds of inflate blocks: stored, fixed, and + dynamic. The compressor deals with some chunk of data at a time, and + decides which method to use on a chunk-by-chunk basis. A chunk might + typically be 32K or 64K. If the chunk is uncompressible, then the + "stored" method is used. In this case, the bytes are simply stored as + is, eight bits per byte, with none of the above coding. The bytes are + preceded by a count, since there is no longer an EOB code. + + If the data is compressible, then either the fixed or dynamic methods + are used. In the dynamic method, the compressed data is preceded by + an encoding of the literal/length and distance Huffman codes that are + to be used to decode this block. The representation is itself Huffman + coded, and so is preceded by a description of that code. 
These code + descriptions take up a little space, and so for small blocks, there is + a predefined set of codes, called the fixed codes. The fixed method is + used if the block codes up smaller that way (usually for quite small + chunks), otherwise the dynamic method is used. In the latter case, the + codes are customized to the probabilities in the current block, and so + can code it much better than the pre-determined fixed codes. + + The Huffman codes themselves are decoded using a mutli-level table + lookup, in order to maximize the speed of decoding plus the speed of + building the decoding tables. See the comments below that precede the + lbits and dbits tuning parameters. + */ + + +/* + Notes beyond the 1.93a appnote.txt: + + 1. Distance pointers never point before the beginning of the output + stream. + 2. Distance pointers can point back across blocks, up to 32k away. + 3. There is an implied maximum of 7 bits for the bit length table and + 15 bits for the actual data. + 4. If only one code exists, then it is encoded using one bit. (Zero + would be more efficient, but perhaps a little confusing.) If two + codes exist, they are coded using one bit each (0 and 1). + 5. There is no way of sending zero distance codes--a dummy must be + sent if there are none. (History: a pre 2.0 version of PKZIP would + store blocks with no distance codes, but this was discovered to be + too harsh a criterion.) Valid only for 1.93a. 2.04c does allow + zero distance codes, which is sent as one code of zero bits in + length. + 6. There are up to 286 literal/length codes. Code 256 represents the + end-of-block. Note however that the static length tree defines + 288 codes just to fill out the Huffman codes. Codes 286 and 287 + cannot be used though, since there is no length base or extra bits + defined for them. Similarily, there are up to 30 distance codes. 
+ However, static trees define 32 codes (all 5 bits) to fill out the + Huffman codes, but the last two had better not show up in the data. + 7. Unzip can check dynamic Huffman blocks for complete code sets. + The exception is that a single code would not be complete (see #4). + 8. The five bits following the block type is really the number of + literal codes sent minus 257. + 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits + (1+6+6). Therefore, to output three times the length, you output + three codes (1+1+1), whereas to output four times the same length, + you only need two codes (1+3). Hmm. + 10. In the tree reconstruction algorithm, Code = Code + Increment + only if BitLength(i) is not zero. (Pretty obvious.) + 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19) + 12. Note: length code 284 can represent 227-258, but length code 285 + really is 258. The last length deserves its own, short code + since it gets used a lot in very redundant files. The length + 258 is special since 258 - 3 (the min match length) is 255. + 13. The literal/length and distance code bit lengths are read as a + single stream of lengths. It is possible (and advantageous) for + a repeat code (16, 17, or 18) to go across the boundary between + the two sets of lengths. + */ + +#ifndef lint +static char rcsid[] = "$Id: inflate.c,v 0.10 1993/02/04 13:21:06 jloup Exp $"; +#endif + +#include "tailor.h" +#include "gzip.h" +#define slide window + +#include + +#if defined(STDC_HEADERS) || defined(HAVE_STDLIB_H) +# include +# include +#endif + +/* Huffman code lookup table entry--this entry is four bytes for machines + that have 16-bit pointers (e.g. PC's in the small or medium model). + Valid extra bits are 0..13. e == 15 is EOB (end of block), e == 16 + means that v is a literal, 16 < e < 32 means that v is a pointer to + the next table, which codes e - 16 bits, and lastly e == 99 indicates + an unused code. 
If a code with e == 99 is looked up, this implies an + error in the data. */ +struct huft { + uch e; /* number of extra bits or operation */ + uch b; /* number of bits in this code or subcode */ + union { + ush n; /* literal, length base, or distance base */ + struct huft *t; /* pointer to next level of table */ + } v; +}; + + +/* Function prototypes */ +int huft_build OF((unsigned *, unsigned, unsigned, ush *, ush *, + struct huft **, int *)); +int huft_free OF((struct huft *)); +int inflate_codes OF((struct huft *, struct huft *, int, int)); +int inflate_stored OF((void)); +int inflate_fixed OF((void)); +int inflate_dynamic OF((void)); +int inflate_block OF((int *)); +int inflate OF((void)); + + +/* The inflate algorithm uses a sliding 32K byte window on the uncompressed + stream to find repeated byte strings. This is implemented here as a + circular buffer. The index is updated simply by incrementing and then + and'ing with 0x7fff (32K-1). */ +/* It is left to other modules to supply the 32K area. It is assumed + to be usable as if it were declared "uch slide[32768];" or as just + "uch *slide;" and then malloc'ed in the latter case. The definition + must be in unzip.h, included above. */ +/* unsigned wp; current position in slide */ +#define wp outcnt +#define flush_output(w) (wp=(w),flush_window()) + +/* Tables for deflate from PKZIP's appnote.txt. */ +static unsigned border[] = { /* Order of the bit length code lengths */ + 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; +static ush cplens[] = { /* Copy lengths for literal codes 257..285 */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; + /* note: see note #13 above about the 258 in this list. 
*/ +static ush cplext[] = { /* Extra bits for literal codes 257..285 */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */ +static ush cpdist[] = { /* Copy offsets for distance codes 0..29 */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577}; +static ush cpdext[] = { /* Extra bits for distance codes */ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, + 12, 12, 13, 13}; + + + +/* Macros for inflate() bit peeking and grabbing. + The usage is: + + NEEDBITS(j) + x = b & mask_bits[j]; + DUMPBITS(j) + + where NEEDBITS makes sure that b has at least j bits in it, and + DUMPBITS removes the bits from b. The macros use the variable k + for the number of bits in b. Normally, b and k are register + variables for speed, and are initialized at the begining of a + routine that uses these macros from a global bit buffer and count. + + If we assume that EOB will be the longest code, then we will never + ask for bits with NEEDBITS that are beyond the end of the stream. + So, NEEDBITS should not read any more bytes than are needed to + meet the request. Then no bytes need to be "returned" to the buffer + at the end of the last block. + + However, this assumption is not true for fixed blocks--the EOB code + is 7 bits, but the other literal/length codes can be 8 or 9 bits. + (The EOB code is shorter than other codes becuase fixed blocks are + generally short. So, while a block always has an EOB, many other + literal/length codes have a significantly lower probability of + showing up at all.) However, by making the first table have a + lookup of seven bits, the EOB code will be found in that first + lookup, and so will not require that too many bits be pulled from + the stream. 
+ */ + +ulg bb; /* bit buffer */ +unsigned bk; /* bits in bit buffer */ + +ush mask_bits[] = { + 0x0000, + 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, + 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff +}; + +#ifdef CRYPT + uch cc; +# define NEXTBYTE() \ + (decrypt ? (cc = get_byte(), zdecode(cc), cc) : get_byte()) +#else +# define NEXTBYTE() (uch)get_byte() +#endif +#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<>=(n);k-=(n);} + + +/* + Huffman code decoding is performed using a multi-level table lookup. + The fastest way to decode is to simply build a lookup table whose + size is determined by the longest code. However, the time it takes + to build this table can also be a factor if the data being decoded + is not very long. The most common codes are necessarily the + shortest codes, so those codes dominate the decoding time, and hence + the speed. The idea is you can have a shorter table that decodes the + shorter, more probable codes, and then point to subsidiary tables for + the longer codes. The time it costs to decode the longer codes is + then traded against the time it takes to make longer tables. + + This results of this trade are in the variables lbits and dbits + below. lbits is the number of bits the first level table for literal/ + length codes can decode in one step, and dbits is the same thing for + the distance codes. Subsequent tables are also less than or equal to + those sizes. These values may be adjusted either when all of the + codes are shorter than that, in which case the longest code length in + bits is used, or when the shortest code is *longer* than the requested + table size, in which case the length of the shortest code in bits is + used. + + There are two different values for the two tables, since they code a + different number of possibilities each. The literal/length table + codes 286 possible values, or in a flat code, a little over eight + bits. 
The distance table codes 30 possible values, or a little less + than five bits, flat. The optimum values for speed end up being + about one bit more than those, so lbits is 8+1 and dbits is 5+1. + The optimum values may differ though from machine to machine, and + possibly even between compilers. Your mileage may vary. + */ + + +int lbits = 9; /* bits in base literal/length lookup table */ +int dbits = 6; /* bits in base distance lookup table */ + + +/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */ +#define BMAX 16 /* maximum bit length of any code (16 for explode) */ +#define N_MAX 288 /* maximum number of codes in any set */ + + +unsigned hufts; /* track memory usage */ + + +int huft_build(b, n, s, d, e, t, m) +unsigned *b; /* code lengths in bits (all assumed <= BMAX) */ +unsigned n; /* number of codes (assumed <= N_MAX) */ +unsigned s; /* number of simple-valued codes (0..s-1) */ +ush *d; /* list of base values for non-simple codes */ +ush *e; /* list of extra bits for non-simple codes */ +struct huft **t; /* result: starting table */ +int *m; /* maximum lookup bits, returns actual */ +/* Given a list of code lengths and a maximum table size, make a set of + tables to decode that set of codes. Return zero on success, one if + the given code set is incomplete (the tables are still built in this + case), two if the input is invalid (all zero length codes or an + oversubscribed set of lengths), and three if not enough memory. 
*/ +{ + unsigned a; /* counter for codes of length k */ + unsigned c[BMAX+1]; /* bit length count table */ + unsigned f; /* i repeats in table every f entries */ + int g; /* maximum code length */ + int h; /* table level */ + register unsigned i; /* counter, current code */ + register unsigned j; /* counter */ + register int k; /* number of bits in current code */ + int l; /* bits per table (returned in m) */ + register unsigned *p; /* pointer into c[], b[], or v[] */ + register struct huft *q; /* points to current table */ + struct huft r; /* table entry for structure assignment */ + struct huft *u[BMAX]; /* table stack */ + unsigned v[N_MAX]; /* values in order of bit length */ + register int w; /* bits before this table == (l * h) */ + unsigned x[BMAX+1]; /* bit offsets, then code stack */ + unsigned *xp; /* pointer into x */ + int y; /* number of dummy codes added */ + unsigned z; /* number of entries in current table */ + + + /* Generate counts for each bit length */ + memzero(c, sizeof(c)); + p = b; i = n; + do { + Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? 
"%c %d\n" : "0x%x %d\n"), + n-i, *p)); + c[*p++]++; /* assume all entries <= BMAX */ + } while (--i); + if (c[0] == n) /* null input--all zero length codes */ + { + *t = (struct huft *)NULL; + *m = 0; + return 0; + } + + + /* Find minimum and maximum length, bound *m by those */ + l = *m; + for (j = 1; j <= BMAX; j++) + if (c[j]) + break; + k = j; /* minimum code length */ + if ((unsigned)l < j) + l = j; + for (i = BMAX; i; i--) + if (c[i]) + break; + g = i; /* maximum code length */ + if ((unsigned)l > i) + l = i; + *m = l; + + + /* Adjust last length count to fill out codes, if needed */ + for (y = 1 << j; j < i; j++, y <<= 1) + if ((y -= c[j]) < 0) + return 2; /* bad input: more codes than bits */ + if ((y -= c[i]) < 0) + return 2; + c[i] += y; + + + /* Generate starting offsets into the value table for each length */ + x[1] = j = 0; + p = c + 1; xp = x + 2; + while (--i) { /* note that i == g from above */ + *xp++ = (j += *p++); + } + + + /* Make a table of values in order of bit lengths */ + p = b; i = 0; + do { + if ((j = *p++) != 0) + v[x[j]++] = i; + } while (++i < n); + + + /* Generate the Huffman codes and for each, make the table entries */ + x[0] = i = 0; /* first Huffman code is zero */ + p = v; /* grab values in bit order */ + h = -1; /* no tables yet--level -1 */ + w = -l; /* bits decoded == (l * h) */ + u[0] = (struct huft *)NULL; /* just to keep compilers happy */ + q = (struct huft *)NULL; /* ditto */ + z = 0; /* ditto */ + + /* go through the bit lengths (k already is bits in shortest code) */ + for (; k <= g; k++) + { + a = c[k]; + while (a--) + { + /* here i is the Huffman code of length k bits for value *p */ + /* make tables up to required level */ + while (k > w + l) + { + h++; + w += l; /* previous table always l bits */ + + /* compute minimum size table less than or equal to l bits */ + z = (z = g - w) > (unsigned)l ? 
l : z; /* upper limit on table size */ + if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ + { /* too few codes for k-w bit table */ + f -= a + 1; /* deduct codes from patterns left */ + xp = c + k; + while (++j < z) /* try smaller tables up to z bits */ + { + if ((f <<= 1) <= *++xp) + break; /* enough codes to use up j bits */ + f -= *xp; /* else deduct codes from patterns */ + } + } + z = 1 << j; /* table entries for j-bit table */ + + /* allocate and link in new table */ + if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) == + (struct huft *)NULL) + { + if (h) + huft_free(u[0]); + return 3; /* not enough memory */ + } + hufts += z + 1; /* track memory usage */ + *t = q + 1; /* link to list for huft_free() */ + *(t = &(q->v.t)) = (struct huft *)NULL; + u[h] = ++q; /* table starts after link */ + + /* connect to last table, if there is one */ + if (h) + { + x[h] = i; /* save pattern for backing up */ + r.b = (uch)l; /* bits to dump before this table */ + r.e = (uch)(16 + j); /* bits in this table */ + r.v.t = q; /* pointer to this table */ + j = i >> (w - l); /* (get around Turbo C bug) */ + u[h-1][j] = r; /* connect to last table */ + } + } + + /* set up table entry in r */ + r.b = (uch)(k - w); + if (p >= v + n) + r.e = 99; /* out of values--invalid code */ + else if (*p < s) + { + r.e = (uch)(*p < 256 ? 
16 : 15); /* 256 is end-of-block code */ + r.v.n = *p++; /* simple code is just the value */ + } + else + { + r.e = (uch)e[*p - s]; /* non-simple--look up in lists */ + r.v.n = d[*p++ - s]; + } + + /* fill code-like entries with r */ + f = 1 << (k - w); + for (j = i >> w; j < z; j += f) + q[j] = r; + + /* backwards increment the k-bit code i */ + for (j = 1 << (k - 1); i & j; j >>= 1) + i ^= j; + i ^= j; + + /* backup over finished tables */ + while ((i & ((1 << w) - 1)) != x[h]) + { + h--; /* don't need to update q */ + w -= l; + } + } + } + + + /* Return true (1) if we were given an incomplete table */ + return y != 0 && g != 1; +} + + + +int huft_free(t) +struct huft *t; /* table to free */ +/* Free the malloc'ed tables built by huft_build(), which makes a linked + list of the tables it made, with the links in a dummy first entry of + each table. */ +{ + register struct huft *p, *q; + + + /* Go through linked list, freeing from the malloced (t[-1]) address. */ + p = t; + while (p != (struct huft *)NULL) + { + q = (--p)->v.t; + free(p); + p = q; + } + return 0; +} + + +int inflate_codes(tl, td, bl, bd) +struct huft *tl, *td; /* literal/length and distance decoder tables */ +int bl, bd; /* number of bits decoded by tl[] and td[] */ +/* inflate (decompress) the codes in a deflated (compressed) block. + Return an error code or zero if it all goes ok. 
*/ +{ + register unsigned e; /* table entry flag/number of extra bits */ + unsigned n, d; /* length and index for copy */ + unsigned w; /* current window position */ + struct huft *t; /* pointer to table entry */ + unsigned ml, md; /* masks for bl and bd bits */ + register ulg b; /* bit buffer */ + register unsigned k; /* number of bits in bit buffer */ + + + /* make local copies of globals */ + b = bb; /* initialize bit buffer */ + k = bk; + w = wp; /* initialize window position */ + + /* inflate the coded data */ + ml = mask_bits[bl]; /* precompute masks for speed */ + md = mask_bits[bd]; + for (;;) /* do until end of block */ + { + NEEDBITS((unsigned)bl) + if ((e = (t = tl + ((unsigned)b & ml))->e) > 16) + do { + if (e == 99) + return 1; + DUMPBITS(t->b) + e -= 16; + NEEDBITS(e) + } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); + DUMPBITS(t->b) + if (e == 16) /* then it's a literal */ + { + slide[w++] = (uch)t->v.n; + Tracevv((stderr, "%c", slide[w-1])); + if (w == WSIZE) + { + flush_output(w); + w = 0; + } + } + else /* it's an EOB or a length */ + { + /* exit if end of block */ + if (e == 15) + break; + + /* get length of block to copy */ + NEEDBITS(e) + n = t->v.n + ((unsigned)b & mask_bits[e]); + DUMPBITS(e); + + /* decode distance of block to copy */ + NEEDBITS((unsigned)bd) + if ((e = (t = td + ((unsigned)b & md))->e) > 16) + do { + if (e == 99) + return 1; + DUMPBITS(t->b) + e -= 16; + NEEDBITS(e) + } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); + DUMPBITS(t->b) + NEEDBITS(e) + d = w - t->v.n - ((unsigned)b & mask_bits[e]); + DUMPBITS(e) + Tracevv((stderr,"\\[%d,%d]", w-d, n)); + + /* do the copy */ + do { + n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? 
n : e); +#if !defined(NOMEMCPY) && !defined(DEBUG) + if (w - d >= e) /* (this test assumes unsigned comparison) */ + { + memcpy(slide + w, slide + d, e); + w += e; + d += e; + } + else /* do it slow to avoid memcpy() overlap */ +#endif /* !NOMEMCPY */ + do { + slide[w++] = slide[d++]; + Tracevv((stderr, "%c", slide[w-1])); + } while (--e); + if (w == WSIZE) + { + flush_output(w); + w = 0; + } + } while (n); + } + } + + + /* restore the globals from the locals */ + wp = w; /* restore global window pointer */ + bb = b; /* restore global bit buffer */ + bk = k; + + /* done */ + return 0; +} + + + +int inflate_stored() +/* "decompress" an inflated type 0 (stored) block. */ +{ + unsigned n; /* number of bytes in block */ + unsigned w; /* current window position */ + register ulg b; /* bit buffer */ + register unsigned k; /* number of bits in bit buffer */ + + + /* make local copies of globals */ + b = bb; /* initialize bit buffer */ + k = bk; + w = wp; /* initialize window position */ + + + /* go to byte boundary */ + n = k & 7; + DUMPBITS(n); + + + /* get the length and its complement */ + NEEDBITS(16) + n = ((unsigned)b & 0xffff); + DUMPBITS(16) + NEEDBITS(16) + if (n != (unsigned)((~b) & 0xffff)) + return 1; /* error in compressed data */ + DUMPBITS(16) + + + /* read and output the compressed data */ + while (n--) + { + NEEDBITS(8) + slide[w++] = (uch)b; + if (w == WSIZE) + { + flush_output(w); + w = 0; + } + DUMPBITS(8) + } + + + /* restore the globals from the locals */ + wp = w; /* restore global window pointer */ + bb = b; /* restore global bit buffer */ + bk = k; + return 0; +} + + + +int inflate_fixed() +/* decompress an inflated type 1 (fixed Huffman codes) block. We should + either replace this with a custom decoder, or at least precompute the + Huffman tables. 
*/ +{ + int i; /* temporary variable */ + struct huft *tl; /* literal/length code table */ + struct huft *td; /* distance code table */ + int bl; /* lookup bits for tl */ + int bd; /* lookup bits for td */ + unsigned l[288]; /* length list for huft_build */ + + + /* set up literal table */ + for (i = 0; i < 144; i++) + l[i] = 8; + for (; i < 256; i++) + l[i] = 9; + for (; i < 280; i++) + l[i] = 7; + for (; i < 288; i++) /* make a complete, but wrong code set */ + l[i] = 8; + bl = 7; + if ((i = huft_build(l, 288, 257, cplens, cplext, &tl, &bl)) != 0) + return i; + + + /* set up distance table */ + for (i = 0; i < 30; i++) /* make an incomplete code set */ + l[i] = 5; + bd = 5; + if ((i = huft_build(l, 30, 0, cpdist, cpdext, &td, &bd)) > 1) + { + huft_free(tl); + return i; + } + + + /* decompress until an end-of-block code */ + if (inflate_codes(tl, td, bl, bd)) + return 1; + + + /* free the decoding tables, return */ + huft_free(tl); + huft_free(td); + return 0; +} + + + +int inflate_dynamic() +/* decompress an inflated type 2 (dynamic Huffman codes) block. 
*/ +{ + int i; /* temporary variables */ + unsigned j; + unsigned l; /* last length */ + unsigned m; /* mask for bit lengths table */ + unsigned n; /* number of lengths to get */ + struct huft *tl; /* literal/length code table */ + struct huft *td; /* distance code table */ + int bl; /* lookup bits for tl */ + int bd; /* lookup bits for td */ + unsigned nb; /* number of bit length codes */ + unsigned nl; /* number of literal/length codes */ + unsigned nd; /* number of distance codes */ +#ifdef PKZIP_BUG_WORKAROUND + unsigned ll[288+32]; /* literal/length and distance code lengths */ +#else + unsigned ll[286+30]; /* literal/length and distance code lengths */ +#endif + register ulg b; /* bit buffer */ + register unsigned k; /* number of bits in bit buffer */ + + + /* make local bit buffer */ + b = bb; + k = bk; + + + /* read in table lengths */ + NEEDBITS(5) + nl = 257 + ((unsigned)b & 0x1f); /* number of literal/length codes */ + DUMPBITS(5) + NEEDBITS(5) + nd = 1 + ((unsigned)b & 0x1f); /* number of distance codes */ + DUMPBITS(5) + NEEDBITS(4) + nb = 4 + ((unsigned)b & 0xf); /* number of bit length codes */ + DUMPBITS(4) +#ifdef PKZIP_BUG_WORKAROUND + if (nl > 288 || nd > 32) +#else + if (nl > 286 || nd > 30) +#endif + return 1; /* bad lengths */ + + + /* read in bit-length-code lengths */ + for (j = 0; j < nb; j++) + { + NEEDBITS(3) + ll[border[j]] = (unsigned)b & 7; + DUMPBITS(3) + } + for (; j < 19; j++) + ll[border[j]] = 0; + + + /* build decoding table for trees--single level, 7 bit lookup */ + bl = 7; + if ((i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0) + { + if (i == 1) + huft_free(tl); + return i; /* incomplete code set */ + } + + + /* read in literal and distance code lengths */ + n = nl + nd; + m = mask_bits[bl]; + i = l = 0; + while ((unsigned)i < n) + { + NEEDBITS((unsigned)bl) + j = (td = tl + ((unsigned)b & m))->b; + DUMPBITS(j) + j = td->v.n; + if (j < 16) /* length of code in bits (0..15) */ + ll[i++] = l = j; /* save last length in l */ 
+ else if (j == 16) /* repeat last length 3 to 6 times */ + { + NEEDBITS(2) + j = 3 + ((unsigned)b & 3); + DUMPBITS(2) + if ((unsigned)i + j > n) + return 1; + while (j--) + ll[i++] = l; + } + else if (j == 17) /* 3 to 10 zero length codes */ + { + NEEDBITS(3) + j = 3 + ((unsigned)b & 7); + DUMPBITS(3) + if ((unsigned)i + j > n) + return 1; + while (j--) + ll[i++] = 0; + l = 0; + } + else /* j == 18: 11 to 138 zero length codes */ + { + NEEDBITS(7) + j = 11 + ((unsigned)b & 0x7f); + DUMPBITS(7) + if ((unsigned)i + j > n) + return 1; + while (j--) + ll[i++] = 0; + l = 0; + } + } + + + /* free decoding table for trees */ + huft_free(tl); + + + /* restore the global bit buffer */ + bb = b; + bk = k; + + + /* build the decoding tables for literal/length and distance codes */ + bl = lbits; + if ((i = huft_build(ll, nl, 257, cplens, cplext, &tl, &bl)) != 0) + { + if (i == 1) { + fprintf(stderr, " incomplete literal tree\n"); + huft_free(tl); + } + return i; /* incomplete code set */ + } + bd = dbits; + if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0) + { + if (i == 1) { + fprintf(stderr, " incomplete distance tree\n"); +#ifdef PKZIP_BUG_WORKAROUND + i = 0; + } +#else + huft_free(td); + } + huft_free(tl); + return i; /* incomplete code set */ +#endif + } + + + /* decompress until an end-of-block code */ + if (inflate_codes(tl, td, bl, bd)) + return 1; + + + /* free the decoding tables, return */ + huft_free(tl); + huft_free(td); + return 0; +} + + + +int inflate_block(e) +int *e; /* last block flag */ +/* decompress an inflated block */ +{ + unsigned t; /* block type */ + register ulg b; /* bit buffer */ + register unsigned k; /* number of bits in bit buffer */ + + + /* make local bit buffer */ + b = bb; + k = bk; + + + /* read in last block bit */ + NEEDBITS(1) + *e = (int)b & 1; + DUMPBITS(1) + + + /* read in block type */ + NEEDBITS(2) + t = (unsigned)b & 3; + DUMPBITS(2) + + + /* restore the global bit buffer */ + bb = b; + bk = k; + + + /* inflate 
that block type */ + if (t == 2) + return inflate_dynamic(); + if (t == 0) + return inflate_stored(); + if (t == 1) + return inflate_fixed(); + + + /* bad block type */ + return 2; +} + + + +int inflate() +/* decompress an inflated entry */ +{ + int e; /* last block flag */ + int r; /* result code */ + unsigned h; /* maximum struct huft's malloc'ed */ + + + /* initialize window, bit buffer */ + wp = 0; + bk = 0; + bb = 0; + + + /* decompress until the last block */ + h = 0; + do { + hufts = 0; + if ((r = inflate_block(&e)) != 0) + return r; + if (hufts > h) + h = hufts; + } while (!e); + + /* Undo too much lookahead. The next read will be byte aligned so we + * can discard unused bits in the last meaningful byte. + */ + while (bk >= 8) { + bk -= 8; + inptr--; + } + + /* flush out slide */ + flush_output(wp); + + + /* return success */ +#ifdef DEBUG + fprintf(stderr, "<%u> ", h); +#endif /* DEBUG */ + return 0; +} diff --git a/tests/cluecode/data/copyrights/copyright_license_text_adobe-Adobe b/tests/cluecode/data/copyrights/adobe-Adobe similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_adobe-Adobe rename to tests/cluecode/data/copyrights/adobe-Adobe diff --git a/tests/cluecode/data/copyrights/copyright_adobe_flashplugin_copyright_label-adobe_flashplugin_copyright_label.label b/tests/cluecode/data/copyrights/adobe_flashplugin-adobe_flashplugin.label similarity index 100% rename from tests/cluecode/data/copyrights/copyright_adobe_flashplugin_copyright_label-adobe_flashplugin_copyright_label.label rename to tests/cluecode/data/copyrights/adobe_flashplugin-adobe_flashplugin.label diff --git a/tests/cluecode/data/copyrights/copyright_license_text_adobeflex2sdk-Adobeflex_sdk b/tests/cluecode/data/copyrights/adobeflex2sdk-Adobeflex_sdk similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_adobeflex2sdk-Adobeflex_sdk rename to tests/cluecode/data/copyrights/adobeflex2sdk-Adobeflex_sdk diff --git 
a/tests/cluecode/data/copyrights/copyright_license_text_afferogplv1-AfferoGPLv b/tests/cluecode/data/copyrights/afferogplv1-AfferoGPLv similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_afferogplv1-AfferoGPLv rename to tests/cluecode/data/copyrights/afferogplv1-AfferoGPLv diff --git a/tests/cluecode/data/copyrights/copyright_license_text_afferogplv2-AfferoGPLv b/tests/cluecode/data/copyrights/afferogplv2-AfferoGPLv similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_afferogplv2-AfferoGPLv rename to tests/cluecode/data/copyrights/afferogplv2-AfferoGPLv diff --git a/tests/cluecode/data/copyrights/copyright_license_text_afferogplv3-AfferoGPLv b/tests/cluecode/data/copyrights/afferogplv3-AfferoGPLv similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_afferogplv3-AfferoGPLv rename to tests/cluecode/data/copyrights/afferogplv3-AfferoGPLv diff --git a/tests/cluecode/data/copyrights/copyright_license_text_afl_v3_0-AFL_v.0 b/tests/cluecode/data/copyrights/afl_v3_0-AFL_v.0 similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_afl_v3_0-AFL_v.0 rename to tests/cluecode/data/copyrights/afl_v3_0-AFL_v.0 diff --git a/tests/cluecode/data/copyrights/copyright_license_text_aladdin_free_public_license-Aladdin Free Public License b/tests/cluecode/data/copyrights/aladdin_free_public_license-Aladdin Free Public License similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_aladdin_free_public_license-Aladdin Free Public License rename to tests/cluecode/data/copyrights/aladdin_free_public_license-Aladdin Free Public License diff --git a/tests/cluecode/data/copyrights/aleal-c.c b/tests/cluecode/data/copyrights/aleal-c.c new file mode 100644 index 00000000000..75d73228561 --- /dev/null +++ b/tests/cluecode/data/copyrights/aleal-c.c @@ -0,0 +1,3 @@ +/** +* copyright : (C) 2006 by aleal +*/ \ No newline at end of file 
diff --git a/tests/cluecode/data/copyrights/copyright_license_text_amazondsb-AmazonDSb b/tests/cluecode/data/copyrights/amazondsb-AmazonDSb similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_amazondsb-AmazonDSb rename to tests/cluecode/data/copyrights/amazondsb-AmazonDSb diff --git a/tests/cluecode/data/copyrights/copyright_license_text_ampasbsd-AMPASBSD b/tests/cluecode/data/copyrights/ampasbsd-AMPASBSD similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_ampasbsd-AMPASBSD rename to tests/cluecode/data/copyrights/ampasbsd-AMPASBSD diff --git a/tests/cluecode/data/copyrights/andre_darcy-c.c b/tests/cluecode/data/copyrights/andre_darcy-c.c new file mode 100644 index 00000000000..383e586c5dd --- /dev/null +++ b/tests/cluecode/data/copyrights/andre_darcy-c.c @@ -0,0 +1,28 @@ +/* + * $Id: vtmodule.c 33125 2009-07-16 20:58:26Z dbochkov $ + * PyGres, version 2.2 A Python interface for PostgreSQL database. Written by + * D'Arcy J.M. Cain, (darcy@druid.net). Based heavily on code written by + * Pascal Andre, andre@chimay.via.ecp.fr. Copyright (c) 1995, Pascal Andre + * (andre@via.ecp.fr). + * + * Permission to use, copy, modify, and distribute this software and its + * documentation for any purpose, without fee, and without a written + * agreement is hereby granted, provided that the above copyright notice and + * this paragraph and the following two paragraphs appear in all copies or in + * any new file that contains a substantial portion of this file. + * + * IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, + * SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE + * AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE + * AUTHOR HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, + * ENHANCEMENTS, OR MODIFICATIONS. + * + * Further modifications copyright 1997, 1998, 1999 by D'Arcy J.M. Cain + * (darcy@druid.net) subject to the same terms and conditions as above. + * + */ \ No newline at end of file diff --git a/tests/cluecode/data/copyrights/copyright_colin_android-bsdiff_c.c b/tests/cluecode/data/copyrights/android_c-c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_colin_android-bsdiff_c.c rename to tests/cluecode/data/copyrights/android_c-c.c diff --git a/tests/cluecode/data/copyrights/copyright_apache2_debian_trailing_name_missed-apache_copyright_label.label b/tests/cluecode/data/copyrights/apache2_debian_trailing_name_missed-apache.label similarity index 100% rename from tests/cluecode/data/copyrights/copyright_apache2_debian_trailing_name_missed-apache_copyright_label.label rename to tests/cluecode/data/copyrights/apache2_debian_trailing_name_missed-apache.label diff --git a/tests/cluecode/data/copyrights/copyright_apache_in_html.html b/tests/cluecode/data/copyrights/apache_in_html.html similarity index 100% rename from tests/cluecode/data/copyrights/copyright_apache_in_html.html rename to tests/cluecode/data/copyrights/apache_in_html.html diff --git a/tests/cluecode/data/copyrights/apache_notice-NOTICE b/tests/cluecode/data/copyrights/apache_notice-NOTICE new file mode 100644 index 00000000000..e820f3230fe --- /dev/null +++ b/tests/cluecode/data/copyrights/apache_notice-NOTICE @@ -0,0 +1,35 @@ + ========================================================================= + == NOTICE file corresponding to section 4(d) of the Apache License, == + == Version 2.0, in this case for the Apache Xalan Java 
distribution. == + ========================================================================= + + Apache Xalan (Xalan serializer) + Copyright 1999-2006 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + + ========================================================================= + + + ========================================================================= + Apache Xerces Java + Copyright 1999-2006 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + + Portions of Apache Xerces Java in xercesImpl.jar and xml-apis.jar + + ========================================================================= + Apache xml-commons xml-apis (redistribution of xml-apis.jar) + + Apache XML Commons + Copyright 2001-2003,2006 The Apache Software Foundation. + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). 
+ + Portions of this software were originally based on the following: + - software copyright (c) 2000 World Wide Web Consortium, http://www.w3.org + diff --git a/tests/cluecode/data/copyrights/copyright_license_text_apachev1_0-Apachev.0 b/tests/cluecode/data/copyrights/apachev1_0-Apachev.0 similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_apachev1_0-Apachev.0 rename to tests/cluecode/data/copyrights/apachev1_0-Apachev.0 diff --git a/tests/cluecode/data/copyrights/copyright_license_text_apachev1_1-Apachev.1 b/tests/cluecode/data/copyrights/apachev1_1-Apachev.1 similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_apachev1_1-Apachev.1 rename to tests/cluecode/data/copyrights/apachev1_1-Apachev.1 diff --git a/tests/cluecode/data/copyrights/copyright_license_text_apachev2_0b-Apachev_b.0b b/tests/cluecode/data/copyrights/apachev2_0b-Apachev_b.0b similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_apachev2_0b-Apachev_b.0b rename to tests/cluecode/data/copyrights/apachev2_0b-Apachev_b.0b diff --git a/tests/cluecode/data/copyrights/copyright_license_text_apple_common_documentation_license_v1_0-Apple Common Documentation License v.0 b/tests/cluecode/data/copyrights/apple_common_documentation_license_v1_0-Apple Common Documentation License v.0 similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_apple_common_documentation_license_v1_0-Apple Common Documentation License v.0 rename to tests/cluecode/data/copyrights/apple_common_documentation_license_v1_0-Apple Common Documentation License v.0 diff --git a/tests/cluecode/data/copyrights/copyright_license_text_apple_public_source_license_v1_0-Apple Public Source License v.0 b/tests/cluecode/data/copyrights/apple_public_source_license_v1_0-Apple Public Source License v.0 similarity index 100% rename from 
tests/cluecode/data/copyrights/copyright_license_text_apple_public_source_license_v1_0-Apple Public Source License v.0 rename to tests/cluecode/data/copyrights/apple_public_source_license_v1_0-Apple Public Source License v.0 diff --git a/tests/cluecode/data/copyrights/copyright_license_text_apple_public_source_license_v1_1-Apple Public Source License v.1 b/tests/cluecode/data/copyrights/apple_public_source_license_v1_1-Apple Public Source License v.1 similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_apple_public_source_license_v1_1-Apple Public Source License v.1 rename to tests/cluecode/data/copyrights/apple_public_source_license_v1_1-Apple Public Source License v.1 diff --git a/tests/cluecode/data/copyrights/copyright_license_text_apple_public_source_license_v1_2-Apple Public Source License v.2 b/tests/cluecode/data/copyrights/apple_public_source_license_v1_2-Apple Public Source License v.2 similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_apple_public_source_license_v1_2-Apple Public Source License v.2 rename to tests/cluecode/data/copyrights/apple_public_source_license_v1_2-Apple Public Source License v.2 diff --git a/tests/cluecode/data/copyrights/copyright_license_text_apslv2_0-APSLv.0 b/tests/cluecode/data/copyrights/apslv2_0-APSLv.0 similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_apslv2_0-APSLv.0 rename to tests/cluecode/data/copyrights/apslv2_0-APSLv.0 diff --git a/tests/cluecode/data/copyrights/aptitude-aptitude.label b/tests/cluecode/data/copyrights/aptitude-aptitude.label new file mode 100644 index 00000000000..228714bc833 --- /dev/null +++ b/tests/cluecode/data/copyrights/aptitude-aptitude.label @@ -0,0 +1,6 @@ +Copyright 1999-2005 Daniel Burrows + +The upstream web site for aptitude is +http://people.debian.org/~dburrows/aptitude . 
+ +License: GPL (/usr/share/common-licenses/GPL) diff --git a/tests/cluecode/data/copyrights/copyright_license_text_artistic_v1_0-Artistic v.0 b/tests/cluecode/data/copyrights/artistic_v1_0-Artistic v.0 similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_artistic_v1_0-Artistic v.0 rename to tests/cluecode/data/copyrights/artistic_v1_0-Artistic v.0 diff --git a/tests/cluecode/data/copyrights/copyright_license_text_artistic_v1_0_short-Artistic v_ short.0 short b/tests/cluecode/data/copyrights/artistic_v1_0_short-Artistic v_ short.0 short similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_artistic_v1_0_short-Artistic v_ short.0 short rename to tests/cluecode/data/copyrights/artistic_v1_0_short-Artistic v_ short.0 short diff --git a/tests/cluecode/data/copyrights/copyright_license_text_artistic_v2_0beta4-Artistic v_beta.0beta4 b/tests/cluecode/data/copyrights/artistic_v2_0beta4-Artistic v_beta.0beta4 similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_artistic_v2_0beta4-Artistic v_beta.0beta4 rename to tests/cluecode/data/copyrights/artistic_v2_0beta4-Artistic v_beta.0beta4 diff --git a/tests/cluecode/data/copyrights/copyright_license_text_artisticv2_0-Artisticv.0 b/tests/cluecode/data/copyrights/artisticv2_0-Artisticv.0 similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_artisticv2_0-Artisticv.0 rename to tests/cluecode/data/copyrights/artisticv2_0-Artisticv.0 diff --git a/tests/cluecode/data/copyrights/atheros_spanning_lines-py.py b/tests/cluecode/data/copyrights/atheros_spanning_lines-py.py new file mode 100644 index 00000000000..1298b24d32b --- /dev/null +++ b/tests/cluecode/data/copyrights/atheros_spanning_lines-py.py @@ -0,0 +1,16 @@ +# /***************************************************************************\ +# ** Copyright © 2000 Atheros Communications, Inc., All Rights Reserved ** +# ** Copyright © 2001 Atheros 
Communications, Inc., All Rights Reserved ** +# ** ** +# ** Atheros and the Atheros logo and design are trademarks of Atheros ** +# ** Communications, Inc. ** +# ** ** +# ** Sample Code from Microsoft Windows 2000 Driver Development Kit is ** +# ** used under license from Microsoft Corporation and was developed for ** +# ** Microsoft by Intel Corp., Hillsboro, Oregon: Copyright (c) 1994-1997 ** +# ** by Intel Corporation. ** +# ** ** +# ** $Id$ ** +# \**************************************************************************/ +# +# #ifndef _PCI_H diff --git a/tests/cluecode/data/copyrights/att_in_c-9_c.c b/tests/cluecode/data/copyrights/att_in_c-9_c.c new file mode 100644 index 00000000000..f475109bc3d --- /dev/null +++ b/tests/cluecode/data/copyrights/att_in_c-9_c.c @@ -0,0 +1,18 @@ +/**************************************************************** + * + * The author of this software is David M. Gay. + * + * Copyright (c) 1991 by AT&T. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose without fee is hereby granted, provided that this entire notice + * is included in all copies of any software which is or includes a copy + * or modification of this software and in all copies of the supporting + * documentation for such software. + * + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED + * WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR AT&T MAKES ANY + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY + * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 
+ * + ***************************************************************/ \ No newline at end of file diff --git a/tests/cluecode/data/copyrights/copyright_license_text_attributionassurancelicense-AttributionAssuranceLicense b/tests/cluecode/data/copyrights/attributionassurancelicense-AttributionAssuranceLicense similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_attributionassurancelicense-AttributionAssuranceLicense rename to tests/cluecode/data/copyrights/attributionassurancelicense-AttributionAssuranceLicense diff --git a/tests/cluecode/data/copyrights/audio_c-c.c b/tests/cluecode/data/copyrights/audio_c-c.c new file mode 100644 index 00000000000..014fbcd080d --- /dev/null +++ b/tests/cluecode/data/copyrights/audio_c-c.c @@ -0,0 +1,5 @@ +/* + ITU-T G.723 Speech Coder ANSI-C Source Code Version 5.00 + copyright (c) 1995, AudioCodes, DSP Group, France Telecom, + Universite de Sherbrooke. All rights reserved. +*/ diff --git a/tests/cluecode/data/copyrights/babkin_txt.txt b/tests/cluecode/data/copyrights/babkin_txt.txt new file mode 100644 index 00000000000..b75b9d7dcd2 --- /dev/null +++ b/tests/cluecode/data/copyrights/babkin_txt.txt @@ -0,0 +1,5 @@ +Copyright (c) North +Copyright (c) South +Copyright (c) 2134 abc +Copyright (c) 2001 by the TTF2PT1 project +Copyright (c) 2001 by Sergey Babkin diff --git a/tests/cluecode/data/copyrights/copyright_in_bash-shell_sh.sh b/tests/cluecode/data/copyrights/bash-shell_sh.sh similarity index 100% rename from tests/cluecode/data/copyrights/copyright_in_bash-shell_sh.sh rename to tests/cluecode/data/copyrights/bash-shell_sh.sh diff --git a/tests/cluecode/data/copyrights/copyright_license_text_bigelow_holmes-Bigelow&Holmes b/tests/cluecode/data/copyrights/bigelow_holmes-Bigelow&Holmes similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_bigelow_holmes-Bigelow&Holmes rename to tests/cluecode/data/copyrights/bigelow_holmes-Bigelow&Holmes diff --git 
a/tests/cluecode/data/copyrights/copyright_in_binary_lib-php_embed_lib.lib b/tests/cluecode/data/copyrights/binary_lib-php_embed_lib.lib similarity index 100% rename from tests/cluecode/data/copyrights/copyright_in_binary_lib-php_embed_lib.lib rename to tests/cluecode/data/copyrights/binary_lib-php_embed_lib.lib diff --git a/tests/cluecode/data/copyrights/copyright_license_text_bitstream-Bi_ream b/tests/cluecode/data/copyrights/bitstream-Bi_ream similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_bitstream-Bi_ream rename to tests/cluecode/data/copyrights/bitstream-Bi_ream diff --git a/tests/cluecode/data/copyrights/blender_debian-blender.copyright b/tests/cluecode/data/copyrights/blender_debian-blender.copyright new file mode 100644 index 00000000000..9a3f99b71df --- /dev/null +++ b/tests/cluecode/data/copyrights/blender_debian-blender.copyright @@ -0,0 +1,57 @@ +Format-Specification: http://wiki.debian.org/Proposals/CopyrightFormat +Upstream-Author: Blender Foundation +Debianized-By: Masayuki Hatta (mhatta) +Debianized-Date: Mon, 3 May 2004 15:16:26 +0900 +Original-Source-Location: http://download.blender.org/source/ + + +Files: * +Copyright: © 2002-2008 Blender Foundation +License: GPL-2+ + | This program is free software; you can redistribute it and/or + | modify it under the terms of the GNU General Public License + | as published by the Free Software Foundation; either version 2 + | of the License, or (at your option) any later version. + | + | This program is distributed in the hope that it will be useful, + | but WITHOUT ANY WARRANTY; without even the implied warranty of + | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + | GNU General Public License for more details. + | + | You should have received a copy of the GNU General Public License along + | with this program; if not, write to the Free Software Foundation, Inc., + | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ | + | + | On Debian systems, the complete text of the GNU General Public License + | version 2 can be found in “/usr/share/common-licenses/GPL-2”. + + +Files: debian/* +Copyright: © 2004-2005 Masayuki Hatta + © 2005-2007 Florian Ernst + © 2007-2008 Cyril Brulebois +License: GPL-2+ + | This program is free software; you can redistribute it and/or + | modify it under the terms of the GNU General Public License + | as published by the Free Software Foundation; either version 2 + | of the License, or (at your option) any later version. + | + | This program is distributed in the hope that it will be useful, + | but WITHOUT ANY WARRANTY; without even the implied warranty of + | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + | GNU General Public License for more details. + | + | You should have received a copy of the GNU General Public License along + | with this program; if not, write to the Free Software Foundation, Inc., + | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + | + | + | On Debian systems, the complete text of the GNU General Public License + | version 2 can be found in “/usr/share/common-licenses/GPL-2”. + + + +Files: extern/{bFTGL,ffmpeg,libmp3lame,libopenjpeg,xvidcore,x264} +Removed since they are embedded code copies of software available in +main or software not acceptable in main. 
diff --git a/tests/cluecode/data/copyrights/copyright_blue_sky_dash_in_name-c.c b/tests/cluecode/data/copyrights/blue_sky_dash_in_name-c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_blue_sky_dash_in_name-c.c rename to tests/cluecode/data/copyrights/blue_sky_dash_in_name-c.c diff --git a/tests/cluecode/data/copyrights/copyright_bouncy_license-LICENSE b/tests/cluecode/data/copyrights/bouncy_license-LICENSE similarity index 100% rename from tests/cluecode/data/copyrights/copyright_bouncy_license-LICENSE rename to tests/cluecode/data/copyrights/bouncy_license-LICENSE diff --git a/tests/cluecode/data/copyrights/copyright_bouncy_notice-9_NOTICE b/tests/cluecode/data/copyrights/bouncy_notice-9_NOTICE similarity index 100% rename from tests/cluecode/data/copyrights/copyright_bouncy_notice-9_NOTICE rename to tests/cluecode/data/copyrights/bouncy_notice-9_NOTICE diff --git a/tests/cluecode/data/copyrights/copyright_license_text_bsdnrl-BSDNRL b/tests/cluecode/data/copyrights/bsdnrl-BSDNRL similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_bsdnrl-BSDNRL rename to tests/cluecode/data/copyrights/bsdnrl-BSDNRL diff --git a/tests/cluecode/data/copyrights/copyright_btt_plot1_py-btt_plot_py.py b/tests/cluecode/data/copyrights/btt_plot1_py-btt_plot_py.py similarity index 100% rename from tests/cluecode/data/copyrights/copyright_btt_plot1_py-btt_plot_py.py rename to tests/cluecode/data/copyrights/btt_plot1_py-btt_plot_py.py diff --git a/tests/cluecode/data/copyrights/copyright_in_c-c.c b/tests/cluecode/data/copyrights/c-c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_in_c-c.c rename to tests/cluecode/data/copyrights/c-c.c diff --git a/tests/cluecode/data/copyrights/copyright_in_c_include-h.h b/tests/cluecode/data/copyrights/c_include-h.h similarity index 100% rename from tests/cluecode/data/copyrights/copyright_in_c_include-h.h rename to tests/cluecode/data/copyrights/c_include-h.h diff 
--git a/tests/cluecode/data/copyrights/copyright_copyright_camelcase_br_diagnostics_h-br_diagnostics_h.h b/tests/cluecode/data/copyrights/camelcase_br_diagnostics_h-br_diagnostics_h.h similarity index 100% rename from tests/cluecode/data/copyrights/copyright_copyright_camelcase_br_diagnostics_h-br_diagnostics_h.h rename to tests/cluecode/data/copyrights/camelcase_br_diagnostics_h-br_diagnostics_h.h diff --git a/tests/cluecode/data/copyrights/copyright_camelcase_bug_br_fcc_thread_psipstack_c-br_fcc_thread_psipstack_c.c b/tests/cluecode/data/copyrights/camelcase_bug_br_fcc_thread_psipstack_c-br_fcc_thread_psipstack_c.c similarity index 100% rename from tests/cluecode/data/copyrights/copyright_camelcase_bug_br_fcc_thread_psipstack_c-br_fcc_thread_psipstack_c.c rename to tests/cluecode/data/copyrights/camelcase_bug_br_fcc_thread_psipstack_c-br_fcc_thread_psipstack_c.c diff --git a/tests/cluecode/data/copyrights/copyright_ccube_txt.txt b/tests/cluecode/data/copyrights/ccube_txt.txt similarity index 100% rename from tests/cluecode/data/copyrights/copyright_ccube_txt.txt rename to tests/cluecode/data/copyrights/ccube_txt.txt diff --git a/tests/cluecode/data/copyrights/copyright_cedrik_java-java.java b/tests/cluecode/data/copyrights/cedrik_java-java.java similarity index 100% rename from tests/cluecode/data/copyrights/copyright_cedrik_java-java.java rename to tests/cluecode/data/copyrights/cedrik_java-java.java diff --git a/tests/cluecode/data/copyrights/copyright_cern-TestMatrix_D_java.java b/tests/cluecode/data/copyrights/cern-TestMatrix_D_java.java similarity index 100% rename from tests/cluecode/data/copyrights/copyright_cern-TestMatrix_D_java.java rename to tests/cluecode/data/copyrights/cern-TestMatrix_D_java.java diff --git a/tests/cluecode/data/copyrights/copyright_cern_matrix2d_java-TestMatrix_D_java.java b/tests/cluecode/data/copyrights/cern_matrix2d_java-TestMatrix_D_java.java similarity index 100% rename from 
tests/cluecode/data/copyrights/copyright_cern_matrix2d_java-TestMatrix_D_java.java rename to tests/cluecode/data/copyrights/cern_matrix2d_java-TestMatrix_D_java.java diff --git a/tests/cluecode/data/copyrights/copyright_chameleon_assembly-9_9_setjmp_S.S b/tests/cluecode/data/copyrights/chameleon_assembly-9_9_setjmp_S.S similarity index 100% rename from tests/cluecode/data/copyrights/copyright_chameleon_assembly-9_9_setjmp_S.S rename to tests/cluecode/data/copyrights/chameleon_assembly-9_9_setjmp_S.S diff --git a/tests/cluecode/data/copyrights/copyright_license_text_cnri-CNRI b/tests/cluecode/data/copyrights/cnri-CNRI similarity index 100% rename from tests/cluecode/data/copyrights/copyright_license_text_cnri-CNRI rename to tests/cluecode/data/copyrights/cnri-CNRI diff --git a/tests/cluecode/data/copyrights/copyright_co_cust-copyright_java.java b/tests/cluecode/data/copyrights/co_cust-java.java similarity index 100% rename from tests/cluecode/data/copyrights/copyright_co_cust-copyright_java.java rename to tests/cluecode/data/copyrights/co_cust-java.java diff --git a/tests/cluecode/data/copyrights/colin_android-bsdiff_c.c b/tests/cluecode/data/copyrights/colin_android-bsdiff_c.c new file mode 100644 index 00000000000..b6d342b7a8e --- /dev/null +++ b/tests/cluecode/data/copyrights/colin_android-bsdiff_c.c @@ -0,0 +1,410 @@ +/* + * Copyright (C) 2009 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Most of this code comes from bsdiff.c from the bsdiff-4.3 + * distribution, which is: + */ + +/*- + * Copyright 2003-2005 Colin Percival + * All rights reserved + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted providing that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#define MIN(x,y) (((x)<(y)) ? 
(x) : (y)) + +static void split(off_t *I,off_t *V,off_t start,off_t len,off_t h) +{ + off_t i,j,k,x,tmp,jj,kk; + + if(len<16) { + for(k=start;kstart) split(I,V,start,jj-start,h); + + for(i=0;ikk) split(I,V,kk,start+len-kk,h); +} + +static void qsufsort(off_t *I,off_t *V,u_char *old,off_t oldsize) +{ + off_t buckets[256]; + off_t i,h,len; + + for(i=0;i<256;i++) buckets[i]=0; + for(i=0;i0;i--) buckets[i]=buckets[i-1]; + buckets[0]=0; + + for(i=0;iy) { + *pos=I[st]; + return x; + } else { + *pos=I[en]; + return y; + } + }; + + x=st+(en-st)/2; + if(memcmp(old+I[x],new,MIN(oldsize-I[x],newsize))<0) { + return search(I,old,oldsize,new,newsize,x,en,pos); + } else { + return search(I,old,oldsize,new,newsize,st,x,pos); + }; +} + +static void offtout(off_t x,u_char *buf) +{ + off_t y; + + if(x<0) y=-x; else y=x; + + buf[0]=y%256;y-=buf[0]; + y=y/256;buf[1]=y%256;y-=buf[1]; + y=y/256;buf[2]=y%256;y-=buf[2]; + y=y/256;buf[3]=y%256;y-=buf[3]; + y=y/256;buf[4]=y%256;y-=buf[4]; + y=y/256;buf[5]=y%256;y-=buf[5]; + y=y/256;buf[6]=y%256;y-=buf[6]; + y=y/256;buf[7]=y%256; + + if(x<0) buf[7]|=0x80; +} + +// This is main() from bsdiff.c, with the following changes: +// +// - old, oldsize, new, newsize are arguments; we don't load this +// data from files. old and new are owned by the caller; we +// don't free them at the end. +// +// - the "I" block of memory is owned by the caller, who passes a +// pointer to *I, which can be NULL. This way if we call +// bsdiff() multiple times with the same 'old' data, we only do +// the qsufsort() step the first time. 
+// +int bsdiff(u_char* old, off_t oldsize, off_t** IP, u_char* new, off_t newsize, + const char* patch_filename) +{ + int fd; + off_t *I; + off_t scan,pos,len; + off_t lastscan,lastpos,lastoffset; + off_t oldscore,scsc; + off_t s,Sf,lenf,Sb,lenb; + off_t overlap,Ss,lens; + off_t i; + off_t dblen,eblen; + u_char *db,*eb; + u_char buf[8]; + u_char header[32]; + FILE * pf; + BZFILE * pfbz2; + int bz2err; + + if (*IP == NULL) { + off_t* V; + *IP = malloc((oldsize+1) * sizeof(off_t)); + V = malloc((oldsize+1) * sizeof(off_t)); + qsufsort(*IP, V, old, oldsize); + free(V); + } + I = *IP; + + if(((db=malloc(newsize+1))==NULL) || + ((eb=malloc(newsize+1))==NULL)) err(1,NULL); + dblen=0; + eblen=0; + + /* Create the patch file */ + if ((pf = fopen(patch_filename, "w")) == NULL) + err(1, "%s", patch_filename); + + /* Header is + 0 8 "BSDIFF40" + 8 8 length of bzip2ed ctrl block + 16 8 length of bzip2ed diff block + 24 8 length of new file */ + /* File is + 0 32 Header + 32 ?? Bzip2ed ctrl block + ?? ?? Bzip2ed diff block + ?? ?? 
Bzip2ed extra block */ + memcpy(header,"BSDIFF40",8); + offtout(0, header + 8); + offtout(0, header + 16); + offtout(newsize, header + 24); + if (fwrite(header, 32, 1, pf) != 1) + err(1, "fwrite(%s)", patch_filename); + + /* Compute the differences, writing ctrl as we go */ + if ((pfbz2 = BZ2_bzWriteOpen(&bz2err, pf, 9, 0, 0)) == NULL) + errx(1, "BZ2_bzWriteOpen, bz2err = %d", bz2err); + scan=0;len=0; + lastscan=0;lastpos=0;lastoffset=0; + while(scanoldscore+8)) break; + + if((scan+lastoffsetSf*2-lenf) { Sf=s; lenf=i; }; + }; + + lenb=0; + if(scan=lastscan+i)&&(pos>=i);i++) { + if(old[pos-i]==new[scan-i]) s++; + if(s*2-i>Sb*2-lenb) { Sb=s; lenb=i; }; + }; + }; + + if(lastscan+lenf>scan-lenb) { + overlap=(lastscan+lenf)-(scan-lenb); + s=0;Ss=0;lens=0; + for(i=0;iSs) { Ss=s; lens=i+1; }; + }; + + lenf+=lens-overlap; + lenb-=lens; + }; + + for(i=0;i', + # 'Nathan Mittler ', ] check_detection(expected, test_file, what='authors') diff --git a/tests/cluecode/test_copyrights.py b/tests/cluecode/test_copyrights.py index 26a0dbc9bc5..9f7bdca7700 100644 --- a/tests/cluecode/test_copyrights.py +++ b/tests/cluecode/test_copyrights.py @@ -118,7 +118,7 @@ def test_is_candidate_should_not_select_line_with_only_two_digit_numbers(self): line = 'template struct v_iter > { typedef typename V::item10 type; typedef v_iter > next; };' assert not cluecode.copyrights.is_candidate(line) - def test_is_candidate_should_select_line_with_copyright_sign(self): + def test_is_candidate_should_select_line_with_sign(self): line = 'template struct v_iter (c) { typedef typename V::item10 type; typedef v_iter > next; };' assert cluecode.copyrights.is_candidate(line) @@ -149,8 +149,8 @@ def test_is_candidate_should_select_line_with_iso_date_year(self): class TestCopyrightDetector(FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') - def test_copyright_detect(self): - location = self.get_test_loc('copyrights/copyright_essential_smoke-ibm_c.c') + def test_detect(self): + 
location = self.get_test_loc('copyrights/essential_smoke-ibm_c.c') expected = [ 'Copyright IBM and others (c) 2008', 'Copyright Eclipse, IBM and others (c) 2008' @@ -169,8 +169,8 @@ def test_company_name_in_java(self): ] check_detection(expected, test_file) - def test_copyright_03e16f6c_0(self): - test_file = self.get_test_loc('copyrights/copyright_03e16f6c_0-e_f_c.0') + def test_03e16f6c_0(self): + test_file = self.get_test_loc('copyrights/03e16f6c_0-e_f_c.0') expected = [ 'Copyright (c) 1997 Microsoft Corp.', 'Copyright (c) 1997 Microsoft Corp.', @@ -180,87 +180,87 @@ def test_copyright_03e16f6c_0(self): expected_in_results=True, results_in_expected=False) - def test_copyright_3a3b02ce_0(self): + def test_3a3b02ce_0(self): # this is a certificate and the actual copyright holder is not clear: # could be either Wisekey or OISTE Foundation. - test_file = self.get_test_loc('copyrights/copyright_3a3b02ce_0-a_b_ce.0') + test_file = self.get_test_loc('copyrights/3a3b02ce_0-a_b_ce.0') expected = [ 'Copyright (c) 2005, OU OISTE Foundation Endorsed, CN OISTE WISeKey Global Root', 'Copyright (c) 2005, OU OISTE Foundation Endorsed, CN OISTE WISeKey Global Root', ] check_detection(expected, test_file) - def test_copyright_ABC_cpp(self): - test_file = self.get_test_loc('copyrights/copyright_ABC_cpp-Case_cpp.cpp') + def test_ABC_cpp(self): + test_file = self.get_test_loc('copyrights/ABC_cpp-Case_cpp.cpp') expected = [ 'Copyright (c) ABC Company', ] check_detection(expected, test_file) - def test_copyright_ABC_file_cpp(self): - test_file = self.get_test_loc('copyrights/copyright_ABC_file_cpp-File_cpp.cpp') + def test_ABC_file_cpp(self): + test_file = self.get_test_loc('copyrights/ABC_file_cpp-File_cpp.cpp') expected = [ 'Copyright (c) ABC Company', ] check_detection(expected, test_file) - def test_copyright_false_positive_in_c(self): - test_file = self.get_test_loc('copyrights/copyright_false_positive_in_c-false_positives_c.c') + def test_false_positive_in_c(self): + test_file = 
self.get_test_loc('copyrights/false_positive_in_c-false_positives_c.c') expected = [] check_detection(expected, test_file) - def test_copyright_false_positive_in_js(self): - test_file = self.get_test_loc('copyrights/copyright_false_positive_in_js-editor_beta_de_js.js') + def test_false_positive_in_js(self): + test_file = self.get_test_loc('copyrights/false_positive_in_js-editor_beta_de_js.js') expected = [] check_detection(expected, test_file) - def test_copyright_false_positive_in_license(self): - test_file = self.get_test_loc('copyrights/copyright_false_positive_in_license-LICENSE') + def test_false_positive_in_license(self): + test_file = self.get_test_loc('copyrights/false_positive_in_license-LICENSE') expected = [] check_detection(expected, test_file) - def test_copyright_heunrich_c(self): - test_file = self.get_test_loc('copyrights/copyright_heunrich_c-c.c') + def test_heunrich_c(self): + test_file = self.get_test_loc('copyrights/heunrich_c-c.c') expected = [ 'Copyright (c) 2000 HEUNRICH HERTZ INSTITUTE', ] check_detection(expected, test_file) - def test_copyright_isc(self): - test_file = self.get_test_loc('copyrights/copyright_isc-c.c') + def test_isc(self): + test_file = self.get_test_loc('copyrights/isc-c.c') expected = [ 'Copyright (c) 1998-2000 The Internet Software Consortium.', ] check_detection(expected, test_file) - def test_copyright_no_copyright_in_class_file_1(self): - test_file = self.get_test_loc('copyrights/copyright_no_copyright_in_class_file_1-PersistentArrayHolder_class.class') + def test_no_class_file_1(self): + test_file = self.get_test_loc('copyrights/no_class_file_1-PersistentArrayHolder_class.class') expected = [] check_detection(expected, test_file) - def test_copyright_sample_py(self): - test_file = self.get_test_loc('copyrights/copyright_sample_py-py.py') + def test_sample_py(self): + test_file = self.get_test_loc('copyrights/sample_py-py.py') expected = [ 'COPYRIGHT 2006 ABC ABC', ] check_detection(expected, test_file) - def 
test_copyright_abc(self): - test_file = self.get_test_loc('copyrights/copyright_abc') + def test_abc(self): + test_file = self.get_test_loc('copyrights/abc') expected = [ 'Copyright (c) 2006 abc.org', ] check_detection(expected, test_file) - def test_copyright_abc_loss_of_holder_c(self): - test_file = self.get_test_loc('copyrights/copyright_abc_loss_of_holder_c-c.c') + def test_abc_loss_of_holder_c(self): + test_file = self.get_test_loc('copyrights/abc_loss_of_holder_c-c.c') expected = [ 'copyright abc 2001', ] check_detection(expected, test_file) - def test_copyright_abiword_common_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_abiword_common_copyright-abiword_common_copyright.copyright') + def test_abiword_common(self): + test_file = self.get_test_loc('copyrights/abiword_common.copyright') expected = [ 'Copyright (c) 1998- AbiSource, Inc. & Co.', 'Copyright (c) 2009 Masayuki Hatta', @@ -268,45 +268,45 @@ def test_copyright_abiword_common_copyright(self): ] check_detection(expected, test_file) - def test_copyright_acme_c(self): - test_file = self.get_test_loc('copyrights/copyright_acme_c-c.c') + def test_acme_c(self): + test_file = self.get_test_loc('copyrights/acme_c-c.c') expected = [ 'Copyright (c) 2000 ACME, Inc.', ] check_detection(expected, test_file) - def test_copyright_activefieldattribute_cs(self): - test_file = self.get_test_loc('copyrights/copyright_activefieldattribute_cs-ActiveFieldAttribute_cs.cs') + def test_activefieldattribute_cs(self): + test_file = self.get_test_loc('copyrights/activefieldattribute_cs-ActiveFieldAttribute_cs.cs') expected = [ 'Web Applications Copyright 2009 - Thomas Hansen thomas@ra-ajax.org.', ] check_detection(expected, test_file) - def test_copyright_addr_c(self): - test_file = self.get_test_loc('copyrights/copyright_addr_c-addr_c.c') + def test_addr_c(self): + test_file = self.get_test_loc('copyrights/addr_c-addr_c.c') expected = [ 'Copyright 1999 Cornell University.', 'Copyright 2000 Jon Doe.', ] 
check_detection(expected, test_file) - def test_copyright_apostrophe_in_name(self): - test_file = self.get_test_loc('copyrights/copyright_with_apos.txt') + def test_apostrophe_in_name(self): + test_file = self.get_test_loc('copyrights/with_apos.txt') expected = [ "Copyright Marco d'Itri ", "Copyright Marco d'Itri", ] check_detection(expected, test_file) - def test_copyright_adler_inflate_c(self): - test_file = self.get_test_loc('copyrights/copyright_adler_inflate_c-inflate_c.c') + def test_adler_inflate_c(self): + test_file = self.get_test_loc('copyrights/adler_inflate_c-inflate_c.c') expected = [ 'Not copyrighted 1992 by Mark Adler', ] check_detection(expected, test_file) - def test_copyright_adobe_flashplugin_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_adobe_flashplugin_copyright_label-adobe_flashplugin_copyright_label.label') + def test_adobe_flashplugin(self): + test_file = self.get_test_loc('copyrights/adobe_flashplugin-adobe_flashplugin.label') expected = [ 'Copyright (c) 1996 - 2008. Adobe Systems Incorporated', '(c) 2001-2009, Takuo KITAME, Bart Martens, and Canonical, LTD', @@ -315,31 +315,31 @@ def test_copyright_adobe_flashplugin_copyright_label(self): expected_in_results=False, results_in_expected=True) - def test_copyright_aleal(self): - test_file = self.get_test_loc('copyrights/copyright_aleal-c.c') + def test_aleal(self): + test_file = self.get_test_loc('copyrights/aleal-c.c') expected = [ 'copyright (c) 2006 by aleal', ] check_detection(expected, test_file) - def test_copyright_andre_darcy(self): - test_file = self.get_test_loc('copyrights/copyright_andre_darcy-c.c') + def test_andre_darcy(self): + test_file = self.get_test_loc('copyrights/andre_darcy-c.c') expected = [ 'Copyright (c) 1995, Pascal Andre (andre@via.ecp.fr).', "copyright 1997, 1998, 1999 by D'Arcy J.M. 
Cain (darcy@druid.net)", ] check_detection(expected, test_file) - def test_copyright_android_c(self): - test_file = self.get_test_loc('copyrights/copyright_android_c-c.c') + def test_android_c(self): + test_file = self.get_test_loc('copyrights/android_c-c.c') expected = [ 'Copyright (c) 2009 The Android Open Source Project', 'Copyright 2003-2005 Colin Percival', ] check_detection(expected, test_file) - def test_copyright_apache2_debian_trailing_name_missed(self): - test_file = self.get_test_loc('copyrights/copyright_apache2_debian_trailing_name_missed-apache_copyright_label.label') + def test_apache2_debian_trailing_name_missed(self): + test_file = self.get_test_loc('copyrights/apache2_debian_trailing_name_missed-apache.label') expected = [ 'copyright Steinar H. Gunderson and Knut Auvor Grythe ', 'Copyright (c) 1996-1997 Cisco Systems, Inc.', @@ -373,8 +373,8 @@ def test_copyright_apache2_debian_trailing_name_missed(self): ] check_detection(expected, test_file) - def test_copyright_apache_notice(self): - test_file = self.get_test_loc('copyrights/copyright_apache_notice-NOTICE') + def test_apache_notice(self): + test_file = self.get_test_loc('copyrights/apache_notice-NOTICE') expected = [ 'Copyright 1999-2006 The Apache Software Foundation', 'Copyright 1999-2006 The Apache Software Foundation', @@ -383,15 +383,15 @@ def test_copyright_apache_notice(self): ] check_detection(expected, test_file) - def test_copyright_aptitude_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_aptitude_copyright_label-aptitude_copyright_label.label') + def test_aptitude(self): + test_file = self.get_test_loc('copyrights/aptitude-aptitude.label') expected = [ 'Copyright 1999-2005 Daniel Burrows ', ] check_detection(expected, test_file) - def test_copyright_atheros_spanning_lines(self): - test_file = self.get_test_loc('copyrights/copyright_atheros_spanning_lines-py.py') + def test_atheros_spanning_lines(self): + test_file = 
self.get_test_loc('copyrights/atheros_spanning_lines-py.py') expected = [ 'Copyright (c) 2000 Atheros Communications, Inc.', 'Copyright (c) 2001 Atheros Communications, Inc.', @@ -399,22 +399,22 @@ def test_copyright_atheros_spanning_lines(self): ] check_detection(expected, test_file) - def test_copyright_att_in_c(self): - test_file = self.get_test_loc('copyrights/copyright_att_in_c-9_c.c') + def test_att_in_c(self): + test_file = self.get_test_loc('copyrights/att_in_c-9_c.c') expected = [ 'Copyright (c) 1991 by AT&T.', ] check_detection(expected, test_file) - def test_copyright_audio_c(self): - test_file = self.get_test_loc('copyrights/copyright_audio_c-c.c') + def test_audio_c(self): + test_file = self.get_test_loc('copyrights/audio_c-c.c') expected = [ 'copyright (c) 1995, AudioCodes, DSP Group, France Telecom, Universite de Sherbrooke.', ] check_detection(expected, test_file) - def test_copyright_babkin_txt(self): - test_file = self.get_test_loc('copyrights/copyright_babkin_txt.txt') + def test_babkin_txt(self): + test_file = self.get_test_loc('copyrights/babkin_txt.txt') expected = [ 'Copyright (c) North', 'Copyright (c) South', @@ -423,8 +423,8 @@ def test_copyright_babkin_txt(self): ] check_detection(expected, test_file) - def test_copyright_blender_debian(self): - test_file = self.get_test_loc('copyrights/copyright_blender_debian-blender_copyright.copyright') + def test_blender_debian(self): + test_file = self.get_test_loc('copyrights/blender_debian-blender.copyright') expected = [ 'Copyright (c) 2002-2008 Blender Foundation', 'Copyright (c) 2004-2005 Masayuki Hatta ', @@ -433,8 +433,8 @@ def test_copyright_blender_debian(self): ] check_detection(expected, test_file) - def test_copyright_blue_sky_dash_in_name(self): - test_file = self.get_test_loc('copyrights/copyright_blue_sky_dash_in_name-c.c') + def test_blue_sky_dash_in_name(self): + test_file = self.get_test_loc('copyrights/blue_sky_dash_in_name-c.c') expected = [ 'Copyright (c) 1995, 1996 - Blue Sky 
Software Corp. -', ] @@ -442,43 +442,43 @@ def test_copyright_blue_sky_dash_in_name(self): expected_in_results=False, results_in_expected=True) - def test_copyright_bouncy_license(self): - test_file = self.get_test_loc('copyrights/copyright_bouncy_license-LICENSE') + def test_bouncy_license(self): + test_file = self.get_test_loc('copyrights/bouncy_license-LICENSE') expected = [ 'Copyright (c) 2000-2005 The Legion Of The Bouncy Castle (http://www.bouncycastle.org)', ] check_detection(expected, test_file) - def test_copyright_bouncy_notice(self): - test_file = self.get_test_loc('copyrights/copyright_bouncy_notice-9_NOTICE') + def test_bouncy_notice(self): + test_file = self.get_test_loc('copyrights/bouncy_notice-9_NOTICE') expected = [ 'Copyright (c) 2000-2005 The Legion Of The Bouncy Castle (http://www.bouncycastle.org)', ] check_detection(expected, test_file) - def test_copyright_btt_plot1_py(self): - test_file = self.get_test_loc('copyrights/copyright_btt_plot1_py-btt_plot_py.py') + def test_btt_plot1_py(self): + test_file = self.get_test_loc('copyrights/btt_plot1_py-btt_plot_py.py') expected = [ '(c) Copyright 2009 Hewlett-Packard Development Company, L.P.', ] check_detection(expected, test_file) - def test_copyright_camelcase_bug_br_fcc_thread_psipstack_c(self): - test_file = self.get_test_loc('copyrights/copyright_camelcase_bug_br_fcc_thread_psipstack_c-br_fcc_thread_psipstack_c.c') + def test_camelcase_bug_br_fcc_thread_psipstack_c(self): + test_file = self.get_test_loc('copyrights/camelcase_bug_br_fcc_thread_psipstack_c-br_fcc_thread_psipstack_c.c') expected = [ 'Copyright 2010-2011 by BitRouter', ] check_detection(expected, test_file) - def test_copyright_ccube_txt(self): - test_file = self.get_test_loc('copyrights/copyright_ccube_txt.txt') + def test_ccube_txt(self): + test_file = self.get_test_loc('copyrights/ccube_txt.txt') expected = [ 'Copyright (c) 2001 C-Cube Microsystems.', ] check_detection(expected, test_file) - def 
test_copyright_cedrik_java(self): - test_file = self.get_test_loc('copyrights/copyright_cedrik_java-java.java') + def test_cedrik_java(self): + test_file = self.get_test_loc('copyrights/cedrik_java-java.java') expected = [ 'copyright (c) 2005-2006 Cedrik LIME', ] @@ -486,15 +486,15 @@ def test_copyright_cedrik_java(self): expected_in_results=True, results_in_expected=False) - def test_copyright_cern(self): - test_file = self.get_test_loc('copyrights/copyright_cern-TestMatrix_D_java.java') + def test_cern(self): + test_file = self.get_test_loc('copyrights/cern-TestMatrix_D_java.java') expected = [ 'Copyright 1999 CERN - European Organization for Nuclear Research.', ] check_detection(expected, test_file) - def test_copyright_cern_matrix2d_java(self): - test_file = self.get_test_loc('copyrights/copyright_cern_matrix2d_java-TestMatrix_D_java.java') + def test_cern_matrix2d_java(self): + test_file = self.get_test_loc('copyrights/cern_matrix2d_java-TestMatrix_D_java.java') expected = [ 'Copyright 1999 CERN - European Organization for Nuclear Research.', 'Copyright (c) 1998

Company PIERSOL Engineering Inc.', @@ -502,45 +502,45 @@ def test_copyright_cern_matrix2d_java(self): ] check_detection(expected, test_file) - def test_copyright_chameleon_assembly(self): - test_file = self.get_test_loc('copyrights/copyright_chameleon_assembly-9_9_setjmp_S.S') + def test_chameleon_assembly(self): + test_file = self.get_test_loc('copyrights/chameleon_assembly-9_9_setjmp_S.S') expected = [ 'Copyright Chameleon Systems, 1999', ] check_detection(expected, test_file) - def test_copyright_co_cust(self): - test_file = self.get_test_loc('copyrights/copyright_co_cust-copyright_java.java') + def test_co_cust(self): + test_file = self.get_test_loc('copyrights/co_cust-java.java') expected = [ 'Copyright (c) 2009

Company Customer Identity Hidden', ] check_detection(expected, test_file) - def test_copyright_colin_android(self): - test_file = self.get_test_loc('copyrights/copyright_colin_android-bsdiff_c.c') + def test_colin_android(self): + test_file = self.get_test_loc('copyrights/colin_android-bsdiff_c.c') expected = [ 'Copyright (c) 2009 The Android Open Source Project', 'Copyright 2003-2005 Colin Percival', ] check_detection(expected, test_file) - def test_copyright_company_in_txt(self): - test_file = self.get_test_loc('copyrights/copyright_company_in_txt-9.txt') + def test_company_in_txt(self): + test_file = self.get_test_loc('copyrights/company_in_txt-9.txt') expected = [ 'Copyright (c) 2008-2011 Company Name Incorporated', ] check_detection(expected, test_file) - def test_copyright_complex_4_line_statement_in_text(self): - test_file = self.get_test_loc('copyrights/copyright_complex_4_line_statement_in_text-9.txt') + def test_complex_4_line_statement_in_text(self): + test_file = self.get_test_loc('copyrights/complex_4_line_statement_in_text-9.txt') expected = [ 'Copyright 2002 Jonas Borgstrom 2002 Daniel Lundin 2002 CodeFactory AB', 'Copyright (c) 1994 The Regents of the University of California', ] check_detection(expected, test_file) - def test_copyright_complex_notice(self): - test_file = self.get_test_loc('copyrights/copyright_complex_notice-NOTICE') + def test_complex_notice(self): + test_file = self.get_test_loc('copyrights/complex_notice-NOTICE') expected = [ 'Copyright (c) 2003, Steven G. 
Kargl', 'Copyright (c) 2003 Mike Barcroft ', @@ -572,8 +572,8 @@ def test_copyright_complex_notice(self): ] check_detection(expected, test_file) - def test_copyright_complex_notice_sun_microsystems_on_multiple_lines(self): - test_file = self.get_test_loc('copyrights/copyright_complex_notice_sun_microsystems_on_multiple_lines-NOTICE') + def test_complex_notice_sun_microsystems_on_multiple_lines(self): + test_file = self.get_test_loc('copyrights/complex_notice_sun_microsystems_on_multiple_lines-NOTICE') expected = [ 'Copyright 1999-2006 The Apache Software Foundation', 'copyright (c) 1999-2002, Lotus Development Corporation., http://www.lotus.com.', @@ -586,29 +586,29 @@ def test_copyright_complex_notice_sun_microsystems_on_multiple_lines(self): ] check_detection(expected, test_file) - def test_copyright_config(self): - test_file = self.get_test_loc('copyrights/copyright_config-config_guess.guess') + def test_config(self): + test_file = self.get_test_loc('copyrights/config-config_guess.guess') expected = [ 'Copyright (c) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.', ] check_detection(expected, test_file) - def test_copyright_config1_guess(self): - test_file = self.get_test_loc('copyrights/copyright_config1_guess-config_guess.guess') + def test_config1_guess(self): + test_file = self.get_test_loc('copyrights/config1_guess-config_guess.guess') expected = [ 'Copyright (c) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.', ] check_detection(expected, test_file) - def test_copyright_copyright_camelcase_br_diagnostics_h(self): - test_file = self.get_test_loc('copyrights/copyright_copyright_camelcase_br_diagnostics_h-br_diagnostics_h.h') + def test_camelcase_br_diagnostics_h(self): + test_file = self.get_test_loc('copyrights/camelcase_br_diagnostics_h-br_diagnostics_h.h') expected = [ 'Copyright 2011 by BitRouter', ] 
check_detection(expected, test_file) - def test_copyright_coreutils_debian(self): - test_file = self.get_test_loc('copyrights/copyright_coreutils_debian-coreutils_copyright.copyright') + def test_coreutils_debian(self): + test_file = self.get_test_loc('copyrights/coreutils_debian-coreutils.copyright') expected = [ 'Copyright (c) 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.', 'Copyright (c) 1990, 1993, 1994 The Regents of the University of California', @@ -632,22 +632,22 @@ def test_copyright_coreutils_debian(self): ] check_detection(expected, test_file) - def test_copyright_dag_c(self): - test_file = self.get_test_loc('copyrights/copyright_dag_c-s_fabsl_c.c') + def test_dag_c(self): + test_file = self.get_test_loc('copyrights/dag_c-s_fabsl_c.c') expected = [ 'Copyright (c) 2003 Dag-Erling Coidan Smrgrav', ] check_detection(expected, test_file) - def test_copyright_dag_elring_notice(self): - test_file = self.get_test_loc('copyrights/copyright_dag_elring_notice-NOTICE') + def test_dag_elring_notice(self): + test_file = self.get_test_loc('copyrights/dag_elring_notice-NOTICE') expected = [ 'Copyright (c) 2003 Dag-Erling Codan Smrgrav', ] check_detection(expected, test_file) - def test_copyright_dash_in_name(self): - test_file = self.get_test_loc('copyrights/copyright_dash_in_name-Makefile') + def test_dash_in_name(self): + test_file = self.get_test_loc('copyrights/dash_in_name-Makefile') expected = [ '(c) 2011 - Anycompany, LLC', ] @@ -655,43 +655,43 @@ def test_copyright_dash_in_name(self): expected_in_results=False, results_in_expected=True) - def test_copyright_dasher_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_dasher_copyright_label-dasher_copyright_label.label') + def test_dasher(self): + test_file = self.get_test_loc('copyrights/dasher-dasher.label') expected = [ 'Copyright (c) 1998-2008 The Dasher Project', ] check_detection(expected, test_file) - def test_copyright_date_range_dahua_in_c(self): - test_file = 
self.get_test_loc('copyrights/copyright_date_range_dahua_in_c-c.c') + def test_date_range_dahua_in_c(self): + test_file = self.get_test_loc('copyrights/date_range_dahua_in_c-c.c') expected = [ '(c) Copyright 2006 to 2007 Dahua Digital.', ] check_detection(expected, test_file) - def test_copyright_date_range_in_c(self): - test_file = self.get_test_loc('copyrights/copyright_date_range_in_c-c.c') + def test_date_range_in_c(self): + test_file = self.get_test_loc('copyrights/date_range_in_c-c.c') expected = [ 'Copyright (c) ImageSilicon Tech. (2006 - 2007)', ] check_detection(expected, test_file) - def test_copyright_date_range_in_c_2(self): - test_file = self.get_test_loc('copyrights/copyright_date_range_in_c_2-c.c') + def test_date_range_in_c_2(self): + test_file = self.get_test_loc('copyrights/date_range_in_c_2-c.c') expected = [ '(c) Copyright 2005 to 2007 ImageSilicon? Tech.,ltd', ] check_detection(expected, test_file) - def test_copyright_debian_archive_keyring_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_debian_archive_keyring_copyright-debian_archive_keyring_copyright.copyright') + def test_debian_archive_keyring(self): + test_file = self.get_test_loc('copyrights/debian_archive_keyring-debian_archive_keyring.copyright') expected = [ 'Copyright (c) 2006 Michael Vogt ', ] check_detection(expected, test_file) - def test_copyright_debian_lib_1(self): - test_file = self.get_test_loc('copyrights/copyright_debian_lib_1-libmono_cairo_cil_copyright_label.label') + def test_debian_lib_1(self): + test_file = self.get_test_loc('copyrights/debian_lib_1-libmono_cairo_cil.label') expected = [ 'Copyright 2004 The Apache Software Foundation', 'Copyright (c) 2001-2005 Novell', @@ -711,8 +711,8 @@ def test_copyright_debian_lib_1(self): ] check_detection(expected, test_file) - def test_copyright_debian_lib_2(self): - test_file = self.get_test_loc('copyrights/copyright_debian_lib_2-libmono_cairo_cil_copyright.copyright') + def test_debian_lib_2(self): + 
test_file = self.get_test_loc('copyrights/debian_lib_2-libmono_cairo_cil.copyright') expected = [ 'Copyright 2004 The Apache Software Foundation', 'Copyright (c) 2001-2005 Novell', @@ -732,8 +732,8 @@ def test_copyright_debian_lib_2(self): ] check_detection(expected, test_file) - def test_copyright_debian_lib_3(self): - test_file = self.get_test_loc('copyrights/copyright_debian_lib_3-libmono_security_cil_copyright.copyright') + def test_debian_lib_3(self): + test_file = self.get_test_loc('copyrights/debian_lib_3-libmono_security_cil.copyright') expected = [ 'Copyright 2004 The Apache Software Foundation', 'Copyright (c) 2001-2005 Novell', @@ -753,8 +753,8 @@ def test_copyright_debian_lib_3(self): ] check_detection(expected, test_file) - def test_copyright_debian_multi_names_on_one_line(self): - test_file = self.get_test_loc('copyrights/copyright_debian_multi_names_on_one_line-libgdata__copyright.copyright') + def test_debian_multi_names_on_one_line(self): + test_file = self.get_test_loc('copyrights/debian_multi_names_on_one_line-libgdata.copyright') expected = [ 'Copyright 1999-2004 Ximian, Inc. 1999-2005 Novell, Inc.', 'copyright 2000-2003 Ximian, Inc. 
, 2003 Gergo Erdi', @@ -780,8 +780,8 @@ def test_copyright_debian_multi_names_on_one_line(self): # expected_in_results=False, # results_in_expected=True) - def test_copyright_dionysos_c(self): - test_file = self.get_test_loc('copyrights/copyright_dionysos_c-c.c') + def test_dionysos_c(self): + test_file = self.get_test_loc('copyrights/dionysos_c-c.c') expected = [ 'COPYRIGHT (c) 2006 - 2009 DIONYSOS', 'COPYRIGHT (c) ADIONYSOS 2006 - 2009', @@ -796,15 +796,15 @@ def test_copyright_dionysos_c(self): ] check_detection(expected, test_file) - def test_copyright_disclaimed(self): - test_file = self.get_test_loc('copyrights/copyright_disclaimed-c.c') + def test_disclaimed(self): + test_file = self.get_test_loc('copyrights/disclaimed-c.c') expected = [ 'Copyright disclaimed 2003 by Andrew Clarke', ] check_detection(expected, test_file) - def test_copyright_djvulibre_desktop_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_djvulibre_desktop_copyright-djvulibre_desktop_copyright.copyright') + def test_djvulibre_desktop(self): + test_file = self.get_test_loc('copyrights/djvulibre_desktop-djvulibre_desktop.copyright') expected = [ 'Copyright (c) 2002 Leon Bottou and Yann Le Cun', 'Copyright (c) 2001 AT&T', @@ -812,8 +812,8 @@ def test_copyright_djvulibre_desktop_copyright(self): ] check_detection(expected, test_file) - def test_copyright_docbook_xsl_doc_html_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_docbook_xsl_doc_html_copyright-docbook_xsl_doc_html_copyright.copyright') + def test_docbook_xsl_doc_html(self): + test_file = self.get_test_loc('copyrights/docbook_xsl_doc_html-docbook_xsl_doc_html.copyright') expected = [ 'Copyright (c) 1999-2007 Norman Walsh', 'Copyright (c) 2003 Jiri Kosek', @@ -822,15 +822,15 @@ def test_copyright_docbook_xsl_doc_html_copyright(self): ] check_detection(expected, test_file) - def test_copyright_drand48_c(self): - test_file = self.get_test_loc('copyrights/copyright_drand48_c-drand_c.c') + def 
test_drand48_c(self): + test_file = self.get_test_loc('copyrights/drand48_c-drand_c.c') expected = [ 'Copyright (c) 1993 Martin Birgmeier', ] check_detection(expected, test_file) - def test_copyright_ed_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_ed_copyright-ed_copyright.copyright') + def test_ed(self): + test_file = self.get_test_loc('copyrights/ed-ed.copyright') expected = [ 'Copyright (c) 1993, 1994 Andrew Moore , Talke Studio', 'Copyright (c) 2006, 2007 Antonio Diaz Diaz', @@ -839,88 +839,88 @@ def test_copyright_ed_copyright(self): ] check_detection(expected, test_file) - def test_copyright_epiphany_browser_data_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_epiphany_browser_data_copyright_label-epiphany_browser_data_copyright_label.label') + def test_epiphany_browser_data(self): + test_file = self.get_test_loc('copyrights/epiphany_browser_data-epiphany_browser_data.label') expected = [ 'Copyright (c) 2004 the Initial Developer.', '(c) 2003-2007, the Debian GNOME team ', ] check_detection(expected, test_file) - def test_copyright_eric_young_c(self): - test_file = self.get_test_loc('copyrights/copyright_eric_young_c-c.c') + def test_eric_young_c(self): + test_file = self.get_test_loc('copyrights/eric_young_c-c.c') expected = [ 'Copyright (c) 1995-1997 Eric Young (eay@mincom.oz.au)', ] check_detection(expected, test_file) - def test_copyright_errno_atheros(self): - test_file = self.get_test_loc('copyrights/copyright_errno_atheros-c.c') + def test_errno_atheros(self): + test_file = self.get_test_loc('copyrights/errno_atheros-c.c') expected = [ 'Copyright (c) 2002-2006 Sam Leffler, Errno Consulting, Atheros Communications, Inc.', ] check_detection(expected, test_file) - def test_copyright_errno_atheros_ah_h(self): - test_file = self.get_test_loc('copyrights/copyright_errno_atheros_ah_h-ah_h.h') + def test_errno_atheros_ah_h(self): + test_file = self.get_test_loc('copyrights/errno_atheros_ah_h-ah_h.h') 
expected = [ 'Copyright (c) 2002-2006 Sam Leffler, Errno Consulting, Atheros Communications, Inc.', ] check_detection(expected, test_file) - def test_copyright_errno_c(self): - test_file = self.get_test_loc('copyrights/copyright_errno_c-c.c') + def test_errno_c(self): + test_file = self.get_test_loc('copyrights/errno_c-c.c') expected = [ 'Copyright (c) 2002-2006 Sam Leffler, Errno Consulting, Atheros Communications, Inc.', ] check_detection(expected, test_file) - def test_copyright_esmertec_java(self): - test_file = self.get_test_loc('copyrights/copyright_esmertec_java-java.java') + def test_esmertec_java(self): + test_file = self.get_test_loc('copyrights/esmertec_java-java.java') expected = [ 'Copyright (c) 2008 Esmertec AG', 'Copyright (c) 2008 The Android Open Source Project', ] check_detection(expected, test_file) - def test_copyright_essential_smoke(self): - test_file = self.get_test_loc('copyrights/copyright_essential_smoke-ibm_c.c') + def test_essential_smoke(self): + test_file = self.get_test_loc('copyrights/essential_smoke-ibm_c.c') expected = [ 'Copyright IBM and others (c) 2008', 'Copyright Eclipse, IBM and others (c) 2008', ] check_detection(expected, test_file) - def test_copyright_expat_h(self): - test_file = self.get_test_loc('copyrights/copyright_expat_h-expat_h.h') + def test_expat_h(self): + test_file = self.get_test_loc('copyrights/expat_h-expat_h.h') expected = [ 'Copyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd', ] check_detection(expected, test_file) - def test_copyright_ext_all_js(self): - test_file = self.get_test_loc('copyrights/copyright_ext_all_js-ext_all_js.js') + def test_ext_all_js(self): + test_file = self.get_test_loc('copyrights/ext_all_js-ext_all_js.js') expected = [ 'Copyright (c) 2006-2009 Ext JS, LLC', ] check_detection(expected, test_file) - def test_copyright_extjs_c(self): - test_file = self.get_test_loc('copyrights/copyright_extjs_c-c.c') + def test_extjs_c(self): + test_file = 
self.get_test_loc('copyrights/extjs_c-c.c') expected = [ 'Copyright (c) 2006-2007, Ext JS, LLC.', ] check_detection(expected, test_file) - def test_copyright_fsf_py(self): - test_file = self.get_test_loc('copyrights/copyright_fsf_py-999_py.py') + def test_fsf_py(self): + test_file = self.get_test_loc('copyrights/fsf_py-999_py.py') expected = [ 'Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.', ] check_detection(expected, test_file) - def test_copyright_gailly(self): - test_file = self.get_test_loc('copyrights/copyright_gailly-c.c') + def test_gailly(self): + test_file = self.get_test_loc('copyrights/gailly-c.c') expected = [ 'Copyright (c) 1992-1993 Jean-loup Gailly.', 'Copyright (c) 1992-1993 Jean-loup Gailly', @@ -928,15 +928,15 @@ def test_copyright_gailly(self): ] check_detection(expected, test_file) - def test_copyright_geoff_js(self): - test_file = self.get_test_loc('copyrights/copyright_geoff_js-js.js') + def test_geoff_js(self): + test_file = self.get_test_loc('copyrights/geoff_js-js.js') expected = [ 'Copyright (c) 2007-2008 Geoff Stearns, Michael Williams, and Bobby van der Sluis', ] check_detection(expected, test_file) - def test_copyright_gnome_session_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_gnome_session_copyright-gnome_session_copyright.copyright') + def test_gnome_session(self): + test_file = self.get_test_loc('copyrights/gnome_session-gnome_session.copyright') expected = [ 'Copyright (c) 1999-2009 Red Hat, Inc.', 'Copyright (c) 1999-2007 Novell, Inc.', @@ -953,8 +953,8 @@ def test_copyright_gnome_session_copyright(self): ] check_detection(expected, test_file) - def test_copyright_gnome_system_monitor_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_gnome_system_monitor_copyright-gnome_system_monitor_copyright.copyright') + def test_gnome_system_monitor(self): + test_file = 
self.get_test_loc('copyrights/gnome_system_monitor-gnome_system_monitor.copyright') expected = [ 'Copyright Holders: Kevin Vandersloot Erik Johnsson ', ] @@ -962,8 +962,8 @@ def test_copyright_gnome_system_monitor_copyright(self): expected_in_results=False, results_in_expected=True) - def test_copyright_gnome_system_monitor_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_gnome_system_monitor_copyright_label-gnome_system_monitor_copyright_label.label') + def test_gnome_system_monitor_label(self): + test_file = self.get_test_loc('copyrights/gnome_system_monitor-gnome_system_monitor.label') expected = [ 'Copyright Holders: Kevin Vandersloot Erik Johnsson ', ] @@ -971,8 +971,8 @@ def test_copyright_gnome_system_monitor_copyright_label(self): expected_in_results=False, results_in_expected=True) - def test_copyright_gobjc_4_3_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_gobjc_4_3_copyright-gobjc__copyright.copyright') + def test_gobjc_4_3(self): + test_file = self.get_test_loc('copyrights/gobjc_4_3-gobjc.copyright') expected = [ 'Copyright (c) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.', 'copyright Free Software Foundation', @@ -981,46 +981,46 @@ def test_copyright_gobjc_4_3_copyright(self): ] check_detection(expected, test_file) - def test_copyright_google_closure_templates_java_html(self): - test_file = self.get_test_loc('copyrights/copyright_google_closure_templates_java_html-html.html') + def test_google_closure_templates_java_html(self): + test_file = self.get_test_loc('copyrights/google_closure_templates_java_html-html.html') expected = [ '(c) 2009 Google', ] check_detection(expected, test_file) - def test_copyright_google_view_layout1_xml(self): - test_file = self.get_test_loc('copyrights/copyright_google_view_layout1_xml-view_layout_xml.xml') + def test_google_view_layout1_xml(self): + 
test_file = self.get_test_loc('copyrights/google_view_layout1_xml-view_layout_xml.xml') expected = [ 'Copyright (c) 2008 Google Inc.', ] check_detection(expected, test_file) - def test_copyright_group(self): - test_file = self.get_test_loc('copyrights/copyright_group-c.c') + def test_group(self): + test_file = self.get_test_loc('copyrights/group-c.c') expected = [ 'Copyright (c) 2014 ARRis Group, Inc.', 'Copyright (c) 2013 ARRIS Group, Inc.', ] check_detection(expected, test_file) - def test_copyright_gsoap(self): - test_file = self.get_test_loc('copyrights/copyright_gsoap-gSOAP') + def test_gsoap(self): + test_file = self.get_test_loc('copyrights/gsoap-gSOAP') expected = [ 'Copyright (c) 2001-2004 Robert A. van Engelen, Genivia inc.', 'Copyright (c) 2001-2004 Robert A. van Engelen, Genivia inc.', ] check_detection(expected, test_file) - def test_copyright_gstreamer0_fluendo_mp3_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_gstreamer0_fluendo_mp3_copyright-gstreamer__fluendo_mp_copyright.copyright') + def test_gstreamer0_fluendo_mp3(self): + test_file = self.get_test_loc('copyrights/gstreamer0_fluendo_mp3-gstreamer_fluendo_mp.copyright') expected = [ 'Copyright (c) 2005,2006 Fluendo', 'Copyright 2005 Fluendo', ] check_detection(expected, test_file) - def test_copyright_hall(self): - test_file = self.get_test_loc('copyrights/copyright_hall-copyright.txt') + def test_hall(self): + test_file = self.get_test_loc('copyrights/hall-copyright.txt') expected = [ 'Copyright (c) 2004, Richard S. 
Hall', 'Copyright (c) 2004, Didier Donsez', @@ -1028,8 +1028,8 @@ def test_copyright_hall(self): ] check_detection(expected, test_file) - def test_copyright_hans_jurgen_htm(self): - test_file = self.get_test_loc('copyrights/copyright_hans_jurgen_htm-9_html.html') + def test_hans_jurgen_htm(self): + test_file = self.get_test_loc('copyrights/hans_jurgen_htm-9_html.html') expected = [ 'Copyright (c) 2006 by Hans-Jurgen Koch.', ] @@ -1037,22 +1037,22 @@ def test_copyright_hans_jurgen_htm(self): expected_in_results=True, results_in_expected=False) - def test_copyright_hansen_cs(self): - test_file = self.get_test_loc('copyrights/copyright_hansen_cs-cs.cs') + def test_hansen_cs(self): + test_file = self.get_test_loc('copyrights/hansen_cs-cs.cs') expected = [ 'Web Applications Copyright 2009 - Thomas Hansen thomas@ra-ajax.org.', ] check_detection(expected, test_file) - def test_copyright_hciattach_qualcomm1_c(self): - test_file = self.get_test_loc('copyrights/copyright_hciattach_qualcomm1_c-hciattach_qualcomm_c.c') + def test_hciattach_qualcomm1_c(self): + test_file = self.get_test_loc('copyrights/hciattach_qualcomm1_c-hciattach_qualcomm_c.c') expected = [ 'Copyright (c) 2005-2010 Marcel Holtmann ', ] check_detection(expected, test_file) - def test_copyright_hibernate_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_hibernate_copyright_label-hibernate_copyright_label.label') + def test_hibernate(self): + test_file = self.get_test_loc('copyrights/hibernate-hibernate.label') expected = [ 'Copyright (c) 2004-2006 Bernard Blackham ', 'copyright (c) 2004-2006 Cameron Patrick ', @@ -1060,24 +1060,24 @@ def test_copyright_hibernate_copyright_label(self): ] check_detection(expected, test_file) - def test_copyright_holtmann(self): - test_file = self.get_test_loc('copyrights/copyright_holtmann-hciattach_qualcomm_c.c') + def test_holtmann(self): + test_file = self.get_test_loc('copyrights/holtmann-hciattach_qualcomm_c.c') expected = [ 'Copyright (c) 
2005-2010 Marcel Holtmann ', 'Copyright (c) 2010, Code Aurora Forum.', ] check_detection(expected, test_file) - def test_copyright_hostapd_cli_c(self): - test_file = self.get_test_loc('copyrights/copyright_hostapd_cli_c-hostapd_cli_c.c') + def test_hostapd_cli_c(self): + test_file = self.get_test_loc('copyrights/hostapd_cli_c-hostapd_cli_c.c') expected = [ 'Copyright (c) 2004-2005, Jouni Malinen ', 'Copyright (c) 2004-2005, Jouni Malinen ', ] check_detection(expected, test_file) - def test_copyright_hp_notice(self): - test_file = self.get_test_loc('copyrights/copyright_hp_notice-NOTICE') + def test_hp_notice(self): + test_file = self.get_test_loc('copyrights/hp_notice-NOTICE') expected = [ '(c) Copyright 2007 Hewlett-Packard Development Company, L.P.', '(c) Copyright 2008 Hewlett-Packard Development Company, L.P.', @@ -1090,8 +1090,8 @@ def test_copyright_hp_notice(self): ] check_detection(expected, test_file) - def test_copyright_hpijs_ppds_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_hpijs_ppds_copyright_label-hpijs_ppds_copyright_label.label') + def test_hpijs_ppds(self): + test_file = self.get_test_loc('copyrights/hpijs_ppds-hpijs_ppds.label') expected = [ 'Copyright (c) 2003-2004 by Torsten Landschoff ', 'Copyright (c) 2004-2006 by Henrique de Moraes Holschuh ', @@ -1100,8 +1100,8 @@ def test_copyright_hpijs_ppds_copyright_label(self): ] check_detection(expected, test_file) - def test_copyright_ibm_c(self): - test_file = self.get_test_loc('copyrights/copyright_ibm_c-ibm_c.c') + def test_ibm_c(self): + test_file = self.get_test_loc('copyrights/ibm_c-ibm_c.c') expected = [ 'Copyright (c) ibm technologies 2008', 'Copyright (c) IBM Corporation 2008', @@ -1112,8 +1112,8 @@ def test_copyright_ibm_c(self): ] check_detection(expected, test_file) - def test_copyright_icedax_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_icedax_copyright_label-icedax_copyright_label.label') + def test_icedax(self): + 
test_file = self.get_test_loc('copyrights/icedax-icedax.label') expected = [ 'Copyright 1998-2003 Heiko Eissfeldt', '(c) Peter Widow', @@ -1130,15 +1130,15 @@ def test_copyright_icedax_copyright_label(self): ] check_detection(expected, test_file) - def test_copyright_ifrename_c(self): - test_file = self.get_test_loc('copyrights/copyright_ifrename_c-ifrename_c.c') + def test_ifrename_c(self): + test_file = self.get_test_loc('copyrights/ifrename_c-ifrename_c.c') expected = [ 'Copyright (c) 2004 Jean Tourrilhes ', ] check_detection(expected, test_file) - def test_copyright_illinois_html(self): - test_file = self.get_test_loc('copyrights/copyright_illinois_html-9_html.html') + def test_illinois_html(self): + test_file = self.get_test_loc('copyrights/illinois_html-9_html.html') expected = [ 'Copyright 1999,2000,2001,2002,2003,2004 The Board of Trustees of the University of Illinois', ] @@ -1146,30 +1146,30 @@ def test_copyright_illinois_html(self): expected_in_results=False, results_in_expected=True) - def test_copyright_in_COPYING_gpl(self): - test_file = self.get_test_loc('copyrights/copyright_in_COPYING_gpl-COPYING_gpl.gpl') + def test_COPYING_gpl(self): + test_file = self.get_test_loc('copyrights/COPYING_gpl-COPYING_gpl.gpl') expected = [ 'Copyright (c) 1989, 1991 Free Software Foundation, Inc.', 'copyrighted by the Free Software Foundation', ] check_detection(expected, test_file) - def test_copyright_in_COPYRIGHT_madwifi(self): - test_file = self.get_test_loc('copyrights/copyright_in_COPYRIGHT_madwifi-COPYRIGHT_madwifi.madwifi') + def test_COPYRIGHT_madwifi(self): + test_file = self.get_test_loc('copyrights/COPYRIGHT_madwifi-COPYRIGHT_madwifi.madwifi') expected = [ 'Copyright (c) 2002-2006 Sam Leffler, Errno Consulting, Atheros Communications, Inc.', ] check_detection(expected, test_file) - def test_copyright_in_README(self): - test_file = self.get_test_loc('copyrights/copyright_in_README-README') + def test_README(self): + test_file = 
self.get_test_loc('copyrights/README-README') expected = [ 'Copyright (c) 2002-2006, Jouni Malinen ', ] check_detection(expected, test_file) - def test_copyright_in_bash(self): - test_file = self.get_test_loc('copyrights/copyright_in_bash-shell_sh.sh') + def test_bash(self): + test_file = self.get_test_loc('copyrights/bash-shell_sh.sh') expected = [ 'Copyright (c) 2008 Hewlett-Packard Development Company, L.P.', ] @@ -1177,7 +1177,7 @@ def test_copyright_in_bash(self): expected_in_results=False, results_in_expected=True) - def test_copyright_in_binary_file_with_metadata(self): + def test_binary_file_with_metadata(self): test_file = self.get_test_loc('copyrights/mp4_with_metadata.mp4') expected = [ 'copyright (c) 2016 Philippe', @@ -1186,21 +1186,21 @@ def test_copyright_in_binary_file_with_metadata(self): check_detection(expected, test_file) @expectedFailure - def test_copyright_in_windows_binary_lib(self): - test_file = self.get_test_loc('copyrights/copyright_in_binary_lib-php_embed_lib.lib') + def test_windows_binary_lib(self): + test_file = self.get_test_loc('copyrights/binary_lib-php_embed_lib.lib') expected = [ 'Copyright nexB and others (c) 2012', ] check_detection(expected, test_file) - def test_copyright_in_windows_binary_dll_ignore_leading_junk(self): + def test_windows_binary_dll_ignore_leading_junk(self): test_file = self.get_test_loc('copyrights/windows.dll') expected = [ 'Copyright nexB and others (c) 2012' ] check_detection(expected, test_file) - def test_copyright_in_elf_binary_treats_new_lines_as_spaces(self): + def test_elf_binary_treats_new_lines_as_spaces(self): test_file = self.get_test_loc('copyrights/tor.bin') expected = [ u'Copyright (c) 2001-2004, Roger Dingledine', @@ -1209,15 +1209,15 @@ def test_copyright_in_elf_binary_treats_new_lines_as_spaces(self): ] check_detection(expected, test_file, what='copyrights') - def test_copyright_in_c(self): - test_file = self.get_test_loc('copyrights/copyright_in_c-c.c') + def test_c(self): + test_file = 
self.get_test_loc('copyrights/c-c.c') expected = [ 'COPYRIGHT (c) STMicroelectronics 2005.', ] check_detection(expected, test_file) - def test_copyright_in_c_include(self): - test_file = self.get_test_loc('copyrights/copyright_in_c_include-h.h') + def test_c_include(self): + test_file = self.get_test_loc('copyrights/c_include-h.h') expected = [ 'COPYRIGHT (c) ST-Microelectronics 1998.', ] @@ -1225,143 +1225,142 @@ def test_copyright_in_c_include(self): expected_in_results=False, results_in_expected=True) - def test_copyright_in_dll_approximate(self): - test_file = self.get_test_loc('copyrights/copyright_in_dll-9_msvci_dll.dll') + def test_dll_approximate(self): + test_file = self.get_test_loc('copyrights/dll-9_msvci_dll.dll') expected = [ 'Copyright Myself and Me, Inc QjT F4P', ] check_detection(expected, test_file) @expectedFailure - def test_copyright_in_dll_exact(self): - test_file = self.get_test_loc('copyrights/copyright_in_dll-9_msvci_dll.dll') + def test_dll_exact(self): + test_file = self.get_test_loc('copyrights/dll-9_msvci_dll.dll') expected = [ 'Copyright Myself and Me, Inc', ] check_detection(expected, test_file) - - def test_copyright_in_h(self): - test_file = self.get_test_loc('copyrights/copyright_in_h-h.h') + def test_h(self): + test_file = self.get_test_loc('copyrights/h-h.h') expected = [ 'COPYRIGHT (c) ST-Microelectronics 1998.', ] check_detection(expected, test_file) @expectedFailure - def test_copyright_in_html_comments(self): - test_file = self.get_test_loc('copyrights/copyright_in_html_comments-html.html') + def test_html_comments(self): + test_file = self.get_test_loc('copyrights/html_comments-html.html') expected = [ 'Copyright 2008 ABCD, LLC.', ] check_detection(expected, test_file) @expectedFailure - def test_copyright_in_html_incorrect(self): - test_file = self.get_test_loc('copyrights/copyright_in_html_incorrect-detail_9_html.html') + def test_html_incorrect(self): + test_file = 
self.get_test_loc('copyrights/html_incorrect-detail_9_html.html') expected = [ 'A12 Oe (c) 2004-2009', ] check_detection(expected, test_file) - def test_copyright_in_maven_pom_xstream(self): - test_file = self.get_test_loc('copyrights/copyright_in_maven_pom_xstream-pom_xml.xml') + def test_maven_pom_xstream(self): + test_file = self.get_test_loc('copyrights/maven_pom_xstream-pom_xml.xml') expected = [ 'Copyright (c) 2006 Joe Walnes.', 'Copyright (c) 2006, 2007, 2008 XStream committers.', ] check_detection(expected, test_file) - def test_copyright_in_media(self): - test_file = self.get_test_loc('copyrights/copyright_in_media-a_png.png') + def test_media(self): + test_file = self.get_test_loc('copyrights/media-a_png.png') expected = [ 'Copyright nexB and others (c) 2012', ] check_detection(expected, test_file) - def test_copyright_in_phps(self): - test_file = self.get_test_loc('copyrights/copyright_in_phps-phps.phps') + def test_phps(self): + test_file = self.get_test_loc('copyrights/phps-phps.phps') expected = [ 'copyright 2005 Michal Migurski', ] check_detection(expected, test_file) - def test_copyright_in_postcript(self): - test_file = self.get_test_loc('copyrights/copyright_in_postcript-9__ps.ps') + def test_postcript(self): + test_file = self.get_test_loc('copyrights/postcript-9_ps.ps') expected = [ 'Copyright 1999 Radical Eye Software', ] check_detection(expected, test_file) - def test_copyright_in_txt(self): - test_file = self.get_test_loc('copyrights/copyright_in_txt.txt') + def test_txt(self): + test_file = self.get_test_loc('copyrights/txt.txt') expected = [ 'Copyright ?2004-2006 Company', ] check_detection(expected, test_file) - def test_copyright_in_visio_doc(self): - test_file = self.get_test_loc('copyrights/copyright_in_visio_doc-Glitch_ERD_vsd.vsd') + def test_visio_doc(self): + test_file = self.get_test_loc('copyrights/visio_doc-Glitch_ERD_vsd.vsd') expected = [] check_detection(expected, test_file) - def test_copyright_inria_loss_of_holder_c(self): - 
test_file = self.get_test_loc('copyrights/copyright_inria_loss_of_holder_c-c.c') + def test_inria_loss_of_holder_c(self): + test_file = self.get_test_loc('copyrights/inria_loss_of_holder_c-c.c') expected = [ 'Copyright (c) 2000,2002,2003 INRIA, France Telecom', ] check_detection(expected, test_file) @expectedFailure - def test_copyright_java(self): - test_file = self.get_test_loc('copyrights/copyright_java-java.java') + def test_java(self): + test_file = self.get_test_loc('copyrights/java-java.java') expected = [ 'Copyright (c) 1992-2002 by P.J. Plauger.', ] check_detection(expected, test_file) - def test_copyright_java_passing(self): - test_file = self.get_test_loc('copyrights/copyright_java-java.java') + def test_java_passing(self): + test_file = self.get_test_loc('copyrights/java-java.java') expected = [ 'Copyright (c) 1992-2002 by P.J.', ] check_detection(expected, test_file) - def test_copyright_jdoe(self): - test_file = self.get_test_loc('copyrights/copyright_jdoe-copyright_c.c') + def test_jdoe(self): + test_file = self.get_test_loc('copyrights/jdoe-c.c') expected = [ 'Copyright 2009 J-Doe.', ] check_detection(expected, test_file) @expectedFailure - def test_copyright_json_in_phps(self): - test_file = self.get_test_loc('copyrights/copyright_json_in_phps-JSON_phps.phps') + def test_json_in_phps(self): + test_file = self.get_test_loc('copyrights/json_in_phps-JSON_phps.phps') expected = [ 'copyright 2005 Michal Migurski', ] check_detection(expected, test_file) - def test_copyright_json_in_phps_incorrect(self): - test_file = self.get_test_loc('copyrights/copyright_json_in_phps_incorrect-JSON_phps.phps') + def test_json_in_phps_incorrect(self): + test_file = self.get_test_loc('copyrights/json_in_phps_incorrect-JSON_phps.phps') expected = [] check_detection(expected, test_file) - def test_copyright_json_phps_html_incorrect(self): - test_file = self.get_test_loc('copyrights/copyright_json_phps_html_incorrect-JSON_phps_html.html') + def 
test_json_phps_html_incorrect(self): + test_file = self.get_test_loc('copyrights/json_phps_html_incorrect-JSON_phps_html.html') expected = [] check_detection(expected, test_file) @expectedFailure - def test_copyright_json_phps_html(self): - test_file = self.get_test_loc('copyrights/copyright_json_phps_html-JSON_phps_html.html') + def test_json_phps_html(self): + test_file = self.get_test_loc('copyrights/json_phps_html-JSON_phps_html.html') expected = [ 'copyright 2005 Michal Migurski', ] check_detection(expected, test_file) - def test_copyright_jsp_all_CAPS(self): - test_file = self.get_test_loc('copyrights/copyright_jsp_all_CAPS-jsp.jsp') + def test_jsp_all_CAPS(self): + test_file = self.get_test_loc('copyrights/jsp_all_CAPS-jsp.jsp') expected = [ 'copyright 2005-2006 Cedrik LIME', ] @@ -1369,8 +1368,8 @@ def test_copyright_jsp_all_CAPS(self): expected_in_results=False, results_in_expected=True) - def test_copyright_kaboom_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_kaboom_copyright-kaboom_copyright.copyright') + def test_kaboom(self): + test_file = self.get_test_loc('copyrights/kaboom-kaboom.copyright') expected = [ 'Copyright (c) 2009 Sune Vuorela ', 'Copyright (c) 2007-2009 George Kiagiadakis ', @@ -1379,8 +1378,8 @@ def test_copyright_kaboom_copyright(self): ] check_detection(expected, test_file) - def test_copyright_kbuild_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_kbuild_copyright-kbuild_copyright.copyright') + def test_kbuild(self): + test_file = self.get_test_loc('copyrights/kbuild-kbuild.copyright') expected = [ 'Copyright (c) 2005-2009 Knut St. 
Osmundsen ', 'Copyright (c) 1991-1993 The Regents of the University of California', @@ -1391,16 +1390,16 @@ def test_copyright_kbuild_copyright(self): ] check_detection(expected, test_file) - def test_copyright_kde_l10n_zhcn_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_kde_l10n_zhcn_copyright-kde_l_n_zhcn_copyright.copyright') + def test_kde_l10n_zhcn(self): + test_file = self.get_test_loc('copyrights/kde_l10n_zhcn-kde_l_n_zhcn.copyright') expected = [ 'Copyright (c) 1996-2009 The KDE Translation teams ', '(c) 2007-2009, Debian Qt/KDE Maintainers', ] check_detection(expected, test_file) - def test_copyright_leonardo_c(self): - test_file = self.get_test_loc('copyrights/copyright_leonardo_c-c.c') + def test_leonardo_c(self): + test_file = self.get_test_loc('copyrights/leonardo_c-c.c') expected = [ 'Copyright (c) 1994 by Leonardo DaVinci Societe', ] @@ -1408,8 +1407,8 @@ def test_copyright_leonardo_c(self): expected_in_results=False, results_in_expected=True) - def test_copyright_libadns1_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libadns1_copyright-libadns_copyright.copyright') + def test_libadns1(self): + test_file = self.get_test_loc('copyrights/libadns1-libadns.copyright') expected = [ 'Copyright 1997-2000 Ian Jackson', 'Copyright 1999 Tony Finch', @@ -1417,8 +1416,8 @@ def test_copyright_libadns1_copyright(self): ] check_detection(expected, test_file) - def test_copyright_libc6_i686_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libc6_i686_copyright-libc_i_copyright.copyright') + def test_libc6_i686(self): + test_file = self.get_test_loc('copyrights/libc6_i686-libc_i.copyright') expected = [ 'Copyright (c) 1991,92,93,94,95,96,97,98,99,2000,2001,2002,2003,2004,2005, 2006,2007,2008 Free Software Foundation, Inc.', 'Copyright (c) 1991,92,93,94,95,96,97,98,99,2000,2001,2002,2003,2004,2005, 2006,2007,2008 Free Software Foundation, Inc.', @@ -1431,8 +1430,8 @@ def 
test_copyright_libc6_i686_copyright(self): ] check_detection(expected, test_file) - def test_copyright_libcdio10_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_libcdio10_copyright_label-libcdio_copyright_label.label') + def test_libcdio10(self): + test_file = self.get_test_loc('copyrights/libcdio10-libcdio.label') expected = [ 'Copyright (c) 1999, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Rocky Bernstein ', 'Copyright (c) 2000, 2001, 2003, 2004, 2005, 2008 Herbert Valerio Riedel', @@ -1456,16 +1455,16 @@ def test_copyright_libcdio10_copyright_label(self): ] check_detection(expected, test_file) - def test_copyright_libcelt0_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libcelt0_copyright-libcelt_copyright.copyright') + def test_libcelt0(self): + test_file = self.get_test_loc('copyrights/libcelt0-libcelt.copyright') expected = [ 'Copyright 2005-2007 Christopher Montgomery , Jean-Marc Valin , Timothy Terriberry', '(c) 2008, Ron', ] check_detection(expected, test_file) - def test_copyright_libcompress_raw_zlib_perl_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libcompress_raw_zlib_perl_copyright-libcompress_raw_zlib_perl_copyright.copyright') + def test_libcompress_raw_zlib_perl(self): + test_file = self.get_test_loc('copyrights/libcompress_raw_zlib_perl-libcompress_raw_zlib_perl.copyright') expected = [ 'Copyright 2005-2009, Paul Marquess ', 'Copyright 1995-2005, Jean-loup Gailly ', @@ -1475,15 +1474,15 @@ def test_copyright_libcompress_raw_zlib_perl_copyright(self): ] check_detection(expected, test_file) - def test_copyright_libcpufreq0_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libcpufreq0_copyright-libcpufreq_copyright.copyright') + def test_libcpufreq0(self): + test_file = self.get_test_loc('copyrights/libcpufreq0-libcpufreq.copyright') expected = [ 'Copyright 2004-2006 Dominik Brodowski', ] check_detection(expected, test_file) - def 
test_copyright_libcrypt_ssleay_perl_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libcrypt_ssleay_perl_copyright-libcrypt_ssleay_perl_copyright.copyright') + def test_libcrypt_ssleay_perl(self): + test_file = self.get_test_loc('copyrights/libcrypt_ssleay_perl-libcrypt_ssleay_perl.copyright') expected = [ 'Copyright (c) 1999-2003 Joshua Chamas', 'Copyright (c) 1998 Gisle Aas', @@ -1491,34 +1490,34 @@ def test_copyright_libcrypt_ssleay_perl_copyright(self): ] check_detection(expected, test_file) - def test_copyright_libepc_ui_1_0_1_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libepc_ui_1_0_1_copyright-libepc_ui__copyright.copyright') + def test_libepc_ui_1_0_1(self): + test_file = self.get_test_loc('copyrights/libepc_ui_1_0_1-libepc_ui.copyright') expected = [ 'Copyright (c) 2007, 2008 Openismus GmbH', ] check_detection(expected, test_file) - def test_copyright_libepc_ui_1_0_2_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_libepc_ui_1_0_2_copyright_label-libepc_ui__copyright_label.label') + def test_libepc_ui_1_0_2(self): + test_file = self.get_test_loc('copyrights/libepc_ui_1_0_2-libepc_ui.label') expected = [ 'Copyright (c) 2007, 2008 Openismus GmbH', ] check_detection(expected, test_file) - def test_copyright_libfltk1_1_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libfltk1_1_copyright-libfltk_copyright.copyright') + def test_libfltk1_1(self): + test_file = self.get_test_loc('copyrights/libfltk1_1-libfltk.copyright') expected = [ 'Copyright (c) 1998-2009 Bill Spitzak spitzak@users.sourceforge.net', ] check_detection(expected, test_file) - def test_copyright_libgail18_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_libgail18_copyright_label-libgail_copyright_label.label') + def test_libgail18(self): + test_file = self.get_test_loc('copyrights/libgail18-libgail.label') expected = [] check_detection(expected, test_file) - def 
test_copyright_libggiwmh0_target_x_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libggiwmh0_target_x_copyright-libggiwmh_target_x_copyright.copyright') + def test_libggiwmh0_target_x(self): + test_file = self.get_test_loc('copyrights/libggiwmh0_target_x-libggiwmh_target_x.copyright') expected = [ 'Copyright (c) 2005 Eric Faurot eric.faurot@gmail.com', 'Copyright (c) 2004 Peter Ekberg peda@lysator.liu.se', @@ -1529,8 +1528,8 @@ def test_copyright_libggiwmh0_target_x_copyright(self): ] check_detection(expected, test_file) - def test_copyright_libgnome_desktop_2_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libgnome_desktop_2_copyright-libgnome_desktop__copyright.copyright') + def test_libgnome_desktop_2(self): + test_file = self.get_test_loc('copyrights/libgnome_desktop_2-libgnome_desktop.copyright') expected = [ 'Copyright (c) 1999, 2000 Red Hat Inc.', 'Copyright (c) 2001 Sid Vicious', @@ -1540,56 +1539,56 @@ def test_copyright_libgnome_desktop_2_copyright(self): ] check_detection(expected, test_file) - def test_copyright_libgnome_media0_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libgnome_media0_copyright-libgnome_media_copyright.copyright') + def test_libgnome_media0(self): + test_file = self.get_test_loc('copyrights/libgnome_media0-libgnome_media.copyright') expected = [] check_detection(expected, test_file) - def test_copyright_libgoffice_0_8_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_libgoffice_0_8_copyright_label-libgoffice__copyright_label.label') + def test_libgoffice_0_8(self): + test_file = self.get_test_loc('copyrights/libgoffice_0_8-libgoffice.label') expected = [ 'Copyright (c) 2003-2008 Jody Goldberg (jody@gnome.org) and others.', ] check_detection(expected, test_file) - def test_copyright_libgtkhtml2_0_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libgtkhtml2_0_copyright-libgtkhtml_copyright.copyright') + def 
test_libgtkhtml2_0(self): + test_file = self.get_test_loc('copyrights/libgtkhtml2_0-libgtkhtml.copyright') expected = [ 'Copyright 1999,2000,2001 Free Software Foundation, Inc.', ] check_detection(expected, test_file) - def test_copyright_libisc44_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libisc44_copyright-libisc_copyright.copyright') + def test_libisc44(self): + test_file = self.get_test_loc('copyrights/libisc44-libisc.copyright') expected = [ 'Copyright (c) 1996-2001 Internet Software Consortium.', ] check_detection(expected, test_file) - def test_copyright_libisccfg30_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libisccfg30_copyright-libisccfg_copyright.copyright') + def test_libisccfg30(self): + test_file = self.get_test_loc('copyrights/libisccfg30-libisccfg.copyright') expected = [ 'Copyright (c) 1996-2001 Internet Software Consortium', ] check_detection(expected, test_file) - def test_copyright_libisccfg40_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libisccfg40_copyright-libisccfg_copyright.copyright') + def test_libisccfg40(self): + test_file = self.get_test_loc('copyrights/libisccfg40-libisccfg.copyright') expected = [ 'Copyright (c) 1996-2001 Internet Software Consortium', ] check_detection(expected, test_file) - def test_copyright_libjpeg62_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libjpeg62_copyright-libjpeg_copyright.copyright') + def test_libjpeg62(self): + test_file = self.get_test_loc('copyrights/libjpeg62-libjpeg.copyright') expected = [ 'copyright (c) 1991-1998, Thomas G. 
Lane', 'copyright by the Free Software Foundation', ] check_detection(expected, test_file) - def test_copyright_libkeyutils1_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_libkeyutils1_copyright_label-libkeyutils_copyright_label.label') + def test_libkeyutils1(self): + test_file = self.get_test_loc('copyrights/libkeyutils1-libkeyutils.label') expected = [ 'Copyright (c) 2005 Red Hat', 'Copyright (c) 2005 Red Hat', @@ -1597,15 +1596,15 @@ def test_copyright_libkeyutils1_copyright_label(self): ] check_detection(expected, test_file) - def test_copyright_liblocale_gettext_perl_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_liblocale_gettext_perl_copyright_label-liblocale_get_perl_copyright_label.label') + def test_liblocale_gettext_perl(self): + test_file = self.get_test_loc('copyrights/liblocale_gettext_perl-liblocale_get_perl.label') expected = [ 'Copyright 1996..2005 by Phillip Vandry ', ] check_detection(expected, test_file) - def test_copyright_libopenraw1_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_libopenraw1_copyright_label-libopenraw_copyright_label.label') + def test_libopenraw1(self): + test_file = self.get_test_loc('copyrights/libopenraw1-libopenraw.label') expected = [ 'Copyright (c) 2007, David Paleino ', 'Copyright (c) 2005-2009, Hubert Figuiere ', @@ -1620,16 +1619,16 @@ def test_copyright_libopenraw1_copyright_label(self): ] check_detection(expected, test_file) - def test_copyright_libopenthreads12_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libopenthreads12_copyright-libopenthreads_copyright.copyright') + def test_libopenthreads12(self): + test_file = self.get_test_loc('copyrights/libopenthreads12-libopenthreads.copyright') expected = [ 'Copyright (c) 2002 Robert Osfield', 'Copyright (c) 1998 Julian Smart , Robert Roebling', ] check_detection(expected, test_file) - def test_copyright_libpam_ck_connector_copyright(self): - test_file = 
self.get_test_loc('copyrights/copyright_libpam_ck_connector_copyright-libpam_ck_connector_copyright.copyright') + def test_libpam_ck_connector(self): + test_file = self.get_test_loc('copyrights/libpam_ck_connector-libpam_ck_connector.copyright') expected = [ 'Copyright (c) 2006 William Jon McCann ', 'Copyright (c) 2007 David Zeuthen ', @@ -1638,23 +1637,23 @@ def test_copyright_libpam_ck_connector_copyright(self): ] check_detection(expected, test_file) - def test_copyright_libpoppler3_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libpoppler3_copyright-libpoppler_copyright.copyright') + def test_libpoppler3(self): + test_file = self.get_test_loc('copyrights/libpoppler3-libpoppler.copyright') expected = [ 'Copyright (c) 1996-2003 Glyph & Cog, LLC', ] check_detection(expected, test_file) - def test_copyright_libqt4_scripttools_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libqt4_scripttools_copyright-libqt_scripttools_copyright.copyright') + def test_libqt4_scripttools(self): + test_file = self.get_test_loc('copyrights/libqt4_scripttools-libqt_scripttools.copyright') expected = [ '(c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies)', '(c) 1994-2008 Trolltech ASA', ] check_detection(expected, test_file) - def test_copyright_libqtscript4_gui_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libqtscript4_gui_copyright-libqtscript_gui_copyright.copyright') + def test_libqtscript4_gui(self): + test_file = self.get_test_loc('copyrights/libqtscript4_gui-libqtscript_gui.copyright') expected = [ 'Copyright (c) 2009 Modestas Vainius ', 'Copyright (c) Trolltech ASA', @@ -1663,23 +1662,23 @@ def test_copyright_libqtscript4_gui_copyright(self): ] check_detection(expected, test_file) - def test_copyright_libsocks4_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libsocks4_copyright-libsocks_copyright.copyright') + def test_libsocks4(self): + test_file = 
self.get_test_loc('copyrights/libsocks4-libsocks.copyright') expected = [ 'Copyright (c) 1989 Regents of the University of California.', 'Portions Copyright (c) 1993, 1994, 1995 by NEC Systems Laboratory', ] check_detection(expected, test_file) - def test_copyright_libsox_fmt_alsa_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libsox_fmt_alsa_copyright-libsox_fmt_alsa_copyright.copyright') + def test_libsox_fmt_alsa(self): + test_file = self.get_test_loc('copyrights/libsox_fmt_alsa-libsox_fmt_alsa.copyright') expected = [ 'Copyright 1991 Lance Norskog And Sundry Contributors', ] check_detection(expected, test_file) - def test_copyright_libspeex1_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libspeex1_copyright-libspeex_copyright.copyright') + def test_libspeex1(self): + test_file = self.get_test_loc('copyrights/libspeex1-libspeex.copyright') expected = [ 'Copyright 2002-2007 Xiph.org Foundation', 'Copyright 2002-2007 Jean-Marc Valin', @@ -1691,8 +1690,8 @@ def test_copyright_libspeex1_copyright(self): ] check_detection(expected, test_file) - def test_copyright_libstlport4_6ldbl_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_libstlport4_6ldbl_copyright_label-libstlport_ldbl_copyright_label.label') + def test_libstlport4_6ldbl(self): + test_file = self.get_test_loc('copyrights/libstlport4_6ldbl-libstlport_ldbl.label') expected = [ 'Copyright (c) 1994 Hewlett-Packard Company', 'Copyright (c) 1996-1999 Silicon Graphics Computer Systems, Inc.', @@ -1701,8 +1700,8 @@ def test_copyright_libstlport4_6ldbl_copyright_label(self): ] check_detection(expected, test_file) - def test_copyright_libtdb1_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libtdb1_copyright-libtdb_copyright.copyright') + def test_libtdb1(self): + test_file = self.get_test_loc('copyrights/libtdb1-libtdb.copyright') expected = [ 'Copyright (c) Andrew Tridgell 1999-2004', 'Copyright (c) Paul Rusty Russell 
2000', @@ -1710,8 +1709,8 @@ def test_copyright_libtdb1_copyright(self): ] check_detection(expected, test_file) - def test_copyright_libuim6_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libuim6_copyright-libuim_copyright.copyright') + def test_libuim6(self): + test_file = self.get_test_loc('copyrights/libuim6-libuim.copyright') expected = [ 'Copyright (c) 2003-2007 uim Project http://uim.freedesktop.org/', 'COPYRIGHT (c) 1988-1994 BY PARADIGM ASSOCIATES INCORPORATED', @@ -1726,8 +1725,8 @@ def test_copyright_libuim6_copyright(self): ] check_detection(expected, test_file) - def test_copyright_libxext6_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libxext6_copyright-libxext_copyright.copyright') + def test_libxext6(self): + test_file = self.get_test_loc('copyrights/libxext6-libxext.copyright') expected = [ 'Copyright 1986, 1987, 1988, 1989, 1994, 1998 The Open Group', 'Copyright (c) 1996 Digital Equipment Corporation, Maynard, Massachusetts', @@ -1741,8 +1740,8 @@ def test_copyright_libxext6_copyright(self): ] check_detection(expected, test_file) - def test_copyright_libxmlrpc_c3_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_libxmlrpc_c3_copyright-libxmlrpc_c_copyright.copyright') + def test_libxmlrpc_c3(self): + test_file = self.get_test_loc('copyrights/libxmlrpc_c3-libxmlrpc_c.copyright') expected = [ 'Copyright (c) 2001 by First Peer, Inc.', 'Copyright (c) 2001 by Eric Kidd.', @@ -1752,8 +1751,8 @@ def test_copyright_libxmlrpc_c3_copyright(self): ] check_detection(expected, test_file) - def test_copyright_libxt6_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_libxt6_copyright_label-libxt_copyright_label.label') + def test_libxt6(self): + test_file = self.get_test_loc('copyrights/libxt6-libxt.label') expected = [ 'Copyright 1987, 1988 by Digital Equipment Corporation , Maynard, Massachusetts', 'Copyright 1993 by Sun Microsystems, Inc. 
Mountain View', @@ -1763,38 +1762,38 @@ def test_copyright_libxt6_copyright_label(self): check_detection(expected, test_file) @expectedFailure - def test_copyright_license__qpl_v1_0_perfect(self): - test_file = self.get_test_loc('copyrights/copyright_license_qpl_v1_0_perfect-QPL_v.0') + def test_license_qpl_v1_0_perfect(self): + test_file = self.get_test_loc('copyrights/license_qpl_v1_0_perfect-QPL_v.0') expected = [ 'Copyright (c) 1999 Trolltech AS, Norway.', ] check_detection(expected, test_file) - def test_copyright_license_text_adaptive_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_adaptive_v1_0-Adaptive v.0') + def test_adaptive_v1_0(self): + test_file = self.get_test_loc('copyrights/adaptive_v1_0-Adaptive v.0') expected = [ '(c) Any Recipient', '(c) Each Recipient', ] check_detection(expected, test_file) - def test_copyright_license_text_adobe(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_adobe-Adobe') + def test_adobe(self): + test_file = self.get_test_loc('copyrights/adobe-Adobe') expected = [ 'Copyright (c) 2006 Adobe Systems Incorporated.', ] check_detection(expected, test_file) - def test_copyright_license_text_adobeflex2sdk(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_adobeflex2sdk-Adobeflex_sdk') + def test_adobeflex2sdk(self): + test_file = self.get_test_loc('copyrights/adobeflex2sdk-Adobeflex_sdk') expected = [ '(c) Adobe AIR', '(c) Material Improvement', ] check_detection(expected, test_file) - def test_copyright_license_text_afferogplv1(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_afferogplv1-AfferoGPLv') + def test_afferogplv1(self): + test_file = self.get_test_loc('copyrights/afferogplv1-AfferoGPLv') expected = [ 'Copyright (c) 2002 Affero Inc.', 'copyright (c) 1989, 1991 Free Software Foundation, Inc.', @@ -1802,253 +1801,253 @@ def test_copyright_license_text_afferogplv1(self): ] check_detection(expected, test_file) - def 
test_copyright_license_text_afferogplv2(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_afferogplv2-AfferoGPLv') + def test_afferogplv2(self): + test_file = self.get_test_loc('copyrights/afferogplv2-AfferoGPLv') expected = [ 'Copyright (c) 2007 Affero Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_afferogplv3(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_afferogplv3-AfferoGPLv') + def test_afferogplv3(self): + test_file = self.get_test_loc('copyrights/afferogplv3-AfferoGPLv') expected = [ 'Copyright (c) 2007 Free Software Foundation, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_afl_v3_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_afl_v3_0-AFL_v.0') + def test_afl_v3_0(self): + test_file = self.get_test_loc('copyrights/afl_v3_0-AFL_v.0') expected = [ 'Copyright (c) 2005 Lawrence Rosen.', ] check_detection(expected, test_file) - def test_copyright_license_text_aladdin_free_public_license(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_aladdin_free_public_license-Aladdin Free Public License') + def test_aladdin_free_public_license(self): + test_file = self.get_test_loc('copyrights/aladdin_free_public_license-Aladdin Free Public License') expected = [ 'Copyright (c) 1994, 1995, 1997, 1998, 1999, 2000 Aladdin Enterprises, Menlo Park, California, U.S.A.', ] check_detection(expected, test_file) - def test_copyright_license_text_amazondsb(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_amazondsb-AmazonDSb') + def test_amazondsb(self): + test_file = self.get_test_loc('copyrights/amazondsb-AmazonDSb') expected = [ '(c) 2006 Amazon Digital Services, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_ampasbsd(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_ampasbsd-AMPASBSD') + def test_ampasbsd(self): + test_file = 
self.get_test_loc('copyrights/ampasbsd-AMPASBSD') expected = [ 'Copyright (c) 2006 Academy of Motion Picture Arts and Sciences', ] check_detection(expected, test_file) - def test_copyright_license_text_apachev1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_apachev1_0-Apachev.0') + def test_apachev1_0(self): + test_file = self.get_test_loc('copyrights/apachev1_0-Apachev.0') expected = [ 'Copyright (c) 1995-1999 The Apache Group.', ] check_detection(expected, test_file) - def test_copyright_license_text_apachev1_1(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_apachev1_1-Apachev.1') + def test_apachev1_1(self): + test_file = self.get_test_loc('copyrights/apachev1_1-Apachev.1') expected = [ 'Copyright (c) 2000 The Apache Software Foundation.', ] check_detection(expected, test_file) - def test_copyright_license_text_apachev2_0b(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_apachev2_0b-Apachev_b.0b') + def test_apachev2_0b(self): + test_file = self.get_test_loc('copyrights/apachev2_0b-Apachev_b.0b') expected = [ 'Copyright 2000', ] check_detection(expected, test_file) - def test_copyright_license_text_apple_common_documentation_license_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_apple_common_documentation_license_v1_0-Apple Common Documentation License v.0') + def test_apple_common_documentation_license_v1_0(self): + test_file = self.get_test_loc('copyrights/apple_common_documentation_license_v1_0-Apple Common Documentation License v.0') expected = [ 'Copyright (c) 2001 Apple Computer, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_apple_public_source_license_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_apple_public_source_license_v1_0-Apple Public Source License v.0') + def test_apple_public_source_license_v1_0(self): + test_file = 
self.get_test_loc('copyrights/apple_public_source_license_v1_0-Apple Public Source License v.0') expected = [ 'Portions Copyright (c) 1999 Apple Computer, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_apple_public_source_license_v1_1(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_apple_public_source_license_v1_1-Apple Public Source License v.1') + def test_apple_public_source_license_v1_1(self): + test_file = self.get_test_loc('copyrights/apple_public_source_license_v1_1-Apple Public Source License v.1') expected = [ 'Portions Copyright (c) 1999-2000 Apple Computer, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_apple_public_source_license_v1_2(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_apple_public_source_license_v1_2-Apple Public Source License v.2') + def test_apple_public_source_license_v1_2(self): + test_file = self.get_test_loc('copyrights/apple_public_source_license_v1_2-Apple Public Source License v.2') expected = [ 'Portions Copyright (c) 1999-2003 Apple Computer, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_apslv2_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_apslv2_0-APSLv.0') + def test_apslv2_0(self): + test_file = self.get_test_loc('copyrights/apslv2_0-APSLv.0') expected = [ 'Portions Copyright (c) 1999-2007 Apple Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_artistic_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_artistic_v1_0-Artistic v.0') + def test_artistic_v1_0(self): + test_file = self.get_test_loc('copyrights/artistic_v1_0-Artistic v.0') expected = [] check_detection(expected, test_file) - def test_copyright_license_text_artistic_v1_0_short(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_artistic_v1_0_short-Artistic v_ short.0 short') + def 
test_artistic_v1_0_short(self): + test_file = self.get_test_loc('copyrights/artistic_v1_0_short-Artistic v_ short.0 short') expected = [] check_detection(expected, test_file) - def test_copyright_license_text_artistic_v2_0beta4(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_artistic_v2_0beta4-Artistic v_beta.0beta4') + def test_artistic_v2_0beta4(self): + test_file = self.get_test_loc('copyrights/artistic_v2_0beta4-Artistic v_beta.0beta4') expected = [ 'Copyright (c) 2000, Larry Wall.', ] check_detection(expected, test_file) - def test_copyright_license_text_artisticv2_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_artisticv2_0-Artisticv.0') + def test_artisticv2_0(self): + test_file = self.get_test_loc('copyrights/artisticv2_0-Artisticv.0') expected = [ 'Copyright (c) 2000-2006, The Perl Foundation.', ] check_detection(expected, test_file) - def test_copyright_license_text_attributionassurancelicense(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_attributionassurancelicense-AttributionAssuranceLicense') + def test_attributionassurancelicense(self): + test_file = self.get_test_loc('copyrights/attributionassurancelicense-AttributionAssuranceLicense') expected = [ 'Copyright (c) 2002 by AUTHOR', ] check_detection(expected, test_file) - def test_copyright_license_text_bigelow_holmes(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_bigelow_holmes-Bigelow&Holmes') + def test_bigelow_holmes(self): + test_file = self.get_test_loc('copyrights/bigelow_holmes-Bigelow&Holmes') expected = [ '(c) Copyright 1989 Sun Microsystems, Inc.', '(c) Copyright Bigelow & Holmes 1986, 1985.', ] check_detection(expected, test_file) - def test_copyright_license_text_bitstream(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_bitstream-Bi_ream') + def test_bitstream(self): + test_file = self.get_test_loc('copyrights/bitstream-Bi_ream') expected = [ 'Copyright (c) 
2003 by Bitstream, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_bsdnrl(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_bsdnrl-BSDNRL') + def test_bsdnrl(self): + test_file = self.get_test_loc('copyrights/bsdnrl-BSDNRL') expected = [ 'copyright by The Regents of the University of California.', ] check_detection(expected, test_file) - def test_copyright_license_text_cnri(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_cnri-CNRI') + def test_cnri(self): + test_file = self.get_test_loc('copyrights/cnri-CNRI') expected = [ 'Copyright (c) 1995-2000 Corporation for National Research Initiatives', ] check_detection(expected, test_file) - def test_copyright_license_text_condor_extra_For(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_condor_extra_For-Condor') + def test_condor_extra_For(self): + test_file = self.get_test_loc('copyrights/condor_extra_For-Condor') expected = [ 'Copyright 1990-2006 Condor Team, Computer Sciences Department, University of Wisconsin-Madison, Madison', ] check_detection(expected, test_file) @expectedFailure - def test_copyright_license_text_doc(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_doc-DOC') + def test_doc(self): + test_file = self.get_test_loc('copyrights/doc-DOC') expected = [ 'copyrighted by Douglas C. 
Schmidt and his research group at Washington University, University of California, Irvine, and Vanderbilt University', 'Copyright (c) 1993-2008' ] check_detection(expected, test_file) - def test_copyright_license_text_dual_mpl_gpl(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_dual_mpl_gpl-Dual MPL GPL') + def test_dual_mpl_gpl(self): + test_file = self.get_test_loc('copyrights/dual_mpl_gpl-Dual MPL GPL') expected = [ 'Copyright (c) 2002 the Initial Developer.', ] check_detection(expected, test_file) - def test_copyright_license_text_dualmpl_mit(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_dualmpl_mit-DualMPL_MIT') + def test_dualmpl_mit(self): + test_file = self.get_test_loc('copyrights/dualmpl_mit-DualMPL_MIT') expected = [ 'Copyright (c) 1998-2001, Daniel Stenberg, ', ] check_detection(expected, test_file) - def test_copyright_license_text_eclv1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_eclv1_0-ECLv.0') + def test_eclv1_0(self): + test_file = self.get_test_loc('copyrights/eclv1_0-ECLv.0') expected = [ 'Copyright (c) YeAr Name', ] check_detection(expected, test_file) - def test_copyright_license_text_ecosv2_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_ecosv2_0-eCosv.0') + def test_ecosv2_0(self): + test_file = self.get_test_loc('copyrights/ecosv2_0-eCosv.0') expected = [ 'Copyright (c) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_entessa(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_entessa-Entessa') + def test_entessa(self): + test_file = self.get_test_loc('copyrights/entessa-Entessa') expected = [ 'Copyright (c) 2003 Entessa, LLC.', ] check_detection(expected, test_file) - def test_copyright_license_text_eplv1_0b(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_eplv1_0b-EPLv_b.0b') + def test_eplv1_0b(self): + 
test_file = self.get_test_loc('copyrights/eplv1_0b-EPLv_b.0b') expected = [ 'Copyright (c) 2003, 2005 IBM Corporation and others.', ] check_detection(expected, test_file) - def test_copyright_license_text_eudatagrid(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_eudatagrid-EUDatagrid') + def test_eudatagrid(self): + test_file = self.get_test_loc('copyrights/eudatagrid-EUDatagrid') expected = [ 'Copyright (c) 2001 EU DataGrid.', ] check_detection(expected, test_file) - def test_copyright_license_text_eurosym_v2(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_eurosym_v2-Eurosym_v.v2') + def test_eurosym_v2(self): + test_file = self.get_test_loc('copyrights/eurosym_v2-Eurosym_v.v2') expected = [ 'Copyright (c) 1999-2002 Henrik Theiling', ] check_detection(expected, test_file) - def test_copyright_license_text_frameworxv1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_frameworxv1_0-Frameworxv.0') + def test_frameworxv1_0(self): + test_file = self.get_test_loc('copyrights/frameworxv1_0-Frameworxv.0') expected = [ '(c) Source Code', '(c) THE FRAMEWORX COMPANY 2003', ] check_detection(expected, test_file) - def test_copyright_license_text_freebsd(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_freebsd-FreeBSD') + def test_freebsd(self): + test_file = self.get_test_loc('copyrights/freebsd-FreeBSD') expected = [ 'Copyright 1994-2006 The FreeBSD Project.', ] check_detection(expected, test_file) - def test_copyright_license_text_freetype(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_freetype-FreeType') + def test_freetype(self): + test_file = self.get_test_loc('copyrights/freetype-FreeType') expected = [ 'Copyright 1996-2002, 2006 by David Turner, Robert Wilhelm, and Werner Lemberg', 'copyright (c) The FreeType Project (www.freetype.org).', @@ -2056,22 +2055,22 @@ def test_copyright_license_text_freetype(self): ] check_detection(expected, 
test_file) - def test_copyright_license_text_gfdlv1_2(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_gfdlv1_2-GFDLv.2') + def test_gfdlv1_2(self): + test_file = self.get_test_loc('copyrights/gfdlv1_2-GFDLv.2') expected = [ 'Copyright (c) 2000,2001,2002 Free Software Foundation, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_gfdlv1_3(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_gfdlv1_3-GFDLv.3') + def test_gfdlv1_3(self): + test_file = self.get_test_loc('copyrights/gfdlv1_3-GFDLv.3') expected = [ 'Copyright (c) 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_glide(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_glide-Glide') + def test_glide(self): + test_file = self.get_test_loc('copyrights/glide-Glide') expected = [ 'copyright notice (3dfx Interactive, Inc. 1999)', 'COPYRIGHT 3DFX INTERACTIVE, INC. 
1999', @@ -2079,82 +2078,74 @@ def test_copyright_license_text_glide(self): ] check_detection(expected, test_file) - def test_copyright_license_text_gnuplot(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_gnuplot-gnuplot') + def test_gnuplot(self): + test_file = self.get_test_loc('copyrights/gnuplot-gnuplot') expected = [ 'Copyright 1986 - 1993, 1998, 2004 Thomas Williams, Colin Kelley', ] check_detection(expected, test_file) - def test_copyright_license_text_gpl_v1(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_gpl_v1-GPL_v') + def test_gpl_v1(self): + test_file = self.get_test_loc('copyrights/gpl_v1-GPL_v') expected = [ 'Copyright (c) 1989 Free Software Foundation, Inc.', 'copyrighted by the Free Software Foundation', ] check_detection(expected, test_file) - def test_copyright_license_text_gpl_v2(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_gpl_v2-GPL_v') + def test_gpl_v2(self): + test_file = self.get_test_loc('copyrights/gpl_v2-GPL_v') expected = [ 'Copyright (c) 1989, 1991 Free Software Foundation, Inc.', 'copyrighted by the Free Software Foundation', ] check_detection(expected, test_file) - def test_copyright_license_text_gpl_v3(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_gpl_v3-GPL_v') + def test_gpl_v3(self): + test_file = self.get_test_loc('copyrights/gpl_v3-GPL_v') expected = [ 'Copyright (c) 2007 Free Software Foundation, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_gsoap(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_gsoap-gSOAP') - expected = [ - 'Copyright (c) 2001-2004 Robert A. van Engelen, Genivia inc.', - 'Copyright (c) 2001-2004 Robert A. 
van Engelen, Genivia inc.', - ] - check_detection(expected, test_file) - - def test_copyright_license_text_helix(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_helix-Helix') + def test_helix(self): + test_file = self.get_test_loc('copyrights/helix-Helix') expected = [ 'Copyright (c) 1995-2002 RealNetworks, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_hewlett_packard(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_hewlett_packard-Hewlett_Packard') + def test_hewlett_packard(self): + test_file = self.get_test_loc('copyrights/hewlett_packard-Hewlett_Packard') expected = [ '(c) HEWLETT-PACKARD COMPANY, 2004.', ] check_detection(expected, test_file) - def test_copyright_license_text_ibmpl_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_ibmpl_v1_0-IBMPL_v.0') + def test_ibmpl_v1_0(self): + test_file = self.get_test_loc('copyrights/ibmpl_v1_0-IBMPL_v.0') expected = [ 'Copyright (c) 1996, 1999 International Business Machines Corporation and others.', ] check_detection(expected, test_file) - def test_copyright_license_text_ietf(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_ietf-IETF') + def test_ietf(self): + test_file = self.get_test_loc('copyrights/ietf-IETF') expected = [ 'Copyright (c) The Internet Society (2003).', ] check_detection(expected, test_file) - def test_copyright_license_text_ijg(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_ijg-IJG') + def test_ijg(self): + test_file = self.get_test_loc('copyrights/ijg-IJG') expected = [ 'copyright (c) 1991-1998, Thomas G. 
Lane.', 'copyright by the Free Software Foundation', ] check_detection(expected, test_file) - def test_copyright_license_text_imatix(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_imatix-iMatix') + def test_imatix(self): + test_file = self.get_test_loc('copyrights/imatix-iMatix') expected = [ 'Copyright 1991-2000 iMatix Corporation.', 'Copyright 1991-2000 iMatix Corporation', @@ -2164,311 +2155,304 @@ def test_copyright_license_text_imatix(self): ] check_detection(expected, test_file) - def test_copyright_license_text_imlib2(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_imlib2-Imlib') + def test_imlib2(self): + test_file = self.get_test_loc('copyrights/imlib2-Imlib') expected = [ 'Copyright (c) 2000 Carsten Haitzler', ] check_detection(expected, test_file) - def test_copyright_license_text_intel(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_intel-Intel') + def test_intel(self): + test_file = self.get_test_loc('copyrights/intel-Intel') expected = [ 'Copyright (c) 2006, Intel Corporation.', ] check_detection(expected, test_file) - def test_copyright_license_text_jabber(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_jabber-Jabber') + def test_jabber(self): + test_file = self.get_test_loc('copyrights/jabber-Jabber') expected = [ 'Copyright (c) 1999-2000 Jabber.com, Inc.', 'Portions Copyright (c) 1998-1999 Jeremie Miller.', ] check_detection(expected, test_file) - def test_copyright_license_text_jpython(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_jpython-JPython') + def test_jpython(self): + test_file = self.get_test_loc('copyrights/jpython-JPython') expected = [ 'Copyright 1996-1999 Corporation for National Research Initiatives', ] check_detection(expected, test_file) - def test_copyright_license_text_larryrosen(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_larryrosen-LarryRosen') + def 
test_larryrosen(self): + test_file = self.get_test_loc('copyrights/larryrosen-LarryRosen') expected = [ 'Copyright (c) 2002 Lawrence E. Rosen.', ] check_detection(expected, test_file) - def test_copyright_license_text_latex_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_latex_v1_0-LaTeX_v.0') + def test_latex_v1_0(self): + test_file = self.get_test_loc('copyrights/latex_v1_0-LaTeX_v.0') expected = [ 'Copyright 1999 LaTeX3 Project', 'Copyright 2001 M. Y. Name', ] check_detection(expected, test_file) - def test_copyright_license_text_latex_v1_1(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_latex_v1_1-LaTeX_v.1') + def test_latex_v1_1(self): + test_file = self.get_test_loc('copyrights/latex_v1_1-LaTeX_v.1') expected = [ 'Copyright 1999 LaTeX3 Project', 'Copyright 2001 M. Y. Name', ] check_detection(expected, test_file) - def test_copyright_license_text_latex_v1_2(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_latex_v1_2-LaTeX_v.2') + def test_latex_v1_2(self): + test_file = self.get_test_loc('copyrights/latex_v1_2-LaTeX_v.2') expected = [ 'Copyright 1999 LaTeX3 Project', 'Copyright 2001 M. Y. Name', ] check_detection(expected, test_file) - def test_copyright_license_text_latex_v1_3a(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_latex_v1_3a-LaTeX_v_a.3a') + def test_latex_v1_3a(self): + test_file = self.get_test_loc('copyrights/latex_v1_3a-LaTeX_v_a.3a') expected = [ 'Copyright 1999 2002-04 LaTeX3 Project', 'Copyright 2003 M. Y. 
Name', ] check_detection(expected, test_file) - def test_copyright_license_text_latex_v1_3a_ref(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_latex_v1_3a_ref-LaTeX_v_a_ref.3a_ref') + def test_latex_v1_3a_ref(self): + test_file = self.get_test_loc('copyrights/latex_v1_3a_ref-LaTeX_v_a_ref.3a_ref') expected = [ 'Copyright 2003 Name', ] check_detection(expected, test_file) - def test_copyright_license_text_latex_v1_3c(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_latex_v1_3c-LaTeX_v_c.3c') + def test_latex_v1_3c(self): + test_file = self.get_test_loc('copyrights/latex_v1_3c-LaTeX_v_c.3c') expected = [ 'Copyright 1999 2002-2008 LaTeX3 Project', 'Copyright 2005 M. Y. Name', ] check_detection(expected, test_file) - def test_copyright_license_text_lgpl_v2_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_lgpl_v2_0-LGPL_v.0') + def test_lgpl_v2_0(self): + test_file = self.get_test_loc('copyrights/lgpl_v2_0-LGPL_v.0') expected = [ 'Copyright (c) 1991 Free Software Foundation, Inc.', 'copyrighted by the Free Software Foundation', ] check_detection(expected, test_file) - def test_copyright_license_text_lgpl_v2_1(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_lgpl_v2_1-LGPL_v.1') + def test_lgpl_v2_1(self): + test_file = self.get_test_loc('copyrights/lgpl_v2_1-LGPL_v.1') expected = [ 'Copyright (c) 1991, 1999 Free Software Foundation, Inc.', 'copyrighted by the Free Software Foundation', ] check_detection(expected, test_file) - def test_copyright_license_text_lgpl_v3(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_lgpl_v3-LGPL_v') + def test_lgpl_v3(self): + test_file = self.get_test_loc('copyrights/lgpl_v3-LGPL_v') expected = [ 'Copyright (c) 2007 Free Software Foundation, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_lgpl_wxwindows_library_licence_v3_0_variant(self): - test_file = 
self.get_test_loc('copyrights/copyright_license_text_lgpl_wxwindows_library_licence_v3_0_variant-LGPL wxWindows Library Licence v_ variant.0 variant') + def test_lgpl_wxwindows_library_licence_v3_0_variant(self): + test_file = self.get_test_loc('copyrights/wxWindows Library .0 variant') expected = [ 'Copyright (c) 1998 Julian Smart, Robert Roebling', ] check_detection(expected, test_file) - def test_copyright_license_text_logica_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_logica_v1_0-Logica_v.0') + def test_logica_v1_0(self): + test_file = self.get_test_loc('copyrights/logica_v1_0-Logica_v.0') expected = [ 'Copyright (c) 1996-2001 Logica Mobile Networks Limited', 'Copyright (c) 1996-2001 Logica Mobile Networks Limited', ] check_detection(expected, test_file) - def test_copyright_license_text_luxi_fonts(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_luxi_fonts-Luxi_fonts') + def test_luxi_fonts(self): + test_file = self.get_test_loc('copyrights/luxi_fonts-Luxi_fonts') expected = [ 'copyright (c) 2001 by Bigelow & Holmes Inc.', 'copyright (c) 2001 by URW++ GmbH.', ] check_detection(expected, test_file) - def test_copyright_license_text_maia(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_maia-Maia') + def test_maia(self): + test_file = self.get_test_loc('copyrights/maia-Maia') expected = [ 'Copyright 2004 by Robert LeBlanc', ] check_detection(expected, test_file) - def test_copyright_license_text_mit_adobeglyph(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_mit_adobeglyph-MIT_AdobeGlyph') + def test_mit_adobeglyph(self): + test_file = self.get_test_loc('copyrights/mit_adobeglyph-MIT_AdobeGlyph') expected = [ 'Copyright (c) 1997,1998,2002,2007 Adobe Systems Incorporated', ] check_detection(expected, test_file) - def test_copyright_license_text_mit_cmu(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_mit_cmu-MIT_CMU') + def 
test_mit_cmu(self): + test_file = self.get_test_loc('copyrights/mit_cmu-MIT_CMU') expected = [ 'Copyright 1989, 1991, 1992 by Carnegie Mellon University', 'Copyright 1996, 1998-2000 The Regents of the University of California', ] check_detection(expected, test_file) - def test_copyright_license_text_mit_danse(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_mit_danse-MIT_danse') - expected = [ - 'Copyright (c) 2009 California Institute of Technology.', - ] - check_detection(expected, test_file) - - def test_copyright_license_text_mit_enna(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_mit_enna-MIT_enna') + def test_mit_enna(self): + test_file = self.get_test_loc('copyrights/mit_enna-MIT_enna') expected = [ 'Copyright (c) 2000 Carsten Haitzler', ] check_detection(expected, test_file) - def test_copyright_license_text_mit_hylafax(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_mit_hylafax-MIT_hylafax') + def test_mit_hylafax(self): + test_file = self.get_test_loc('copyrights/mit_hylafax-MIT_hylafax') expected = [ 'Copyright (c) 1990-1996 Sam Leffler', 'Copyright (c) 1991-1996 Silicon Graphics, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_mit_icu(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_mit_icu-MIT_ICU') + def test_mit_icu(self): + test_file = self.get_test_loc('copyrights/mit_icu-MIT_ICU') expected = [ 'Copyright (c) 1995-2006 International Business Machines Corporation and others', ] check_detection(expected, test_file) - def test_copyright_license_text_mit_lucent(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_mit_lucent-MIT_Lucent') + def test_mit_lucent(self): + test_file = self.get_test_loc('copyrights/mit_lucent-MIT_Lucent') expected = [ 'Copyright (c) 1989-1998 by Lucent Technologies', ] check_detection(expected, test_file) - def test_copyright_license_text_mit_mlton(self): - test_file = 
self.get_test_loc('copyrights/copyright_license_text_mit_mlton-MIT_MLton') + def test_mit_mlton(self): + test_file = self.get_test_loc('copyrights/mit_mlton-MIT_MLton') expected = [ 'Copyright (c) 1999-2006 Henry Cejtin, Matthew Fluet, Suresh Jagannathan, and Stephen Weeks.', 'Copyright (c) 1997-2000 by the NEC Research', ] check_detection(expected, test_file) - def test_copyright_license_text_mit_oldstyle_disclaimer4(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_mit_oldstyle_disclaimer4-MIT_OldStyle_disclaimer') + def test_mit_oldstyle_disclaimer4(self): + test_file = self.get_test_loc('copyrights/mit_oldstyle_disclaimer4-MIT_OldStyle_disclaimer') expected = [ 'Copyright (c) 2001, 2002, 2003, 2004, 2005 by The Regents of the University of California.', ] check_detection(expected, test_file) - def test_copyright_license_text_mit_unicode(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_mit_unicode-MIT_unicode') + def test_mit_unicode(self): + test_file = self.get_test_loc('copyrights/mit_unicode-MIT_unicode') expected = [ 'Copyright (c) 1991-2005 Unicode, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_mit_wordnet(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_mit_wordnet-MIT_WordNet') + def test_mit_wordnet(self): + test_file = self.get_test_loc('copyrights/mit_wordnet-MIT_WordNet') expected = [ 'Copyright 2006 by Princeton University.', ] check_detection(expected, test_file) - def test_copyright_license_text_mitre(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_mitre-MITRE') + def test_mitre(self): + test_file = self.get_test_loc('copyrights/mitre-MITRE') expected = [ 'Copyright (c) 1994-1999. 
The MITRE Corporation', ] check_detection(expected, test_file) - def test_copyright_license_text_ms_pl(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_ms_pl-Ms_PL') + def test_ms_pl(self): + test_file = self.get_test_loc('copyrights/ms_pl-Ms_PL') expected = [] check_detection(expected, test_file) - def test_copyright_license_text_ms_rl(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_ms_rl-Ms_RL') + def test_ms_rl(self): + test_file = self.get_test_loc('copyrights/ms_rl-Ms_RL') expected = [] check_detection(expected, test_file) - def test_copyright_license_text_ms_rsl(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_ms_rsl-Ms_RSL') + def test_ms_rsl(self): + test_file = self.get_test_loc('copyrights/ms_rsl-Ms_RSL') expected = [] check_detection(expected, test_file) - def test_copyright_license_text_msntp(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_msntp-MSNTP') + def test_msntp(self): + test_file = self.get_test_loc('copyrights/msntp-MSNTP') expected = [ '(c) Copyright, University of Cambridge, 1996, 1997, 2000', '(c) Copyright University of Cambridge.', ] check_detection(expected, test_file) - def test_copyright_license_text_mysql_gplexception(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_mysql_gplexception-MySQL_gplexception') + def test_mysql_gplexception(self): + test_file = self.get_test_loc('copyrights/mysql_gplexception-MySQL_gplexception') expected = [] check_detection(expected, test_file) - def test_copyright_license_text_naumen(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_naumen-Naumen') + def test_naumen(self): + test_file = self.get_test_loc('copyrights/naumen-Naumen') expected = [ 'Copyright (c) NAUMEN (tm) and Contributors.', ] check_detection(expected, test_file) - def test_copyright_license_text_netcomponents(self): - test_file = 
self.get_test_loc('copyrights/copyright_license_text_netcomponents-NetComponents') + def test_netcomponents(self): + test_file = self.get_test_loc('copyrights/netcomponents-NetComponents') expected = [ 'Copyright (c) 1996-1999 Daniel F. Savarese.', ] check_detection(expected, test_file) - def test_copyright_license_text_nethack(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_nethack-Nethack') + def test_nethack(self): + test_file = self.get_test_loc('copyrights/nethack-Nethack') expected = [ 'Copyright (c) 1989 M. Stephenson', 'copyright 1988 Richard M. Stallman', ] check_detection(expected, test_file) - def test_copyright_license_text_nokia(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_nokia-Nokia') + def test_nokia(self): + test_file = self.get_test_loc('copyrights/nokia-Nokia') expected = [ 'Copyright (c) Nokia and others.', ] check_detection(expected, test_file) - def test_copyright_license_text_npl_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_npl_v1_0-NPL_v.0') + def test_npl_v1_0(self): + test_file = self.get_test_loc('copyrights/npl_v1_0-NPL_v.0') expected = [ 'Copyright (c) 1998 Netscape Communications Corporation.', ] check_detection(expected, test_file) - def test_copyright_license_text_nvidia_source(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_nvidia_source-Nvidia_source') + def test_nvidia_source(self): + test_file = self.get_test_loc('copyrights/nvidia_source-Nvidia_source') expected = [ 'Copyright (c) 1996-1998 NVIDIA, Corp.', 'Copyright (c) 1996-1998 NVIDIA, Corp.', ] check_detection(expected, test_file) - def test_copyright_license_text_oclc_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_oclc_v1_0-OCLC_v.0') + def test_oclc_v1_0(self): + test_file = self.get_test_loc('copyrights/oclc_v1_0-OCLC_v.0') expected = [ 'Copyright (c) 2000. 
OCLC Research.', 'Copyright (c) 2000- (insert then current year) OCLC OCLC Research and others.', ] check_detection(expected, test_file) - def test_copyright_license_text_oclc_v2_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_oclc_v2_0-OCLC_v.0') + def test_oclc_v2_0(self): + test_file = self.get_test_loc('copyrights/oclc_v2_0-OCLC_v.0') expected = [ 'Copyright (c) 2002. OCLC Research.', 'Copyright (c) 2000- (insert then current year) OCLC Online Computer Library Center, Inc. and other contributors.', @@ -2476,124 +2460,131 @@ def test_copyright_license_text_oclc_v2_0(self): ] check_detection(expected, test_file) - def test_copyright_license_text_openldap(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_openldap-OpenLDAP') + def test_openldap(self): + test_file = self.get_test_loc('copyrights/openldap-OpenLDAP') expected = [ 'Copyright 1999-2003 The OpenLDAP Foundation, Redwood City, California', ] check_detection(expected, test_file) - def test_copyright_license_text_openmotif(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_openmotif-OpenMotif') + def test_openmotif(self): + test_file = self.get_test_loc('copyrights/openmotif-OpenMotif') expected = [ 'Copyright (c) date here, The Open Group Ltd. 
and others.', ] check_detection(expected, test_file) - def test_copyright_license_text_openpbs(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_openpbs-OpenPBS') + def test_openpbs(self): + test_file = self.get_test_loc('copyrights/openpbs-OpenPBS') expected = [ 'Copyright (c) 1999-2000 Veridian Information Solutions, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_openpublicationref(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_openpublicationref-OpenPublicationref') + def test_openpublicationref(self): + test_file = self.get_test_loc('copyrights/openpublicationref-OpenPublicationref') expected = [ 'Copyright (c) 2000 by ThisOldHouse.', ] check_detection(expected, test_file) - def test_copyright_license_text_openssl(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_openssl-OpenSSL') + def test_openssl_c(self): + test_file = self.get_test_loc('copyrights/openssl-c.c') + expected = [ + 'Copyright (c) 1995-1997 Eric Young (eay@mincom.oz.au)', + ] + check_detection(expected, test_file) + + def test_openssl(self): + test_file = self.get_test_loc('copyrights/openssl-OpenSSL') expected = [ 'Copyright (c) 1998-2000 The OpenSSL Project.', ] check_detection(expected, test_file) - def test_copyright_license_text_osl_v3_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_osl_v3_0-OSL_v.0') + def test_osl_v3_0(self): + test_file = self.get_test_loc('copyrights/osl_v3_0-OSL_v.0') expected = [ 'Copyright (c) 2005 Lawrence Rosen.', ] check_detection(expected, test_file) - def test_copyright_license_text_phorum(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_phorum-Phorum') + def test_phorum(self): + test_file = self.get_test_loc('copyrights/phorum-Phorum') expected = [ 'Copyright (c) 2001 The Phorum Development Team.', ] check_detection(expected, test_file) - def test_copyright_license_text_pine(self): - test_file = 
self.get_test_loc('copyrights/copyright_license_text_pine-Pine') + def test_pine(self): + test_file = self.get_test_loc('copyrights/pine-Pine') expected = [ 'Copyright 1989-2007 by the University of Washington.', ] check_detection(expected, test_file) - def test_copyright_license_text_python_v1_6(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_python_v1_6-Python_v.6') + def test_python_v1_6(self): + test_file = self.get_test_loc('copyrights/python_v1_6-Python_v.6') expected = [ 'Copyright (c) 1995-2000 Corporation for National Research Initiatives', ] check_detection(expected, test_file) - def test_copyright_license_text_python_v1_6_1(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_python_v1_6_1-Python_v.1') + def test_python_v1_6_1(self): + test_file = self.get_test_loc('copyrights/python_v1_6_1-Python_v.1') expected = [ 'Copyright 1995-2001 Corporation for National Research Initiatives', ] check_detection(expected, test_file) - def test_copyright_license_text_python_v2(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_python_v2-Python_v') + def test_python_v2(self): + test_file = self.get_test_loc('copyrights/python_v2-Python_v') expected = [ 'Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation', 'Copyright (c) 1995-2001 Corporation for National Research Initiatives', ] check_detection(expected, test_file) - def test_copyright_license_text_qpl_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_qpl_v1_0-QPL_v.0') + def test_qpl_v1_0(self): + test_file = self.get_test_loc('copyrights/qpl_v1_0-QPL_v.0') expected = [ 'Copyright (c) 1999 Trolltech AS', ] check_detection(expected, test_file) - def test_copyright_license_text_realcsl_v2_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_realcsl_v2_0-RealCSL_v.0') + def test_realcsl_v2_0(self): + test_file = self.get_test_loc('copyrights/realcsl_v2_0-RealCSL_v.0') 
expected = [] check_detection(expected, test_file) - def test_copyright_license_text_realpsl_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_realpsl_v1_0-RealPSL_v.0') + def test_realpsl_v1_0(self): + test_file = self.get_test_loc('copyrights/realpsl_v1_0-RealPSL_v.0') expected = [ 'Copyright (c) 1995-2002 RealNetworks, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_realpsl_v1_0ref(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_realpsl_v1_0ref-RealPSL_v_ref.0ref') + def test_realpsl_v1_0ref(self): + test_file = self.get_test_loc('copyrights/realpsl_v1_0ref-RealPSL_v_ref.0ref') expected = [ 'Copyright (c) 1995-2004 RealNetworks, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_reciprocal_v1_5(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_reciprocal_v1_5-Reciprocal_v.5') + def test_reciprocal_v1_5(self): + test_file = self.get_test_loc('copyrights/reciprocal_v1_5-Reciprocal_v.5') expected = [ 'Copyright (c) 2001-2007 Technical Pursuit Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_redhateula(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_redhateula-RedHatEULA') + def test_redhateula(self): + test_file = self.get_test_loc('copyrights/redhateula-RedHatEULA') expected = [] check_detection(expected, test_file) - def test_copyright_license_text_redhatref(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_redhatref-RedHatref') + def test_redhatref(self): + test_file = self.get_test_loc('copyrights/redhatref-RedHatref') expected = [ 'Copyright (c) 2005 Red Hat, Inc.', 'Copyright (c) 1995-2005 Red Hat, Inc. 
and others.', @@ -2601,16 +2592,16 @@ def test_copyright_license_text_redhatref(self): ] check_detection(expected, test_file) - def test_copyright_license_text_ricoh_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_ricoh_v1_0-Ricoh_v.0') + def test_ricoh_v1_0(self): + test_file = self.get_test_loc('copyrights/ricoh_v1_0-Ricoh_v.0') expected = [ 'Ricoh Silicon Valley, Inc. are Copyright (c) 1995-1999.', ] check_detection(expected, test_file) @expectedFailure - def test_copyright_license_text_scilab(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_scilab-Scilab') + def test_scilab(self): + test_file = self.get_test_loc('copyrights/scilab-Scilab') expected = [ 'Scilab (c) INRIA-ENPC.', 'Scilab (c) INRIA-ENPC.', @@ -2622,79 +2613,79 @@ def test_copyright_license_text_scilab(self): ] check_detection(expected, test_file) - def test_copyright_license_text_sgi_cid_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_sgi_cid_v1_0-SGI_CID_v.0') + def test_sgi_cid_v1_0(self): + test_file = self.get_test_loc('copyrights/sgi_cid_v1_0-SGI_CID_v.0') expected = [ 'Copyright (c) 1994-1999 Silicon Graphics, Inc.', 'Copyright (c) 1994-1999 Silicon Graphics, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_sgi_glx_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_sgi_glx_v1_0-SGI_GLX_v.0') + def test_sgi_glx_v1_0(self): + test_file = self.get_test_loc('copyrights/sgi_glx_v1_0-SGI_GLX_v.0') expected = [ '(c) 1991-9 Silicon Graphics, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_sissl_v1_1refa(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_sissl_v1_1refa-SISSL_v_refa.1refa') + def test_sissl_v1_1refa(self): + test_file = self.get_test_loc('copyrights/sissl_v1_1refa-SISSL_v_refa.1refa') expected = [ 'Copyright 2000 by Sun Microsystems, Inc.', ] check_detection(expected, test_file) - def 
test_copyright_license_text_sleepycat(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_sleepycat-Sleepycat') + def test_sleepycat(self): + test_file = self.get_test_loc('copyrights/sleepycat-Sleepycat') expected = [ 'Copyright (c) 1990-1999 Sleepycat Software.', ] check_detection(expected, test_file) - def test_copyright_license_text_sybaseopenwatcom_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_sybaseopenwatcom_v1_0-SybaseOpenWatcom_v.0') + def test_sybaseopenwatcom_v1_0(self): + test_file = self.get_test_loc('copyrights/sybaseopenwatcom_v1_0-SybaseOpenWatcom_v.0') expected = [ 'Portions Copyright (c) 1983-2002 Sybase, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_uofu_rfpl(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_uofu_rfpl-UofU_RFPL') + def test_uofu_rfpl(self): + test_file = self.get_test_loc('copyrights/uofu_rfpl-UofU_RFPL') expected = [ 'Copyright (c) 2001, 1998 University of Utah.', ] check_detection(expected, test_file) - def test_copyright_license_text_vovida_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_vovida_v1_0-Vovida_v.0') + def test_vovida_v1_0(self): + test_file = self.get_test_loc('copyrights/vovida_v1_0-Vovida_v.0') expected = [ 'Copyright (c) 2000 Vovida Networks, Inc.', ] check_detection(expected, test_file) - def test_copyright_license_text_wtfpl(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_wtfpl-WTFPL') + def test_wtfpl(self): + test_file = self.get_test_loc('copyrights/wtfpl-WTFPL') expected = [ 'Copyright (c) 2004 Sam Hocevar', ] check_detection(expected, test_file) - def test_copyright_license_text_x_net(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_x_net-X_Net.Net') + def test_x_net(self): + test_file = self.get_test_loc('copyrights/x_net-X_Net.Net') expected = [ 'Copyright (c) 2000-2001 X.Net, Inc. 
Lafayette, California', ] check_detection(expected, test_file) - def test_copyright_license_text_zend(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_zend-Zend') + def test_zend(self): + test_file = self.get_test_loc('copyrights/zend-Zend') expected = [ 'Copyright (c) 1999-2002 Zend Technologies Ltd.', ] check_detection(expected, test_file) - def test_copyright_license_text_zliback(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_zliback-zLibAck') + def test_zliback(self): + test_file = self.get_test_loc('copyrights/zliback-zLibAck') expected = [ 'Portions Copyright (c) 2002-2007 Charlie Poole', 'Copyright (c) 2002-2004 James W. Newkirk, Michael C. Two, Alexei A. Vorontsov', @@ -2702,110 +2693,117 @@ def test_copyright_license_text_zliback(self): ] check_detection(expected, test_file) - def test_copyright_license_text_zope_v1_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_zope_v1_0-Zope_v.0') + def test_zope_v1_0(self): + test_file = self.get_test_loc('copyrights/zope_v1_0-Zope_v.0') expected = [ 'Copyright (c) Digital Creations.', ] check_detection(expected, test_file) - def test_copyright_license_text_zope_v2_0(self): - test_file = self.get_test_loc('copyrights/copyright_license_text_zope_v2_0-Zope_v.0') + def test_zope_v2_0(self): + test_file = self.get_test_loc('copyrights/zope_v2_0-Zope_v.0') expected = [ 'Copyright (c) Zope Corporation (tm) and Contributors.', ] check_detection(expected, test_file) - def test_copyright_linux_source_2_6_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_linux_source_2_6_copyright-linux_source__copyright.copyright') + def test_linux_source_2_6(self): + test_file = self.get_test_loc('copyrights/linux_source_2_6-linux_source.copyright') expected = [ 'copyrighted by Linus Torvalds and others.', ] check_detection(expected, test_file) - def test_copyright_loss_of_holder_c(self): - test_file = 
self.get_test_loc('copyrights/copyright_loss_of_holder_c-c.c') + def test_loss_of_holder_c(self): + test_file = self.get_test_loc('copyrights/loss_of_holder_c-c.c') expected = [ 'COPYRIGHT (c) DIONYSOS 2006 - 2009', ] check_detection(expected, test_file) - def test_copyright_matroska_demux1_c(self): - test_file = self.get_test_loc('copyrights/copyright_matroska_demux1_c-matroska_demux_c.c') + def test_matroska_demux1_c(self): + test_file = self.get_test_loc('copyrights/matroska_demux1_c-matroska_demux_c.c') expected = [ '(c) 2003 Ronald Bultje ', '(c) 2011 Debarshi Ray ', ] check_detection(expected, test_file) - def test_copyright_matroska_demux_c(self): - test_file = self.get_test_loc('copyrights/copyright_matroska_demux_c-matroska_demux_c.c') + def test_matroska_demux_c(self): + test_file = self.get_test_loc('copyrights/matroska_demux_c-matroska_demux_c.c') expected = [ '(c) 2006 Tim-Philipp Muller', '(c) 2008 Sebastian Droge ', ] check_detection(expected, test_file) - def test_copyright_matroska_demux_muller_c(self): - test_file = self.get_test_loc('copyrights/copyright_matroska_demux_muller_c-matroska_demux_c.c') + def test_matroska_demux_muller_c(self): + test_file = self.get_test_loc('copyrights/matroska_demux_muller_c-matroska_demux_c.c') expected = [ '(c) 2006 Tim-Philipp Muller', '(c) 2008 Sebastian Droge ', ] check_detection(expected, test_file) - def test_copyright_memcmp_assembly(self): - test_file = self.get_test_loc('copyrights/copyright_memcmp_assembly-9_9_memcmp_S.S') + def test_memcmp_assembly(self): + test_file = self.get_test_loc('copyrights/memcmp_assembly-9_9_memcmp_S.S') expected = [ 'Copyright (c) 2007 ARC International (UK) LTD', ] check_detection(expected, test_file) - def test_copyright_mergesort_java(self): - test_file = self.get_test_loc('copyrights/copyright_mergesort_java-MergeSort_java.java') + def test_mergesort_java(self): + test_file = self.get_test_loc('copyrights/mergesort_java-MergeSort_java.java') expected = [ 'Copyright (c) 
1998 Sun Microsystems, Inc.', ] check_detection(expected, test_file) - def test_copyright_michal_txt(self): - test_file = self.get_test_loc('copyrights/copyright_michal_txt.txt') + def test_michal_txt(self): + test_file = self.get_test_loc('copyrights/michal_txt.txt') expected = [ 'copyright 2005 Michal Migurski', ] check_detection(expected, test_file) - def test_copyright_mips1_be_elf_hal_o_uu(self): - test_file = self.get_test_loc('copyrights/copyright_mips1_be_elf_hal_o_uu-mips_be_elf_hal_o_uu.uu') + def test_mips1_be_elf_hal_o_uu(self): + test_file = self.get_test_loc('copyrights/mips1_be_elf_hal_o_uu-mips_be_elf_hal_o_uu.uu') expected = [ 'Copyright (c) 2002-2006 Sam Leffler, Errno Consulting, Atheros Communications, Inc.', ] check_detection(expected, test_file) - def test_copyright_missing_statement_file_txt(self): - test_file = self.get_test_loc('copyrights/copyright_missing_statement_file_txt-file.txt') + def test_missing_statement_file_txt(self): + test_file = self.get_test_loc('copyrights/missing_statement_file_txt-file.txt') expected = [ 'Copyright 2003-2009 The Apache Geronimo development community', 'Copyright (c) 2000-2005 The Legion Of The Bouncy Castle (http://www.bouncycastle.org)', ] check_detection(expected, test_file) - def test_copyright_mit(self): - test_file = self.get_test_loc('copyrights/copyright_mit.txt') + def test_mit(self): + test_file = self.get_test_loc('copyrights/mit.txt') expected = [ 'Copyright 2010-2011 by MitSomething', ] check_detection(expected, test_file) - def test_copyright_mit_danse(self): - test_file = self.get_test_loc('copyrights/copyright_mit_danse-MIT_Danse') + def test_mit_danse(self): + test_file = self.get_test_loc('copyrights/mit_danse') expected = [ 'Copyright (c) 2009 California Institute of Technology.', ] check_detection(expected, test_file) - def test_copyright_mixedcaps_c(self): - test_file = self.get_test_loc('copyrights/copyright_mixedcaps_c-mixedcaps_c.c') + def test_mit_danse_mojibake(self): + test_file 
= self.get_test_loc('copyrights/mit_danse-mojibake') + expected = [ + 'Copyright (c) 2009 California Institute of Technology.', + ] + check_detection(expected, test_file) + + def test_mixedcaps_c(self): + test_file = self.get_test_loc('copyrights/mixedcaps_c-mixedcaps_c.c') expected = [ 'COPYRIGHT (c) 2006 MyCompany2 MYCOP', 'copyright (c) 2006 MyCompany2 MYCOP', @@ -2828,15 +2826,15 @@ def test_copyright_mixedcaps_c(self): expected_in_results=False, results_in_expected=True) - def test_copyright_mixedcase_company_name_in_c(self): - test_file = self.get_test_loc('copyrights/copyright_mixedcase_company_name_in_c-lowercase_company_c.c') + def test_mixedcase_company_name_in_c(self): + test_file = self.get_test_loc('copyrights/mixedcase_company_name_in_c-lowercase_company_c.c') expected = [ 'Copyright (c) 2001 nexB', ] check_detection(expected, test_file) - def test_copyright_mkisofs_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_mkisofs_copyright-mkisofs_copyright.copyright') + def test_mkisofs(self): + test_file = self.get_test_loc('copyrights/mkisofs-mkisofs.copyright') expected = [ 'Copyright 1998-2003 Heiko Eissfeldt', '(c) Peter Widow', @@ -2853,37 +2851,37 @@ def test_copyright_mkisofs_copyright(self): ] check_detection(expected, test_file) - def test_copyright_moto_broad(self): - test_file = self.get_test_loc('copyrights/copyright_moto_broad-c.c') + def test_moto_broad(self): + test_file = self.get_test_loc('copyrights/moto_broad-c.c') expected = [ 'COPYRIGHT (c) 2005 MOTOROLA, BROADBAND COMMUNICATIONS SECTOR', ] check_detection(expected, test_file) - def test_copyright_motorola_c(self): - test_file = self.get_test_loc('copyrights/copyright_motorola_c-c.c') + def test_motorola_c(self): + test_file = self.get_test_loc('copyrights/motorola_c-c.c') expected = [ 'Copyright (c) 2003, 2010 Motorola, Inc.', ] check_detection(expected, test_file) - def test_copyright_motorola_mobility_c(self): - test_file = 
self.get_test_loc('copyrights/copyright_motorola_mobility_c-c.c') + def test_motorola_mobility_c(self): + test_file = self.get_test_loc('copyrights/motorola_mobility_c-c.c') expected = [ 'Copyright (c) 2009 Motorola, Inc.', 'Copyright (c) 2011 Motorola Mobility, Inc.', ] check_detection(expected, test_file) - def test_copyright_mplayer_skin_blue_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_mplayer_skin_blue_copyright-mplayer_skin_blue_copyright.copyright') + def test_mplayer_skin_blue(self): + test_file = self.get_test_loc('copyrights/mplayer_skin_blue-mplayer_skin_blue.copyright') expected = [ 'Copyright (c) 2005-06 Franciszek Wilamowski, xenomorph@irc.pl', ] check_detection(expected, test_file) - def test_copyright_muller(self): - test_file = self.get_test_loc('copyrights/copyright_muller-c.c') + def test_muller(self): + test_file = self.get_test_loc('copyrights/muller-c.c') expected = [ '(c) 2003 Ronald Bultje ', '(c) 2006 Tim-Philipp Muller', @@ -2892,8 +2890,8 @@ def test_copyright_muller(self): ] check_detection(expected, test_file) - def test_copyright_multiline(self): - test_file = self.get_test_loc('copyrights/copyright_multiline-Historical.txt') + def test_multiline(self): + test_file = self.get_test_loc('copyrights/multiline-Historical.txt') expected = [ 'COPYRIGHT (c) 1990-1994 BY GEORGE J. 
CARRETTE, CONCORD, MASSACHUSETTS.', ] @@ -2901,114 +2899,114 @@ def test_copyright_multiline(self): expected_in_results=False, results_in_expected=True) - def test_copyright_multiline_george(self): - test_file = self.get_test_loc('copyrights/copyright_multiline_george-Historical.txt') + def test_multiline_george(self): + test_file = self.get_test_loc('copyrights/multiline_george-Historical.txt') expected = [ 'COPYRIGHT (c) 1990-1994 BY GEORGE', ] check_detection(expected, test_file) - def test_copyright_mycorp_c(self): - test_file = self.get_test_loc('copyrights/copyright_mycorp_c-c.c') + def test_mycorp_c(self): + test_file = self.get_test_loc('copyrights/mycorp_c-c.c') expected = [ 'Copyright (c) 2012 MyCorp Inc.', ] check_detection(expected, test_file) - def test_copyright_name_before_copyright_c(self): - test_file = self.get_test_loc('copyrights/copyright_name_before_copyright_c-c.c') + def test_name_before_c(self): + test_file = self.get_test_loc('copyrights/name_before_c-c.c') expected = [ 'Russ Dill 2001-2003', 'Vladimir Oleynik (c) 2003' ] check_detection(expected, test_file) - def test_copyright_name_sign_year(self): - test_file = self.get_test_loc('copyrights/copyright_name_sign_year_correct-c.c') + def test_name_sign_year(self): + test_file = self.get_test_loc('copyrights/name_sign_year_correct-c.c') expected = [ 'Copyright (c) 2008 Daisy Ltd. 
http://www.daisy.com', 'Daisy (c) 1997 - 2008', ] check_detection(expected, test_file) - def test_copyright_naumen_txt(self): - test_file = self.get_test_loc('copyrights/copyright_naumen_txt.txt') + def test_naumen_txt(self): + test_file = self.get_test_loc('copyrights/naumen_txt.txt') expected = [ 'Copyright (c) NAUMEN (tm) and Contributors.', ] check_detection(expected, test_file) - def test_copyright_ncurses_bin_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_ncurses_bin_copyright-ncurses_bin_copyright.copyright') + def test_ncurses_bin(self): + test_file = self.get_test_loc('copyrights/ncurses_bin-ncurses_bin.copyright') expected = [ 'Copyright (c) 1998 Free Software Foundation, Inc.', ] check_detection(expected, test_file) - def test_copyright_nederlof(self): - test_file = self.get_test_loc('copyrights/copyright_nederlof.txt') + def test_nederlof(self): + test_file = self.get_test_loc('copyrights/nederlof.txt') expected = [ '(c) 2005 - Peter Nederlof', ] check_detection(expected, test_file) - def test_copyright_trailing_copyleft(self): - test_file = self.get_test_loc('copyrights/copyright_trailing_copyleft.txt') + def test_trailing_copyleft(self): + test_file = self.get_test_loc('copyrights/trailing_copyleft.txt') expected = [ 'Copyright (c) 1992 Ronald S. 
Karr', ] check_detection(expected, test_file) - def test_copyright_no_copyright_in_c(self): - test_file = self.get_test_loc('copyrights/copyright_no_copyright_in_c-c.c') + def test_no_c(self): + test_file = self.get_test_loc('copyrights/no_c-c.c') expected = [] check_detection(expected, test_file) - def test_copyright_no_copyright_in_class_file_2(self): - test_file = self.get_test_loc('copyrights/copyright_no_copyright_in_class_file_2-PersistentElementHolder_class.class') + def test_no_class_file_2(self): + test_file = self.get_test_loc('copyrights/no_class_file_2-PersistentElementHolder_class.class') expected = [] check_detection(expected, test_file) - def test_copyright_no_copyright_in_class_file_3(self): - test_file = self.get_test_loc('copyrights/copyright_no_copyright_in_class_file_3-PersistentIndexedElementHolder_class.class') + def test_no_class_file_3(self): + test_file = self.get_test_loc('copyrights/no_class_file_3-PersistentIndexedElementHolder_class.class') expected = [] check_detection(expected, test_file) - def test_copyright_no_copyright_in_class_file_4(self): - test_file = self.get_test_loc('copyrights/copyright_no_copyright_in_class_file_4-PersistentListElementHolder_class.class') + def test_no_class_file_4(self): + test_file = self.get_test_loc('copyrights/no_class_file_4-PersistentListElementHolder_class.class') expected = [] check_detection(expected, test_file) - def test_copyright_no_holder_java(self): - test_file = self.get_test_loc('copyrights/copyright_no_holder_java-java.java') + def test_no_holder_java(self): + test_file = self.get_test_loc('copyrights/no_holder_java-java.java') expected = [ 'Copyright (c) 2005', ] check_detection(expected, test_file) - def test_copyright_nokia_cpp(self): - test_file = self.get_test_loc('copyrights/copyright_nokia_cpp-cpp.cpp') + def test_nokia_cpp(self): + test_file = self.get_test_loc('copyrights/nokia_cpp-cpp.cpp') expected = [ 'Copyright (c) 2002, Nokia Mobile Phones.', ] check_detection(expected, 
test_file) - def test_copyright_north_c(self): - test_file = self.get_test_loc('copyrights/copyright_north_c-99_c.c') + def test_north_c(self): + test_file = self.get_test_loc('copyrights/north_c-99_c.c') expected = [ 'Copyright (c) 2010 42North Inc.', ] check_detection(expected, test_file) - def test_copyright_notice2(self): - test_file = self.get_test_loc('copyrights/copyright_notice2-9_NOTICE') + def test_notice2(self): + test_file = self.get_test_loc('copyrights/notice2-9_NOTICE') expected = [ 'Copyright 2003-2009 The Apache Geronimo development community', ] check_detection(expected, test_file) - def test_copyright_notice2_txt(self): - test_file = self.get_test_loc('copyrights/copyright_notice2_txt-NOTICE.txt') + def test_notice2_txt(self): + test_file = self.get_test_loc('copyrights/notice2_txt-NOTICE.txt') expected = [ 'Copyright (c) 2004, Richard S. Hall', 'Copyright (c) 2002,2003, Stefan Haustein, Oberhausen', @@ -3017,15 +3015,15 @@ def test_copyright_notice2_txt(self): ] check_detection(expected, test_file) - def test_copyright_notice_name_before_statement(self): - test_file = self.get_test_loc('copyrights/copyright_notice_name_before_statement-NOTICE') + def test_notice_name_before_statement(self): + test_file = self.get_test_loc('copyrights/notice_name_before_statement-NOTICE') expected = [ 'at iClick, Inc., software copyright (c) 1999.', ] check_detection(expected, test_file) - def test_copyright_notice_txt(self): - test_file = self.get_test_loc('copyrights/copyright_notice_txt-NOTICE.txt') + def test_notice_txt(self): + test_file = self.get_test_loc('copyrights/notice_txt-NOTICE.txt') expected = [ 'Copyright 2003-2010 The Knopflerfish Project http://www.knopflerfish.org', 'Copyright (c) OSGi Alliance (2000, 2009).', @@ -3042,15 +3040,15 @@ def test_copyright_notice_txt(self): ] check_detection(expected, test_file) - def test_copyright_o_brien_style_name(self): - test_file = self.get_test_loc('copyrights/copyright_o_brien_style_name.txt') + def 
test_o_brien_style_name(self): + test_file = self.get_test_loc('copyrights/o_brien_style_name.txt') expected = [ "Copyright (c) 2001-2003, Patrick K. O'Brien", ] check_detection(expected, test_file) - def test_copyright_oberhummer_c_code(self): - test_file = self.get_test_loc('copyrights/copyright_oberhummer_c_code-c.c') + def test_oberhummer_c_code(self): + test_file = self.get_test_loc('copyrights/oberhummer_c_code-c.c') expected = [ 'Copyright (c) 2005 Markus Franz Xaver Johannes Oberhumer', 'Copyright (c) 2004 Markus Franz Xaver Johannes Oberhumer', @@ -3065,8 +3063,8 @@ def test_copyright_oberhummer_c_code(self): ] check_detection(expected, test_file) - def test_copyright_oberhummer_text(self): - test_file = self.get_test_loc('copyrights/copyright_oberhummer_text.txt') + def test_oberhummer_text(self): + test_file = self.get_test_loc('copyrights/oberhummer_text.txt') expected = [ 'Copyright (c) 2005 Markus Franz Xaver Johannes Oberhumer', 'Copyright (c) 2004 Markus Franz Xaver Johannes Oberhumer', @@ -3081,23 +3079,23 @@ def test_copyright_oberhummer_text(self): ] check_detection(expected, test_file) - def test_copyright_objectivec(self): - test_file = self.get_test_loc('copyrights/copyright_objectivec-objectiveC_m.m') + def test_objectivec(self): + test_file = self.get_test_loc('copyrights/objectivec-objectiveC_m.m') expected = [ 'Copyright (c) 2009 ABC', ] check_detection(expected, test_file) - def test_copyright_openhackware_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_openhackware_copyright_label-openhackware_copyright_label.label') + def test_openhackware(self): + test_file = self.get_test_loc('copyrights/openhackware-openhackware.label') expected = [ 'Copyright (c) 2004-2005 Jocelyn Mayer ', 'Copyright (c) 2004-2005 Fabrice Bellard', ] check_detection(expected, test_file) - def test_copyright_openoffice_org_report_builder_bin_copyright(self): - test_file = 
self.get_test_loc('copyrights/openoffice_org_report_builder_bin_copyright.copyright') + def test_openoffice_org_report_builder_bin(self): + test_file = self.get_test_loc('copyrights/openoffice_org_report_builder_bin.copyright') expected = [ 'Copyright (c) 2002-2009 Software in the Public Interest, Inc.', 'Copyright (c) 2002-2009 ooo-build/Go-OO Team', @@ -3251,23 +3249,16 @@ def test_copyright_openoffice_org_report_builder_bin_copyright(self): ] check_detection(expected, test_file) - def test_copyright_openoffice_org_report_builder_bin_copyright2(self): - test_file = self.get_test_loc('copyrights/copyright_openoffice_org_report_builder_bin_copyright2-openoffice_org_report_builder_bin_copyright.copyright2') + def test_openoffice_org_report_builder_bin_2(self): + test_file = self.get_test_loc('copyrights/openoffice_org_report_builder_bin_2-openoffice_org_report_builder_bin.copyright2') expected = [ 'Copyright (c) 1990, 1993, 1994, 1995 The Regents of the University of California', 'Copyright (c) 1995, 1996 The President and Fellows of Harvard University', ] check_detection(expected, test_file) - def test_copyright_openssl(self): - test_file = self.get_test_loc('copyrights/copyright_openssl-c.c') - expected = [ - 'Copyright (c) 1995-1997 Eric Young (eay@mincom.oz.au)', - ] - check_detection(expected, test_file) - - def test_copyright_partial_detection(self): - test_file = self.get_test_loc('copyrights/copyright_partial_detection.txt') + def test_partial_detection(self): + test_file = self.get_test_loc('copyrights/partial_detection.txt') expected = [ 'Copyright 1991 by the Massachusetts Institute of Technology', 'Copyright (c) 2001 AT&T', @@ -3287,8 +3278,8 @@ def test_copyright_partial_detection(self): ] check_detection(expected, test_file) - def test_copyright_partial_detection_mit(self): - test_file = self.get_test_loc('copyrights/copyright_partial_detection_mit.txt') + def test_partial_detection_mit(self): + test_file = 
self.get_test_loc('copyrights/partial_detection_mit.txt') expected = [ 'Copyright 1991 by the Massachusetts Institute of Technology', 'Copyright (c) 2001 AT&T', @@ -3308,8 +3299,8 @@ def test_copyright_partial_detection_mit(self): ] check_detection(expected, test_file) - def test_copyright_perl_base_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_perl_base_copyright-perl_base_copyright.copyright') + def test_perl_base(self): + test_file = self.get_test_loc('copyrights/perl_base-perl_base.copyright') expected = [ 'Copyright 1989-2001, Larry Wall', 'Copyright (c) 1995-2005 Jean-loup Gailly and Mark Adler', @@ -3323,39 +3314,39 @@ def test_copyright_perl_base_copyright(self): ] check_detection(expected, test_file) - def test_copyright_perl_module(self): - test_file = self.get_test_loc('copyrights/copyright_perl_module-pm.pm') + def test_perl_module(self): + test_file = self.get_test_loc('copyrights/perl_module-pm.pm') expected = [ 'Copyright (c) 1995-2000 Name Surname', ] check_detection(expected, test_file) - def test_copyright_peter_c(self): - test_file = self.get_test_loc('copyrights/copyright_peter_c-c.c') + def test_peter_c(self): + test_file = self.get_test_loc('copyrights/peter_c-c.c') expected = [ '(c) 2005 - Peter Nederlof', ] check_detection(expected, test_file) @expectedFailure - def test_copyright_piersol(self): - test_file = self.get_test_loc('copyrights/copyright_piersol-TestMatrix_D_java.java') + def test_piersol(self): + test_file = self.get_test_loc('copyrights/piersol-TestMatrix_D_java.java') expected = [ 'Copyright (c) 1998 Company PIERSOL Engineering Inc.', 'Copyright (c) 1998 Company PIERSOL Engineering Inc.', ] check_detection(expected, test_file) - def test_copyright_piersol_ok(self): - test_file = self.get_test_loc('copyrights/copyright_piersol-TestMatrix_D_java.java') + def test_piersol_ok(self): + test_file = self.get_test_loc('copyrights/piersol-TestMatrix_D_java.java') expected = [ 'Copyright (c) 1998

Company PIERSOL Engineering Inc.', 'Copyright (c) 1998

Company PIERSOL Engineering Inc.', ] check_detection(expected, test_file) - def test_copyright_postgresql_8_3_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_postgresql_8_3_copyright_label-postgresql__copyright_label.label') + def test_postgresql_8_3(self): + test_file = self.get_test_loc('copyrights/postgresql_8_3-postgresql.label') expected = [ 'Portions Copyright (c) 1996-2003, The PostgreSQL Global Development Group', 'Portions Copyright (c) 1994, The Regents of the University of California', @@ -3364,36 +3355,36 @@ def test_copyright_postgresql_8_3_copyright_label(self): ] check_detection(expected, test_file) - def test_copyright_prof_informatics(self): - test_file = self.get_test_loc('copyrights/copyright_prof_informatics.txt') + def test_prof_informatics(self): + test_file = self.get_test_loc('copyrights/prof_informatics.txt') expected = [ 'Professional Informatics (c) 1994', ] check_detection(expected, test_file) - def test_copyright_professional_txt(self): - test_file = self.get_test_loc('copyrights/copyright_professional_txt-copyright.txt') + def test_professional_txt(self): + test_file = self.get_test_loc('copyrights/professional_txt-copyright.txt') expected = [ 'Professional Informatics (c) 1994', ] check_detection(expected, test_file) - def test_copyright_properties(self): - test_file = self.get_test_loc('copyrights/copyright_properties-properties.properties') + def test_properties(self): + test_file = self.get_test_loc('copyrights/properties-properties.properties') expected = [ '(c) 2004-2007 Restaurant.', ] check_detection(expected, test_file) - def test_copyright_psf_in_python(self): - test_file = self.get_test_loc('copyrights/copyright_psf_in_python-BitVector_py.py') + def test_psf_in_python(self): + test_file = self.get_test_loc('copyrights/psf_in_python-BitVector_py.py') expected = [ 'copyright (c) 2008 Avinash Kak. 
Python Software Foundation.', ] check_detection(expected, test_file) - def test_copyright_python_dateutil_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_python_dateutil_copyright-python_dateutil_copyright.copyright') + def test_python_dateutil(self): + test_file = self.get_test_loc('copyrights/python_dateutil-python_dateutil.copyright') expected = [ 'Copyright (c) 2001, 2002 Python Software Foundation', 'Copyright (c) 1995-2001 Corporation for National Research Initiatives', @@ -3401,15 +3392,15 @@ def test_copyright_python_dateutil_copyright(self): ] check_detection(expected, test_file) - def test_copyright_python_psyco_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_python_psyco_copyright-python_psyco_copyright.copyright') + def test_python_psyco(self): + test_file = self.get_test_loc('copyrights/python_psyco-python_psyco.copyright') expected = [ 'Copyright (c) 2001-2003 Armin Rigo', ] check_detection(expected, test_file) - def test_copyright_python_reportbug_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_python_reportbug_copyright_label-python_report_copyright_label.label') + def test_python_reportbug(self): + test_file = self.get_test_loc('copyrights/python_reportbug-python_report.label') expected = [ 'Copyright (c) 1999-2006 Chris Lawrence', 'Copyright (c) 2008-2009 Sandro Tosi ', @@ -3424,8 +3415,8 @@ def test_copyright_python_reportbug_copyright_label(self): ] check_detection(expected, test_file) - def test_copyright_python_software_properties_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_python_software_properties_copyright-python_software_properties_copyright.copyright') + def test_python_software_properties(self): + test_file = self.get_test_loc('copyrights/python_software_properties-python_software_properties.copyright') expected = [ 'Copyright 2004-2007 Canonical Ltd. 
2004-2005 Michiel Sikkes 2006', ] @@ -3433,24 +3424,24 @@ def test_copyright_python_software_properties_copyright(self): expected_in_results=False, results_in_expected=True) - def test_copyright_red_hat_openoffice_org_report_builder_bin_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_red_hat_openoffice_org_report_builder_bin_copyright-openoffice_org_report_builder_bin_copyright.copyright') + def test_red_hat_openoffice_org_report_builder_bin(self): + test_file = self.get_test_loc('copyrights/red_hat_openoffice_org_report_builder_bin-openoffice_org_report_builder_bin.copyright') expected = [ 'Copyright (c) 2007 Red Hat, Inc', 'Copyright (c) 2007 Red Hat, Inc.', ] check_detection(expected, test_file) - def test_copyright_regents_complex(self): - test_file = self.get_test_loc('copyrights/copyright_regents_complex-strtol_c.c') + def test_regents_complex(self): + test_file = self.get_test_loc('copyrights/regents_complex-strtol_c.c') expected = [ 'Copyright (c) 1990 The Regents of the University of California.', ] check_detection(expected, test_file) # #@expectedFailure - def test_copyright_regents_license(self): - test_file = self.get_test_loc('copyrights/copyright_regents_license-LICENSE') + def test_regents_license(self): + test_file = self.get_test_loc('copyrights/regents_license-LICENSE') expected = [ 'copyrighted by The Regents of the University of California.', 'Copyright 1979, 1980, 1983, 1986, 1988, 1989, 1991, 1992, 1993, 1994 The Regents of the University of California.', @@ -3458,48 +3449,48 @@ def test_copyright_regents_license(self): ] check_detection(expected, test_file) - def test_copyright_resig_js(self): - test_file = self.get_test_loc('copyrights/copyright_resig_js-js.js') + def test_resig_js(self): + test_file = self.get_test_loc('copyrights/resig_js-js.js') expected = [ 'Copyright (c) 2009 John Resig', ] check_detection(expected, test_file) - def test_copyright_rusty(self): - test_file = 
self.get_test_loc('copyrights/copyright_rusty.txt') + def test_rusty(self): + test_file = self.get_test_loc('copyrights/rusty.txt') expected = [ '(c) Rusty Russell, IBM 2002', ] check_detection(expected, test_file) - def test_copyright_rusty_c(self): - test_file = self.get_test_loc('copyrights/copyright_rusty_c-c.c') + def test_rusty_c(self): + test_file = self.get_test_loc('copyrights/rusty_c-c.c') expected = [ '(c) Rusty Russell, IBM 2002', ] check_detection(expected, test_file) - def test_copyright_s_fabsl_c(self): - test_file = self.get_test_loc('copyrights/copyright_s_fabsl_c-s_fabsl_c.c') + def test_s_fabsl_c(self): + test_file = self.get_test_loc('copyrights/s_fabsl_c-s_fabsl_c.c') expected = [ 'Copyright (c) 2003 Dag-Erling Coidan Smrgrav', ] check_detection(expected, test_file) - def test_copyright_sample_java(self): - test_file = self.get_test_loc('copyrights/copyright_sample_java-java.java') + def test_sample_java(self): + test_file = self.get_test_loc('copyrights/sample_java-java.java') expected = [ 'Copyright (c) 2000-2007, Sample ABC Inc.', ] check_detection(expected, test_file) - def test_copyright_sample_no_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_sample_no_copyright-c.c') + def test_sample_no(self): + test_file = self.get_test_loc('copyrights/sample_no-c.c') expected = [] check_detection(expected, test_file) - def test_copyright_seahorse_plugins(self): - test_file = self.get_test_loc('copyrights/copyright_seahorse_plugins-seahorse_plugins_copyright.copyright') + def test_seahorse_plugins(self): + test_file = self.get_test_loc('copyrights/seahorse_plugins-seahorse_plugins.copyright') expected = [ 'Copyright (c) 2004-2007 Stefan Walter', 'Copyright (c) 2004-2006 Adam Schreiber', @@ -3548,35 +3539,35 @@ def test_copyright_seahorse_plugins(self): ] check_detection(expected, test_file) - def test_copyright_simgear1_0_0_copyright(self): - test_file = 
self.get_test_loc('copyrights/copyright_simgear1_0_0_copyright-simgear__copyright.copyright') + def test_simgear1_0_0(self): + test_file = self.get_test_loc('copyrights/simgear1_0_0-simgear.copyright') expected = [ 'Copyright (c) 1999-2000 Curtis L. Olson ', 'Copyright (c) 2002-2004 Mark J. Harris', ] check_detection(expected, test_file) - def test_copyright_snippet_no_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_snippet_no_copyright') + def test_snippet_no(self): + test_file = self.get_test_loc('copyrights/snippet_no') expected = [] check_detection(expected, test_file) - def test_copyright_snmptrapd_c(self): - test_file = self.get_test_loc('copyrights/copyright_snmptrapd_c-snmptrapd_c.c') + def test_snmptrapd_c(self): + test_file = self.get_test_loc('copyrights/snmptrapd_c-snmptrapd_c.c') expected = [ 'Copyright 1989, 1991, 1992 by Carnegie Mellon University', ] check_detection(expected, test_file) - def test_copyright_some_co(self): - test_file = self.get_test_loc('copyrights/copyright_some_co-9_h.h') + def test_some_co(self): + test_file = self.get_test_loc('copyrights/some_co-9_h.h') expected = [ 'Copyright Some Company, inc.', ] check_detection(expected, test_file) - def test_copyright_somefile_cpp(self): - test_file = self.get_test_loc('copyrights/copyright_somefile_cpp-somefile_cpp.cpp') + def test_somefile_cpp(self): + test_file = self.get_test_loc('copyrights/somefile_cpp-somefile_cpp.cpp') expected = [ '(c) 2005', 'Copyright Private Company (PC) Property of Private Company', @@ -3584,67 +3575,67 @@ def test_copyright_somefile_cpp(self): ] check_detection(expected, test_file) - def test_copyright_source_auditor_projectinfo_java(self): - test_file = self.get_test_loc('copyrights/copyright_source_auditor_projectinfo_java-ProjectInfo_java.java') + def test_source_auditor_projectinfo_java(self): + test_file = self.get_test_loc('copyrights/source_auditor_projectinfo_java-ProjectInfo_java.java') expected = [ 'Copyright (c) 2009 Source 
Auditor Inc.', ] check_detection(expected, test_file) - def test_copyright_stacktrace_cpp(self): - test_file = self.get_test_loc('copyrights/copyright_stacktrace_cpp-stacktrace_cpp.cpp') + def test_stacktrace_cpp(self): + test_file = self.get_test_loc('copyrights/stacktrace_cpp-stacktrace_cpp.cpp') expected = [ 'Copyright 2003, 2004 Rickard E. Faith (faith@dict.org)', ] check_detection(expected, test_file) - def test_copyright_stmicro_in_h(self): - test_file = self.get_test_loc('copyrights/copyright_stmicro_in_h-h.h') + def test_stmicro_in_h(self): + test_file = self.get_test_loc('copyrights/stmicro_in_h-h.h') expected = [ 'COPYRIGHT (c) ST-Microelectronics 1998.', ] check_detection(expected, test_file) - def test_copyright_stmicro_in_txt(self): - test_file = self.get_test_loc('copyrights/copyright_stmicro_in_txt.txt') + def test_stmicro_in_txt(self): + test_file = self.get_test_loc('copyrights/stmicro_in_txt.txt') expected = [ 'COPYRIGHT (c) STMicroelectronics 2005.', 'COPYRIGHT (c) ST-Microelectronics 1998.', ] check_detection(expected, test_file) - def test_copyright_strchr_assembly(self): - test_file = self.get_test_loc('copyrights/copyright_strchr_assembly-9_9_strchr_S.S') + def test_strchr_assembly(self): + test_file = self.get_test_loc('copyrights/strchr_assembly-9_9_strchr_S.S') expected = [ 'Copyright (c) 2007 ARC International (UK) LTD', ] check_detection(expected, test_file) - def test_copyright_super_tech_c(self): - test_file = self.get_test_loc('copyrights/copyright_super_tech_c-c.c') + def test_super_tech_c(self): + test_file = self.get_test_loc('copyrights/super_tech_c-c.c') expected = [ 'Copyright (c) $LastChangedDate$ Super Technologies Corporation, Cedar Rapids, Iowa, U.S.A.', 'Copyright (c) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), IBM Corp.', ] check_detection(expected, test_file) - def test_copyright_tcl_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_tcl_copyright-tcl_copyright.copyright') + def 
test_tcl(self): + test_file = self.get_test_loc('copyrights/tcl-tcl.copyright') expected = [ 'copyrighted by the Regents of the University of California , Sun Microsystems, Inc. , Scriptics Corporation', # not found, rather complex 'Copyright (c) 2007 Software in the Public Interest', ] check_detection(expected, test_file) - def test_copyright_tech_sys(self): - test_file = self.get_test_loc('copyrights/copyright_tech_sys.txt') + def test_tech_sys(self): + test_file = self.get_test_loc('copyrights/tech_sys.txt') expected = [ '(c) Copyright 1985-1999 SOME TECHNOLOGY SYSTEMS', ] check_detection(expected, test_file) - def test_copyright_texinfo_tex(self): - test_file = self.get_test_loc('copyrights/copyright_texinfo_tex-texinfo_tex.tex') + def test_texinfo_tex(self): + test_file = self.get_test_loc('copyrights/texinfo_tex-texinfo_tex.tex') expected = [ 'Copyright (c) 1985, 1986, 1988, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.', ] @@ -3652,32 +3643,32 @@ def test_copyright_texinfo_tex(self): expected_in_results=False, results_in_expected=True) - def test_copyright_texlive_lang_greek_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_texlive_lang_greek_copyright-texlive_lang_greek_copyright.copyright') + def test_texlive_lang_greek(self): + test_file = self.get_test_loc('copyrights/texlive_lang_greek-texlive_lang_greek.copyright') expected = [ 'Copyright 1999 2002-2006 LaTeX3 Project', 'Copyright 2005 M. Y. Name', ] check_detection(expected, test_file) - def test_copyright_texlive_lang_spanish_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_texlive_lang_spanish_copyright-texlive_lang_spanish_copyright.copyright') + def test_texlive_lang_spanish(self): + test_file = self.get_test_loc('copyrights/texlive_lang_spanish-texlive_lang_spanish.copyright') expected = [ 'Copyright 1999 2002-2006 LaTeX3 Project', 'Copyright 2005 M. Y. 
Name', ] check_detection(expected, test_file) - def test_copyright_texlive_lang_vietnamese_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_texlive_lang_vietnamese_copyright_label-texlive_lang_vietnamese_copyright_label.label') + def test_texlive_lang_vietnamese(self): + test_file = self.get_test_loc('copyrights/texlive_lang_vietnamese-texlive_lang_vietnamese.label') expected = [ 'Copyright 1999 2002-2006 LaTeX3 Project', 'Copyright 2005 M. Y. Name', ] check_detection(expected, test_file) - def test_copyright_tfc_c(self): - test_file = self.get_test_loc('copyrights/copyright_tfc_c-c.c') + def test_tfc_c(self): + test_file = self.get_test_loc('copyrights/tfc_c-c.c') expected = [ 'Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001 Traditional Food Consortium, Inc.', 'Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 Traditional Food Consortium, Inc.', @@ -3686,23 +3677,23 @@ def test_copyright_tfc_c(self): expected_in_results=False, results_in_expected=True) - def test_copyright_thirdpartyproject_prop(self): - test_file = self.get_test_loc('copyrights/copyright_thirdpartyproject_prop-ThirdPartyProject_prop.prop') + def test_thirdpartyproject_prop(self): + test_file = self.get_test_loc('copyrights/thirdpartyproject_prop-ThirdPartyProject_prop.prop') expected = [ 'Copyright 2010 Google Inc.', ] check_detection(expected, test_file) - def test_copyright_trailing_For(self): - test_file = self.get_test_loc('copyrights/copyright_trailing_For-copyright_c.c') + def test_trailing_For(self): + test_file = self.get_test_loc('copyrights/trailing_For-c.c') expected = [ 'Copyright . 
2008 Mycom Pany, inc.', 'Copyright (c) 1995-2003 Jean-loup Gailly.', ] check_detection(expected, test_file) - def test_copyright_trailing_name(self): - test_file = self.get_test_loc('copyrights/copyright_trailing_name-copyright.txt') + def test_trailing_name(self): + test_file = self.get_test_loc('copyrights/trailing_name-copyright.txt') expected = [ 'Copyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd and Clark Cooper', ] @@ -3710,23 +3701,23 @@ def test_copyright_trailing_name(self): expected_in_results=False, results_in_expected=True) - def test_copyright_trailing_redistribution(self): - test_file = self.get_test_loc('copyrights/copyright_trailing_redistribution-bspatch_c.c') + def test_trailing_redistribution(self): + test_file = self.get_test_loc('copyrights/trailing_redistribution-bspatch_c.c') expected = [ 'Copyright (c) 2008 The Android Open Source Project', 'Copyright 2003-2005 Colin Percival', ] check_detection(expected, test_file) - def test_copyright_transcode_doc_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_transcode_doc_copyright-transcode_doc_copyright.copyright') + def test_transcode_doc(self): + test_file = self.get_test_loc('copyrights/transcode_doc-transcode_doc.copyright') expected = [ 'Copyright (c) 2001 Thomas Ostreich', ] check_detection(expected, test_file) - def test_copyright_transfig_copyright_with_parts(self): - test_file = self.get_test_loc('copyrights/copyright_transfig_copyright_with_parts-transfig_copyright.copyright') + def test_transfig_with_parts(self): + test_file = self.get_test_loc('copyrights/transfig_with_parts-transfig.copyright') expected = [ 'Copyright (c) 1985-1988 Supoj Sutantavibul', 'Copyright (c) 1991-1999 Micah Beck', @@ -3748,50 +3739,50 @@ def test_copyright_transfig_copyright_with_parts(self): ] check_detection(expected, test_file) - def test_copyright_treetablemodeladapter_java(self): - test_file = 
self.get_test_loc('copyrights/copyright_treetablemodeladapter_java-TreeTableModelAdapter_java.java') + def test_treetablemodeladapter_java(self): + test_file = self.get_test_loc('copyrights/treetablemodeladapter_java-TreeTableModelAdapter_java.java') expected = [ 'Copyright 1997, 1998 by Sun Microsystems, Inc.', ] check_detection(expected, test_file) - def test_copyright_truncated_dmv_c(self): - test_file = self.get_test_loc('copyrights/copyright_truncated_dmv_c-9_c.c') + def test_truncated_dmv_c(self): + test_file = self.get_test_loc('copyrights/truncated_dmv_c-9_c.c') expected = [ 'Copyright (c) 1995 DMV - DigiMedia Vision', ] check_detection(expected, test_file) - def test_copyright_truncated_doe(self): - test_file = self.get_test_loc('copyrights/copyright_truncated_doe-c.c') + def test_truncated_doe(self): + test_file = self.get_test_loc('copyrights/truncated_doe-c.c') expected = [ 'Copyright (c) 2008 by John Doe', ] check_detection(expected, test_file) - def test_copyright_truncated_inria(self): - test_file = self.get_test_loc('copyrights/copyright_truncated_inria.txt') + def test_truncated_inria(self): + test_file = self.get_test_loc('copyrights/truncated_inria.txt') expected = [ '(c) 1998-2000 (W3C) MIT, INRIA, Keio University', ] check_detection(expected, test_file) - def test_copyright_truncated_rusty(self): - test_file = self.get_test_loc('copyrights/copyright_truncated_rusty-c.c') + def test_truncated_rusty(self): + test_file = self.get_test_loc('copyrights/truncated_rusty-c.c') expected = [ '(c) 1999-2001 Paul Rusty Russell', ] check_detection(expected, test_file) - def test_copyright_truncated_swfobject_js(self): - test_file = self.get_test_loc('copyrights/copyright_truncated_swfobject_js-swfobject_js.js') + def test_truncated_swfobject_js(self): + test_file = self.get_test_loc('copyrights/truncated_swfobject_js-swfobject_js.js') expected = [ 'Copyright (c) 2007-2008 Geoff Stearns, Michael Williams, and Bobby van der Sluis', ] check_detection(expected, 
test_file) - def test_copyright_ttf_malayalam_fonts_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_ttf_malayalam_fonts_copyright-ttf_malayalam_fonts_copyright.copyright') + def test_ttf_malayalam_fonts(self): + test_file = self.get_test_loc('copyrights/ttf_malayalam_fonts-ttf_malayalam_fonts.copyright') expected = [ 'Copyright (c) Jeroen Hellingman , N.V Shaji ', 'Copyright (c) 2004 Kevin & Siji', @@ -3806,38 +3797,38 @@ def test_copyright_ttf_malayalam_fonts_copyright(self): ] check_detection(expected, test_file) - def test_copyright_tunnel_h(self): - test_file = self.get_test_loc('copyrights/copyright_tunnel_h-tunnel_h.h') + def test_tunnel_h(self): + test_file = self.get_test_loc('copyrights/tunnel_h-tunnel_h.h') expected = [ 'Copyright (c) 2000 Frank Strauss ', ] check_detection(expected, test_file) - def test_copyright_two_digits_years(self): - test_file = self.get_test_loc('copyrights/copyright_two_digits_years-digits_c.c') + def test_two_digits_years(self): + test_file = self.get_test_loc('copyrights/two_digits_years-digits_c.c') expected = [ 'Copyright (c) 1987,88,89,90,91,92,93,94,96,97 Free Software Foundation, Inc.', ] check_detection(expected, test_file) @expectedFailure - def test_copyright_url_in_html(self): - test_file = self.get_test_loc('copyrights/copyright_url_in_html-detail_9_html.html') + def test_url_in_html(self): + test_file = self.get_test_loc('copyrights/url_in_html-detail_9_html.html') expected = [ '(c) 2004-2009 pudn.com', ] check_detection(expected, test_file) - def test_copyright_utilities_js(self): - test_file = self.get_test_loc('copyrights/copyright_utilities_js-utilities_js.js') + def test_utilities_js(self): + test_file = self.get_test_loc('copyrights/utilities_js-utilities_js.js') expected = [ 'Copyright (c) 2009, Yahoo! 
Inc.', 'Copyright 2001 Robert Penner', ] check_detection(expected, test_file) - def test_copyright_var_route_c(self): - test_file = self.get_test_loc('copyrights/copyright_var_route_c-var_route_c.c') + def test_var_route_c(self): + test_file = self.get_test_loc('copyrights/var_route_c-var_route_c.c') expected = [ 'Copyright 1988, 1989 by Carnegie Mellon University', 'Copyright 1989 TGV, Incorporated', @@ -3846,63 +3837,63 @@ def test_copyright_var_route_c(self): ] check_detection(expected, test_file) - def test_copyright_view_layout2_xml(self): - test_file = self.get_test_loc('copyrights/copyright_view_layout2_xml-view_layout_xml.xml') + def test_view_layout2_xml(self): + test_file = self.get_test_loc('copyrights/view_layout2_xml-view_layout_xml.xml') expected = [ 'Copyright (c) 2008 Esmertec AG.', ] check_detection(expected, test_file) - def test_copyright_warning_parsing_empty_text(self): - test_file = self.get_test_loc('copyrights/copyright_warning_parsing_empty_text-controlpanel_anjuta.anjuta') + def test_warning_parsing_empty_text(self): + test_file = self.get_test_loc('copyrights/warning_parsing_empty_text-controlpanel_anjuta.anjuta') expected = [] check_detection(expected, test_file) - def test_copyright_web_app_dtd__b_sun(self): - test_file = self.get_test_loc('copyrights/copyright_web_app_dtd_b_sun-web_app__dtd.dtd') + def test_web_app_dtd_b_sun(self): + test_file = self.get_test_loc('copyrights/web_app_dtd_b_sun-web_app_dtd.dtd') expected = [ 'Copyright 2000-2007 Sun Microsystems, Inc.', ] check_detection(expected, test_file) - def test_copyright_web_app_dtd_sun_twice(self): - test_file = self.get_test_loc('copyrights/copyright_web_app_dtd_sun_twice-web_app__b_dtd.dtd') + def test_web_app_dtd_sun_twice(self): + test_file = self.get_test_loc('copyrights/web_app_dtd_sun_twice-web_app_b_dtd.dtd') expected = [ 'Copyright (c) 2000 Sun Microsystems, Inc.', 'Copyright (c) 2000 Sun Microsystems, Inc.', ] check_detection(expected, test_file) - def 
test_copyright_wide_c(self): - test_file = self.get_test_loc('copyrights/copyright_wide_c-c.c') + def test_wide_c(self): + test_file = self.get_test_loc('copyrights/wide_c-c.c') expected = [ 'Copyright (c) 1995, 1996, 1997, and 1998 WIDE Project.', ] check_detection(expected, test_file) - def test_copyright_wide_txt(self): - test_file = self.get_test_loc('copyrights/copyright_wide_txt.txt') + def test_wide_txt(self): + test_file = self.get_test_loc('copyrights/wide_txt.txt') expected = [ 'Copyright (c) 1995, 1996, 1997, and 1998 WIDE Project.', ] check_detection(expected, test_file) - def test_copyright_with_verbatim_lf(self): - test_file = self.get_test_loc('copyrights/copyright_with_verbatim_lf-verbatim_lf_c.c') + def test_with_verbatim_lf(self): + test_file = self.get_test_loc('copyrights/with_verbatim_lf-verbatim_lf_c.c') expected = [ 'Copyright 2003-2005 Colin Percival', ] check_detection(expected, test_file) - def test_copyright_xconsortium_sh(self): - test_file = self.get_test_loc('copyrights/copyright_xconsortium_sh-9_sh.sh') + def test_xconsortium_sh(self): + test_file = self.get_test_loc('copyrights/xconsortium_sh-9_sh.sh') expected = [ 'Copyright (c) 1994 X Consortium', ] check_detection(expected, test_file) - def test_copyright_xfonts_utils_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_xfonts_utils_copyright-xfonts_utils_copyright.copyright') + def test_xfonts_utils(self): + test_file = self.get_test_loc('copyrights/xfonts_utils-xfonts_utils.copyright') expected = [ 'Copyright 1991, 1993, 1998 The Open Group', 'Copyright 2005 Red Hat, Inc.', @@ -3923,8 +3914,8 @@ def test_copyright_xfonts_utils_copyright(self): ] check_detection(expected, test_file) - def test_copyright_xresprobe_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_xresprobe_copyright_label-xresprobe_copyright_label.label') + def test_xresprobe(self): + test_file = self.get_test_loc('copyrights/xresprobe-xresprobe.label') expected = [ 
'copyright (c) 2004 Canonical Software', 'Copyright (c) 2002 Terra Soft Solutions, Inc.', @@ -3935,51 +3926,51 @@ def test_copyright_xresprobe_copyright_label(self): ] check_detection(expected, test_file) - def test_copyright_xsane_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_xsane_copyright_label-xsane_copyright_label.label') + def test_xsane(self): + test_file = self.get_test_loc('copyrights/xsane-xsane.label') expected = [ 'Copyright (c) 1998-2005 Oliver Rauch', ] check_detection(expected, test_file) - def test_copyright_does_not_return_junk_in_pdf(self): + def test_does_not_return_junk_in_pdf(self): # from https://github.com/ttgurney/yocto-spdx/blob/master/doc/Yocto-SPDX_Manual_Install_Walkthrough.pdf - test_file = self.get_test_loc('copyrights/copyright_Yocto-SPDX.pdf') + test_file = self.get_test_loc('copyrights/Yocto-SPDX.pdf') expected = [ ] check_detection(expected, test_file) - def test_copyright_name_and_co(self): - test_file = self.get_test_loc('copyrights/copyright_nnp_and_co.txt') + def test_name_and_co(self): + test_file = self.get_test_loc('copyrights/nnp_and_co.txt') expected = [ 'Copyright (c) 2001, Sandra and Klaus Rennecke.', ] check_detection(expected, test_file) - def test_copyright_with_ascii_art(self): - test_file = self.get_test_loc('copyrights/copyright_with_ascii_art.txt') + def test_with_ascii_art(self): + test_file = self.get_test_loc('copyrights/with_ascii_art.txt') expected = [ 'Copyright (c) 1996. 
The Regents of the University of California.', ] check_detection(expected, test_file) - def test_copyright_should_not_be_detected_in_pixel_data_stream(self): - test_file = self.get_test_loc('copyrights/copyright_pixelstream.rgb') + def test_should_not_be_detected_in_pixel_data_stream(self): + test_file = self.get_test_loc('copyrights/pixelstream.rgb') expected = [] check_detection(expected, test_file) - def test_copyright_should_not_contain_leading_or_trailing_colon(self): - test_file = self.get_test_loc('copyrights/copyright_with_colon') + def test_should_not_contain_leading_or_trailing_colon(self): + test_file = self.get_test_loc('copyrights/with_colon') expected = ['copyright (c) 2013 by Armin Ronacher.'] check_detection(expected, test_file) - def test_copyright_in_markup_should_not_be_truncated(self): - test_file = self.get_test_loc('copyrights/copyright_in_html.html') + def test_markup_should_not_be_truncated(self): + test_file = self.get_test_loc('copyrights/html.html') expected = ["(c) Copyright 2010 by the WTForms Team"] check_detection(expected, test_file) - def test_copyright_should_not_have_trailing_garbage(self): - test_file = self.get_test_loc('copyrights/copyright_with_trailing_words.js') + def test_should_not_have_trailing_garbage(self): + test_file = self.get_test_loc('copyrights/with_trailing_words.js') expected = [ 'Copyright 2012-2015 The Dojo Foundation', 'Copyright 2009-2015 Jeremy Ashkenas, DocumentCloud and Investigative Reporters', @@ -3992,93 +3983,93 @@ def test_copyright_should_not_have_trailing_garbage(self): ] check_detection(expected, test_file) - def test_copyright_should_not_have_trailing_available(self): - test_file = self.get_test_loc('copyrights/copyright_hostapd_trailing_available.c') + def test_should_not_have_trailing_available(self): + test_file = self.get_test_loc('copyrights/hostapd_trailing_available.c') expected = ['Copyright (c) 2004-2005, Jouni Malinen '] check_detection(expected, test_file) - def 
test_copyright_with_dots_and_all_lowercase_on_multilines(self): + def test_with_dots_and_all_lowercase_on_multilines(self): test_lines = ['Copyright . 2008 company name, inc.', ' Change: Add functions', ] expected = ['Copyright . 2008 company name, inc.'] check_detection(expected, test_lines) - def test_copyright_with_dots_and_all_lowercase_on_single_line(self): + def test_with_dots_and_all_lowercase_on_single_line(self): test_lines = ['Copyright . 2008 foo name, inc.'] expected = ['Copyright . 2008 foo name, inc.'] check_detection(expected, test_lines) - def test_copyright_copy_copy_by_name3(self): + def test_copy_copy_by_name3(self): test_lines = ['Copyright (c) by 2007 Joachim Foerster '] expected = ['Copyright (c) by 2007 Joachim Foerster '] check_detection(expected, test_lines) - def test_copyright_rimini(self): + def test_rimini(self): test_file = self.get_test_loc('copyrights/rimini.c') expected = ['(c) Copyright 2000 Paolo Scaffardi, AIRVENT SAM s.p.a - RIMINI(ITALY), arsenio@tin.it'] check_detection(expected, test_file) - def test_copyright_should_not_be_detected_in_apache_html(self): - test_file = self.get_test_loc('copyrights/copyright_apache_in_html.html') + def test_should_not_be_detected_in_apache_html(self): + test_file = self.get_test_loc('copyrights/apache_in_html.html') expected = [] check_detection(expected, test_file) - def test_copyright_bv_legal_entity(self): + def test_bv_legal_entity(self): test_file = self.get_test_loc('copyrights/bv.txt') expected = ['Copyright (c) 2016 HERE Europe B.V.', '(c) HERE 2016'] check_detection(expected, test_file) - def test_copyright_with_dash_and_dotted_name(self): + def test_with_dash_and_dotted_name(self): test_lines = ['Copyright 1999, 2000 - D.T.Shield.'] expected = ['Copyright 1999, 2000 - D.T.Shield.'] check_detection(expected, test_lines) - def test_copyright_with_sign_dash_and_dotted_name(self): + def test_with_sign_dash_and_dotted_name(self): test_lines = ['Copyright (c) 1999, 2000 - D.T.Shield.'] 
expected = ['Copyright (c) 1999, 2000 - D.T.Shield.'] check_detection(expected, test_lines) - def test_copyright_with_sign_year_comp_and_auth(self): + def test_with_sign_year_comp_and_auth(self): test_lines = ['Copyright (c) 2012-2016, Project contributors'] expected = ['Copyright (c) 2012-2016, Project contributors'] check_detection(expected, test_lines) - def test_copyright_with_year_comp_and_auth(self): + def test_with_year_comp_and_auth(self): test_lines = ['Copyright 2012-2016, Project contributors'] expected = ['Copyright 2012-2016, Project contributors'] check_detection(expected, test_lines) - def test_copyright_with_year_noun_junk_auth_noun_and_auth(self): + def test_with_year_noun_junk_auth_noun_and_auth(self): test_lines = ['Copyright 2007-2010 the original author or authors.'] expected = ['Copyright 2007-2010 the original author or authors.'] check_detection(expected, test_lines) - def test_copyright_with_sign_year_noun_junk_auth_noun_and_auth(self): + def test_with_sign_year_noun_junk_auth_noun_and_auth(self): test_lines = ['Copyright (c) 2007-2010 the original author or authors.'] expected = ['Copyright (c) 2007-2010 the original author or authors.'] check_detection(expected, test_lines) - def test_copyright_byten_c_exactly(self): + def test_byten_c_exactly(self): test_lines = ['... don’t fit into your fixed-size buffer.\nByten ( c )\nExactly n bytes. 
If the'] expected = [] check_detection(expected, test_lines) - def test_copyright_should_not_be_detected_in_junk_strings_with_year_prefix(self): + def test_should_not_be_detected_in_junk_strings_with_year_prefix(self): test_file = self.get_test_loc('copyrights/access_strings.txt') expected = [] check_detection(expected, test_file) - def test_copyright_chromium_authors(self): + def test_chromium_authors(self): test_lines = ['© 2017 The Chromium Authors'] expected = ['(c) 2017 The Chromium Authors'] check_detection(expected, test_lines) - def test_copyright_rim(self): + def test_rim(self): test_lines = ['Copyright (C) Research In Motion Limited 2010. All rights reserved.'] expected = ['Copyright (c) Research In Motion Limited 2010.'] check_detection(expected, test_lines) - def test_copyright_sinica(self): + def test_sinica(self): test_lines = ''' # Copyright (c) 1999 Computer Systems and Communication Lab, # Institute of Information Science, Academia Sinica. @@ -4089,70 +4080,70 @@ def test_copyright_sinica(self): check_detection(expected, test_lines) - def test_copyright_copr1(self): + def test_copr1(self): test_lines = ['Copyright or Copr. CNRS'] expected = ['Copyright or Copr. CNRS'] check_detection(expected, test_lines) - def test_copyright_copr2(self): + def test_copr2(self): test_lines = ['Copyright or Copr. 2006 INRIA - CIRAD - INRA'] expected = ['Copr. 2006 INRIA - CIRAD - INRA'] check_detection(expected, test_lines) @expectedFailure - def test_copyright_copr2_correct(self): + def test_copr2_correct(self): test_lines = ['Copyright or Copr. 2006 INRIA - CIRAD - INRA'] expected = ['Copyright or Copr. 2006 INRIA - CIRAD - INRA'] check_detection(expected, test_lines) - def test_copyright_copr3(self): + def test_copr3(self): test_lines = ['Copyright or © or Copr. SSD Research Team 2011'] expected = ['Copr. 
SSD Research Team 2011'] check_detection(expected, test_lines) @expectedFailure - def test_copyright_copr3_correct(self): + def test_copr3_correct(self): test_lines = ['Copyright or © or Copr. SSD Research Team 2011'] expected = ['Copyright or (c) or Copr. SSD Research Team 2011'] check_detection(expected, test_lines) - def test_copyright_copr4(self): + def test_copr4(self): test_lines = ["(C) Copr. 1986-92 Numerical Recipes Software i9k''3"] expected = ['(c) Copr. 1986-92 Numerical Recipes Software'] check_detection(expected, test_lines) - def test_copyright_copr5(self): + def test_copr5(self): test_lines = ['Copyright or Copr. Mines Paristech, France - Mark NOBLE, Alexandrine GESRET'] expected = ['Copr. Mines Paristech, France - Mark NOBLE'] check_detection(expected, test_lines) @expectedFailure - def test_copyright_copr5_correct(self): + def test_copr5_correct(self): test_lines = ['Copyright or Copr. Mines Paristech, France - Mark NOBLE, Alexandrine GESRET'] expected = ['Copyright or Copr. Mines Paristech, France - Mark NOBLE, Alexandrine GESRET'] check_detection(expected, test_lines) - def test_copyright_oracle(self): + def test_oracle(self): test_lines = ['Copyright (c) 1997-2015 Oracle and/or its affiliates. All rights reserved.'] expected = ['Copyright (c) 1997-2015 Oracle and/or its affiliates.'] check_detection(expected, test_lines) - def test_copyright_windows(self): + def test_windows(self): test_lines = ['This release supports NT-based Windows releases like Windows 2000 SP4, Windows XP, and Windows 2003.'] expected = [] check_detection(expected, test_lines) - def test_copyright_in_binary_sql_server(self): + def test_binary_sql_server(self): test_lines = ['2005charchar? 
7 DDLSQL Server 2005smalldatetimedatetimeLDDDDDD7'] expected = [] check_detection(expected, test_lines) - def test_copyright_with_example_com_url(self): + def test_with_example_com_url(self): test_lines = ['"domain": function(c) { assert.equal(c.domain, "example.com") },'] expected = [] check_detection(expected, test_lines) - def test_copyright_various(self): + def test_various(self): test_lines = ''' libwmf (): library for wmf conversion Copyright (C) 2000 - various; see CREDITS, ChangeLog, and sources @@ -4161,7 +4152,7 @@ def test_copyright_various(self): expected = ['Copyright (c) 2000 - various'] # ; see CREDITS, ChangeLog, and sources check_detection(expected, test_lines) - def test_copyright_natural_docs(self): + def test_natural_docs(self): test_lines = ''' // Search script generated by doxygen // Copyright (C) 2009 by Dimitri van Heesch. @@ -4176,7 +4167,7 @@ def test_copyright_natural_docs(self): ] check_detection(expected, test_lines) - def test_copyright_and_authors_mixed(self): + def test_and_authors_mixed(self): test_lines = ''' * Copyright (c) 1998 Softweyr LLC. All rights reserved. * @@ -4192,7 +4183,7 @@ def test_copyright_and_authors_mixed(self): ] check_detection(expected, test_lines) - def test_copyright_word_in_html(self): + def test_word_in_html(self): test_lines = ''' Copyright © 2010 Nokia Corporation and/or its subsidiary(-ies) '''.splitlines(False) @@ -4201,7 +4192,7 @@ def test_copyright_word_in_html(self): ] check_detection(expected, test_lines) - def test_copyright_with_date_in_angle_brackets(self): + def test_with_date_in_angle_brackets(self): test_lines = ''' * Copyright (C) <2013>, GENIVI Alliance, Inc. 
* Author: bj@open-rnd.pl @@ -4215,7 +4206,7 @@ def test_copyright_with_date_in_angle_brackets(self): ] check_detection(expected, test_lines, what='authors') - def test_copyright_with_zoo(self): + def test_with_zoo(self): test_lines = ''' * Download Upload Messaging Manager * @@ -4227,7 +4218,7 @@ def test_copyright_with_zoo(self): ] check_detection(expected, test_lines, what='copyrights') - def test_copyright_in_man_page(self): + def test_man_page(self): test_lines = '''COPYRIGHT Copyright \(co 2001-2017 Free Software Foundation, Inc., and others. print "Copyright \\(co ". $args{'copyright'} . ".\n"; @@ -4242,7 +4233,7 @@ def test_copyright_in_man_page(self): ] check_detection(expected, test_lines, what='holders') - def test_copyright_is_not_mixed_with_authors(self): + def test_is_not_mixed_with_authors(self): test_lines = ''' * Copyright (C) 2000-2012 Free Software Foundation, Inc. * Author: Nikos Mavrogiannopoulos @@ -4257,7 +4248,7 @@ def test_copyright_is_not_mixed_with_authors(self): ] check_detection(expected, test_lines, what='authors') - def test_ibm_copyright_and_authors_are_detected(self): + def test_ibm_and_authors_are_detected(self): test_lines = ''' * Copyright IBM, Corp. 
2007 * @@ -4279,7 +4270,7 @@ def test_ibm_copyright_and_authors_are_detected(self): ] check_detection(expected, test_lines, what='holders') - def test_copyright_germany(self): + def test_germany(self): test_lines = ''' * Copyright (C) 2011 * Bardenheuer GmbH, Munich and Bundesdruckerei GmbH, Berlin @@ -4290,7 +4281,7 @@ def test_copyright_germany(self): check_detection(expected, test_lines, what='holders') @expectedFailure - def test_copyright_germany_should_detect_trailing_city(self): + def test_germany_should_detect_trailing_city(self): test_lines = ''' * Copyright (C) 2011 * Bardenheuer GmbH, Munich and Bundesdruckerei GmbH, Berlin @@ -4300,7 +4291,7 @@ def test_copyright_germany_should_detect_trailing_city(self): ] check_detection(expected, test_lines, what='holders') - def test_copyright_does_not_detect_junk_in_texinfo(self): + def test_does_not_detect_junk_in_texinfo(self): test_lines = ''' \DeclareUnicodeCharacter{00A8}{\"{ }} \DeclareUnicodeCharacter{00A9}{\copyright} @@ -4343,7 +4334,7 @@ def test_author_does_not_report_trailing_junk_and_incorrect_authors(self): ] check_detection(expected, test_lines, what='authors') - def test_copyright_in_assembly_data(self): + def test_assembly_data(self): test_lines = ''' [assembly: AssemblyProduct("")] [assembly: AssemblyCopyright("(c) 2004 by Henrik Ravn")] @@ -4364,7 +4355,7 @@ def test_author_does_not_report_incorrect_junk(self): ] check_detection(expected, test_lines, what='authors') - def test_copyright_does_not_truncate_last_name(self): + def test_does_not_truncate_last_name(self): test_lines = ''' /* Copyright 2014, Kenneth MacKay. Licensed under the BSD 2-clause license. 
*/ '''.splitlines(False) @@ -4374,7 +4365,7 @@ def test_copyright_does_not_truncate_last_name(self): check_detection(expected, test_lines, what='copyrights') @expectedFailure - def test_copyright_with_leading_date_andtrailing_plus(self): + def test_with_leading_date_andtrailing_plus(self): test_lines = ''' * 2004+ Copyright (c) Evgeniy Polyakov * All rights reserved. diff --git a/tests/cluecode/test_copyrights_ics.py b/tests/cluecode/test_copyrights_ics.py index c1de9209456..1268aee8a58 100644 --- a/tests/cluecode/test_copyrights_ics.py +++ b/tests/cluecode/test_copyrights_ics.py @@ -23,7 +23,8 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. -from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function import os.path from unittest.case import expectedFailure @@ -36,6 +37,7 @@ rather diversified sample of a typical Linux-based user space environment. 
""" + class TestCopyright(FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') @@ -8672,7 +8674,6 @@ def test_ics_iptables_extensions_libxt_tcpoptstrip_c(self): ] check_detection(expected, test_file) - def test_ics_iptables_extensions_libxt_tee_c(self): test_file = self.get_test_loc('ics/iptables-extensions/libxt_TEE.c') expected = [ @@ -10752,7 +10753,6 @@ def test_ics_libffi_ltconfig(self): ] check_detection(expected, test_file, what='authors') - def test_ics_libffi_ltmain_sh(self): test_file = self.get_test_loc('ics/libffi/ltmain.sh') expected = [ diff --git a/tests/cluecode/test_copyrights_lines.py b/tests/cluecode/test_copyrights_lines.py index 285798f88b6..3adc8ebe3a9 100644 --- a/tests/cluecode/test_copyrights_lines.py +++ b/tests/cluecode/test_copyrights_lines.py @@ -34,7 +34,7 @@ class TestCopyrightDetector(FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') def test_copyright_detect2_basic(self): - location = self.get_test_loc('copyrights/copyright_essential_smoke-ibm_c.c') + location = self.get_test_loc('copyright_lines/essential_smoke-ibm_c.c') expected = [ ([u'Copyright IBM and others (c) 2008'], [], [u'2008'], [u'IBM and others'], 6, 6), ([u'Copyright Eclipse, IBM and others (c) 2008'], [], [u'2008'], [u'Eclipse, IBM and others'], 8, 8) @@ -76,14 +76,14 @@ class TestCopyrightLinesDetection(FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') def test_company_name_in_java(self): - test_file = self.get_test_loc('copyrights/company_name_in_java-9_java.java') + test_file = self.get_test_loc('copyright_lines/company_name_in_java-9_java.java') expected = [ ([u'Copyright (c) 2008-2011 Company Name Incorporated'], 2, 3) ] check_detection(expected, test_file) def test_copyright_03e16f6c_0(self): - test_file = self.get_test_loc('copyrights/copyright_03e16f6c_0-e_f_c.0') + test_file = self.get_test_loc('copyright_lines/03e16f6c_0-e_f_c.0') expected = [ ([u'Copyright (c) 
1997 Microsoft Corp., OU Microsoft Corporation, CN Microsoft Root', u'Copyright (c) 1997 Microsoft Corp., OU Microsoft Corporation, CN Microsoft Root'], @@ -95,7 +95,7 @@ def test_copyright_03e16f6c_0(self): def test_copyright_3a3b02ce_0(self): # this is a certificate and the actual copyright holder is not clear: # could be either Wisekey or OISTE Foundation. - test_file = self.get_test_loc('copyrights/copyright_3a3b02ce_0-a_b_ce.0') + test_file = self.get_test_loc('copyright_lines/3a3b02ce_0-a_b_ce.0') expected = [([ u'Copyright (c) 2005, OU OISTE Foundation Endorsed, CN OISTE WISeKey Global Root', u'Copyright (c) 2005, OU OISTE Foundation Endorsed, CN OISTE WISeKey Global Root' @@ -105,47 +105,47 @@ def test_copyright_3a3b02ce_0(self): check_detection(expected, test_file) def test_copyright_boost_vector(self): - test_file = self.get_test_loc('copyrights/vector50.hpp') + test_file = self.get_test_loc('copyright_lines/vector50.hpp') expected = [([u'Copyright (c) 2005 Arkadiy Vertleyb', u'Copyright (c) 2005 Peder Holt'], 2, 3)] check_detection(expected, test_file) def test_copyright_ABC_cpp(self): - test_file = self.get_test_loc('copyrights/copyright_ABC_cpp-Case_cpp.cpp') + test_file = self.get_test_loc('copyright_lines/ABC_cpp-Case_cpp.cpp') expected = [([u'Copyright (c) ABC Company'], 12, 12)] check_detection(expected, test_file) def test_copyright_ABC_file_cpp(self): - test_file = self.get_test_loc('copyrights/copyright_ABC_file_cpp-File_cpp.cpp') + test_file = self.get_test_loc('copyright_lines/ABC_file_cpp-File_cpp.cpp') expected = [([u'Copyright (c) ABC Company'], 12, 12)] check_detection(expected, test_file) def test_copyright_heunrich_c(self): - test_file = self.get_test_loc('copyrights/copyright_heunrich_c-c.c') + test_file = self.get_test_loc('copyright_lines/heunrich_c-c.c') expected = [([u'Copyright (c) 2000 HEUNRICH HERTZ INSTITUTE'], 5, 5)] check_detection(expected, test_file) def test_copyright_isc(self): - test_file = 
self.get_test_loc('copyrights/copyright_isc-c.c') + test_file = self.get_test_loc('copyright_lines/isc-c.c') expected = [([u'Copyright (c) 1998-2000 The Internet Software Consortium.'], 1, 3)] check_detection(expected, test_file) def test_copyright_sample_py(self): - test_file = self.get_test_loc('copyrights/copyright_sample_py-py.py') + test_file = self.get_test_loc('copyright_lines/sample_py-py.py') expected = [([u'COPYRIGHT 2006 ABC ABC'], 6, 7)] check_detection(expected, test_file) def test_copyright_abc(self): - test_file = self.get_test_loc('copyrights/copyright_abc') + test_file = self.get_test_loc('copyright_lines/abc') expected = [([u'Copyright (c) 2006 abc.org'], 1, 2)] check_detection(expected, test_file) def test_copyright_abc_loss_of_holder_c(self): - test_file = self.get_test_loc('copyrights/copyright_abc_loss_of_holder_c-c.c') + test_file = self.get_test_loc('copyright_lines/abc_loss_of_holder_c-c.c') expected = [([u'copyright abc 2001'], 1, 2)] check_detection(expected, test_file) def test_copyright_abiword_common_copyright(self): - test_file = self.get_test_loc('copyrights/copyright_abiword_common_copyright-abiword_common_copyright.copyright') + test_file = self.get_test_loc('copyright_lines/abiword_common.copyright') expected = [ ([u'Copyright (c) 1998- AbiSource, Inc. 
& Co.'], 17, 17), ([u'Copyright (c) 2009 Masayuki Hatta', @@ -155,17 +155,17 @@ def test_copyright_abiword_common_copyright(self): check_detection(expected, test_file) def test_copyright_acme_c(self): - test_file = self.get_test_loc('copyrights/copyright_acme_c-c.c') + test_file = self.get_test_loc('copyright_lines/acme_c-c.c') expected = [([u'Copyright (c) 2000 ACME, Inc.'], 1, 1)] check_detection(expected, test_file) def test_copyright_activefieldattribute_cs(self): - test_file = self.get_test_loc('copyrights/copyright_activefieldattribute_cs-ActiveFieldAttribute_cs.cs') + test_file = self.get_test_loc('copyright_lines/activefieldattribute_cs-ActiveFieldAttribute_cs.cs') expected = [([u'Web Applications Copyright 2009 - Thomas Hansen thomas@ra-ajax.org.'], 2, 5)] check_detection(expected, test_file) def test_copyright_addr_c(self): - test_file = self.get_test_loc('copyrights/copyright_addr_c-addr_c.c') + test_file = self.get_test_loc('copyright_lines/addr_c-addr_c.c') expected = [ ([u'Copyright 1999 Cornell University.'], 2, 4), ([u'Copyright 2000 Jon Doe.'], 5, 5) @@ -173,17 +173,17 @@ def test_copyright_addr_c(self): check_detection(expected, test_file) def test_copyright_adler_inflate_c(self): - test_file = self.get_test_loc('copyrights/copyright_adler_inflate_c-inflate_c.c') + test_file = self.get_test_loc('copyright_lines/adler_inflate_c-inflate_c.c') expected = [([u'Not copyrighted 1992 by Mark Adler'], 1, 2)] check_detection(expected, test_file) def test_copyright_aleal(self): - test_file = self.get_test_loc('copyrights/copyright_aleal-c.c') + test_file = self.get_test_loc('copyright_lines/aleal-c.c') expected = [([u'copyright (c) 2006 by aleal'], 2, 2)] check_detection(expected, test_file) def test_copyright_andre_darcy(self): - test_file = self.get_test_loc('copyrights/copyright_andre_darcy-c.c') + test_file = self.get_test_loc('copyright_lines/andre_darcy-c.c') expected = [ ([u'Copyright (c) 1995, Pascal Andre (andre@via.ecp.fr).'], 2, 6), ([u"copyright 
1997, 1998, 1999 by D'Arcy J.M. Cain (darcy@druid.net)"], 25, 26) @@ -191,7 +191,7 @@ def test_copyright_andre_darcy(self): check_detection(expected, test_file) def test_copyright_android_c(self): - test_file = self.get_test_loc('copyrights/copyright_android_c-c.c') + test_file = self.get_test_loc('copyright_lines/android_c-c.c') expected = [ ([u'Copyright (c) 2009 The Android Open Source Project'], 2, 2), ([u'Copyright 2003-2005 Colin Percival'], 23, 24) @@ -199,7 +199,7 @@ def test_copyright_android_c(self): check_detection(expected, test_file) def test_copyright_apache_notice(self): - test_file = self.get_test_loc('copyrights/copyright_apache_notice-NOTICE') + test_file = self.get_test_loc('copyright_lines/apache_notice-NOTICE') expected = [ ([u'Copyright 1999-2006 The Apache Software Foundation'], 6, 7), ([u'Copyright 1999-2006 The Apache Software Foundation'], 16, 17), @@ -209,12 +209,12 @@ def test_copyright_apache_notice(self): check_detection(expected, test_file) def test_copyright_aptitude_copyright_label(self): - test_file = self.get_test_loc('copyrights/copyright_aptitude_copyright_label-aptitude_copyright_label.label') + test_file = self.get_test_loc('copyright_lines/aptitude-aptitude.label') expected = [([u'Copyright 1999-2005 Daniel Burrows '], 1, 1)] check_detection(expected, test_file) def test_copyright_atheros_spanning_lines(self): - test_file = self.get_test_loc('copyrights/copyright_atheros_spanning_lines-py.py') + test_file = self.get_test_loc('copyright_lines/atheros_spanning_lines-py.py') expected = [ ([u'Copyright (c) 2000 Atheros Communications, Inc.'], 2, 2), ([u'Copyright (c) 2001 Atheros Communications, Inc.'], 3, 3), @@ -223,18 +223,18 @@ def test_copyright_atheros_spanning_lines(self): check_detection(expected, test_file) def test_copyright_att_in_c(self): - test_file = self.get_test_loc('copyrights/copyright_att_in_c-9_c.c') + test_file = self.get_test_loc('copyright_lines/att_in_c-9_c.c') expected = [([u'Copyright (c) 1991 by 
AT&T.'], 5, 5)] check_detection(expected, test_file) def test_copyright_audio_c(self): - test_file = self.get_test_loc('copyrights/copyright_audio_c-c.c') + test_file = self.get_test_loc('copyright_lines/audio_c-c.c') expected = [([u'copyright (c) 1995, AudioCodes, DSP Group, France Telecom, Universite de Sherbrooke.'], 2, 4)] check_detection(expected, test_file) def test_copyright_babkin_txt(self): - test_file = self.get_test_loc('copyrights/copyright_babkin_txt.txt') + test_file = self.get_test_loc('copyright_lines/babkin_txt.txt') expected = [ ([u'Copyright (c) North', u'Copyright (c) South', @@ -245,7 +245,7 @@ def test_copyright_babkin_txt(self): check_detection(expected, test_file) def test_copyright_blender_debian(self): - test_file = self.get_test_loc('copyrights/copyright_blender_debian-blender_copyright.copyright') + test_file = self.get_test_loc('copyright_lines/blender_debian-blender.copyright') expected = [ ([u'Copyright (c) 2002-2008 Blender Foundation'], 8, 11), ([u'Copyright (c) 2004-2005 Masayuki Hatta ', diff --git a/tests/commoncode/test_codec.py b/tests/commoncode/test_codec.py index cd667c079f8..7f9fddbedd7 100644 --- a/tests/commoncode/test_codec.py +++ b/tests/commoncode/test_codec.py @@ -22,7 +22,8 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
-from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function from unittest import TestCase @@ -32,6 +33,7 @@ class TestCodec(TestCase): + def test_bin_to_num_basic(self): expected = 123 result = bin_to_num('{') diff --git a/tests/commoncode/test_command.py b/tests/commoncode/test_command.py index f1cbaf27478..04e22188aab 100644 --- a/tests/commoncode/test_command.py +++ b/tests/commoncode/test_command.py @@ -33,7 +33,6 @@ from commoncode.system import on_linux from commoncode.system import on_mac from commoncode.system import on_windows -from unittest.case import skipUnless class TestCommand(FileBasedTesting): diff --git a/tests/commoncode/test_date.py b/tests/commoncode/test_date.py index ed6d62af2fe..33f389953e7 100644 --- a/tests/commoncode/test_date.py +++ b/tests/commoncode/test_date.py @@ -22,17 +22,18 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. -from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function -import os from datetime import datetime +import os from commoncode import testcase - import commoncode.date class TestDate(testcase.FileBasedTesting): + def test_secs_from_epoch_can_handle_micro_and_nano_secs(self): test_file = self.get_temp_file() open(test_file, 'w').close() diff --git a/tests/commoncode/test_fileset.py b/tests/commoncode/test_fileset.py index e12f2f29543..0e3f07d6856 100644 --- a/tests/commoncode/test_fileset.py +++ b/tests/commoncode/test_fileset.py @@ -22,13 +22,15 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
-from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function import os import commoncode.testcase from commoncode import fileset + class FilesetTest(commoncode.testcase.FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') diff --git a/tests/commoncode/test_fileutils.py b/tests/commoncode/test_fileutils.py index ea94a5b75fa..63e9989ef47 100644 --- a/tests/commoncode/test_fileutils.py +++ b/tests/commoncode/test_fileutils.py @@ -22,7 +22,8 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. -from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function import os from os.path import join @@ -32,8 +33,8 @@ from commoncode import filetype from commoncode import fileutils from commoncode.fileutils import as_posixpath -from commoncode.fileutils import path_to_bytes -from commoncode.fileutils import path_to_unicode +from commoncode.fileutils import fsencode +from commoncode.fileutils import fsdecode from commoncode.system import on_linux from commoncode.system import on_posix from commoncode.system import on_mac @@ -253,7 +254,7 @@ def test_copytree_does_not_copy_fifo(self): src = self.get_test_loc('fileutils/filetype', copy=True) dest = self.get_temp_dir() src_file = join(src, 'myfifo') - os.mkfifo(src_file) # @UndefinedVariable + os.mkfifo(src_file) # NOQA dest_dir = join(dest, 'dest') fileutils.copytree(src, dest_dir) assert not os.path.exists(join(dest_dir, 'myfifo')) @@ -306,6 +307,19 @@ def test_resource_name(self): assert 'f.a' == fileutils.resource_name('a/b/d/f/f.a') assert 'f.a' == fileutils.resource_name('f.a') + @skipIf(on_windows, 'Windows FS encoding is ... 
different!') + def test_fsdecode_and_fsencode_are_idempotent(self): + a = b'foo\xb1bar' + b = u'foo\udcb1bar' + assert a == fsencode(fsdecode(a)) + assert a == fsencode(fsdecode(b)) + assert b == fsdecode(fsencode(a)) + assert b == fsdecode(fsencode(b)) + + +class TestFileUtilsWalk(FileBasedTesting): + test_data_dir = os.path.join(os.path.dirname(__file__), 'data') + def test_os_walk_with_unicode_path(self): test_dir = self.extract_test_zip('fileutils/walk/unicode.zip') test_dir = join(test_dir, 'unicode') @@ -359,10 +373,34 @@ def test_fileutils_walk_can_walk_an_empty_dir(self): ] assert expected == result - def test_file_iter(self): + def test_walk_can_walk_non_utf8_path_from_unicode_path(self): + test_dir = self.extract_test_tar_raw('fileutils/walk_non_utf8/non_unicode.tgz') + test_dir = join(test_dir, 'non_unicode') + + if not on_linux: + test_dir = unicode(test_dir) + result = list(fileutils.walk(test_dir))[0] + _dirpath, _dirnames, filenames = result + assert 18 == len(filenames) + + def test_os_walk_can_walk_non_utf8_path_from_unicode_path(self): + test_dir = self.extract_test_tar_raw('fileutils/walk_non_utf8/non_unicode.tgz') + test_dir = join(test_dir, 'non_unicode') + + if not on_linux: + test_dir = unicode(test_dir) + result = list(os.walk(test_dir))[0] + _dirpath, _dirnames, filenames = result + assert 18 == len(filenames) + + +class TestFileUtilsIter(FileBasedTesting): + test_data_dir = os.path.join(os.path.dirname(__file__), 'data') + + def test_resource_iter(self): test_dir = self.get_test_loc('fileutils/walk') base = self.get_test_loc('fileutils') - result = [as_posixpath(f.replace(base, '')) for f in fileutils.file_iter(test_dir)] + result = [as_posixpath(f.replace(base, '')) for f in fileutils.resource_iter(test_dir, with_dirs=False)] expected = [ '/walk/f', '/walk/unicode.zip', @@ -372,23 +410,35 @@ def test_file_iter(self): ] assert sorted(expected) == sorted(result) - def test_file_iter_can_iterate_a_single_file(self): + def 
test_resource_iter_can_iterate_a_single_file(self): + test_file = self.get_test_loc('fileutils/walk/f') + result = [as_posixpath(f) for f in fileutils.resource_iter(test_file, with_dirs=False)] + expected = [as_posixpath(test_file)] + assert expected == result + + def test_resource_iter_can_iterate_a_single_file_with_dirs(self): test_file = self.get_test_loc('fileutils/walk/f') - result = [as_posixpath(f) for f in fileutils.file_iter(test_file)] + result = [as_posixpath(f) for f in fileutils.resource_iter(test_file, with_dirs=True)] expected = [as_posixpath(test_file)] assert expected == result - def test_file_iter_can_walk_an_empty_dir(self): + def test_resource_iter_can_walk_an_empty_dir(self): + test_dir = self.get_temp_dir() + result = list(fileutils.resource_iter(test_dir, with_dirs=False)) + expected = [] + assert expected == result + + def test_resource_iter_can_walk_an_empty_dir_with_dirs(self): test_dir = self.get_temp_dir() - result = list(fileutils.file_iter(test_dir)) + result = list(fileutils.resource_iter(test_dir, with_dirs=False)) expected = [] assert expected == result - def test_resource_iter_with_files_no_dir(self): + def test_resource_iter_without_dir(self): test_dir = self.get_test_loc('fileutils/walk') base = self.get_test_loc('fileutils') result = sorted([as_posixpath(f.replace(base, '')) - for f in fileutils.resource_iter(test_dir, with_files=True, with_dirs=False)]) + for f in fileutils.resource_iter(test_dir, with_dirs=False)]) expected = [ '/walk/f', '/walk/unicode.zip', @@ -398,11 +448,11 @@ def test_resource_iter_with_files_no_dir(self): ] assert sorted(expected) == sorted(result) - def test_resource_iter_with_files_and_dir(self): + def test_resource_iter_with_dirs(self): test_dir = self.get_test_loc('fileutils/walk') base = self.get_test_loc('fileutils') result = sorted([as_posixpath(f.replace(base, '')) - for f in fileutils.resource_iter(test_dir, with_files=True, with_dirs=True)]) + for f in fileutils.resource_iter(test_dir, 
with_dirs=True)]) expected = [ '/walk/d1', '/walk/d1/d2', @@ -415,23 +465,11 @@ def test_resource_iter_with_files_and_dir(self): ] assert sorted(expected) == sorted(result) - def test_resource_iter_with_dir_only(self): - test_dir = self.get_test_loc('fileutils/walk') - base = self.get_test_loc('fileutils') - result = sorted([as_posixpath(f.replace(base, '')) - for f in fileutils.resource_iter(test_dir, with_files=False, with_dirs=True)]) - expected = [ - '/walk/d1', - '/walk/d1/d2', - '/walk/d1/d2/d3', - ] - assert sorted(expected) == sorted(result) - def test_resource_iter_return_byte_on_byte_input(self): test_dir = self.get_test_loc('fileutils/walk') base = self.get_test_loc('fileutils') result = sorted([as_posixpath(f.replace(base, '')) - for f in fileutils.resource_iter(test_dir, with_files=True, with_dirs=True)]) + for f in fileutils.resource_iter(test_dir, with_dirs=True)]) expected = [ '/walk/d1', '/walk/d1/d2', @@ -452,7 +490,7 @@ def test_resource_iter_return_unicode_on_unicode_input(self): test_dir = self.get_test_loc('fileutils/walk') base = unicode(self.get_test_loc('fileutils')) result = sorted([as_posixpath(f.replace(base, '')) - for f in fileutils.resource_iter(test_dir, with_files=True, with_dirs=True)]) + for f in fileutils.resource_iter(test_dir, with_dirs=True)]) expected = [ u'/walk/d1', u'/walk/d1/d2', @@ -466,19 +504,7 @@ def test_resource_iter_return_unicode_on_unicode_input(self): assert sorted(expected) == sorted(result) assert all(isinstance(p, unicode) for p in result) - def test_resource_iter_can_iterate_a_single_file(self): - test_file = self.get_test_loc('fileutils/walk/f') - result = [as_posixpath(f) for f in fileutils.resource_iter(test_file)] - expected = [as_posixpath(test_file)] - assert expected == result - - def test_resource_iter_can_walk_an_empty_dir(self): - test_dir = self.get_temp_dir() - result = list(fileutils.resource_iter(test_dir)) - expected = [] - assert expected == result - - def 
test_fileutils_resource_iter_can_walk_unicode_path_with_zip(self): + def test_resource_iter_can_walk_unicode_path_with_zip(self): test_dir = self.extract_test_zip('fileutils/walk/unicode.zip') test_dir = join(test_dir, 'unicode') @@ -509,53 +535,24 @@ def test_fileutils_resource_iter_can_walk_unicode_path_with_zip(self): ] assert expected == result - def test_resource_iter_can_walk_non_utf8_path_from_unicode_path(self): + def test_resource_iter_can_walk_non_utf8_path_from_unicode_path_with_dirs(self): test_dir = self.extract_test_tar_raw('fileutils/walk_non_utf8/non_unicode.tgz') test_dir = join(test_dir, 'non_unicode') if not on_linux: test_dir = unicode(test_dir) - result = list(fileutils.resource_iter(test_dir)) + result = list(fileutils.resource_iter(test_dir, with_dirs=True)) assert 18 == len(result) - def test_walk_can_walk_non_utf8_path_from_unicode_path(self): - test_dir = self.extract_test_tar_raw('fileutils/walk_non_utf8/non_unicode.tgz') - test_dir = join(test_dir, 'non_unicode') - - if not on_linux: - test_dir = unicode(test_dir) - result = list(fileutils.walk(test_dir))[0] - _dirpath, _dirnames, filenames = result - assert 18 == len(filenames) - - def test_file_iter_can_walk_non_utf8_path_from_unicode_path(self): + def test_resource_iter_can_walk_non_utf8_path_from_unicode_path(self): test_dir = self.extract_test_tar_raw('fileutils/walk_non_utf8/non_unicode.tgz') test_dir = join(test_dir, 'non_unicode') if not on_linux: test_dir = unicode(test_dir) - result = list(fileutils.file_iter(test_dir)) + result = list(fileutils.resource_iter(test_dir, with_dirs=False)) assert 18 == len(result) - def test_os_walk_can_walk_non_utf8_path_from_unicode_path(self): - test_dir = self.extract_test_tar_raw('fileutils/walk_non_utf8/non_unicode.tgz') - test_dir = join(test_dir, 'non_unicode') - - if not on_linux: - test_dir = unicode(test_dir) - result = list(os.walk(test_dir))[0] - _dirpath, _dirnames, filenames = result - assert 18 == len(filenames) - - 
@skipIf(on_windows, 'Windows FS encoding is ... different') - def test_path_to_unicode_and_path_to_bytes_are_idempotent(self): - a = b'foo\xb1bar' - b = u'foo\udcb1bar' - assert a == path_to_bytes(path_to_unicode(a)) - assert a == path_to_bytes(path_to_unicode(b)) - assert b == path_to_unicode(path_to_bytes(a)) - assert b == path_to_unicode(path_to_bytes(b)) - class TestBaseName(FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') diff --git a/tests/commoncode/test_functional.py b/tests/commoncode/test_functional.py index afc6004bebe..1857f4b8e18 100644 --- a/tests/commoncode/test_functional.py +++ b/tests/commoncode/test_functional.py @@ -22,7 +22,8 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. -from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function from unittest.case import TestCase @@ -37,6 +38,7 @@ def test_flatten(self): assert expected == test def test_flatten_generator(self): + def gen(): for _ in range(2): yield range(5) diff --git a/tests/commoncode/test_ignore.py b/tests/commoncode/test_ignore.py index aafd363023b..4a4166bdd63 100644 --- a/tests/commoncode/test_ignore.py +++ b/tests/commoncode/test_ignore.py @@ -22,40 +22,23 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
-from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function import os +from unittest.case import skipIf import commoncode.testcase from commoncode import fileutils - - from commoncode import ignore from commoncode.system import on_mac -from unittest.case import skipIf class IgnoreTest(commoncode.testcase.FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') - def check_default(self, test_dir, expected_message): - for top, dirs, files in os.walk(test_dir, topdown=True): - not_ignored = [] - for d in dirs: - p = os.path.join(top, d) - ign = ignore.is_ignored(p, ignore.default_ignores, {}) - if not ign: - not_ignored.append(d) - dirs[:] = not_ignored - - for f in files: - p = os.path.join(top, f) - ign = ignore.is_ignored(p, ignore.default_ignores, {}) - if ign: - assert ign == expected_message - @skipIf(on_mac, 'Return different result on Mac for reasons to investigate') - def test_default_ignores_eclipse1(self): + def test_is_ignored_default_ignores_eclipse1(self): test_dir = self.extract_test_tar('ignore/excludes/eclipse.tgz') test_base = os.path.join(test_dir, 'eclipse') @@ -63,7 +46,7 @@ def test_default_ignores_eclipse1(self): result = ignore.is_ignored(test, ignore.default_ignores, {}) assert 'Default ignore: Eclipse IDE artifact' == result - def test_default_ignores_eclipse2(self): + def test_is_ignored_default_ignores_eclipse2(self): test_dir = self.extract_test_tar('ignore/excludes/eclipse.tgz') test_base = os.path.join(test_dir, 'eclipse') @@ -71,7 +54,7 @@ def test_default_ignores_eclipse2(self): result = ignore.is_ignored(test, ignore.default_ignores, {}) assert 'Default ignore: Eclipse IDE artifact' == result - def test_default_ignores_eclipse3(self): + def test_is_ignored_default_ignores_eclipse3(self): test_dir = self.extract_test_tar('ignore/excludes/eclipse.tgz') test_base = os.path.join(test_dir, 'eclipse') @@ -79,7 +62,7 @@ def 
test_default_ignores_eclipse3(self): result = ignore.is_ignored(test, ignore.default_ignores, {}) assert 'Default ignore: Eclipse IDE artifact' == result - def test_default_ignores_eclipse4(self): + def test_is_ignored_default_ignores_eclipse4(self): test_dir = self.extract_test_tar('ignore/excludes/eclipse.tgz') test_base = os.path.join(test_dir, 'eclipse') @@ -87,7 +70,7 @@ def test_default_ignores_eclipse4(self): result = ignore.is_ignored(test, ignore.default_ignores, {}) assert 'Default ignore: Eclipse IDE artifact' == result - def test_default_ignores_mac1(self): + def test_is_ignored_default_ignores_mac1(self): test_dir = self.extract_test_tar('ignore/excludes/mac.tgz') test_base = os.path.join(test_dir, 'mac') @@ -95,7 +78,7 @@ def test_default_ignores_mac1(self): result = ignore.is_ignored(test, ignore.default_ignores, {}) assert 'Default ignore: MacOSX artifact' == result - def test_default_ignores_mac2(self): + def test_is_ignored_default_ignores_mac2(self): test_dir = self.extract_test_tar('ignore/excludes/mac.tgz') test_base = os.path.join(test_dir, 'mac') @@ -103,7 +86,7 @@ def test_default_ignores_mac2(self): result = ignore.is_ignored(test, ignore.default_ignores, {}) assert 'Default ignore: MacOSX artifact' == result - def test_default_ignores_mac3(self): + def test_is_ignored_default_ignores_mac3(self): test_dir = self.extract_test_tar('ignore/excludes/mac.tgz') test_base = os.path.join(test_dir, 'mac') @@ -111,7 +94,7 @@ def test_default_ignores_mac3(self): result = ignore.is_ignored(test, ignore.default_ignores, {}) assert 'Default ignore: MacOSX artifact' == result - def test_default_ignores_mac4(self): + def test_is_ignored_default_ignores_mac4(self): test_dir = self.extract_test_tar('ignore/excludes/mac.tgz') test_base = os.path.join(test_dir, 'mac') @@ -120,7 +103,7 @@ def test_default_ignores_mac4(self): assert 'Default ignore: MacOSX artifact' == result @skipIf(on_mac, 'Return different result on Mac for reasons to investigate') - def 
test_default_ignores_mac5(self): + def test_is_ignored_default_ignores_mac5(self): test_dir = self.extract_test_tar('ignore/excludes/mac.tgz') test_base = os.path.join(test_dir, 'mac') @@ -130,14 +113,14 @@ def test_default_ignores_mac5(self): assert 'Default ignore: MacOSX artifact' == result @skipIf(on_mac, 'Return different result on Mac for reasons to investigate') - def test_default_ignores_msft(self): + def test_is_ignored_default_ignores_msft(self): test_dir = self.extract_test_tar('ignore/excludes/msft-vs.tgz') test = os.path.join(test_dir, 'msft-vs/tst.sluo') result = ignore.is_ignored(test, ignore.default_ignores, {}) assert 'Default ignore: Microsoft VS project artifact' == result @skipIf(on_mac, 'Return different result on Mac for reasons to investigate') - def test_skip_vcs_files_and_dirs(self): + def test_is_ignored_skip_vcs_files_and_dirs(self): test_dir = self.extract_test_tar('ignore/vcs.tgz') result = [] for top, dirs, files in os.walk(test_dir, topdown=True): @@ -178,7 +161,7 @@ def test_skip_vcs_files_and_dirs(self): ] assert sorted(expected) == sorted(result) - def test_default_ignore_does_not_skip_one_char_names(self): + def test_fileset_match_default_ignore_does_not_skip_one_char_names(self): # use fileset directly to work on strings not locations from commoncode import fileset tests = [c for c in 'HFS+ Private Data'] + 'HFS+ Private Data'.split() diff --git a/tests/commoncode/test_version.py b/tests/commoncode/test_version.py index 49ed53b7f19..8c5ed2ce219 100644 --- a/tests/commoncode/test_version.py +++ b/tests/commoncode/test_version.py @@ -22,13 +22,16 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
-from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function import unittest from commoncode import version + class TestVersionHint(unittest.TestCase): + def test_version_hint(self): data = { '/xmlgraphics/fop/source/fop-1.0-src.zip': '1.0', diff --git a/tests/extractcode/data/archive/weird_names/weird_names.7z_7zip_win.expected b/tests/extractcode/data/archive/weird_names/weird_names.7z_7zip_win.expected index d803623116c..10c554f721e 100644 --- a/tests/extractcode/data/archive/weird_names/weird_names.7z_7zip_win.expected +++ b/tests/extractcode/data/archive/weird_names/weird_names.7z_7zip_win.expected @@ -1,111 +1,111 @@ [ - "/weird_names/man\\1/1.gz", - "/weird_names/man\\1/:.1.gz", - "/weird_names/man\\1/:/.1", - "/weird_names/man\\1/[.1.gz", - "/weird_names/man\\1/[/:*.1", + "/weird_names/man/1/1.gz", + "/weird_names/man/1/[.1.gz", + "/weird_names/man/1/[/__.1", + "/weird_names/man/1/_.1.gz", + "/weird_names/man/1/_/.1", "/weird_names/some 'file", - "/weird_names/some /file", "/weird_names/some file", - "/weird_names/some\"file", - "/weird_names/some/\"file", - "/weird_names/win/AUX", - "/weird_names/win/AUX.txt", - "/weird_names/win/COM1", - "/weird_names/win/COM1.txt", - "/weird_names/win/COM2", - "/weird_names/win/COM2.txt", - "/weird_names/win/COM3", - "/weird_names/win/COM3.txt", - "/weird_names/win/COM4", - "/weird_names/win/COM4.txt", - "/weird_names/win/COM5", - "/weird_names/win/COM5.txt", - "/weird_names/win/COM6", - "/weird_names/win/COM6.txt", - "/weird_names/win/COM7", - "/weird_names/win/COM7.txt", - "/weird_names/win/COM8", - "/weird_names/win/COM8.txt", - "/weird_names/win/COM9", - "/weird_names/win/COM9.txt", - "/weird_names/win/CON", - "/weird_names/win/CON.txt", - "/weird_names/win/LPT1", - "/weird_names/win/LPT1.txt", - "/weird_names/win/LPT2", - "/weird_names/win/LPT2.txt", - "/weird_names/win/LPT3", - "/weird_names/win/LPT3.txt", - "/weird_names/win/LPT4", - 
"/weird_names/win/LPT4.txt", - "/weird_names/win/LPT5", - "/weird_names/win/LPT5.txt", - "/weird_names/win/LPT6", - "/weird_names/win/LPT6.txt", - "/weird_names/win/LPT7", - "/weird_names/win/LPT7.txt", - "/weird_names/win/LPT8", - "/weird_names/win/LPT8.txt", - "/weird_names/win/LPT9", - "/weird_names/win/LPT9.txt", - "/weird_names/win/NUL", - "/weird_names/win/NUL.txt", - "/weird_names/win/PRN", - "/weird_names/win/PRN.txt", - "/weird_names/win/aux", - "/weird_names/win/aux.txt", - "/weird_names/win/com1", - "/weird_names/win/com1.txt", - "/weird_names/win/com2", - "/weird_names/win/com2.txt", - "/weird_names/win/com3", - "/weird_names/win/com3.txt", - "/weird_names/win/com4", - "/weird_names/win/com4.txt", - "/weird_names/win/com5", - "/weird_names/win/com5.txt", - "/weird_names/win/com6", - "/weird_names/win/com6.txt", - "/weird_names/win/com7", - "/weird_names/win/com7.txt", - "/weird_names/win/com8", - "/weird_names/win/com8.txt", - "/weird_names/win/com9", - "/weird_names/win/com9.txt", - "/weird_names/win/con", - "/weird_names/win/con.txt", - "/weird_names/win/lpt1", - "/weird_names/win/lpt1.txt", - "/weird_names/win/lpt2", - "/weird_names/win/lpt2.txt", - "/weird_names/win/lpt3", - "/weird_names/win/lpt3.txt", - "/weird_names/win/lpt4", - "/weird_names/win/lpt4.txt", - "/weird_names/win/lpt5", - "/weird_names/win/lpt5.txt", - "/weird_names/win/lpt6", - "/weird_names/win/lpt6.txt", - "/weird_names/win/lpt7", - "/weird_names/win/lpt7.txt", - "/weird_names/win/lpt8", - "/weird_names/win/lpt8.txt", - "/weird_names/win/lpt9", - "/weird_names/win/lpt9.txt", - "/weird_names/win/nul", - "/weird_names/win/nul.txt", - "/weird_names/win/prn", - "/weird_names/win/prn.txt", - "/weird_names/winchr/ab\t.t\t", - "/weird_names/winchr/ab\n.t\n", - "/weird_names/winchr/ab\r.t\r", - "/weird_names/winchr/ab\".t\"", - "/weird_names/winchr/ab*_1.t*", + "/weird_names/some/_file", + "/weird_names/some_/file", + "/weird_names/some_file", + "/weird_names/win/_AUX", + 
"/weird_names/win/_AUX_1.txt", + "/weird_names/win/_COM1", + "/weird_names/win/_COM1.txt", + "/weird_names/win/_COM2", + "/weird_names/win/_COM2_1.txt", + "/weird_names/win/_COM3", + "/weird_names/win/_COM3_1.txt", + "/weird_names/win/_COM4", + "/weird_names/win/_COM4.txt", + "/weird_names/win/_COM5.txt", + "/weird_names/win/_COM5_1", + "/weird_names/win/_COM6", + "/weird_names/win/_COM6.txt", + "/weird_names/win/_COM7", + "/weird_names/win/_COM7.txt", + "/weird_names/win/_COM8", + "/weird_names/win/_COM8.txt", + "/weird_names/win/_COM9", + "/weird_names/win/_COM9.txt", + "/weird_names/win/_CON_1", + "/weird_names/win/_CON_1.txt", + "/weird_names/win/_LPT1", + "/weird_names/win/_LPT1_1.txt", + "/weird_names/win/_LPT2", + "/weird_names/win/_LPT2_1.txt", + "/weird_names/win/_LPT3_1", + "/weird_names/win/_LPT3_1.txt", + "/weird_names/win/_LPT4_1", + "/weird_names/win/_LPT4_1.txt", + "/weird_names/win/_LPT5_1", + "/weird_names/win/_LPT5_1.txt", + "/weird_names/win/_LPT6.txt", + "/weird_names/win/_LPT6_1", + "/weird_names/win/_LPT7", + "/weird_names/win/_LPT7.txt", + "/weird_names/win/_LPT8", + "/weird_names/win/_LPT8.txt", + "/weird_names/win/_LPT9", + "/weird_names/win/_LPT9.txt", + "/weird_names/win/_NUL.txt", + "/weird_names/win/_NUL_1", + "/weird_names/win/_PRN.txt", + "/weird_names/win/_PRN_1", + "/weird_names/win/_aux.txt", + "/weird_names/win/_aux_1", + "/weird_names/win/_com1_1", + "/weird_names/win/_com1_1.txt", + "/weird_names/win/_com2.txt", + "/weird_names/win/_com2_1", + "/weird_names/win/_com3.txt", + "/weird_names/win/_com3_1", + "/weird_names/win/_com4_1", + "/weird_names/win/_com4_1.txt", + "/weird_names/win/_com5", + "/weird_names/win/_com5_1.txt", + "/weird_names/win/_com6_1", + "/weird_names/win/_com6_1.txt", + "/weird_names/win/_com7_1", + "/weird_names/win/_com7_1.txt", + "/weird_names/win/_com8_1", + "/weird_names/win/_com8_1.txt", + "/weird_names/win/_com9_1", + "/weird_names/win/_com9_1.txt", + "/weird_names/win/_con", + 
"/weird_names/win/_con.txt", + "/weird_names/win/_lpt1.txt", + "/weird_names/win/_lpt1_1", + "/weird_names/win/_lpt2.txt", + "/weird_names/win/_lpt2_1", + "/weird_names/win/_lpt3", + "/weird_names/win/_lpt3.txt", + "/weird_names/win/_lpt4", + "/weird_names/win/_lpt4.txt", + "/weird_names/win/_lpt5", + "/weird_names/win/_lpt5.txt", + "/weird_names/win/_lpt6", + "/weird_names/win/_lpt6_1.txt", + "/weird_names/win/_lpt7_1", + "/weird_names/win/_lpt7_1.txt", + "/weird_names/win/_lpt8_1", + "/weird_names/win/_lpt8_1.txt", + "/weird_names/win/_lpt9_1", + "/weird_names/win/_lpt9_1.txt", + "/weird_names/win/_nul", + "/weird_names/win/_nul_1.txt", + "/weird_names/win/_prn", + "/weird_names/win/_prn_1.txt", "/weird_names/winchr/ab/.t", - "/weird_names/winchr/ab:.t:", - "/weird_names/winchr/ab<.t<", - "/weird_names/winchr/ab>.t>", - "/weird_names/winchr/ab?_2.t?", - "/weird_names/winchr/ab|.t|" + "/weird_names/winchr/ab_.t_", + "/weird_names/winchr/ab__1.t_", + "/weird_names/winchr/ab__2.t_", + "/weird_names/winchr/ab__3.t_", + "/weird_names/winchr/ab__4.t_", + "/weird_names/winchr/ab__5.t_", + "/weird_names/winchr/ab__6.t_", + "/weird_names/winchr/ab__7.t_", + "/weird_names/winchr/ab__8.t_", + "/weird_names/winchr/ab__9.t_" ] \ No newline at end of file diff --git a/tests/extractcode/data/archive/weird_names/weird_names.ar_libarch_linux.expected b/tests/extractcode/data/archive/weird_names/weird_names.ar_libarch_linux.expected index 8e2233ea35b..3e8da5fa945 100644 --- a/tests/extractcode/data/archive/weird_names/weird_names.ar_libarch_linux.expected +++ b/tests/extractcode/data/archive/weird_names/weird_names.ar_libarch_linux.expected @@ -3,38 +3,6 @@ "/COM2__1.txt", "/COM3_", "/COM3__1.txt", - "/COM3__10.txt", - "/COM3__11.txt", - "/COM3__12.txt", - "/COM3__13.txt", - "/COM3__14.txt", - "/COM3__15.txt", - "/COM3__16.txt", - "/COM3__17.txt", - "/COM3__18.txt", - "/COM3__19.txt", - "/COM3__2.txt", - "/COM3__20.txt", - "/COM3__21.txt", - "/COM3__22.txt", - "/COM3__23.txt", - 
"/COM3__24.txt", - "/COM3__25.txt", - "/COM3__26.txt", - "/COM3__27.txt", - "/COM3__28.txt", - "/COM3__29.txt", - "/COM3__3.txt", - "/COM3__30.txt", - "/COM3__31.txt", - "/COM3__32.txt", - "/COM3__33.txt", - "/COM3__4.txt", - "/COM3__5.txt", - "/COM3__6.txt", - "/COM3__7.txt", - "/COM3__8.txt", - "/COM3__9.txt", "/COM4_", "/COM5_", "/COM7_", @@ -78,13 +46,6 @@ "/com3_.txt", "/com4_.txt", "/com4__1", - "/com4__2", - "/com4__3", - "/com4__4", - "/com4__5", - "/com4__6", - "/com4__7", - "/com4__8", "/com5_.txt", "/com6_", "/com6_.txt", diff --git a/tests/extractcode/data/archive/weird_names/weird_names.ar_libarch_mac.expected b/tests/extractcode/data/archive/weird_names/weird_names.ar_libarch_mac.expected index 8e2233ea35b..3e8da5fa945 100644 --- a/tests/extractcode/data/archive/weird_names/weird_names.ar_libarch_mac.expected +++ b/tests/extractcode/data/archive/weird_names/weird_names.ar_libarch_mac.expected @@ -3,38 +3,6 @@ "/COM2__1.txt", "/COM3_", "/COM3__1.txt", - "/COM3__10.txt", - "/COM3__11.txt", - "/COM3__12.txt", - "/COM3__13.txt", - "/COM3__14.txt", - "/COM3__15.txt", - "/COM3__16.txt", - "/COM3__17.txt", - "/COM3__18.txt", - "/COM3__19.txt", - "/COM3__2.txt", - "/COM3__20.txt", - "/COM3__21.txt", - "/COM3__22.txt", - "/COM3__23.txt", - "/COM3__24.txt", - "/COM3__25.txt", - "/COM3__26.txt", - "/COM3__27.txt", - "/COM3__28.txt", - "/COM3__29.txt", - "/COM3__3.txt", - "/COM3__30.txt", - "/COM3__31.txt", - "/COM3__32.txt", - "/COM3__33.txt", - "/COM3__4.txt", - "/COM3__5.txt", - "/COM3__6.txt", - "/COM3__7.txt", - "/COM3__8.txt", - "/COM3__9.txt", "/COM4_", "/COM5_", "/COM7_", @@ -78,13 +46,6 @@ "/com3_.txt", "/com4_.txt", "/com4__1", - "/com4__2", - "/com4__3", - "/com4__4", - "/com4__5", - "/com4__6", - "/com4__7", - "/com4__8", "/com5_.txt", "/com6_", "/com6_.txt", diff --git a/tests/extractcode/data/archive/weird_names/weird_names.ar_libarch_win.expected b/tests/extractcode/data/archive/weird_names/weird_names.ar_libarch_win.expected index 
8e2233ea35b..3e8da5fa945 100644 --- a/tests/extractcode/data/archive/weird_names/weird_names.ar_libarch_win.expected +++ b/tests/extractcode/data/archive/weird_names/weird_names.ar_libarch_win.expected @@ -3,38 +3,6 @@ "/COM2__1.txt", "/COM3_", "/COM3__1.txt", - "/COM3__10.txt", - "/COM3__11.txt", - "/COM3__12.txt", - "/COM3__13.txt", - "/COM3__14.txt", - "/COM3__15.txt", - "/COM3__16.txt", - "/COM3__17.txt", - "/COM3__18.txt", - "/COM3__19.txt", - "/COM3__2.txt", - "/COM3__20.txt", - "/COM3__21.txt", - "/COM3__22.txt", - "/COM3__23.txt", - "/COM3__24.txt", - "/COM3__25.txt", - "/COM3__26.txt", - "/COM3__27.txt", - "/COM3__28.txt", - "/COM3__29.txt", - "/COM3__3.txt", - "/COM3__30.txt", - "/COM3__31.txt", - "/COM3__32.txt", - "/COM3__33.txt", - "/COM3__4.txt", - "/COM3__5.txt", - "/COM3__6.txt", - "/COM3__7.txt", - "/COM3__8.txt", - "/COM3__9.txt", "/COM4_", "/COM5_", "/COM7_", @@ -78,13 +46,6 @@ "/com3_.txt", "/com4_.txt", "/com4__1", - "/com4__2", - "/com4__3", - "/com4__4", - "/com4__5", - "/com4__6", - "/com4__7", - "/com4__8", "/com5_.txt", "/com6_", "/com6_.txt", diff --git a/tests/extractcode/extractcode_assert_utils.py b/tests/extractcode/extractcode_assert_utils.py index b8b4f8a459c..bfda20d4b8b 100644 --- a/tests/extractcode/extractcode_assert_utils.py +++ b/tests/extractcode/extractcode_assert_utils.py @@ -22,17 +22,18 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. -from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function import os from commoncode import filetype from commoncode import fileutils - """ Shared archiving test utils. 
""" + def check_size(expected_size, location): assert expected_size == os.stat(location).st_size diff --git a/tests/extractcode/test_archive.py b/tests/extractcode/test_archive.py index b1ab4839776..42b02f0dec4 100644 --- a/tests/extractcode/test_archive.py +++ b/tests/extractcode/test_archive.py @@ -54,7 +54,6 @@ from extractcode import sevenzip from extractcode import tar - """ For each archive type --when possible-- we are testing extraction of: - basic, plain archive, no tricks @@ -184,7 +183,7 @@ def test_no_handler_is_selected_for_a_non_archive3(self): def test_7zip_extract_can_extract_to_relative_paths(self): # The setup is a tad complex because we want to have a relative dir # to the base dir where we run tests from, ie the scancode-toolkit/ dir - # To use relative paths, we use our tmp dir at the root of the code + # To use relative paths, we use our tmp dir at the root of the code tree from os.path import dirname, join, abspath import tempfile import shutil @@ -207,7 +206,7 @@ def test_7zip_extract_can_extract_to_relative_paths(self): def test_libarchive_extract_can_extract_to_relative_paths(self): # The setup is a tad complex because we want to have a relative dir # to the base dir where we run tests from, ie the scancode-toolkit/ dir - # To use relative paths, we use our tmp dir at the root of the code + # To use relative paths, we use our tmp dir at the root of the code tree from os.path import dirname, join, abspath import tempfile import shutil @@ -255,7 +254,7 @@ def check_extract(self, test_function, test_file, expected, expected_warnings=No if check_all: len_test_dir = len(test_dir) - extracted = {path[len_test_dir:]: filetype.get_size(path) for path in fileutils.file_iter(test_dir)} + extracted = {path[len_test_dir:]: filetype.get_size(path) for path in fileutils.resource_iter(test_dir, with_dirs=False)} expected = {os.path.join(test_dir, exp_path): exp_size for exp_path, exp_size in expected.items()} assert sorted(expected.items()) == 
sorted(extracted.items()) else: @@ -288,7 +287,6 @@ def collect_extracted_path(self, test_dir): result = sorted(result) return result - def assertExceptionContains(self, text, callableObj, *args, **kwargs): try: callableObj(*args, **kwargs) @@ -304,6 +302,7 @@ def assertExceptionContains(self, text, callableObj, *args, **kwargs): class TestTarGzip(BaseArchiveTestCase): + def test_extract_targz_basic(self): test_file = self.get_test_loc('archive/tgz/tarred_gzipped.tar.gz') test_dir = self.get_temp_dir() @@ -412,6 +411,7 @@ def test_extract_targz_with_unicode_path_should_extract_without_error(self): class TestGzip(BaseArchiveTestCase): + def test_uncompress_gzip_basic(self): test_file = self.get_test_loc('archive/gzip/file_4.26-1.diff.gz') test_dir = self.get_temp_dir() @@ -467,12 +467,12 @@ def test_uncompress_gzip_can_uncompress_windows_ntfs_wmz(self): test_file = self.get_test_loc('archive/wmz/image003.wmz') test_dir = self.get_temp_dir() archive.uncompress_gzip(test_file, test_dir) - print(os.listdir(test_dir)) result = os.path.join(test_dir, 'image003.wmz-extract') assert os.path.exists(result) class TestTarBz2(BaseArchiveTestCase): + def test_extract_tar_bz2_basic(self): test_file = self.get_test_loc('archive/tbz/tarred_bzipped.tar.bz2') test_dir = self.get_temp_dir() @@ -548,6 +548,7 @@ def test_extract_tar_bz2_multistream(self): class TestBz2(BaseArchiveTestCase): + def test_uncompress_bzip2_basic(self): test_file = self.get_test_loc('archive/bz2/single_file_not_tarred.bz2') test_dir = self.get_temp_dir() @@ -597,6 +598,7 @@ def test_sevenzip_extract_can_handle_bz2_multistream_differently(self): class TestShellArchives(BaseArchiveTestCase): + def test_extract_springboot(self): # a self executable springboot Jar is a zip with a shell script prefix test_file = self.get_test_loc('archive/shar/demo-spring-boot.jar') @@ -608,6 +610,7 @@ def test_extract_springboot(self): class TestZip(BaseArchiveTestCase): + def test_extract_zip_basic(self): test_file = 
self.get_test_loc('archive/zip/basic.zip') test_dir = self.get_temp_dir() @@ -864,8 +867,6 @@ def test_extract_zip_with_backslash_in_path_3(self): test_file = self.get_test_loc('archive/zip/backslash/boo-0.3-src.zip') test_dir = self.get_temp_dir() archive.extract_zip(test_file, test_dir) - print() - map(print, fileutils.file_iter(test_dir)) result = os.path.join(test_dir, 'src/Boo.Lang.Compiler/TypeSystem/InternalCallableType.cs') assert os.path.exists(result) @@ -909,6 +910,7 @@ def test_extract_zip_can_extract_zip_with_directory_not_marked_with_trailing_sla class TestLibarch(BaseArchiveTestCase): + def test_extract_zip_with_relative_path_libarchive(self): test_file = self.get_test_loc('archive/zip/relative_parent_folders.zip') test_dir = self.get_temp_dir() @@ -925,6 +927,7 @@ def test_extract_zip_with_relative_path_libarchive(self): class TestTar(BaseArchiveTestCase): + def test_extract_tar_basic(self): test_file = self.get_test_loc('archive/tar/tarred.tar') test_dir = self.get_temp_dir() @@ -1012,7 +1015,7 @@ def test_extract_python_testtar_tar_archive_with_special_files(self): # https://hg.python.org/cpython/raw-file/bff88c866886/Lib/test/testtar.tar test_dir = self.get_temp_dir() result = archive.extract_tar(test_file, test_dir) - expected_warnings = ["pax/regtype4: Pathname can't be converted from UTF-8 to current locale."] + expected_warnings = ["'pax/bad-pax-\\xe4\\xf6\\xfc': \nPathname can't be converted from UTF-8 to current locale."] assert sorted(expected_warnings) == sorted(result) expected = [ @@ -1029,12 +1032,12 @@ def test_extract_python_testtar_tar_archive_with_special_files(self): 'misc/regtype-suntar', 'misc/regtype-xstar', 
'pax/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/longname', + 'pax/bad-pax-aou', 'pax/hdrcharset-aou', 'pax/regtype1', 'pax/regtype2', 'pax/regtype3', 'pax/regtype4', - 'pax/regtype4_1', 'pax/umlauts-AOUaouss', 'ustar/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/1234567/longname', 'ustar/conttype', @@ -1043,10 +1046,13 @@ def test_extract_python_testtar_tar_archive_with_special_files(self): 'ustar/sparse', 'ustar/umlauts-AOUaouss' ] + if on_linux: + expected = [bytes(e) for e in expected] check_files(test_dir, expected) class TestDebian(BaseArchiveTestCase): + def test_extract_deb_package_1(self): test_file = self.get_test_loc('archive/deb/adduser_3.112ubuntu1_all.deb') test_dir = self.get_temp_dir() @@ -1072,6 +1078,7 @@ def test_extract_deb_package_3(self): class TestAr(BaseArchiveTestCase): + def test_extract_ar_basic_7z(self): test_file = self.get_test_loc('archive/ar/liby.a') test_dir = self.get_temp_dir() @@ -1113,23 +1120,9 @@ def test_extract_ar_broken(self): test_file = self.get_test_loc('archive/ar/liby-corrupted.a') test_dir = self.get_temp_dir() result = archive.extract_ar(test_file, test_dir) - expected = [ - '__.SYMDEF', - 'main.o', - 'main_1.o', - 'main_10.o', - 'main_11.o', - 'main_2.o', - 'main_3.o', - 'main_4.o', - 'main_5.o', - 'main_6.o', - 'main_7.o', - 'main_8.o', - 'main_9.o' - ] + expected = ['__.SYMDEF', 
'main.o'] check_files(test_dir, expected) - assert ['main.o: Incorrect file header signature'] == result + assert ['None: \nIncorrect file header signature'] == result def test_extract_ar_with_invalid_path(self): test_file = self.get_test_loc('archive/ar/ar_invalidpath.ar') @@ -1156,12 +1149,12 @@ def test_extract_ar_with_relative_path_libarch(self): test_dir = self.get_temp_dir() result = archive.libarchive2.extract(test_file, test_dir) expected_warns = [ - '/: Invalid string table', - "/: Invalid string table\nCan't find long filename for entry" + "'//': \nInvalid string table", + "'/0': \nCan't find long filename for entry" ] assert expected_warns == result # incorrect for now: need this: ['__.SYMDEF', 'release/init.obj'] - expected = ['dot', 'dot_1', 'dot_2', 'dot_3'] + expected = ['0', 'dot', 'dot_1', 'dot_2'] check_files(test_dir, expected) def test_extract_ar_with_relative_path_and_backslashes_in_names_libarch(self): @@ -1169,50 +1162,86 @@ def test_extract_ar_with_relative_path_and_backslashes_in_names_libarch(self): test_dir = self.get_temp_dir() result = archive.libarchive2.extract(test_file, test_dir) expected_warns = [ - '/: Invalid string table', - "/: Invalid string table\nCan't find long filename for entry" + u"'//': \nInvalid string table", + u"'/0': \nCan't find long filename for entry", + u"'/34': \nCan't find long filename for entry", + u"'/68': \nCan't find long filename for entry", + u"'/104': \nCan't find long filename for entry", + u"'/137': \nCan't find long filename for entry", + u"'/173': \nCan't find long filename for entry", + u"'/205': \nCan't find long filename for entry", + u"'/239': \nCan't find long filename for entry", + u"'/275': \nCan't find long filename for entry", + u"'/311': \nCan't find long filename for entry", + u"'/344': \nCan't find long filename for entry", + u"'/375': \nCan't find long filename for entry", + u"'/406': \nCan't find long filename for entry", + u"'/442': \nCan't find long filename for entry", + u"'/477': 
\nCan't find long filename for entry", + u"'/512': \nCan't find long filename for entry", + u"'/545': \nCan't find long filename for entry", + u"'/577': \nCan't find long filename for entry", + u"'/611': \nCan't find long filename for entry", + u"'/645': \nCan't find long filename for entry", + u"'/681': \nCan't find long filename for entry", + u"'/717': \nCan't find long filename for entry", + u"'/750': \nCan't find long filename for entry", + u"'/784': \nCan't find long filename for entry", + u"'/818': \nCan't find long filename for entry", + u"'/853': \nCan't find long filename for entry", + u"'/888': \nCan't find long filename for entry", + u"'/923': \nCan't find long filename for entry", + u"'/957': \nCan't find long filename for entry", + u"'/993': \nCan't find long filename for entry", + u"'/1027': \nCan't find long filename for entry", + u"'/1058': \nCan't find long filename for entry", + u"'/1089': \nCan't find long filename for entry" ] assert expected_warns == result # 7zip is better, but has a security bug for now + # GNU ar works fine otherwise, but there are portability issues expected = [ + '0', + '1027', + '104', + '1058', + '1089', + '137', + '173', + '205', + '239', + '275', + '311', + '34', + '344', + '375', + '406', + '442', + '477', + '512', + '545', + '577', + '611', + '645', + '68', + '681', + '717', + '750', + '784', + '818', + '853', + '888', + '923', + '957', + '993', 'dot', 'dot_1', - 'dot_10', - 'dot_11', - 'dot_12', - 'dot_13', - 'dot_14', - 'dot_15', - 'dot_16', - 'dot_17', - 'dot_18', - 'dot_19', - 'dot_2', - 'dot_20', - 'dot_21', - 'dot_22', - 'dot_23', - 'dot_24', - 'dot_25', - 'dot_26', - 'dot_27', - 'dot_28', - 'dot_29', - 'dot_3', - 'dot_30', - 'dot_31', - 'dot_32', - 'dot_33', - 'dot_34', - 'dot_35', - 'dot_4', - 'dot_5', - 'dot_6', - 'dot_7', - 'dot_8', - 'dot_9' + 'dot_2' ] + if on_linux: + expected = [bytes(e) for e in expected] + check_files(test_dir, expected) def 
test_extract_ar_with_relative_path_and_backslashes_in_names_7z(self): @@ -1304,6 +1333,7 @@ def test_extract_ar_with_permissions(self): class TestCpio(BaseArchiveTestCase): + def test_extract_cpio_basic(self): test_file = self.get_test_loc('archive/cpio/elfinfo-1.0-1.fc9.src.cpio') test_dir = self.get_temp_dir() @@ -1327,8 +1357,12 @@ def test_extract_cpio_broken2(self): test_file = self.get_test_loc('archive/cpio/cpio_broken.cpio') test_dir = self.get_temp_dir() result = archive.extract_cpio(test_file, test_dir) - assert ['elfinfo-1.0.tar.gz', 'elfinfo-1_1.0.tar.gz'] == sorted(os.listdir(test_dir)) - assert ['elfinfo-1.0.tar.gz: Skipped 72 bytes before finding valid header'] == result + expected = sorted(['elfinfo-1.0.tar.gz', 'elfinfo.spec']) + if on_linux: + expected = [bytes(e) for e in expected] + + assert expected == sorted(os.listdir(test_dir)) + assert ["'elfinfo.spec': \nSkipped 72 bytes before finding valid header"] == result def test_extract_cpio_with_absolute_path(self): assert not os.path.exists('/tmp/subdir') @@ -1371,7 +1405,6 @@ def test_extract_cpio_with_invalidpath(self): result = os.path.join(test_dir, 'this/that') assert os.path.exists(result) - def test_extract_cpio_with_weird_filename_extension(self): test_file = self.get_test_loc('archive/cpio/t.cpio.foo') test_dir = self.get_temp_dir() @@ -1381,6 +1414,7 @@ def test_extract_cpio_with_weird_filename_extension(self): expected = ['/t/', '/t/t.txt'] assert expected == extracted + class TestRpm(BaseArchiveTestCase): def test_extract_rpm_basic_1(self): @@ -1430,6 +1464,7 @@ def test_extract_rpm_broken(self): class TestExtractTwice(BaseArchiveTestCase): + def test_extract_twice_with_rpm_with_xz_compressed_cpio(self): test_file = self.get_test_loc('archive/rpm/xz-compressed-cpio.rpm') test_dir = self.get_temp_dir() @@ -1511,7 +1546,7 @@ def test_extract_twice_with_rpm_with_xz_compressed_cpio(self): def test_extract_twice_can_extract_to_relative_paths(self): # The setup is a tad complex because we 
want to have a relative dir # to the base dir where we run tests from, ie the scancode-toolkit/ dir - # To use relative paths, we use our tmp dir at the root of the code + # To use relative paths, we use our tmp dir at the root of the code tree from os.path import dirname, join, abspath, exists import shutil import tempfile @@ -1535,6 +1570,7 @@ def test_extract_twice_can_extract_to_relative_paths(self): class TestRar(BaseArchiveTestCase): + def test_extract_rar_basic(self): test_file = self.get_test_loc('archive/rar/basic.rar') test_dir = self.get_temp_dir() @@ -1610,6 +1646,7 @@ def test_extract_rar_with_non_ascii_path(self): class TestSevenZip(BaseArchiveTestCase): + def test_extract_7z_basic(self): test_file = self.get_test_loc('archive/7z/z.7z') test_dir = self.get_temp_dir() @@ -1725,6 +1762,7 @@ def test_extract_7z_basic_with_space_in_file_name(self): class TestIso(BaseArchiveTestCase): + def test_extract_iso_basic(self): test_file = self.get_test_loc('archive/iso/small.iso') test_dir = self.get_temp_dir() @@ -1754,6 +1792,7 @@ def test_extract_iso_basic_with_with_weird_filename_extension(self): class TestXzLzma(BaseArchiveTestCase): + def check_lzma_extract(self, extract_fun, test_file, expected): """ Run the 'extract_fun' function using the 'test_file' file as an input @@ -1846,6 +1885,7 @@ def test_extract_archive_tar_lzma_5(self): class TestDia(BaseArchiveTestCase): + def test_extract_dia_basic(self): test_file = self.get_test_loc('archive/dia/dia.dia') test_dir = self.get_temp_dir() @@ -1901,6 +1941,7 @@ def test_extract_can_get_extractor_and_uncompress_dia_files(self): class TestTarZ(BaseArchiveTestCase): + def test_extract_tarz_compress_basic(self): test_file = self.get_test_loc('archive/Z/tkWWW-0.11.tar.Z') test_dir = self.get_temp_dir() @@ -1917,6 +1958,7 @@ def test_extract_z_compress_basic(self): class TestXar(BaseArchiveTestCase): + def test_extract_xar_basic(self): test_file = self.get_test_loc('archive/xar/xar-1.4.xar') test_dir = 
self.get_temp_dir() @@ -1928,6 +1970,7 @@ def test_extract_xar_basic(self): class TestCb7(BaseArchiveTestCase): + def test_get_extractor_cb7(self): test_file = self.get_test_loc('archive/cb7/t .cb7') result = archive.get_extractor(test_file) @@ -1950,7 +1993,9 @@ def test_extract_cb7_basic_with_weird_filename_extension(self): expected = ['/t/', '/t/t.txt'] assert expected == extracted + class TestCab(BaseArchiveTestCase): + def test_get_extractor_cab(self): test_file = self.get_test_loc('archive/cab/basic.cab') result = archive.get_extractor(test_file) @@ -1973,7 +2018,9 @@ def test_extract_cab_basic_with_weird_filename_extension(self): expected = ['/t/', '/t/t.txt'] assert expected == extracted + class TestCbr(BaseArchiveTestCase): + def test_get_extractor_cbr(self): test_file = self.get_test_loc('archive/cbr/t.cbr') result = archive.get_extractor(test_file) @@ -1996,7 +2043,9 @@ def test_extract_cbr_basic_with_weird_filename_extension(self): expected = ['/t/', '/t/t.txt'] assert expected == extracted + class TestCbt(BaseArchiveTestCase): + def test_get_extractor_cbt(self): test_file = self.get_test_loc('archive/cbt/t.cbt') result = archive.get_extractor(test_file) @@ -2019,7 +2068,9 @@ def test_extract_cbt_basic_with_weird_filename_extension(self): expected = ['/t/', '/t/t.txt'] assert expected == extracted + class TestCbz(BaseArchiveTestCase): + def test_get_extractor_cbz(self): test_file = self.get_test_loc('archive/cbz/t.cbz') result = archive.get_extractor(test_file) @@ -2114,7 +2165,7 @@ def check_extract(self, test_function, test_file, expected_suffix, expected_warn return len_test_dir = len(test_dir) - extracted = sorted(path[len_test_dir:] for path in fileutils.file_iter(test_dir)) + extracted = sorted(path[len_test_dir:] for path in fileutils.resource_iter(test_dir, with_dirs=False)) extracted = [unicode(p) for p in extracted] extracted = [to_posix(p) for p in extracted] @@ -2146,7 +2197,7 @@ def 
test_extract_7zip_with_weird_filenames_with_libarchive(self): def test_extract_ar_with_weird_filenames_with_libarchive(self): test_file = self.get_test_loc('archive/weird_names/weird_names.ar') - warns = ['COM3.txt: Incorrect file header signature', 'com4: Incorrect file header signature'] + warns = ['None: \nIncorrect file header signature'] self.check_extract(libarchive2.extract, test_file, expected_warnings=warns, expected_suffix='libarch') def test_extract_cpio_with_weird_filenames_with_libarchive(self): @@ -2177,7 +2228,7 @@ def test_extract_7zip_with_weird_filenames_with_libarchive(self): def test_extract_ar_with_weird_filenames_with_libarchive(self): test_file = self.get_test_loc('archive/weird_names/weird_names.ar') - warns = ['COM3.txt: Incorrect file header signature', 'com4: Incorrect file header signature'] + warns = ['None: \nIncorrect file header signature'] self.check_extract(libarchive2.extract, test_file, expected_warnings=warns, expected_suffix='libarch') def test_extract_cpio_with_weird_filenames_with_libarchive(self): @@ -2208,7 +2259,7 @@ def test_extract_7zip_with_weird_filenames_with_libarchive(self): def test_extract_ar_with_weird_filenames_with_libarchive(self): test_file = self.get_test_loc('archive/weird_names/weird_names.ar') - warns = ['COM3.txt: Incorrect file header signature', 'com4: Incorrect file header signature'] + warns = [u'None: \nIncorrect file header signature'] self.check_extract(libarchive2.extract, test_file, expected_warnings=warns, expected_suffix='libarch') def test_extract_cpio_with_weird_filenames_with_libarchive(self): @@ -2328,48 +2379,76 @@ class TestExtractArchiveWithIllegalFilenamesWithPytarOnLinuxWarnings(TestExtract @skipIf(not on_mac, 'Run only on Mac because of specific test expectations.') -class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMac(ExtractArchiveWithIllegalFilenamesTestCase): - check_only_warnings = False +class 
TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings(ExtractArchiveWithIllegalFilenamesTestCase): + check_only_warnings = True - @expectedFailure # not a problem: we use libarchive for these def test_extract_7zip_with_weird_filenames_with_sevenzip(self): test_file = self.get_test_loc('archive/weird_names/weird_names.7z') self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip') - @expectedFailure # not a problem: we use libarchive for these def test_extract_ar_with_weird_filenames_with_sevenzip(self): test_file = self.get_test_loc('archive/weird_names/weird_names.ar') self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip') - @expectedFailure # not a problem: we use libarchive for these def test_extract_cpio_with_weird_filenames_with_sevenzip(self): test_file = self.get_test_loc('archive/weird_names/weird_names.cpio') self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip') - @expectedFailure # This is a problem def test_extract_iso_with_weird_filenames_with_sevenzip(self): test_file = self.get_test_loc('archive/weird_names/weird_names.iso') self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip') - @expectedFailure # This is a problem, but unrar seems to fail the same way def test_extract_rar_with_weird_filenames_with_sevenzip(self): test_file = self.get_test_loc('archive/weird_names/weird_names.rar') self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip') - @expectedFailure # not a problem: we use libarchive for these def test_extract_tar_with_weird_filenames_with_sevenzip(self): test_file = self.get_test_loc('archive/weird_names/weird_names.tar') self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip') - @expectedFailure # not a problem: we use libarchive for these def test_extract_zip_with_weird_filenames_with_sevenzip(self): 
test_file = self.get_test_loc('archive/weird_names/weird_names.zip') self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip') @skipIf(not on_mac, 'Run only on Mac because of specific test expectations.') -class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings(TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMac): - check_only_warnings = True +class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMac(TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings): + check_only_warnings = False + + # not a problem: we use libarchive for these + test_extract_7zip_with_weird_filenames_with_sevenzip = expectedFailure( + TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings + .test_extract_7zip_with_weird_filenames_with_sevenzip) + + # not a problem: we use libarchive for these + test_extract_ar_with_weird_filenames_with_sevenzip = expectedFailure( + TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings + .test_extract_ar_with_weird_filenames_with_sevenzip) + + # not a problem: we use libarchive for these + test_extract_cpio_with_weird_filenames_with_sevenzip = expectedFailure( + TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings + .test_extract_cpio_with_weird_filenames_with_sevenzip) + + # This is a problem + test_extract_iso_with_weird_filenames_with_sevenzip = expectedFailure( + TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings + .test_extract_iso_with_weird_filenames_with_sevenzip) + + # This is a problem, but unrar seems to fail the same way + test_extract_rar_with_weird_filenames_with_sevenzip = expectedFailure( + TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings + .test_extract_rar_with_weird_filenames_with_sevenzip) + + # not a problem: we use libarchive for these + test_extract_tar_with_weird_filenames_with_sevenzip = expectedFailure( + TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings + 
.test_extract_tar_with_weird_filenames_with_sevenzip) + + # not a problem: we use libarchive for these + test_extract_zip_with_weird_filenames_with_sevenzip = expectedFailure( + TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings + .test_extract_zip_with_weird_filenames_with_sevenzip) @skipIf(not on_mac, 'Run only on Mac because of specific test expectations.') @@ -2439,7 +2518,6 @@ class TestExtractArchiveWithIllegalFilenamesWithPytarOnMacWarnings(TestExtractAr class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWin(ExtractArchiveWithIllegalFilenamesTestCase): check_only_warnings = False - @expectedFailure # not a problem: we use libarchive for these def test_extract_7zip_with_weird_filenames_with_sevenzip(self): test_file = self.get_test_loc('archive/weird_names/weird_names.7z') self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip') @@ -2462,7 +2540,6 @@ def test_extract_rar_with_weird_filenames_with_sevenzip(self): test_file = self.get_test_loc('archive/weird_names/weird_names.rar') self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip') - # The results are not correct but not a problem: we use libarchive for these def test_extract_tar_with_weird_filenames_with_sevenzip(self): test_file = self.get_test_loc('archive/weird_names/weird_names.tar') self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip') @@ -2477,6 +2554,11 @@ def test_extract_zip_with_weird_filenames_with_sevenzip(self): class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWinWarning(TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWin): check_only_warnings = True + # The results are not correct but not a problem: we use libarchive for these + test_extract_7zip_with_weird_filenames_with_sevenzip = expectedFailure( + TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWin + .test_extract_7zip_with_weird_filenames_with_sevenzip) + @skipIf(not on_windows, 'Run 
only on Windows because of specific test expectations.') class TestExtractArchiveWithIllegalFilenamesWithPytarOnWin(ExtractArchiveWithIllegalFilenamesTestCase): diff --git a/tests/extractcode/test_extract.py b/tests/extractcode/test_extract.py index d5d9f11e96f..5c3a051d472 100644 --- a/tests/extractcode/test_extract.py +++ b/tests/extractcode/test_extract.py @@ -873,7 +873,7 @@ def test_walk_can_be_extended_while_walking(self): def test_extract_can_extract_to_relative_paths(self): # The setup is a tad complex because we want to have a relative dir # to the base dir where we run tests from, ie the scancode-toolkit/ dir - # To use relative paths, we use our tmp dir at the root of the code + # To use relative paths, we use our tmp dir at the root of the code tree from os.path import dirname, join, abspath scancode_root = dirname(dirname(dirname(__file__))) scancode_tmp = join(scancode_root, 'tmp') diff --git a/tests/extractcode/test_extractcode.py b/tests/extractcode/test_extractcode.py index 9d8cf294735..3ed0a7ddf0a 100644 --- a/tests/extractcode/test_extractcode.py +++ b/tests/extractcode/test_extractcode.py @@ -22,16 +22,17 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
-from __future__ import absolute_import, print_function - +from __future__ import absolute_import +from __future__ import print_function from os.path import dirname +from os.path import exists from os.path import join from commoncode.testcase import FileBasedTesting from commoncode import fileutils from extractcode import new_name -from os.path import exists + class TestNewName(FileBasedTesting): test_data_dir = join(dirname(__file__), 'data') diff --git a/tests/extractcode/test_patch.py b/tests/extractcode/test_patch.py index da8767a8f8b..4138101e153 100644 --- a/tests/extractcode/test_patch.py +++ b/tests/extractcode/test_patch.py @@ -26,7 +26,6 @@ from __future__ import print_function from __future__ import unicode_literals - import codecs import json import os diff --git a/tests/extractcode/test_tar.py b/tests/extractcode/test_tar.py index 0e1e7b41bb9..9bab4c43559 100644 --- a/tests/extractcode/test_tar.py +++ b/tests/extractcode/test_tar.py @@ -26,7 +26,6 @@ from __future__ import print_function from __future__ import unicode_literals - import os from unittest.case import skipIf @@ -38,6 +37,7 @@ class TestTarGzip(BaseArchiveTestCase): + def test_extract_targz_basic(self): test_file = self.get_test_loc('archive/tgz/tarred_gzipped.tar.gz') test_dir = self.get_temp_dir() @@ -141,6 +141,7 @@ def test_extract_targz_from_apache_should_not_return_errors(self): class TestTarBz2(BaseArchiveTestCase): + def test_extract_tar_bz2_basic(self): test_file = self.get_test_loc('archive/tbz/tarred_bzipped.tar.bz2') test_dir = self.get_temp_dir() @@ -217,6 +218,7 @@ def test_extract_tar_bz2_multistream(self): class TestTar(BaseArchiveTestCase): + def test_extract_tar_basic(self): test_file = self.get_test_loc('archive/tar/tarred.tar') test_dir = self.get_temp_dir() @@ -304,7 +306,7 @@ def test_extract_tar_archive_with_special_files(self): assert sorted(expected_warnings) == sorted(result) @skipIf(True, 'Unicode tar paths are not handled well yet: we use libarchive instead') - 
def test_extract_python_testtar_tar_archive_with_special_files(self): + def test_tar_extract_python_testtar_tar_archive_with_special_files(self): test_file = self.get_test_loc('archive/tar/testtar.tar') # this is from: # https://hg.python.org/cpython/raw-file/bff88c866886/Lib/test/testtar.tar diff --git a/tests/formattedcode/data/csv/flatten_scan/full.json-expected b/tests/formattedcode/data/csv/flatten_scan/full.json-expected index 73ec2b55d2d..3ee87b68b6c 100644 --- a/tests/formattedcode/data/csv/flatten_scan/full.json-expected +++ b/tests/formattedcode/data/csv/flatten_scan/full.json-expected @@ -35,11 +35,8 @@ "start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/test_sevenzip.py", @@ -56,11 +53,8 @@ "start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/test_sevenzip.py", @@ -164,11 +158,8 @@ "start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/test_tar.py", @@ -185,11 +176,8 @@ "start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + 
"matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/test_tar.py", @@ -269,11 +257,8 @@ "start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/test_patch.py", @@ -290,11 +275,8 @@ "start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/test_patch.py", @@ -362,11 +344,8 @@ "start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/test_extract.py", @@ -383,11 +362,8 @@ "start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/test_extract.py", @@ -455,11 +431,8 @@ "start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/extractcode_assert_utils.py", @@ -476,11 +449,8 @@ 
"start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/extractcode_assert_utils.py", @@ -548,11 +518,8 @@ "start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/test_extractcode.py", @@ -569,11 +536,8 @@ "start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/test_extractcode.py", @@ -641,11 +605,8 @@ "start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/test_archive.py", @@ -662,11 +623,8 @@ "start_line": 4, "end_line": 23, "matched_rule__identifier": "apache-2.0_scancode.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0", - "scancode-acknowledgment" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0 AND scancode-acknowledgment" }, { "Resource": "/test_archive.py", @@ -1597,10 +1555,8 @@ "start_line": 386, "end_line": 386, "matched_rule__identifier": "apache-2.0_36.RULE", - 
"matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0" }, { "Resource": "/data/extract/TODO/org-jvnet-glassfish-comms-sipagent_1530.jar", @@ -1617,10 +1573,8 @@ "start_line": 386, "end_line": 386, "matched_rule__identifier": "cddl-1.0_6.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "cddl-1.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "cddl-1.0" }, { "Resource": "/data/extract/TODO/org-jvnet-glassfish-comms-sipagent_1530.jar", @@ -4525,10 +4479,8 @@ "start_line": 1141, "end_line": 1149, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/dnsmasq_2.63-1.diff.expected", @@ -4545,10 +4497,8 @@ "start_line": 1151, "end_line": 1151, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/dnsmasq_2.63-1.diff.expected", @@ -4565,10 +4515,8 @@ "start_line": 1152, "end_line": 1152, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/dnsmasq_2.63-1.diff.expected", @@ -4585,10 +4533,8 @@ "start_line": 1153, "end_line": 1153, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/dnsmasq_2.63-1.diff.expected", @@ -4605,10 +4551,8 @@ "start_line": 1770, "end_line": 
1770, "matched_rule__identifier": "gpl_71.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/dnsmasq_2.63-1.diff.expected", @@ -4907,10 +4851,8 @@ "start_line": 1118, "end_line": 1126, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/dnsmasq_2.63-1.diff", @@ -4927,10 +4869,8 @@ "start_line": 1128, "end_line": 1128, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/dnsmasq_2.63-1.diff", @@ -4947,10 +4887,8 @@ "start_line": 1129, "end_line": 1129, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/dnsmasq_2.63-1.diff", @@ -4967,10 +4905,8 @@ "start_line": 1130, "end_line": 1130, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/dnsmasq_2.63-1.diff", @@ -4987,10 +4923,8 @@ "start_line": 1681, "end_line": 1681, "matched_rule__identifier": "gpl_71.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/dnsmasq_2.63-1.diff", @@ -6504,10 +6438,8 @@ "start_line": 160, "end_line": 173, "matched_rule__identifier": 
"lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch.expected", @@ -6524,10 +6456,8 @@ "start_line": 405, "end_line": 418, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch.expected", @@ -6544,10 +6474,8 @@ "start_line": 564, "end_line": 577, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch.expected", @@ -6564,10 +6492,8 @@ "start_line": 652, "end_line": 665, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch.expected", @@ -6584,10 +6510,8 @@ "start_line": 735, "end_line": 748, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch.expected", @@ -6604,10 +6528,8 @@ "start_line": 824, "end_line": 837, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - 
"matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch.expected", @@ -6624,10 +6546,8 @@ "start_line": 887, "end_line": 900, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch.expected", @@ -6644,10 +6564,8 @@ "start_line": 984, "end_line": 997, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch.expected", @@ -7381,10 +7299,8 @@ "start_line": 131, "end_line": 144, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch", @@ -7401,10 +7317,8 @@ "start_line": 370, "end_line": 383, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch", @@ -7421,10 +7335,8 @@ "start_line": 505, "end_line": 518, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - 
"matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch", @@ -7441,10 +7353,8 @@ "start_line": 587, "end_line": 600, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch", @@ -7461,10 +7371,8 @@ "start_line": 658, "end_line": 671, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch", @@ -7481,10 +7389,8 @@ "start_line": 741, "end_line": 754, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch", @@ -7501,10 +7407,8 @@ "start_line": 798, "end_line": 811, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch", @@ -7521,10 +7425,8 @@ "start_line": 889, "end_line": 902, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": 
"", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch", @@ -8210,10 +8112,8 @@ "start_line": 69, "end_line": 82, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/prepatches/webcore_videoplane.patch.expected", @@ -8362,10 +8262,8 @@ "start_line": 58, "end_line": 71, "matched_rule__identifier": "lgpl-2.0-plus_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/webkit/opensource/prepatches/webcore_videoplane.patch", @@ -8925,10 +8823,8 @@ "start_line": 102, "end_line": 114, "matched_rule__identifier": "gpl-2.0-plus_49.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch.expected", @@ -8945,10 +8841,8 @@ "start_line": 2287, "end_line": 2287, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch.expected", @@ -8965,10 +8859,8 @@ "start_line": 2305, "end_line": 2313, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch.expected", @@ -8985,10 +8877,8 @@ "start_line": 2315, "end_line": 2315, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch.expected", @@ -9005,10 +8895,8 @@ "start_line": 2402, "end_line": 2410, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch.expected", @@ -9025,10 +8913,8 @@ "start_line": 2412, "end_line": 2412, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch.expected", @@ -9045,10 +8931,8 @@ "start_line": 3174, "end_line": 3182, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch.expected", @@ -9065,10 +8949,8 @@ "start_line": 3184, "end_line": 3184, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch.expected", @@ -9085,10 +8967,8 @@ "start_line": 4119, "end_line": 4127, 
"matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch.expected", @@ -9105,10 +8985,8 @@ "start_line": 4129, "end_line": 4129, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch.expected", @@ -9125,10 +9003,8 @@ "start_line": 4175, "end_line": 4183, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch.expected", @@ -9145,10 +9021,8 @@ "start_line": 4185, "end_line": 4185, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch.expected", @@ -9381,10 +9255,8 @@ "start_line": 772, "end_line": 772, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9401,10 +9273,8 @@ "start_line": 801, "end_line": 801, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + 
"matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9421,10 +9291,8 @@ "start_line": 819, "end_line": 819, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9441,10 +9309,8 @@ "start_line": 876, "end_line": 878, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9461,10 +9327,8 @@ "start_line": 946, "end_line": 946, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9481,10 +9345,8 @@ "start_line": 974, "end_line": 974, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9501,10 +9363,8 @@ "start_line": 1029, "end_line": 1031, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9521,10 +9381,8 @@ "start_line": 1920, "end_line": 1922, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9541,10 +9399,8 @@ "start_line": 2814, "end_line": 2816, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9561,10 +9417,8 @@ "start_line": 3370, "end_line": 3372, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9581,10 +9435,8 @@ "start_line": 3779, "end_line": 3781, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9601,10 +9453,8 @@ "start_line": 4093, "end_line": 4095, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9621,10 
+9471,8 @@ "start_line": 4262, "end_line": 4264, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9641,10 +9489,8 @@ "start_line": 4682, "end_line": 4684, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9661,10 +9507,8 @@ "start_line": 5058, "end_line": 5060, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9681,10 +9525,8 @@ "start_line": 6100, "end_line": 6102, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9701,10 +9543,8 @@ "start_line": 6684, "end_line": 6686, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9721,10 +9561,8 @@ "start_line": 7445, "end_line": 7445, "matched_rule__identifier": "gpl-2.0_43.RULE", - 
"matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9741,10 +9579,8 @@ "start_line": 7475, "end_line": 7477, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9761,10 +9597,8 @@ "start_line": 7575, "end_line": 7577, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9781,10 +9615,8 @@ "start_line": 7872, "end_line": 7874, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9801,10 +9633,8 @@ "start_line": 8400, "end_line": 8402, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9821,10 +9651,8 @@ "start_line": 8513, "end_line": 8515, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + 
"matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9841,10 +9669,8 @@ "start_line": 8621, "end_line": 8623, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9861,10 +9687,8 @@ "start_line": 8728, "end_line": 8730, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9881,10 +9705,8 @@ "start_line": 9787, "end_line": 9789, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9901,10 +9723,8 @@ "start_line": 10453, "end_line": 10455, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9921,10 +9741,8 @@ "start_line": 10747, "end_line": 10749, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9941,10 +9759,8 @@ "start_line": 11336, "end_line": 11338, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9961,10 +9777,8 @@ "start_line": 11513, "end_line": 11515, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -9981,10 +9795,8 @@ "start_line": 11632, "end_line": 11634, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected", @@ -12422,10 +12234,8 @@ "start_line": 695, "end_line": 695, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12442,10 +12252,8 @@ "start_line": 718, "end_line": 718, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12462,10 +12270,8 @@ "start_line": 736, 
"end_line": 736, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12482,10 +12288,8 @@ "start_line": 787, "end_line": 789, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12502,10 +12306,8 @@ "start_line": 857, "end_line": 857, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12522,10 +12324,8 @@ "start_line": 885, "end_line": 885, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12542,10 +12342,8 @@ "start_line": 928, "end_line": 930, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12562,10 +12360,8 @@ "start_line": 1813, "end_line": 1815, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": 
"", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12582,10 +12378,8 @@ "start_line": 2701, "end_line": 2703, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12602,10 +12396,8 @@ "start_line": 3251, "end_line": 3253, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12622,10 +12414,8 @@ "start_line": 3654, "end_line": 3656, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12642,10 +12432,8 @@ "start_line": 3962, "end_line": 3964, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12662,10 +12450,8 @@ "start_line": 4125, "end_line": 4127, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ 
-12682,10 +12468,8 @@ "start_line": 4539, "end_line": 4541, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12702,10 +12486,8 @@ "start_line": 4909, "end_line": 4911, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12722,10 +12504,8 @@ "start_line": 5945, "end_line": 5947, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12742,10 +12522,8 @@ "start_line": 6523, "end_line": 6525, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12762,10 +12540,8 @@ "start_line": 7284, "end_line": 7284, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12782,10 +12558,8 @@ "start_line": 7308, "end_line": 7310, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": 
false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12802,10 +12576,8 @@ "start_line": 7402, "end_line": 7404, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12822,10 +12594,8 @@ "start_line": 7693, "end_line": 7695, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12842,10 +12612,8 @@ "start_line": 8215, "end_line": 8217, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12862,10 +12630,8 @@ "start_line": 8322, "end_line": 8324, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12882,10 +12648,8 @@ "start_line": 8424, "end_line": 8426, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { 
"Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12902,10 +12666,8 @@ "start_line": 8525, "end_line": 8527, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12922,10 +12684,8 @@ "start_line": 9578, "end_line": 9580, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12942,10 +12702,8 @@ "start_line": 10238, "end_line": 10240, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12962,10 +12720,8 @@ "start_line": 10526, "end_line": 10528, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -12982,10 +12738,8 @@ "start_line": 11109, "end_line": 11111, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -13002,10 +12756,8 @@ "start_line": 
11280, "end_line": 11282, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -13022,10 +12774,8 @@ "start_line": 11375, "end_line": 11377, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff", @@ -16120,10 +15870,8 @@ "start_line": 34, "end_line": 35, "matched_rule__identifier": "curl.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "curl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "curl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19x3_board.patch", @@ -16140,10 +15888,8 @@ "start_line": 34, "end_line": 35, "matched_rule__identifier": "gpl-2.0-plus_6.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19x3_board.patch", @@ -16244,10 +15990,8 @@ "start_line": 99, "end_line": 111, "matched_rule__identifier": "gpl-2.0-plus_49.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch.expected", @@ -16264,10 +16008,8 @@ "start_line": 2239, "end_line": 2239, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + 
"matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch.expected", @@ -16284,10 +16026,8 @@ "start_line": 2273, "end_line": 2281, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch.expected", @@ -16304,10 +16044,8 @@ "start_line": 2283, "end_line": 2283, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch.expected", @@ -16324,10 +16062,8 @@ "start_line": 3023, "end_line": 3031, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch.expected", @@ -16344,10 +16080,8 @@ "start_line": 3033, "end_line": 3033, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch.expected", @@ -16364,10 +16098,8 @@ "start_line": 3122, "end_line": 3130, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch.expected", @@ -16384,10 +16116,8 @@ "start_line": 3132, "end_line": 3132, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch.expected", @@ -16404,10 +16134,8 @@ "start_line": 4066, "end_line": 4074, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch.expected", @@ -16424,10 +16152,8 @@ "start_line": 4076, "end_line": 4076, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch.expected", @@ -16444,10 +16170,8 @@ "start_line": 4121, "end_line": 4129, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch.expected", @@ -16464,10 +16188,8 @@ "start_line": 4131, "end_line": 4131, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch.expected", @@ -16610,10 +16332,8 @@ "start_line": 42, "end_line": 66, "matched_rule__identifier": 
"dual-bsd-gpl.LICENSE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "dual-bsd-gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "dual-bsd-gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/motorola_rootdisk.c.patch", @@ -16630,10 +16350,8 @@ "start_line": 204, "end_line": 204, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/motorola_rootdisk.c.patch", @@ -16821,10 +16539,8 @@ "start_line": 101, "end_line": 125, "matched_rule__identifier": "dual-bsd-gpl.LICENSE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "dual-bsd-gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "dual-bsd-gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/fan_ctrl.patch.expected", @@ -16841,10 +16557,8 @@ "start_line": 138, "end_line": 138, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/fan_ctrl.patch.expected", @@ -16888,10 +16602,8 @@ "start_line": 42, "end_line": 66, "matched_rule__identifier": "dual-bsd-gpl.LICENSE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "dual-bsd-gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "dual-bsd-gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19xx_nand.patch", @@ -16908,10 +16620,8 @@ "start_line": 364, "end_line": 364, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + 
"matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19xx_nand.patch", @@ -16988,10 +16698,8 @@ "start_line": 104, "end_line": 116, "matched_rule__identifier": "gpl-2.0-plus_49.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch.expected", @@ -17008,10 +16716,8 @@ "start_line": 2289, "end_line": 2289, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch.expected", @@ -17028,10 +16734,8 @@ "start_line": 2308, "end_line": 2316, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch.expected", @@ -17048,10 +16752,8 @@ "start_line": 2318, "end_line": 2318, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch.expected", @@ -17068,10 +16770,8 @@ "start_line": 2406, "end_line": 2414, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch.expected", @@ -17088,10 +16788,8 @@ "start_line": 2416, "end_line": 2416, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch.expected", @@ -17108,10 +16806,8 @@ "start_line": 3180, "end_line": 3188, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch.expected", @@ -17128,10 +16824,8 @@ "start_line": 3190, "end_line": 3190, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch.expected", @@ -17148,10 +16842,8 @@ "start_line": 4126, "end_line": 4134, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch.expected", @@ -17168,10 +16860,8 @@ "start_line": 4136, "end_line": 4136, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch.expected", @@ -17188,10 +16878,8 @@ "start_line": 4183, 
"end_line": 4191, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch.expected", @@ -17208,10 +16896,8 @@ "start_line": 4193, "end_line": 4193, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch.expected", @@ -17687,10 +17373,8 @@ "start_line": 51, "end_line": 52, "matched_rule__identifier": "curl.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "curl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "curl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19x3_board.patch.expected", @@ -17707,10 +17391,8 @@ "start_line": 51, "end_line": 52, "matched_rule__identifier": "gpl-2.0-plus_6.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19x3_board.patch.expected", @@ -17985,10 +17667,8 @@ "start_line": 72, "end_line": 96, "matched_rule__identifier": "dual-bsd-gpl.LICENSE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "dual-bsd-gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "dual-bsd-gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/fan_ctrl.patch", @@ -18005,10 +17685,8 @@ "start_line": 109, "end_line": 109, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + 
"matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/fan_ctrl.patch", @@ -18079,10 +17757,8 @@ "start_line": 1569, "end_line": 1569, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/sched-cfs-v2.6.23.12-v24.1.mod.patch.expected", @@ -18099,10 +17775,8 @@ "start_line": 3001, "end_line": 3001, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/sched-cfs-v2.6.23.12-v24.1.mod.patch.expected", @@ -18119,10 +17793,8 @@ "start_line": 3592, "end_line": 3592, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/sched-cfs-v2.6.23.12-v24.1.mod.patch.expected", @@ -18139,10 +17811,8 @@ "start_line": 4625, "end_line": 4625, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/sched-cfs-v2.6.23.12-v24.1.mod.patch.expected", @@ -18282,10 +17952,8 @@ "start_line": 88, "end_line": 100, "matched_rule__identifier": "gpl-2.0-plus_49.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch", @@ -18302,10 +17970,8 @@ "start_line": 2273, "end_line": 2273, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch", @@ -18322,10 +17988,8 @@ "start_line": 2286, "end_line": 2294, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch", @@ -18342,10 +18006,8 @@ "start_line": 2296, "end_line": 2296, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch", @@ -18362,10 +18024,8 @@ "start_line": 2378, "end_line": 2386, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch", @@ -18382,10 +18042,8 @@ "start_line": 2388, "end_line": 2388, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch", @@ -18402,10 +18060,8 @@ "start_line": 3141, "end_line": 3149, "matched_rule__identifier": 
"gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch", @@ -18422,10 +18078,8 @@ "start_line": 3151, "end_line": 3151, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch", @@ -18442,10 +18096,8 @@ "start_line": 4081, "end_line": 4089, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch", @@ -18462,10 +18114,8 @@ "start_line": 4091, "end_line": 4091, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch", @@ -18482,10 +18132,8 @@ "start_line": 4132, "end_line": 4140, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch", @@ -18502,10 +18150,8 @@ "start_line": 4142, "end_line": 4142, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + 
"matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch", @@ -18834,10 +18480,8 @@ "start_line": 1438, "end_line": 1438, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/sched-cfs-v2.6.23.12-v24.1.mod.patch", @@ -18854,10 +18498,8 @@ "start_line": 2870, "end_line": 2870, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/sched-cfs-v2.6.23.12-v24.1.mod.patch", @@ -18874,10 +18516,8 @@ "start_line": 3461, "end_line": 3461, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/sched-cfs-v2.6.23.12-v24.1.mod.patch", @@ -18894,10 +18534,8 @@ "start_line": 4494, "end_line": 4494, "matched_rule__identifier": "gpl_62.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/sched-cfs-v2.6.23.12-v24.1.mod.patch", @@ -18968,10 +18606,8 @@ "start_line": 77, "end_line": 77, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/localversion.patch", @@ -19135,10 +18771,8 @@ "start_line": 59, "end_line": 83, 
"matched_rule__identifier": "dual-bsd-gpl.LICENSE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "dual-bsd-gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "dual-bsd-gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19xx_nand.patch.expected", @@ -19155,10 +18789,8 @@ "start_line": 381, "end_line": 381, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19xx_nand.patch.expected", @@ -19229,10 +18861,8 @@ "start_line": 82, "end_line": 94, "matched_rule__identifier": "gpl-2.0-plus_49.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch", @@ -19249,10 +18879,8 @@ "start_line": 2222, "end_line": 2222, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch", @@ -19269,10 +18897,8 @@ "start_line": 2244, "end_line": 2252, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch", @@ -19289,10 +18915,8 @@ "start_line": 2254, "end_line": 2254, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + 
"matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch", @@ -19309,10 +18933,8 @@ "start_line": 2988, "end_line": 2996, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch", @@ -19329,10 +18951,8 @@ "start_line": 2998, "end_line": 2998, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch", @@ -19349,10 +18969,8 @@ "start_line": 3081, "end_line": 3089, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch", @@ -19369,10 +18987,8 @@ "start_line": 3091, "end_line": 3091, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch", @@ -19389,10 +19005,8 @@ "start_line": 4019, "end_line": 4027, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch", @@ -19409,10 +19023,8 @@ "start_line": 
4029, "end_line": 4029, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch", @@ -19429,10 +19041,8 @@ "start_line": 4068, "end_line": 4076, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch", @@ -19449,10 +19059,8 @@ "start_line": 4078, "end_line": 4078, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.4.patch", @@ -19811,10 +19419,8 @@ "start_line": 10, "end_line": 22, "matched_rule__identifier": "gpl-2.0_34.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19xx.h.patch", @@ -19969,10 +19575,8 @@ "start_line": 85, "end_line": 97, "matched_rule__identifier": "gpl-2.0-plus_49.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch", @@ -19989,10 +19593,8 @@ "start_line": 2270, "end_line": 2270, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + 
"matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch", @@ -20009,10 +19611,8 @@ "start_line": 2282, "end_line": 2290, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch", @@ -20029,10 +19629,8 @@ "start_line": 2292, "end_line": 2292, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch", @@ -20049,10 +19647,8 @@ "start_line": 2373, "end_line": 2381, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch", @@ -20069,10 +19665,8 @@ "start_line": 2383, "end_line": 2383, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch", @@ -20089,10 +19683,8 @@ "start_line": 3133, "end_line": 3141, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch", @@ -20109,10 +19701,8 @@ 
"start_line": 3143, "end_line": 3143, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch", @@ -20129,10 +19719,8 @@ "start_line": 4072, "end_line": 4080, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch", @@ -20149,10 +19737,8 @@ "start_line": 4082, "end_line": 4082, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch", @@ -20169,10 +19755,8 @@ "start_line": 4122, "end_line": 4130, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch", @@ -20189,10 +19773,8 @@ "start_line": 4132, "end_line": 4132, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch", @@ -20335,10 +19917,8 @@ "start_line": 15, "end_line": 27, "matched_rule__identifier": "gpl-2.0_34.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + 
"matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19xx.h.patch.expected", @@ -20445,10 +20025,8 @@ "start_line": 105, "end_line": 117, "matched_rule__identifier": "gpl-2.0-plus_49.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch.expected", @@ -20465,10 +20043,8 @@ "start_line": 2290, "end_line": 2290, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch.expected", @@ -20485,10 +20061,8 @@ "start_line": 2309, "end_line": 2317, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch.expected", @@ -20505,10 +20079,8 @@ "start_line": 2319, "end_line": 2319, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch.expected", @@ -20525,10 +20097,8 @@ "start_line": 2407, "end_line": 2415, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch.expected", @@ -20545,10 +20115,8 @@ "start_line": 2417, "end_line": 2417, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch.expected", @@ -20565,10 +20133,8 @@ "start_line": 3182, "end_line": 3190, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch.expected", @@ -20585,10 +20151,8 @@ "start_line": 3192, "end_line": 3192, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch.expected", @@ -20605,10 +20169,8 @@ "start_line": 4128, "end_line": 4136, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch.expected", @@ -20625,10 +20187,8 @@ "start_line": 4138, "end_line": 4138, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch.expected", @@ -20645,10 +20205,8 @@ "start_line": 4185, 
"end_line": 4193, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch.expected", @@ -20665,10 +20223,8 @@ "start_line": 4195, "end_line": 4195, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch.expected", @@ -20811,10 +20367,8 @@ "start_line": 85, "end_line": 85, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/i2c-stm.c.patch2.expected", @@ -20915,10 +20469,8 @@ "start_line": 82, "end_line": 82, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/pmb.c.patch", @@ -21019,10 +20571,8 @@ "start_line": 56, "end_line": 80, "matched_rule__identifier": "dual-bsd-gpl.LICENSE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "dual-bsd-gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "dual-bsd-gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19xx_nor.patch.expected", @@ -21039,10 +20589,8 @@ "start_line": 219, "end_line": 219, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + 
"matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19xx_nor.patch.expected", @@ -21560,10 +21108,8 @@ "start_line": 39, "end_line": 63, "matched_rule__identifier": "dual-bsd-gpl.LICENSE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "dual-bsd-gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "dual-bsd-gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19xx_nor.patch", @@ -21580,10 +21126,8 @@ "start_line": 202, "end_line": 202, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/vip19xx_nor.patch", @@ -21702,10 +21246,8 @@ "start_line": 2317, "end_line": 2319, "matched_rule__identifier": "gpl_15.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -21722,11 +21264,8 @@ "start_line": 2318, "end_line": 2318, "matched_rule__identifier": "lgpl-2.1_released.RULE", - "matched_rule__license_choice": true, - "matched_rule__licenses": [ - "gpl-2.0", - "lgpl-2.1" - ] + "matched_rule__license_choice": "y", + "matched_rule__licenses": "gpl-2.0 OR lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -21743,11 +21282,8 @@ "start_line": 2318, "end_line": 2318, "matched_rule__identifier": "lgpl-2.1_released.RULE", - "matched_rule__license_choice": true, - "matched_rule__licenses": [ - "gpl-2.0", - "lgpl-2.1" - ] + "matched_rule__license_choice": "y", + "matched_rule__licenses": "gpl-2.0 OR lgpl-2.1" }, { 
"Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -21764,10 +21300,8 @@ "start_line": 2321, "end_line": 2321, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -21784,10 +21318,8 @@ "start_line": 2321, "end_line": 2321, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -21804,10 +21336,8 @@ "start_line": 2712, "end_line": 2714, "matched_rule__identifier": "lgpl-2.1_22.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -21824,10 +21354,8 @@ "start_line": 2716, "end_line": 2716, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -21844,10 +21372,8 @@ "start_line": 2716, "end_line": 2716, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", 
@@ -21864,10 +21390,8 @@ "start_line": 2787, "end_line": 2789, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -21884,10 +21408,8 @@ "start_line": 3202, "end_line": 3202, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -21904,10 +21426,8 @@ "start_line": 3205, "end_line": 3205, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -21924,10 +21444,8 @@ "start_line": 3205, "end_line": 3205, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -21944,10 +21462,8 @@ "start_line": 3246, "end_line": 3248, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -21964,10 +21480,8 @@ "start_line": 3588, "end_line": 3588, 
"matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -21984,10 +21498,8 @@ "start_line": 3591, "end_line": 3591, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22004,10 +21516,8 @@ "start_line": 3591, "end_line": 3591, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22024,10 +21534,8 @@ "start_line": 3648, "end_line": 3650, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22044,10 +21552,8 @@ "start_line": 6004, "end_line": 6004, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22064,10 +21570,8 @@ "start_line": 6024, "end_line": 6024, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, 
- "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22084,10 +21588,8 @@ "start_line": 6027, "end_line": 6027, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22104,10 +21606,8 @@ "start_line": 6027, "end_line": 6027, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22124,10 +21624,8 @@ "start_line": 6067, "end_line": 6069, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22144,10 +21642,8 @@ "start_line": 13609, "end_line": 13609, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22164,10 +21660,8 @@ "start_line": 13612, "end_line": 13612, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + 
"matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22184,10 +21678,8 @@ "start_line": 13612, "end_line": 13612, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22204,10 +21696,8 @@ "start_line": 14520, "end_line": 14520, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22224,10 +21714,8 @@ "start_line": 14523, "end_line": 14523, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22244,10 +21732,8 @@ "start_line": 14523, "end_line": 14523, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22264,10 +21750,8 @@ "start_line": 14549, "end_line": 14551, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22284,10 +21768,8 @@ "start_line": 14928, "end_line": 14928, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22304,10 +21786,8 @@ "start_line": 14931, "end_line": 14931, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22324,10 +21804,8 @@ "start_line": 14931, "end_line": 14931, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22344,10 +21822,8 @@ "start_line": 14967, "end_line": 14969, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22364,10 +21840,8 @@ "start_line": 15226, "end_line": 15226, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22384,10 +21858,8 @@ "start_line": 15229, "end_line": 15229, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22404,10 +21876,8 @@ "start_line": 15229, "end_line": 15229, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22424,10 +21894,8 @@ "start_line": 15264, "end_line": 15266, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22444,10 +21912,8 @@ "start_line": 15516, "end_line": 15516, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22464,10 +21930,8 @@ "start_line": 15519, "end_line": 15519, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22484,10 +21948,8 @@ "start_line": 15519, "end_line": 15519, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22504,10 +21966,8 @@ "start_line": 15557, "end_line": 15559, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22524,10 +21984,8 @@ "start_line": 15703, "end_line": 15703, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22544,10 +22002,8 @@ "start_line": 15706, "end_line": 15706, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22564,10 +22020,8 @@ "start_line": 15706, "end_line": 15706, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22584,10 +22038,8 @@ "start_line": 15752, "end_line": 15752, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22604,10 +22056,8 @@ "start_line": 15755, "end_line": 15755, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22624,10 +22074,8 @@ "start_line": 15755, "end_line": 15755, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22644,10 +22092,8 @@ "start_line": 15805, "end_line": 15807, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22664,10 +22110,8 @@ "start_line": 15868, "end_line": 15868, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22684,10 +22128,8 @@ "start_line": 15871, "end_line": 15871, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22704,10 +22146,8 @@ "start_line": 15871, "end_line": 15871, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22724,10 +22164,8 @@ "start_line": 15914, "end_line": 15916, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22744,10 +22182,8 @@ "start_line": 16133, "end_line": 16133, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22764,10 +22200,8 @@ "start_line": 16136, "end_line": 16136, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22784,10 +22218,8 @@ "start_line": 16136, "end_line": 16136, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22804,10 +22236,8 @@ "start_line": 16181, "end_line": 16203, "matched_rule__identifier": "bsd-new_19.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "bsd-new" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "bsd-new" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22824,10 +22254,8 @@ "start_line": 16356, "end_line": 16356, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22844,10 +22272,8 @@ "start_line": 16359, "end_line": 16359, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22864,10 +22290,8 @@ "start_line": 16359, "end_line": 16359, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22884,10 +22308,8 @@ "start_line": 16388, "end_line": 16390, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22904,10 +22326,8 @@ "start_line": 16933, "end_line": 16933, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22924,10 +22344,8 @@ "start_line": 16936, "end_line": 16936, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22944,10 +22362,8 @@ "start_line": 16936, "end_line": 16936, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22964,10 +22380,8 @@ "start_line": 16983, "end_line": 16985, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -22984,10 +22398,8 @@ "start_line": 17022, "end_line": 17022, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -23004,10 +22416,8 @@ "start_line": 17025, "end_line": 17025, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -23024,10 +22434,8 @@ "start_line": 17025, "end_line": 17025, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -23044,10 +22452,8 @@ "start_line": 17056, "end_line": 17056, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -23064,10 +22470,8 @@ "start_line": 17059, "end_line": 17059, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -23084,10 +22488,8 @@ "start_line": 17059, "end_line": 17059, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected", @@ -23938,10 +23340,8 @@ "start_line": 87, "end_line": 99, "matched_rule__identifier": "gpl-2.0-plus_49.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch", @@ -23958,10 +23358,8 @@ "start_line": 2272, "end_line": 2272, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch", @@ -23978,10 +23376,8 @@ "start_line": 2285, "end_line": 2293, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch", @@ -23998,10 +23394,8 @@ "start_line": 2295, "end_line": 2295, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch", @@ -24018,10 +23412,8 @@ "start_line": 2377, 
"end_line": 2385, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch", @@ -24038,10 +23430,8 @@ "start_line": 2387, "end_line": 2387, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch", @@ -24058,10 +23448,8 @@ "start_line": 3139, "end_line": 3147, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch", @@ -24078,10 +23466,8 @@ "start_line": 3149, "end_line": 3149, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch", @@ -24098,10 +23484,8 @@ "start_line": 4079, "end_line": 4087, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch", @@ -24118,10 +23502,8 @@ "start_line": 4089, "end_line": 4089, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + 
"matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch", @@ -24138,10 +23520,8 @@ "start_line": 4130, "end_line": 4138, "matched_rule__identifier": "gpl-2.0-plus_3.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0-plus" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0-plus" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch", @@ -24158,10 +23538,8 @@ "start_line": 4140, "end_line": 4140, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch", @@ -24367,10 +23745,8 @@ "start_line": 80, "end_line": 80, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24408,10 +23784,8 @@ "start_line": 2288, "end_line": 2290, "matched_rule__identifier": "gpl_15.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24428,11 +23802,8 @@ "start_line": 2289, "end_line": 2289, "matched_rule__identifier": "lgpl-2.1_released.RULE", - "matched_rule__license_choice": true, - "matched_rule__licenses": [ - "gpl-2.0", - "lgpl-2.1" - ] + "matched_rule__license_choice": "y", + "matched_rule__licenses": "gpl-2.0 OR lgpl-2.1" }, { "Resource": 
"/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24449,11 +23820,8 @@ "start_line": 2289, "end_line": 2289, "matched_rule__identifier": "lgpl-2.1_released.RULE", - "matched_rule__license_choice": true, - "matched_rule__licenses": [ - "gpl-2.0", - "lgpl-2.1" - ] + "matched_rule__license_choice": "y", + "matched_rule__licenses": "gpl-2.0 OR lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24470,10 +23838,8 @@ "start_line": 2292, "end_line": 2292, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24490,10 +23856,8 @@ "start_line": 2292, "end_line": 2292, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24510,10 +23874,8 @@ "start_line": 2665, "end_line": 2667, "matched_rule__identifier": "lgpl-2.1_22.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24530,10 +23892,8 @@ "start_line": 2669, "end_line": 2669, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24550,10 +23910,8 
@@ "start_line": 2669, "end_line": 2669, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24570,10 +23928,8 @@ "start_line": 2734, "end_line": 2736, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24590,10 +23946,8 @@ "start_line": 3143, "end_line": 3143, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24610,10 +23964,8 @@ "start_line": 3146, "end_line": 3146, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24630,10 +23982,8 @@ "start_line": 3146, "end_line": 3146, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24650,10 +24000,8 @@ "start_line": 3181, "end_line": 3183, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - 
"matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24670,10 +24018,8 @@ "start_line": 3517, "end_line": 3517, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24690,10 +24036,8 @@ "start_line": 3520, "end_line": 3520, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24710,10 +24054,8 @@ "start_line": 3520, "end_line": 3520, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24730,10 +24072,8 @@ "start_line": 3571, "end_line": 3573, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24750,10 +24090,8 @@ "start_line": 5927, "end_line": 5927, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { 
"Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24770,10 +24108,8 @@ "start_line": 5941, "end_line": 5941, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24790,10 +24126,8 @@ "start_line": 5944, "end_line": 5944, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24810,10 +24144,8 @@ "start_line": 5944, "end_line": 5944, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24830,10 +24162,8 @@ "start_line": 5978, "end_line": 5980, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24850,10 +24180,8 @@ "start_line": 13514, "end_line": 13514, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24870,10 +24198,8 @@ 
"start_line": 13517, "end_line": 13517, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24890,10 +24216,8 @@ "start_line": 13517, "end_line": 13517, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24910,10 +24234,8 @@ "start_line": 14419, "end_line": 14419, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24930,10 +24252,8 @@ "start_line": 14422, "end_line": 14422, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24950,10 +24270,8 @@ "start_line": 14422, "end_line": 14422, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24970,10 +24288,8 @@ "start_line": 14442, "end_line": 14444, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - 
"matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -24990,10 +24306,8 @@ "start_line": 14815, "end_line": 14815, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25010,10 +24324,8 @@ "start_line": 14818, "end_line": 14818, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25030,10 +24342,8 @@ "start_line": 14818, "end_line": 14818, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25050,10 +24360,8 @@ "start_line": 14848, "end_line": 14850, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25070,10 +24378,8 @@ "start_line": 15101, "end_line": 15101, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" 
}, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25090,10 +24396,8 @@ "start_line": 15104, "end_line": 15104, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25110,10 +24414,8 @@ "start_line": 15104, "end_line": 15104, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25130,10 +24432,8 @@ "start_line": 15133, "end_line": 15135, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25150,10 +24450,8 @@ "start_line": 15379, "end_line": 15379, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25170,10 +24468,8 @@ "start_line": 15382, "end_line": 15382, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25190,10 +24486,8 @@ 
"start_line": 15382, "end_line": 15382, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25210,10 +24504,8 @@ "start_line": 15414, "end_line": 15416, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25230,10 +24522,8 @@ "start_line": 15554, "end_line": 15554, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25250,10 +24540,8 @@ "start_line": 15557, "end_line": 15557, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25270,10 +24558,8 @@ "start_line": 15557, "end_line": 15557, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25290,10 +24576,8 @@ "start_line": 15597, "end_line": 15597, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - 
"matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25310,10 +24594,8 @@ "start_line": 15600, "end_line": 15600, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25330,10 +24612,8 @@ "start_line": 15600, "end_line": 15600, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25350,10 +24630,8 @@ "start_line": 15644, "end_line": 15646, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25370,10 +24648,8 @@ "start_line": 15701, "end_line": 15701, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25390,10 +24666,8 @@ "start_line": 15704, "end_line": 15704, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { 
"Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25410,10 +24684,8 @@ "start_line": 15704, "end_line": 15704, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25430,10 +24702,8 @@ "start_line": 15741, "end_line": 15743, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25450,10 +24720,8 @@ "start_line": 15954, "end_line": 15954, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25470,10 +24738,8 @@ "start_line": 15957, "end_line": 15957, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25490,10 +24756,8 @@ "start_line": 15957, "end_line": 15957, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25510,10 +24774,8 @@ 
"start_line": 15996, "end_line": 16018, "matched_rule__identifier": "bsd-new_19.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "bsd-new" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "bsd-new" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25530,10 +24792,8 @@ "start_line": 16165, "end_line": 16165, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25550,10 +24810,8 @@ "start_line": 16168, "end_line": 16168, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25570,10 +24828,8 @@ "start_line": 16168, "end_line": 16168, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25590,10 +24846,8 @@ "start_line": 16191, "end_line": 16193, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25610,10 +24864,8 @@ "start_line": 16730, "end_line": 16730, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": 
false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25630,10 +24882,8 @@ "start_line": 16733, "end_line": 16733, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25650,10 +24900,8 @@ "start_line": 16733, "end_line": 16733, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25670,10 +24918,8 @@ "start_line": 16774, "end_line": 16776, "matched_rule__identifier": "gpl-2.0_8.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25690,10 +24936,8 @@ "start_line": 16807, "end_line": 16807, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25710,10 +24954,8 @@ "start_line": 16810, "end_line": 16810, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" 
}, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25730,10 +24972,8 @@ "start_line": 16810, "end_line": 16810, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25750,10 +24990,8 @@ "start_line": 16835, "end_line": 16835, "matched_rule__identifier": "lgpl-2.1_36.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl-2.1" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl-2.1" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25770,10 +25008,8 @@ "start_line": 16838, "end_line": 16838, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -25790,10 +25026,8 @@ "start_line": 16838, "end_line": 16838, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch", @@ -26476,10 +25710,8 @@ "start_line": 59, "end_line": 83, "matched_rule__identifier": "dual-bsd-gpl.LICENSE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "dual-bsd-gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "dual-bsd-gpl" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/motorola_rootdisk.c.patch.expected", @@ -26496,10 +25728,8 @@ 
"start_line": 221, "end_line": 221, "matched_rule__identifier": "gpl-2.0_43.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/patch/patches/misc/linux-st710x/patches/motorola_rootdisk.c.patch.expected", @@ -29427,10 +28657,8 @@ "start_line": 10, "end_line": 10, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/archive/TODO/rpmticket_6327/gettext-runtime-32bit-0.17-61.40.x86_64.rpm", @@ -29447,10 +28675,8 @@ "start_line": 10, "end_line": 10, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/archive/TODO/rpmticket_6327/gettext-runtime-32bit-0.17-61.40.x86_64.rpm", @@ -29467,10 +28693,8 @@ "start_line": 98, "end_line": 98, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/archive/TODO/rpmticket_6327/gettext-runtime-32bit-0.17-61.40.x86_64.rpm", @@ -29487,10 +28711,8 @@ "start_line": 98, "end_line": 98, "matched_rule__identifier": "gpl_61.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/archive/TODO/rpmticket_6327/gettext-runtime-32bit-0.17-61.40.x86_64.rpm", @@ -30529,10 +29751,8 @@ "start_line": 135, "end_line": 135, "matched_rule__identifier": "gpl-2.0_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": 
"", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/archive/cpio/elfinfo-1.0-1.fc9.src.cpio", @@ -30588,10 +29808,8 @@ "start_line": 135, "end_line": 135, "matched_rule__identifier": "gpl-2.0_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/archive/cpio/cpio_trailing.cpio", @@ -30689,10 +29907,8 @@ "start_line": 138, "end_line": 138, "matched_rule__identifier": "gpl-2.0_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/archive/cpio/cpio_broken.cpio", @@ -35135,10 +34351,8 @@ "start_line": 9, "end_line": 9, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/archive/rpm/broken.rpm", @@ -35273,10 +34487,8 @@ "start_line": 9, "end_line": 9, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/archive/rpm/renamed.rpm", @@ -35340,10 +34552,8 @@ "start_line": 9, "end_line": 9, "matched_rule__identifier": "lgpl_20.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "lgpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "lgpl" }, { "Resource": "/data/archive/rpm/python-glc-0.7.1-1.src.rpm", @@ -35407,10 +34617,8 @@ "start_line": 8, "end_line": 8, "matched_rule__identifier": "gpl-2.0_75.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": 
"/data/archive/rpm/elfinfo-1.0-1.fc9.src.rpm", @@ -35474,10 +34682,8 @@ "start_line": 8, "end_line": 8, "matched_rule__identifier": "gpl-2.0_75.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl-2.0" }, { "Resource": "/data/archive/rpm/rpm_trailing.rpm", @@ -36792,10 +35998,8 @@ "start_line": 1619, "end_line": 1619, "matched_rule__identifier": "gpl_63.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "gpl" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "gpl" }, { "Resource": "/data/archive/bz2/bzip2_with_gentoo_trailing_data/sys-libs%253Azlib-1.2.3-r1%7E1.tbz2", @@ -36812,10 +36016,8 @@ "start_line": 2139, "end_line": 2139, "matched_rule__identifier": "zlib_10.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "zlib" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "zlib" }, { "Resource": "/data/archive/bz2/bzip2_with_gentoo_trailing_data/sys-libs%253Azlib-1.2.3-r1%7E1.tbz2", diff --git a/tests/formattedcode/data/csv/flatten_scan/minimal.json-expected b/tests/formattedcode/data/csv/flatten_scan/minimal.json-expected index 0d33ece20da..d8a72a7d78d 100644 --- a/tests/formattedcode/data/csv/flatten_scan/minimal.json-expected +++ b/tests/formattedcode/data/csv/flatten_scan/minimal.json-expected @@ -18,10 +18,8 @@ "start_line": 4, "end_line": 5, "matched_rule__identifier": "apache-2.0_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0" }, { "Resource": "/srp_vfy.c", @@ -38,10 +36,8 @@ "start_line": 4, "end_line": 4, "matched_rule__identifier": "openssl-ssleay_2.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "openssl-ssleay" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": 
"openssl-ssleay" }, { "Resource": "/srp_vfy.c", @@ -74,10 +70,8 @@ "start_line": 4, "end_line": 5, "matched_rule__identifier": "apache-2.0_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0" }, { "Resource": "/srp_lib.c", @@ -94,10 +88,8 @@ "start_line": 4, "end_line": 4, "matched_rule__identifier": "openssl-ssleay_2.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "openssl-ssleay" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "openssl-ssleay" }, { "Resource": "/srp_lib.c", diff --git a/tests/formattedcode/data/csv/flatten_scan/package_license_value_null.json-expected b/tests/formattedcode/data/csv/flatten_scan/package_license_value_null.json-expected index 1955d928eed..e9968fb1e4b 100644 --- a/tests/formattedcode/data/csv/flatten_scan/package_license_value_null.json-expected +++ b/tests/formattedcode/data/csv/flatten_scan/package_license_value_null.json-expected @@ -18,10 +18,8 @@ "start_line": 4, "end_line": 5, "matched_rule__identifier": "apache-2.0_23.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "apache-2.0" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "apache-2.0" }, { "Resource": "/srp_vfy.c", @@ -38,10 +36,8 @@ "start_line": 4, "end_line": 4, "matched_rule__identifier": "openssl-ssleay_2.RULE", - "matched_rule__license_choice": false, - "matched_rule__licenses": [ - "openssl-ssleay" - ] + "matched_rule__license_choice": "", + "matched_rule__licenses": "openssl-ssleay" }, { "Resource": "/srp_vfy.c", diff --git a/tests/formattedcode/data/csv/livescan/expected.csv b/tests/formattedcode/data/csv/livescan/expected.csv index aeaaa9d531d..ed8e9c06718 100644 --- a/tests/formattedcode/data/csv/livescan/expected.csv +++ b/tests/formattedcode/data/csv/livescan/expected.csv @@ -1,19 +1,19 @@ 
-Resource,type,name,base_name,extension,date,size,sha1,md5,files_count,mime_type,file_type,programming_language,is_binary,is_text,is_archive,is_media,is_source,is_script,scan_errors,license__key,license__score,license__short_name,license__category,license__owner,license__homepage_url,license__text_url,license__reference_url,license__spdx_license_key,license__spdx_url,start_line,end_line,matched_rule__identifier,matched_rule__license_choice,matched_rule__licenses,copyright,copyright_holder,email,url,package__type,package__name,package__version,package__primary_language,package__summary,package__description,package__size,package__release_date,package__authors,package__homepage_url,package__notes,package__download_urls,package__bug_tracking_url,package__vcs_repository,package__copyright_top_level -/package.json,file,package.json,package,.json,2017-10-03,2200,918376afce796ef90eeda1d6695f2289c90491ac,1f66239a9b850c5e60a9382dbe2162d2,,text/plain,"ASCII text, with very long lines",JSON,False,True,False,False,True,False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,mit,15.00,MIT License,Permissive,MIT,http://opensource.org/licenses/mit-license.php,http://opensource.org/licenses/mit-license.php,https://enterprise.dejacode.com/urn/urn:dje:license:mit,MIT,https://spdx.org/licenses/MIT,24,24,mit_27.RULE,False,[u'mit'],,,,,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,mit,100.00,MIT License,Permissive,MIT,http://opensource.org/licenses/mit-license.php,http://opensource.org/licenses/mit-license.php,https://enterprise.dejacode.com/urn/urn:dje:license:mit,MIT,https://spdx.org/licenses/MIT,24,24,mit.LICENSE,False,[u'mit'],,,,,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,23,26,,,,Copyright (c) 2012 LearnBoost < tj@learnboost.com>,,,,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,23,26,,,,,LearnBoost <,,,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,12,12,,,,,,tj@learnboost.com,,,,,,,,,,,,,,,, 
-/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,16,16,,,,,,,https://github.com/visionmedia/node-cookie-signature.git,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,27,27,,,,,,,https://github.com/visionmedia/node-cookie-signature/issues,,,,,,,,,,,,,,, -/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,TJ Holowaychuk,,,https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.3.tgz,,, -/json2csv.rb,file,json2csv.rb,json2csv,.rb,2017-10-03,1014,92a83e5f8566bee7c83cf798c1b8912d609f56e0,380b7a5f483db7ace853b8f9dca5bfec,,text/x-python,"Python script, ASCII text executable",Ruby,False,True,False,False,True,True,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -/json2csv.rb,,,,,,,,,,,,,,,,,,,,apache-2.0,89.53,Apache 2.0,Permissive,Apache Software Foundation,http://www.apache.org/licenses/,http://www.apache.org/licenses/LICENSE-2.0,https://enterprise.dejacode.com/urn/urn:dje:license:apache-2.0,Apache-2.0,https://spdx.org/licenses/Apache-2.0,5,14,apache-2.0_7.RULE,False,[u'apache-2.0'],,,,,,,,,,,,,,,,,,, -/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,3,,,,Copyright (c) 2017 nexB Inc. and others.,,,,,,,,,,,,,,,,,, -/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,3,,,,,nexB Inc. 
and others.,,,,,,,,,,,,,,,,, -/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,4,4,,,,,,,http://nexb.com/,,,,,,,,,,,,,,, -/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,4,4,,,,,,,https://github.com/nexB/scancode-toolkit/,,,,,,,,,,,,,,, -/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,10,10,,,,,,,http://apache.org/licenses/LICENSE-2.0,,,,,,,,,,,,,,, -/license,file,license,license,,2017-10-03,679,75c5490a718ddd45e40e0cc7ce0c756abc373123,b965a762efb9421cf1bf4405f336e278,,text/plain,ASCII text,,False,True,False,False,False,False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -/license,,,,,,,,,,,,,,,,,,,,gpl-2.0-plus,100.00,GPL 2.0 or later,Copyleft,Free Software Foundation (FSF),http://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html,http://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html,https://enterprise.dejacode.com/urn/urn:dje:license:gpl-2.0-plus,GPL-2.0+,https://spdx.org/licenses/GPL-2.0,1,12,gpl-2.0-plus.LICENSE,False,[u'gpl-2.0-plus'],,,,,,,,,,,,,,,,,,, +Resource,type,name,base_name,extension,size,date,sha1,md5,mime_type,file_type,programming_language,is_binary,is_text,is_archive,is_media,is_source,is_script,files_count,dirs_count,size_count,scan_errors,license__key,license__score,license__short_name,license__category,license__owner,license__homepage_url,license__text_url,license__reference_url,license__spdx_license_key,license__spdx_url,start_line,end_line,matched_rule__identifier,matched_rule__license_choice,matched_rule__licenses,copyright,copyright_holder,email,url,package__type,package__name,package__version,package__primary_language,package__summary,package__description,package__size,package__release_date,package__authors,package__homepage_url,package__notes,package__download_urls,package__bug_tracking_url,package__vcs_repository,package__copyright_top_level +/json2csv.rb,file,json2csv.rb,json2csv,.rb,1014,2017-10-03,92a83e5f8566bee7c83cf798c1b8912d609f56e0,380b7a5f483db7ace853b8f9dca5bfec,text/x-python,"Python script, ASCII text 
executable",Ruby,False,True,False,False,True,True,0,0,0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,apache-2.0,89.53,Apache 2.0,Permissive,Apache Software Foundation,http://www.apache.org/licenses/,http://www.apache.org/licenses/LICENSE-2.0,https://enterprise.dejacode.com/urn/urn:dje:license:apache-2.0,Apache-2.0,https://spdx.org/licenses/Apache-2.0,5,14,apache-2.0_7.RULE,,apache-2.0,,,,,,,,,,,,,,,,,,, +/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,3,,,,Copyright (c) 2017 nexB Inc. and others.,,,,,,,,,,,,,,,,,, +/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,3,3,,,,,nexB Inc. and others.,,,,,,,,,,,,,,,,, +/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,4,4,,,,,,,http://nexb.com/,,,,,,,,,,,,,,, +/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,4,4,,,,,,,https://github.com/nexB/scancode-toolkit/,,,,,,,,,,,,,,, +/json2csv.rb,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,10,10,,,,,,,http://apache.org/licenses/LICENSE-2.0,,,,,,,,,,,,,,, +/license,file,license,license,,679,2017-10-03,75c5490a718ddd45e40e0cc7ce0c756abc373123,b965a762efb9421cf1bf4405f336e278,text/plain,ASCII text,,False,True,False,False,False,False,0,0,0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +/license,,,,,,,,,,,,,,,,,,,,,,gpl-2.0-plus,100.00,GPL 2.0 or later,Copyleft,Free Software Foundation (FSF),http://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html,http://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html,https://enterprise.dejacode.com/urn/urn:dje:license:gpl-2.0-plus,GPL-2.0+,https://spdx.org/licenses/GPL-2.0,1,12,gpl-2.0-plus.LICENSE,,gpl-2.0-plus,,,,,,,,,,,,,,,,,,, +/package.json,file,package.json,package,.json,2200,2017-10-03,918376afce796ef90eeda1d6695f2289c90491ac,1f66239a9b850c5e60a9382dbe2162d2,text/plain,"ASCII text, with very long lines",JSON,False,True,False,False,True,False,0,0,0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,mit,15.00,MIT 
License,Permissive,MIT,http://opensource.org/licenses/mit-license.php,http://opensource.org/licenses/mit-license.php,https://enterprise.dejacode.com/urn/urn:dje:license:mit,MIT,https://spdx.org/licenses/MIT,24,24,mit_27.RULE,,mit,,,,,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,mit,100.00,MIT License,Permissive,MIT,http://opensource.org/licenses/mit-license.php,http://opensource.org/licenses/mit-license.php,https://enterprise.dejacode.com/urn/urn:dje:license:mit,MIT,https://spdx.org/licenses/MIT,24,24,mit.LICENSE,,mit,,,,,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,23,26,,,,Copyright (c) 2012 LearnBoost < tj@learnboost.com>,,,,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,23,26,,,,,LearnBoost <,,,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,12,12,,,,,,tj@learnboost.com,,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,16,16,,,,,,,https://github.com/visionmedia/node-cookie-signature.git,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,27,27,,,,,,,https://github.com/visionmedia/node-cookie-signature/issues,,,,,,,,,,,,,,, +/package.json,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,TJ Holowaychuk,,,https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.3.tgz,,, diff --git a/tests/formattedcode/data/csv/srp.csv b/tests/formattedcode/data/csv/srp.csv index 6f6556de5da..965ced7a341 100644 --- a/tests/formattedcode/data/csv/srp.csv +++ b/tests/formattedcode/data/csv/srp.csv @@ -1,4 +1,5 @@ Resource,scan_errors,copyright,start_line,end_line,copyright_holder +/srp,,,,, /srp/build.info,,,,, /srp/srp_lib.c,,,,, /srp/srp_lib.c,,Copyright 2011-2016 The OpenSSL Project,2,2, diff --git a/tests/formattedcode/data/csv/tree/expected.csv b/tests/formattedcode/data/csv/tree/expected.csv index 304dee89a97..3a646d8148f 100644 --- a/tests/formattedcode/data/csv/tree/expected.csv +++ b/tests/formattedcode/data/csv/tree/expected.csv @@ -1,4 +1,5 @@ 
Resource,scan_errors,copyright,start_line,end_line,copyright_holder +/scan,,,,, /scan/copy1.c,,,,, /scan/copy1.c,,"Copyright (c) 2000 ACME, Inc.",1,1, /scan/copy1.c,,,1,1,"ACME, Inc." diff --git a/tests/formattedcode/data/json/simple-expected.json b/tests/formattedcode/data/json/simple-expected.json index 22ed5e36e73..9b273013432 100644 --- a/tests/formattedcode/data/json/simple-expected.json +++ b/tests/formattedcode/data/json/simple-expected.json @@ -1,15 +1,41 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--copyright": true, - "--license": true, - "--package": true, "--info": true, - "--license-score": 0, - "--format": "json" + "--json": "", + "--license": true, + "--package": true }, "files_count": 1, "files": [ + { + "path": "simple", + "type": "directory", + "name": "simple", + "base_name": "simple", + "extension": "", + "size": 0, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "files_count": 1, + "dirs_count": 0, + "size_count": 55, + "scan_errors": [] + }, { "path": "simple/copyright_acme_c-c.c", "type": "file", @@ -19,7 +45,6 @@ "size": 55, "sha1": "e2466d5b764d27fb301ceb439ffb5da22e43ab1d", "md5": "bdf7c572beb4094c2059508fa73c05a4", - "files_count": null, "mime_type": "text/plain", "file_type": "UTF-8 Unicode text, with no line terminators", "programming_language": "C", @@ -29,7 +54,6 @@ 
"is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [ { @@ -44,7 +68,11 @@ "end_line": 1 } ], - "packages": [] + "packages": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] } ] } \ No newline at end of file diff --git a/tests/formattedcode/data/json/simple-expected.jsonlines b/tests/formattedcode/data/json/simple-expected.jsonlines index 519ffd60496..c643a3ffdd4 100644 --- a/tests/formattedcode/data/json/simple-expected.jsonlines +++ b/tests/formattedcode/data/json/simple-expected.jsonlines @@ -1,40 +1,67 @@ [ - { - "header": { - "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", - "scancode_version": "2.1.0.post69.536f354.dirty.20171004191716", - "scancode_options": { - "--info": true, - "--license-score": 0, - "--format": "jsonlines" - }, - "files_count": 1 - } - }, - { - "files": [ - { - "path": "simple/copyright_acme_c-c.c", - "type": "file", - "name": "copyright_acme_c-c.c", - "base_name": "copyright_acme_c-c", - "extension": ".c", - "date": "2017-10-03", - "size": 55, - "sha1": "e2466d5b764d27fb301ceb439ffb5da22e43ab1d", - "md5": "bdf7c572beb4094c2059508fa73c05a4", - "files_count": null, - "mime_type": "text/plain", - "file_type": "UTF-8 Unicode text, with no line terminators", - "programming_language": "C", - "is_binary": false, - "is_text": true, - "is_archive": false, - "is_media": false, - "is_source": true, - "is_script": false, - "scan_errors": [] - } - ] + { + "header": { + "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY 
KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", + "scancode_options": { + "input": "", + "--info": true, + "--json-lines": "" + }, + "files_count": 1 } + }, + { + "files": [ + { + "path": "simple", + "type": "directory", + "name": "simple", + "base_name": "simple", + "extension": "", + "size": 0, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "files_count": 1, + "dirs_count": 0, + "size_count": 55, + "scan_errors": [] + } + ] + }, + { + "files": [ + { + "path": "simple/copyright_acme_c-c.c", + "type": "file", + "name": "copyright_acme_c-c.c", + "base_name": "copyright_acme_c-c", + "extension": ".c", + "size": 55, + "sha1": "e2466d5b764d27fb301ceb439ffb5da22e43ab1d", + "md5": "bdf7c572beb4094c2059508fa73c05a4", + "mime_type": "text/plain", + "file_type": "UTF-8 Unicode text, with no line terminators", + "programming_language": "C", + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": true, + "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + } + ] + } ] \ No newline at end of file diff --git a/tests/formattedcode/data/json/simple-expected.jsonpp b/tests/formattedcode/data/json/simple-expected.jsonpp index 940c30598b3..b542ca37de6 100644 --- a/tests/formattedcode/data/json/simple-expected.jsonpp +++ b/tests/formattedcode/data/json/simple-expected.jsonpp @@ -1,15 +1,41 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. 
No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--copyright": true, - "--license": true, - "--package": true, "--info": true, - "--license-score": 0, - "--format": "json-pp" + "--json-pp": "", + "--license": true, + "--package": true }, "files_count": 1, "files": [ + { + "path": "simple", + "type": "directory", + "name": "simple", + "base_name": "simple", + "extension": "", + "size": 0, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "files_count": 1, + "dirs_count": 0, + "size_count": 55, + "scan_errors": [] + }, { "path": "simple/copyright_acme_c-c.c", "type": "file", @@ -19,7 +45,6 @@ "size": 55, "sha1": "e2466d5b764d27fb301ceb439ffb5da22e43ab1d", "md5": "bdf7c572beb4094c2059508fa73c05a4", - "files_count": null, "mime_type": "text/plain", "file_type": "UTF-8 Unicode text, with no line terminators", "programming_language": "C", @@ -29,7 +54,6 @@ "is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [ { @@ -44,7 +68,11 @@ "end_line": 1 } ], - "packages": [] + "packages": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] } ] } \ No newline at end of file diff --git a/tests/formattedcode/data/json/tree/expected.json b/tests/formattedcode/data/json/tree/expected.json index 966717566af..235cef0a411 100644 --- a/tests/formattedcode/data/json/tree/expected.json +++ b/tests/formattedcode/data/json/tree/expected.json @@ -1,15 +1,15 @@ { "scancode_notice": "Generated with ScanCode and 
provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--copyright": true, + "--info": true, + "--json-pp": "", "--license": true, "--package": true, - "--info": true, - "--license-score": 0, - "--strip-root": true, - "--format": "json" + "--strip-root": true }, - "files_count": 8, + "files_count": 7, "files": [ { "path": "copy1.c", @@ -20,7 +20,6 @@ "size": 91, "sha1": "3922760d8492eb8f853c10a627f5a73f9eaec6ff", "md5": "fc7f53659b7a9db8b6dff0638641778e", - "files_count": null, "mime_type": "text/plain", "file_type": "UTF-8 Unicode text", "programming_language": "C", @@ -30,7 +29,6 @@ "is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [ { @@ -45,7 +43,11 @@ "end_line": 1 } ], - "packages": [] + "packages": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "copy2.c", @@ -56,7 +58,6 @@ "size": 91, "sha1": "3922760d8492eb8f853c10a627f5a73f9eaec6ff", "md5": "fc7f53659b7a9db8b6dff0638641778e", - "files_count": null, "mime_type": "text/plain", "file_type": "UTF-8 Unicode text", "programming_language": "C", @@ -66,7 +67,6 @@ "is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [ { @@ -81,7 +81,11 @@ "end_line": 1 } ], - "packages": [] + "packages": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "copy3.c", @@ -92,7 +96,6 @@ "size": 91, "sha1": "c91811eb5fdc7ab440355f9f8d1580e1518b0c2f", "md5": "e999e21c9d7de4d0f943aefbb6f21b99", - "files_count": null, "mime_type": "text/plain", "file_type": "UTF-8 Unicode text", 
"programming_language": "C", @@ -102,7 +105,6 @@ "is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [ { @@ -117,7 +119,11 @@ "end_line": 1 } ], - "packages": [] + "packages": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "subdir", @@ -125,10 +131,9 @@ "name": "subdir", "base_name": "subdir", "extension": "", - "size": 361, + "size": 0, "sha1": null, "md5": null, - "files_count": 4, "mime_type": null, "file_type": null, "programming_language": null, @@ -138,10 +143,13 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [], - "packages": [] + "packages": [], + "files_count": 4, + "dirs_count": 0, + "size_count": 361, + "scan_errors": [] }, { "path": "subdir/copy1.c", @@ -152,7 +160,6 @@ "size": 91, "sha1": "3922760d8492eb8f853c10a627f5a73f9eaec6ff", "md5": "fc7f53659b7a9db8b6dff0638641778e", - "files_count": null, "mime_type": "text/plain", "file_type": "UTF-8 Unicode text", "programming_language": "C", @@ -162,7 +169,6 @@ "is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [ { @@ -177,7 +183,11 @@ "end_line": 1 } ], - "packages": [] + "packages": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "subdir/copy2.c", @@ -188,7 +198,6 @@ "size": 91, "sha1": "3922760d8492eb8f853c10a627f5a73f9eaec6ff", "md5": "fc7f53659b7a9db8b6dff0638641778e", - "files_count": null, "mime_type": "text/plain", "file_type": "UTF-8 Unicode text", "programming_language": "C", @@ -198,7 +207,6 @@ "is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [ { @@ -213,7 +221,11 @@ "end_line": 1 } ], - "packages": [] + "packages": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "subdir/copy3.c", @@ -224,7 +236,6 @@ "size": 84, "sha1": 
"389af7e629a9853056e42b262d5e30bf4579a74f", "md5": "290627a1387288ef77ae7e07946f3ecf", - "files_count": null, "mime_type": "text/plain", "file_type": "UTF-8 Unicode text", "programming_language": "C", @@ -234,7 +245,6 @@ "is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [ { @@ -249,7 +259,11 @@ "end_line": 1 } ], - "packages": [] + "packages": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "subdir/copy4.c", @@ -260,7 +274,6 @@ "size": 95, "sha1": "58748872d25374160692f1ed7075d0fe80a544b1", "md5": "88e46475db9b1a68f415f6a3544eeb16", - "files_count": null, "mime_type": "text/plain", "file_type": "UTF-8 Unicode text", "programming_language": "C", @@ -270,7 +283,6 @@ "is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [ { @@ -285,7 +297,11 @@ "end_line": 1 } ], - "packages": [] + "packages": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] } ] } \ No newline at end of file diff --git a/tests/formattedcode/data/spdx/license_known/expected.rdf b/tests/formattedcode/data/spdx/license_known/expected.rdf index 49602d49c82..d4e4521afbe 100644 --- a/tests/formattedcode/data/spdx/license_known/expected.rdf +++ b/tests/formattedcode/data/spdx/license_known/expected.rdf @@ -3,12 +3,16 @@ "@xmlns:ns1": "http://spdx.org/rdf/terms#", "@xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", "ns1:SpdxDocument": { + "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS", + "ns1:dataLicense": { + "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" + }, "ns1:describesPackage": { "ns1:Package": { - "ns1:downloadLocation": { - "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" + "ns1:copyrightText": { + "@rdf:resource": "http://spdx.org/rdf/terms#none" }, - "ns1:licenseDeclared": { + "ns1:downloadLocation": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, "ns1:hasFile": [ 
@@ -18,67 +22,63 @@ "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:copyrightText": { - "@rdf:resource": "http://spdx.org/rdf/terms#none" + "ns1:licenseDeclared": { + "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, "ns1:licenseInfoFromFiles": [ { - "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" + "@rdf:resource": "http://spdx.org/licenses/Apache-2.0" }, { - "@rdf:resource": "http://spdx.org/licenses/Apache-2.0" + "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" } ], "ns1:name": "scan" } }, - "ns1:dataLicense": { - "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" - }, - "ns1:specVersion": "SPDX-2.1", "ns1:referencesFile": [ { "ns1:File": { - "ns1:fileName": "./scan/apache-2.0.LICENSE", "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "2b8b815229aa8a61e483fb4ba0588b8b6c491890", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "172444e7c137eb5cd3cae530aca0879c90f7fada" } }, + "ns1:copyrightText": { + "@rdf:resource": "http://spdx.org/rdf/terms#none" + }, + "ns1:fileName": "./scan/cc0-1.0.LICENSE", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, "ns1:licenseInfoInFile": { - "@rdf:resource": "http://spdx.org/licenses/Apache-2.0" - }, - "ns1:copyrightText": { - "@rdf:resource": "http://spdx.org/rdf/terms#none" + "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" } } }, { "ns1:File": { - "ns1:licenseInfoInFile": { - "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" - }, "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "172444e7c137eb5cd3cae530aca0879c90f7fada", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "2b8b815229aa8a61e483fb4ba0588b8b6c491890" } }, - "ns1:licenseConcluded": { - "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" - }, "ns1:copyrightText": { "@rdf:resource": "http://spdx.org/rdf/terms#none" }, - "ns1:fileName": "./scan/cc0-1.0.LICENSE" + "ns1:fileName": 
"./scan/apache-2.0.LICENSE", + "ns1:licenseConcluded": { + "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" + }, + "ns1:licenseInfoInFile": { + "@rdf:resource": "http://spdx.org/licenses/Apache-2.0" + } } } ], - "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS" + "ns1:specVersion": "SPDX-2.1" } } } \ No newline at end of file diff --git a/tests/formattedcode/data/spdx/license_known/expected_with_text.rdf b/tests/formattedcode/data/spdx/license_known/expected_with_text.rdf index 22cdd8c71e5..d4e4521afbe 100644 --- a/tests/formattedcode/data/spdx/license_known/expected_with_text.rdf +++ b/tests/formattedcode/data/spdx/license_known/expected_with_text.rdf @@ -3,15 +3,28 @@ "@xmlns:ns1": "http://spdx.org/rdf/terms#", "@xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", "ns1:SpdxDocument": { + "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS", + "ns1:dataLicense": { + "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" + }, "ns1:describesPackage": { "ns1:Package": { - "ns1:name": "scan", - "ns1:licenseDeclared": { - "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" + "ns1:copyrightText": { + "@rdf:resource": "http://spdx.org/rdf/terms#none" }, "ns1:downloadLocation": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, + "ns1:hasFile": [ + null, + null + ], + "ns1:licenseConcluded": { + "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" + }, + "ns1:licenseDeclared": { + "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" + }, "ns1:licenseInfoFromFiles": [ { "@rdf:resource": "http://spdx.org/licenses/Apache-2.0" @@ -20,65 +33,52 @@ "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" } ], - "ns1:copyrightText": { - "@rdf:resource": "http://spdx.org/rdf/terms#none" - }, - "ns1:licenseConcluded": { - "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" - }, - "ns1:hasFile": [ - null, - null - ] + "ns1:name": "scan" } }, - "ns1:dataLicense": { - "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" - }, - 
"ns1:specVersion": "SPDX-2.1", "ns1:referencesFile": [ { "ns1:File": { - "ns1:licenseInfoInFile": { - "@rdf:resource": "http://spdx.org/licenses/Apache-2.0" - }, "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "2b8b815229aa8a61e483fb4ba0588b8b6c491890", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "172444e7c137eb5cd3cae530aca0879c90f7fada" } }, - "ns1:licenseConcluded": { - "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" - }, "ns1:copyrightText": { "@rdf:resource": "http://spdx.org/rdf/terms#none" }, - "ns1:fileName": "./scan/apache-2.0.LICENSE" + "ns1:fileName": "./scan/cc0-1.0.LICENSE", + "ns1:licenseConcluded": { + "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" + }, + "ns1:licenseInfoInFile": { + "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" + } } }, { "ns1:File": { "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "172444e7c137eb5cd3cae530aca0879c90f7fada", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "2b8b815229aa8a61e483fb4ba0588b8b6c491890" } }, + "ns1:copyrightText": { + "@rdf:resource": "http://spdx.org/rdf/terms#none" + }, + "ns1:fileName": "./scan/apache-2.0.LICENSE", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, "ns1:licenseInfoInFile": { - "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" - }, - "ns1:copyrightText": { - "@rdf:resource": "http://spdx.org/rdf/terms#none" - }, - "ns1:fileName": "./scan/cc0-1.0.LICENSE" + "@rdf:resource": "http://spdx.org/licenses/Apache-2.0" + } } } ], - "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS" + "ns1:specVersion": "SPDX-2.1" } } } \ No newline at end of file diff --git a/tests/formattedcode/data/spdx/license_ref/expected.rdf b/tests/formattedcode/data/spdx/license_ref/expected.rdf index f12c0f93321..ff18de2701b 100644 --- a/tests/formattedcode/data/spdx/license_ref/expected.rdf +++ b/tests/formattedcode/data/spdx/license_ref/expected.rdf @@ -1,59 +1,61 @@ { 
"rdf:RDF": { "@xmlns:ns1": "http://spdx.org/rdf/terms#", - "@xmlns:rdfs": "http://www.w3.org/2000/01/rdf-schema#", "@xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", + "@xmlns:rdfs": "http://www.w3.org/2000/01/rdf-schema#", "ns1:SpdxDocument": { - "ns1:hasExtractedLicensingInfo": { - "ns1:ExtractedLicensingInfo": { - "ns1:extractedText": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml", - "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml", - "ns1:licenseId": "LicenseRef-scancode-acknowledgment" - } + "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS", + "ns1:dataLicense": { + "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" }, "ns1:describesPackage": { "ns1:Package": { - "ns1:hasFile": null, + "ns1:copyrightText": "Copyright (c) 2017 nexB Inc. and others.", "ns1:downloadLocation": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:licenseDeclared": { + "ns1:hasFile": null, + "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:name": "scan", - "ns1:licenseConcluded": { + "ns1:licenseDeclared": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, "ns1:licenseInfoFromFiles": [ { "@rdf:resource": "http://spdx.org/licenses/Apache-2.0" }, + { + "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" + }, { "ns1:ExtractedLicensingInfo": { "ns1:extractedText": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml", - "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml", - "ns1:licenseId": "LicenseRef-scancode-acknowledgment" + "ns1:licenseId": "LicenseRef-scancode-acknowledgment", + "rdfs:comment": "See details at 
https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml" } - }, - { - "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" } ], - "ns1:copyrightText": "Copyright (c) 2017 nexB Inc. and others." + "ns1:name": "scan" } }, - "ns1:specVersion": "SPDX-2.1", - "ns1:dataLicense": { - "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" + "ns1:hasExtractedLicensingInfo": { + "ns1:ExtractedLicensingInfo": { + "ns1:extractedText": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml", + "ns1:licenseId": "LicenseRef-scancode-acknowledgment", + "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml" + } }, "ns1:referencesFile": { "ns1:File": { "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "f9c28fa2714ad0c2e36d3e5561afb0031fa76831", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "f9c28fa2714ad0c2e36d3e5561afb0031fa76831" } }, + "ns1:copyrightText": "Copyright (c) 2017 nexB Inc. and others.", + "ns1:fileName": "./scan/NOTICE", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, @@ -67,16 +69,14 @@ { "ns1:ExtractedLicensingInfo": { "ns1:extractedText": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml", - "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml", - "ns1:licenseId": "LicenseRef-scancode-acknowledgment" + "ns1:licenseId": "LicenseRef-scancode-acknowledgment", + "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml" } } - ], - "ns1:copyrightText": "Copyright (c) 2017 nexB Inc. 
and others.", - "ns1:fileName": "./scan/NOTICE" + ] } }, - "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS" + "ns1:specVersion": "SPDX-2.1" } } } \ No newline at end of file diff --git a/tests/formattedcode/data/spdx/license_ref/expected_with_text.rdf b/tests/formattedcode/data/spdx/license_ref/expected_with_text.rdf index 3745b692426..757e7ea506f 100644 --- a/tests/formattedcode/data/spdx/license_ref/expected_with_text.rdf +++ b/tests/formattedcode/data/spdx/license_ref/expected_with_text.rdf @@ -1,82 +1,82 @@ { "rdf:RDF": { "@xmlns:ns1": "http://spdx.org/rdf/terms#", - "@xmlns:rdfs": "http://www.w3.org/2000/01/rdf-schema#", "@xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", + "@xmlns:rdfs": "http://www.w3.org/2000/01/rdf-schema#", "ns1:SpdxDocument": { - "ns1:hasExtractedLicensingInfo": { - "ns1:ExtractedLicensingInfo": { - "ns1:extractedText": "The ScanCode software is licensed under the Apache License version 2.0.\nData generated with ScanCode require an acknowledgment.\nScanCode is a trademark of nexB Inc.\n\nYou may not use this software except in compliance with the License.\nYou may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed\nunder the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\nCONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\nWhen you publish or redistribute any data created with ScanCode or any ScanCode\nderivative work, you must accompany this data with the following acknowledgment:\n\n Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n OR CONDITIONS OF ANY KIND, either express or implied. No content created from\n ScanCode should be considered or used as legal advice. 
Consult an Attorney\n for any legal advice.\n ScanCode is a free software code scanning tool from nexB Inc. and others.\n Visit https://github.com/nexB/scancode-toolkit/ for support and download", - "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml", - "ns1:licenseId": "LicenseRef-scancode-acknowledgment" - } + "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS", + "ns1:dataLicense": { + "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" }, "ns1:describesPackage": { "ns1:Package": { + "ns1:copyrightText": "Copyright (c) 2017 nexB Inc. and others.", + "ns1:downloadLocation": { + "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" + }, "ns1:hasFile": null, - "ns1:licenseDeclared": { + "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:downloadLocation": { + "ns1:licenseDeclared": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, "ns1:licenseInfoFromFiles": [ { - "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" + "@rdf:resource": "http://spdx.org/licenses/Apache-2.0" }, { - "@rdf:resource": "http://spdx.org/licenses/Apache-2.0" + "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" }, { "ns1:ExtractedLicensingInfo": { "ns1:extractedText": "The ScanCode software is licensed under the Apache License version 2.0.\nData generated with ScanCode require an acknowledgment.\nScanCode is a trademark of nexB Inc.\n\nYou may not use this software except in compliance with the License.\nYou may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed\nunder the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\nCONDITIONS OF ANY KIND, either express or implied. 
See the License for the\nspecific language governing permissions and limitations under the License.\n\nWhen you publish or redistribute any data created with ScanCode or any ScanCode\nderivative work, you must accompany this data with the following acknowledgment:\n\n Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n OR CONDITIONS OF ANY KIND, either express or implied. No content created from\n ScanCode should be considered or used as legal advice. Consult an Attorney\n for any legal advice.\n ScanCode is a free software code scanning tool from nexB Inc. and others.\n Visit https://github.com/nexB/scancode-toolkit/ for support and download", - "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml", - "ns1:licenseId": "LicenseRef-scancode-acknowledgment" + "ns1:licenseId": "LicenseRef-scancode-acknowledgment", + "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml" } } ], - "ns1:copyrightText": "Copyright (c) 2017 nexB Inc. 
and others.", - "ns1:licenseConcluded": { - "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" - }, "ns1:name": "scan" } }, - "ns1:specVersion": "SPDX-2.1", - "ns1:dataLicense": { - "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" + "ns1:hasExtractedLicensingInfo": { + "ns1:ExtractedLicensingInfo": { + "ns1:extractedText": "The ScanCode software is licensed under the Apache License version 2.0.\nData generated with ScanCode require an acknowledgment.\nScanCode is a trademark of nexB Inc.\n\nYou may not use this software except in compliance with the License.\nYou may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed\nunder the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\nCONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\nWhen you publish or redistribute any data created with ScanCode or any ScanCode\nderivative work, you must accompany this data with the following acknowledgment:\n\n Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n OR CONDITIONS OF ANY KIND, either express or implied. No content created from\n ScanCode should be considered or used as legal advice. Consult an Attorney\n for any legal advice.\n ScanCode is a free software code scanning tool from nexB Inc. 
and others.\n Visit https://github.com/nexB/scancode-toolkit/ for support and download", + "ns1:licenseId": "LicenseRef-scancode-acknowledgment", + "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml" + } }, "ns1:referencesFile": { "ns1:File": { - "ns1:fileName": "./scan/NOTICE", "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "f9c28fa2714ad0c2e36d3e5561afb0031fa76831", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "f9c28fa2714ad0c2e36d3e5561afb0031fa76831" } }, + "ns1:copyrightText": "Copyright (c) 2017 nexB Inc. and others.", + "ns1:fileName": "./scan/NOTICE", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:copyrightText": "Copyright (c) 2017 nexB Inc. and others.", "ns1:licenseInfoInFile": [ { - "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" + "@rdf:resource": "http://spdx.org/licenses/Apache-2.0" }, { - "@rdf:resource": "http://spdx.org/licenses/Apache-2.0" + "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" }, { "ns1:ExtractedLicensingInfo": { "ns1:extractedText": "The ScanCode software is licensed under the Apache License version 2.0.\nData generated with ScanCode require an acknowledgment.\nScanCode is a trademark of nexB Inc.\n\nYou may not use this software except in compliance with the License.\nYou may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed\nunder the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\nCONDITIONS OF ANY KIND, either express or implied. 
See the License for the\nspecific language governing permissions and limitations under the License.\n\nWhen you publish or redistribute any data created with ScanCode or any ScanCode\nderivative work, you must accompany this data with the following acknowledgment:\n\n Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n OR CONDITIONS OF ANY KIND, either express or implied. No content created from\n ScanCode should be considered or used as legal advice. Consult an Attorney\n for any legal advice.\n ScanCode is a free software code scanning tool from nexB Inc. and others.\n Visit https://github.com/nexB/scancode-toolkit/ for support and download", - "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml", - "ns1:licenseId": "LicenseRef-scancode-acknowledgment" + "ns1:licenseId": "LicenseRef-scancode-acknowledgment", + "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/scancode-acknowledgment.yml" } } ] } }, - "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS" + "ns1:specVersion": "SPDX-2.1" } } } \ No newline at end of file diff --git a/tests/formattedcode/data/spdx/or_later/expected.rdf b/tests/formattedcode/data/spdx/or_later/expected.rdf index 9b319226c3a..ba15c1fa3f1 100644 --- a/tests/formattedcode/data/spdx/or_later/expected.rdf +++ b/tests/formattedcode/data/spdx/or_later/expected.rdf @@ -3,48 +3,48 @@ "@xmlns:ns1": "http://spdx.org/rdf/terms#", "@xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", "ns1:SpdxDocument": { + "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS", "ns1:dataLicense": { "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" }, "ns1:describesPackage": { "ns1:Package": { + "ns1:copyrightText": "Copyright 2010, Red Hat, Inc.", "ns1:downloadLocation": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, + "ns1:hasFile": null, + 
"ns1:licenseConcluded": { + "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" + }, "ns1:licenseDeclared": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:name": "or_later", "ns1:licenseInfoFromFiles": { "@rdf:resource": "http://spdx.org/licenses/LGPL-2.1+" }, - "ns1:copyrightText": "Copyright 2010, Red Hat, Inc.", - "ns1:licenseConcluded": { - "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" - }, - "ns1:hasFile": null + "ns1:name": "or_later" } }, - "ns1:specVersion": "SPDX-2.1", "ns1:referencesFile": { "ns1:File": { - "ns1:fileName": "./test.java", "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "0c5bf934430394112921e7a1a8128176606d32ca", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "0c5bf934430394112921e7a1a8128176606d32ca" } }, + "ns1:copyrightText": "Copyright 2010, Red Hat, Inc.", + "ns1:fileName": "./test.java", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:copyrightText": "Copyright 2010, Red Hat, Inc.", "ns1:licenseInfoInFile": { "@rdf:resource": "http://spdx.org/licenses/LGPL-2.1+" } } }, - "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS" + "ns1:specVersion": "SPDX-2.1" } } } \ No newline at end of file diff --git a/tests/formattedcode/data/spdx/simple/expected.rdf b/tests/formattedcode/data/spdx/simple/expected.rdf index 35bf0c8305c..cd628e1e064 100644 --- a/tests/formattedcode/data/spdx/simple/expected.rdf +++ b/tests/formattedcode/data/spdx/simple/expected.rdf @@ -3,52 +3,52 @@ "@xmlns:ns1": "http://spdx.org/rdf/terms#", "@xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", "ns1:SpdxDocument": { + "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS", + "ns1:dataLicense": { + "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" + }, "ns1:describesPackage": { "ns1:Package": { - "ns1:name": "simple", - "ns1:licenseDeclared": { - "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" + "ns1:copyrightText": { + 
"@rdf:resource": "http://spdx.org/rdf/terms#none" }, "ns1:downloadLocation": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, + "ns1:hasFile": null, "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:copyrightText": { - "@rdf:resource": "http://spdx.org/rdf/terms#none" + "ns1:licenseDeclared": { + "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, "ns1:licenseInfoFromFiles": { "@rdf:resource": "http://spdx.org/rdf/terms#none" }, - "ns1:hasFile": null + "ns1:name": "simple" } }, - "ns1:specVersion": "SPDX-2.1", - "ns1:dataLicense": { - "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" - }, "ns1:referencesFile": { "ns1:File": { - "ns1:fileName": "./simple/test.txt", "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "b8a793cce3c3a4cd3a4646ddbe86edd542ed0cd8", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "b8a793cce3c3a4cd3a4646ddbe86edd542ed0cd8" } }, + "ns1:copyrightText": { + "@rdf:resource": "http://spdx.org/rdf/terms#none" + }, + "ns1:fileName": "./test.txt", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, "ns1:licenseInfoInFile": { "@rdf:resource": "http://spdx.org/rdf/terms#none" - }, - "ns1:copyrightText": { - "@rdf:resource": "http://spdx.org/rdf/terms#none" } } }, - "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS" + "ns1:specVersion": "SPDX-2.1" } } } \ No newline at end of file diff --git a/tests/formattedcode/data/spdx/simple/expected.tv b/tests/formattedcode/data/spdx/simple/expected.tv index 3f5fe575a66..7ef5f485fd7 100644 --- a/tests/formattedcode/data/spdx/simple/expected.tv +++ b/tests/formattedcode/data/spdx/simple/expected.tv @@ -17,7 +17,7 @@ PackageLicenseConcluded: NOASSERTION PackageLicenseInfoFromFiles: NONE PackageCopyrightText: NONE # File -FileName: ./simple/test.txt +FileName: ./test.txt FileChecksum: SHA1: b8a793cce3c3a4cd3a4646ddbe86edd542ed0cd8 LicenseConcluded: NOASSERTION LicenseInfoInFile: 
NONE diff --git a/tests/formattedcode/data/spdx/tree/expected.rdf b/tests/formattedcode/data/spdx/tree/expected.rdf index 65e40a151aa..de1f5843f6f 100644 --- a/tests/formattedcode/data/spdx/tree/expected.rdf +++ b/tests/formattedcode/data/spdx/tree/expected.rdf @@ -3,59 +3,87 @@ "@xmlns:ns1": "http://spdx.org/rdf/terms#", "@xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", "ns1:SpdxDocument": { + "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS", "ns1:dataLicense": { "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" }, + "ns1:describesPackage": { + "ns1:Package": { + "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", + "ns1:downloadLocation": { + "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" + }, + "ns1:hasFile": [ + null, + null, + null, + null, + null, + null, + null + ], + "ns1:licenseConcluded": { + "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" + }, + "ns1:licenseDeclared": { + "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" + }, + "ns1:licenseInfoFromFiles": { + "@rdf:resource": "http://spdx.org/rdf/terms#none" + }, + "ns1:name": "scan" + } + }, "ns1:referencesFile": [ { "ns1:File": { "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "3922760d8492eb8f853c10a627f5a73f9eaec6ff", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "389af7e629a9853056e42b262d5e30bf4579a74f" } }, + "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", + "ns1:fileName": "./scan/subdir/copy3.c", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, "ns1:licenseInfoInFile": { "@rdf:resource": "http://spdx.org/rdf/terms#none" - }, - "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", - "ns1:fileName": "./scan/copy1.c" + } } }, { "ns1:File": { - "ns1:licenseInfoInFile": { - "@rdf:resource": "http://spdx.org/rdf/terms#none" - }, "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "3922760d8492eb8f853c10a627f5a73f9eaec6ff", - "ns1:algorithm": "SHA1" + 
"ns1:algorithm": "SHA1", + "ns1:checksumValue": "3922760d8492eb8f853c10a627f5a73f9eaec6ff" } }, + "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", + "ns1:fileName": "./scan/copy1.c", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", - "ns1:fileName": "./scan/copy2.c" + "ns1:licenseInfoInFile": { + "@rdf:resource": "http://spdx.org/rdf/terms#none" + } } }, { "ns1:File": { "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "3922760d8492eb8f853c10a627f5a73f9eaec6ff", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "3922760d8492eb8f853c10a627f5a73f9eaec6ff" } }, + "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", + "ns1:fileName": "./scan/copy2.c", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:fileName": "./scan/subdir/copy1.c", - "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", "ns1:licenseInfoInFile": { "@rdf:resource": "http://spdx.org/rdf/terms#none" } @@ -63,17 +91,17 @@ }, { "ns1:File": { - "ns1:fileName": "./scan/copy3.c", "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "c91811eb5fdc7ab440355f9f8d1580e1518b0c2f", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "3922760d8492eb8f853c10a627f5a73f9eaec6ff" } }, + "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", + "ns1:fileName": "./scan/subdir/copy1.c", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", "ns1:licenseInfoInFile": { "@rdf:resource": "http://spdx.org/rdf/terms#none" } @@ -83,15 +111,15 @@ "ns1:File": { "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "58748872d25374160692f1ed7075d0fe80a544b1", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "3922760d8492eb8f853c10a627f5a73f9eaec6ff" } }, + "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", + 
"ns1:fileName": "./scan/subdir/copy2.c", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:fileName": "./scan/subdir/copy4.c", - "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", "ns1:licenseInfoInFile": { "@rdf:resource": "http://spdx.org/rdf/terms#none" } @@ -101,68 +129,40 @@ "ns1:File": { "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "389af7e629a9853056e42b262d5e30bf4579a74f", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "58748872d25374160692f1ed7075d0fe80a544b1" } }, + "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", + "ns1:fileName": "./scan/subdir/copy4.c", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, "ns1:licenseInfoInFile": { "@rdf:resource": "http://spdx.org/rdf/terms#none" - }, - "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", - "ns1:fileName": "./scan/subdir/copy3.c" + } } }, { "ns1:File": { - "ns1:licenseInfoInFile": { - "@rdf:resource": "http://spdx.org/rdf/terms#none" - }, "ns1:checksum": { "ns1:Checksum": { - "ns1:checksumValue": "3922760d8492eb8f853c10a627f5a73f9eaec6ff", - "ns1:algorithm": "SHA1" + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "c91811eb5fdc7ab440355f9f8d1580e1518b0c2f" } }, + "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", + "ns1:fileName": "./scan/copy3.c", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", - "ns1:fileName": "./scan/subdir/copy2.c" + "ns1:licenseInfoInFile": { + "@rdf:resource": "http://spdx.org/rdf/terms#none" + } } } ], - "ns1:specVersion": "SPDX-2.1", - "ns1:describesPackage": { - "ns1:Package": { - "ns1:hasFile": [ - null, - null, - null, - null, - null, - null, - null - ], - "ns1:licenseDeclared": { - "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" - }, - "ns1:name": "scan", - "ns1:licenseConcluded": { - "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" - 
}, - "ns1:copyrightText": "Copyright (c) 2000 ACME, Inc.", - "ns1:licenseInfoFromFiles": { - "@rdf:resource": "http://spdx.org/rdf/terms#none" - }, - "ns1:downloadLocation": { - "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" - } - } - }, - "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS" + "ns1:specVersion": "SPDX-2.1" } } } \ No newline at end of file diff --git a/tests/formattedcode/data/spdx/unicode/expected.rdf b/tests/formattedcode/data/spdx/unicode/expected.rdf index 6a5babd4fbf..89592719834 100644 --- a/tests/formattedcode/data/spdx/unicode/expected.rdf +++ b/tests/formattedcode/data/spdx/unicode/expected.rdf @@ -1,66 +1,66 @@ { "rdf:RDF": { "@xmlns:ns1": "http://spdx.org/rdf/terms#", - "@xmlns:rdfs": "http://www.w3.org/2000/01/rdf-schema#", "@xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", + "@xmlns:rdfs": "http://www.w3.org/2000/01/rdf-schema#", "ns1:SpdxDocument": { - "ns1:hasExtractedLicensingInfo": { - "ns1:ExtractedLicensingInfo": { - "ns1:extractedText": "SOFTWARE LICENSE\n *\n * This software is provided subject to the following terms and conditions,\n * which you should read carefully before using the software. Using this\n * software indicates your acceptance of these terms and conditions. If you do\n * not agree with these terms and conditions, do not use the software.\n *\n * [Copyright] \u00a9 [2005] [Agere] [Systems] [Inc].\n * [All] [rights] [reserved].\n *\n * Redistribution and use in source or binary forms, with or without\n * modifications, are permitted provided that the following conditions are met:\n *\n * . Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following Disclaimer as comments in the code as\n * well as in the documentation and/or other materials provided with the\n * distribution.\n *\n * . 
Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following Disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * . Neither the name of Agere Systems Inc. nor the names of the contributors\n * may be used to endorse or promote products derived from this software\n * without specific prior written permission.\n *\n * Disclaimer\n *\n * THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY\n * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN\n * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n * DAMAGE", - "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/agere-bsd.yml", - "ns1:licenseId": "LicenseRef-agere-bsd" - } - }, - "ns1:referencesFile": { - "ns1:File": { - "ns1:licenseInfoInFile": { - "ns1:ExtractedLicensingInfo": { - "ns1:extractedText": "SOFTWARE LICENSE\n *\n * This software is provided subject to the following terms and conditions,\n * which you should read carefully before using the software. Using this\n * software indicates your acceptance of these terms and conditions. 
If you do\n * not agree with these terms and conditions, do not use the software.\n *\n * [Copyright] \u00a9 [2005] [Agere] [Systems] [Inc].\n * [All] [rights] [reserved].\n *\n * Redistribution and use in source or binary forms, with or without\n * modifications, are permitted provided that the following conditions are met:\n *\n * . Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following Disclaimer as comments in the code as\n * well as in the documentation and/or other materials provided with the\n * distribution.\n *\n * . Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following Disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * . Neither the name of Agere Systems Inc. nor the names of the contributors\n * may be used to endorse or promote products derived from this software\n * without specific prior written permission.\n *\n * Disclaimer\n *\n * THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY\n * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN\n * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. 
OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n * DAMAGE", - "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/agere-bsd.yml", - "ns1:licenseId": "LicenseRef-agere-bsd" - } - }, - "ns1:checksum": { - "ns1:Checksum": { - "ns1:checksumValue": "3903b654c47ea95203567230d72093ad1c5c4b90", - "ns1:algorithm": "SHA1" - } - }, - "ns1:licenseConcluded": { - "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" - }, - "ns1:copyrightText": "Copyright (c) 2005 Agere Systems Inc.\nCopyright (c) 2005 Agere Systems Inc.", - "ns1:fileName": "./et131x.h" - } - }, - "ns1:specVersion": "SPDX-2.1", + "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS", "ns1:dataLicense": { "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" }, "ns1:describesPackage": { "ns1:Package": { + "ns1:copyrightText": "Copyright (c) 2005 Agere Systems Inc.", "ns1:downloadLocation": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, + "ns1:hasFile": null, + "ns1:licenseConcluded": { + "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" + }, "ns1:licenseDeclared": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:hasFile": null, "ns1:licenseInfoFromFiles": { "ns1:ExtractedLicensingInfo": { "ns1:extractedText": "SOFTWARE LICENSE\n *\n * This software is provided subject to the following terms and conditions,\n * which you should read carefully before using the software. Using this\n * software indicates your acceptance of these terms and conditions. 
If you do\n * not agree with these terms and conditions, do not use the software.\n *\n * [Copyright] \u00a9 [2005] [Agere] [Systems] [Inc].\n * [All] [rights] [reserved].\n *\n * Redistribution and use in source or binary forms, with or without\n * modifications, are permitted provided that the following conditions are met:\n *\n * . Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following Disclaimer as comments in the code as\n * well as in the documentation and/or other materials provided with the\n * distribution.\n *\n * . Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following Disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * . Neither the name of Agere Systems Inc. nor the names of the contributors\n * may be used to endorse or promote products derived from this software\n * without specific prior written permission.\n *\n * Disclaimer\n *\n * THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY\n * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN\n * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. 
OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n * DAMAGE", - "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/agere-bsd.yml", - "ns1:licenseId": "LicenseRef-agere-bsd" + "ns1:licenseId": "LicenseRef-agere-bsd", + "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/agere-bsd.yml" } }, - "ns1:copyrightText": "Copyright (c) 2005 Agere Systems Inc.", + "ns1:name": "unicode" + } + }, + "ns1:hasExtractedLicensingInfo": { + "ns1:ExtractedLicensingInfo": { + "ns1:extractedText": "SOFTWARE LICENSE\n *\n * This software is provided subject to the following terms and conditions,\n * which you should read carefully before using the software. Using this\n * software indicates your acceptance of these terms and conditions. If you do\n * not agree with these terms and conditions, do not use the software.\n *\n * [Copyright] \u00a9 [2005] [Agere] [Systems] [Inc].\n * [All] [rights] [reserved].\n *\n * Redistribution and use in source or binary forms, with or without\n * modifications, are permitted provided that the following conditions are met:\n *\n * . Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following Disclaimer as comments in the code as\n * well as in the documentation and/or other materials provided with the\n * distribution.\n *\n * . 
Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following Disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * . Neither the name of Agere Systems Inc. nor the names of the contributors\n * may be used to endorse or promote products derived from this software\n * without specific prior written permission.\n *\n * Disclaimer\n *\n * THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY\n * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN\n * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n * DAMAGE", + "ns1:licenseId": "LicenseRef-agere-bsd", + "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/agere-bsd.yml" + } + }, + "ns1:referencesFile": { + "ns1:File": { + "ns1:checksum": { + "ns1:Checksum": { + "ns1:algorithm": "SHA1", + "ns1:checksumValue": "3903b654c47ea95203567230d72093ad1c5c4b90" + } + }, + "ns1:copyrightText": "Copyright (c) 2005 Agere Systems Inc.\nCopyright (c) 2005 Agere Systems Inc.", + "ns1:fileName": "./et131x.h", "ns1:licenseConcluded": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" }, - "ns1:name": "unicode" + "ns1:licenseInfoInFile": { + 
"ns1:ExtractedLicensingInfo": { + "ns1:extractedText": "SOFTWARE LICENSE\n *\n * This software is provided subject to the following terms and conditions,\n * which you should read carefully before using the software. Using this\n * software indicates your acceptance of these terms and conditions. If you do\n * not agree with these terms and conditions, do not use the software.\n *\n * [Copyright] \u00a9 [2005] [Agere] [Systems] [Inc].\n * [All] [rights] [reserved].\n *\n * Redistribution and use in source or binary forms, with or without\n * modifications, are permitted provided that the following conditions are met:\n *\n * . Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following Disclaimer as comments in the code as\n * well as in the documentation and/or other materials provided with the\n * distribution.\n *\n * . Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following Disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * . Neither the name of Agere Systems Inc. nor the names of the contributors\n * may be used to endorse or promote products derived from this software\n * without specific prior written permission.\n *\n * Disclaimer\n *\n * THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY\n * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN\n * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. 
OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n * DAMAGE", + "ns1:licenseId": "LicenseRef-agere-bsd", + "rdfs:comment": "See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/agere-bsd.yml" + } + } } }, - "@rdf:about": "http://www.spdx.org/tools#SPDXANALYSIS" + "ns1:specVersion": "SPDX-2.1" } } } \ No newline at end of file diff --git a/tests/formattedcode/test_format_csv.py b/tests/formattedcode/test_output_csv.py similarity index 91% rename from tests/formattedcode/test_format_csv.py rename to tests/formattedcode/test_output_csv.py index d2f0e7195be..63fbbd88128 100644 --- a/tests/formattedcode/test_format_csv.py +++ b/tests/formattedcode/test_output_csv.py @@ -38,8 +38,7 @@ from scancode.cli_test_utils import run_scan_click from scancode.cli_test_utils import run_scan_plain -from formattedcode.format_csv import flatten_scan - +from formattedcode.output_csv import flatten_scan test_env = FileDrivenTesting() test_env.test_data_dir = os.path.join(os.path.dirname(__file__), 'data') @@ -193,9 +192,8 @@ def test_csv_minimal(): test_dir = test_env.get_test_loc('csv/srp') result_file = test_env.get_temp_file('csv') expected_file = test_env.get_test_loc('csv/srp.csv') - result = run_scan_click(['--copyright', '--format', 'csv', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + args = ['--copyright', test_dir, '--output-csv', result_file] + run_scan_click(args) check_csvs(result_file, expected_file) @@ -203,23 +201,15 @@ 
def test_csv_tree(): test_dir = test_env.get_test_loc('csv/tree/scan') result_file = test_env.get_temp_file('csv') expected_file = test_env.get_test_loc('csv/tree/expected.csv') - result = run_scan_click(['--copyright', '--format', 'csv', test_dir, result_file]) - assert result.exit_code == 0 + args = ['--copyright', test_dir, '--output-csv', result_file] + run_scan_click(args) check_csvs(result_file, expected_file) def test_can_process_live_scan_with_all_options(): test_dir = test_env.get_test_loc('csv/livescan/scan') result_file = test_env.get_temp_file('csv') - rc, stdout, stderr = run_scan_plain( - ['-clip', '--email', '--url', '--strip-root', '--format', 'csv', - test_dir, result_file]) - try: - assert rc == 0 - except: - print(stdout, stderr) - print(stdout, stderr) - raise - + args = ['-clip', '--email', '--url', '--strip-root', test_dir, '--output-csv', result_file] + run_scan_plain(args) expected_file = test_env.get_test_loc('csv/livescan/expected.csv') - check_csvs(result_file, expected_file, regen=False) + check_csvs(result_file, expected_file) diff --git a/tests/formattedcode/test_format_json.py b/tests/formattedcode/test_output_json.py similarity index 77% rename from tests/formattedcode/test_format_json.py rename to tests/formattedcode/test_output_json.py index 62629d71f85..e49e85023ce 100644 --- a/tests/formattedcode/test_format_json.py +++ b/tests/formattedcode/test_output_json.py @@ -33,7 +33,6 @@ from scancode.cli_test_utils import check_json_scan from scancode.cli_test_utils import run_scan_click - test_env = FileDrivenTesting() test_env.test_data_dir = os.path.join(os.path.dirname(__file__), 'data') @@ -41,51 +40,34 @@ def test_json_pretty_print(): test_dir = test_env.get_test_loc('json/simple') result_file = test_env.get_temp_file('json') - - result = run_scan_click(['-clip', '--format', 'json-pp', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output - + args = ['-clip', test_dir, '--json-pp', 
result_file] + run_scan_click(args) expected = test_env.get_test_loc('json/simple-expected.jsonpp') - check_json_scan(test_env.get_test_loc(expected), result_file, strip_dates=True, regen=False) + check_json_scan(test_env.get_test_loc(expected), result_file, strip_dates=True) def test_json_compact(): test_dir = test_env.get_test_loc('json/simple') result_file = test_env.get_temp_file('json') - - result = run_scan_click(['-clip', '--format', 'json', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output - + run_scan_click(['-clip', test_dir, '--json', result_file]) with open(result_file, 'rb') as res: - assert len(res.read().splitlines())==1 - + assert len(res.read().splitlines()) == 1 expected = test_env.get_test_loc('json/simple-expected.json') - check_json_scan(test_env.get_test_loc(expected), result_file, strip_dates=True, regen=False) + check_json_scan(test_env.get_test_loc(expected), result_file, strip_dates=True) def test_scan_output_does_not_truncate_copyright_json(): test_dir = test_env.get_test_loc('json/tree/scan/') result_file = test_env.get_temp_file('test.json') - - result = run_scan_click( - ['-clip', '--strip-root', '--format', 'json', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output - + run_scan_click(['-clip', '--strip-root', test_dir, '--json-pp', result_file]) expected = test_env.get_test_loc('json/tree/expected.json') - check_json_scan(test_env.get_test_loc(expected), result_file, strip_dates=True, regen=False) + check_json_scan(test_env.get_test_loc(expected), result_file, strip_dates=True) def test_scan_output_does_not_truncate_copyright_with_json_to_stdout(): test_dir = test_env.get_test_loc('json/tree/scan/') result_file = test_env.get_temp_file('test.json') - - result = run_scan_click( - ['-clip', '--strip-root', '--format', 'json', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output - + args = ['-clip', 
'--strip-root', test_dir, '--json-pp', result_file] + run_scan_click(args) expected = test_env.get_test_loc('json/tree/expected.json') - check_json_scan(test_env.get_test_loc(expected), result_file, strip_dates=True, regen=False) + check_json_scan(test_env.get_test_loc(expected), result_file, strip_dates=True) diff --git a/tests/formattedcode/test_format_jsonlines.py b/tests/formattedcode/test_output_jsonlines.py similarity index 75% rename from tests/formattedcode/test_format_jsonlines.py rename to tests/formattedcode/test_output_jsonlines.py index 18483dc67a6..9c01cdefcd9 100644 --- a/tests/formattedcode/test_format_jsonlines.py +++ b/tests/formattedcode/test_output_jsonlines.py @@ -35,7 +35,6 @@ from commoncode.testcase import FileDrivenTesting from scancode.cli_test_utils import run_scan_click - test_env = FileDrivenTesting() test_env.test_data_dir = os.path.join(os.path.dirname(__file__), 'data') @@ -48,10 +47,9 @@ def remove_variable_data(scan_result): for line in scan_result: header = line.get('header') if header: - del header['scancode_version'] + header.pop('scancode_version', None) for scanned_file in line.get('files', []): - if 'date' in scanned_file: - del scanned_file['date'] + scanned_file.pop('date', None) def check_jsonlines_scan(expected_file, result_file, regen=False): @@ -66,9 +64,8 @@ def check_jsonlines_scan(expected_file, result_file, regen=False): if regen: with open(expected_file, 'wb') as reg: - json.dump(result, reg) - - expected = _load_json_result(expected_file) + json.dump(result, reg, indent=2, separators=(',', ': ')) + expected = _load_json_result_for_jsonlines(expected_file) remove_variable_data(expected) assert expected == result @@ -82,7 +79,7 @@ def _load_jsonlines_result(result_file): return [json.loads(line, object_pairs_hook=OrderedDict) for line in res] -def _load_json_result(result_file): +def _load_json_result_for_jsonlines(result_file): """ Load the result file as utf-8 JSON """ @@ -93,10 +90,31 @@ def 
_load_json_result(result_file): def test_jsonlines(): test_dir = test_env.get_test_loc('json/simple') result_file = test_env.get_temp_file('jsonline') - - result = run_scan_click(['-i', '--format', 'jsonlines', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output - + run_scan_click(['-i', test_dir, '--json-lines', result_file]) expected = test_env.get_test_loc('json/simple-expected.jsonlines') check_jsonlines_scan(test_env.get_test_loc(expected), result_file, regen=False) + + +def test_jsonlines_with_timing(): + test_dir = test_env.get_test_loc('json/simple') + result_file = test_env.get_temp_file('jsonline') + run_scan_click(['-i', '--timing', test_dir, '--json-lines', result_file]) + file_results = _load_jsonlines_result(result_file) + first_line = True + for res in file_results: + if first_line: + # skip header + first_line = False + continue + scan_timings = res['files'][0]['scan_timings'] + + if not res['files'][0]['type'] == 'file': + # should be an empty dict for dirs + assert not scan_timings + continue + + assert scan_timings + + for scanner, timing in scan_timings.items(): + assert scanner in ('info',) + assert timing diff --git a/tests/formattedcode/test_format_spdx.py b/tests/formattedcode/test_output_spdx.py similarity index 72% rename from tests/formattedcode/test_format_spdx.py rename to tests/formattedcode/test_output_spdx.py index 545e64c82d2..5c8f64d4d24 100644 --- a/tests/formattedcode/test_format_spdx.py +++ b/tests/formattedcode/test_output_spdx.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -23,11 +23,12 @@ # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
from __future__ import absolute_import -from __future__ import print_function from __future__ import division +from __future__ import print_function from __future__ import unicode_literals import codecs +from collections import OrderedDict import os import re @@ -37,7 +38,6 @@ from scancode.cli_test_utils import run_scan_click from scancode.cli_test_utils import run_scan_plain - test_env = FileDrivenTesting() test_env.test_data_dir = os.path.join(os.path.dirname(__file__), 'data') @@ -70,32 +70,34 @@ def load_and_clean_rdf(location): """ content = codecs.open(location, encoding='utf-8').read() content = strip_variable_text(content) - data = xmltodict.parse(content, dict_constructor=dict) + data = xmltodict.parse(content, dict_constructor=OrderedDict) return sort_nested(data) def sort_nested(data): """ - Return a new dict with any nested list sorted recursively. + Return a new ordered and sorted mapping or sequence from a `data` mapping or + sequence with any nested sequences or mappings sorted recursively. 
""" - if isinstance(data, dict): - new_data = {} - for k, v in data.items(): - if isinstance(v, list): - v = sorted(v) - if isinstance(v, dict): + seqtypes = list, tuple + maptypes = OrderedDict, dict + coltypes = seqtypes + maptypes + + if isinstance(data, maptypes): + new_data = OrderedDict() + for k, v in sorted(data.items()): + if isinstance(v, coltypes): v = sort_nested(v) new_data[k] = v - return new_data - elif isinstance(data, list): + return OrderedDict(sorted(new_data.items())) + + elif isinstance(data, seqtypes): new_data = [] for v in sorted(data): - if isinstance(v, list): - v = sort_nested(v) - if isinstance(v, dict): + if isinstance(v, coltypes): v = sort_nested(v) new_data.append(v) - return new_data + return sorted(new_data) def check_rdf_scan(expected_file, result_file, regen=False): @@ -108,11 +110,13 @@ def check_rdf_scan(expected_file, result_file, regen=False): if regen: expected = result with codecs.open(expected_file, 'w', encoding='utf-8') as o: - json.dump(expected, o, indent=2) + json.dump(result, o, indent=2) else: with codecs.open(expected_file, 'r', encoding='utf-8') as i: - expected = sort_nested(json.load(i)) - assert expected == result + expected = json.load(i, object_pairs_hook=OrderedDict) + expected = load_and_clean_rdf(result_file) + + assert json.dumps(expected, indent=2) == json.dumps(result, indent=2) def load_and_clean_tv(location): @@ -145,8 +149,7 @@ def test_spdx_rdf_basic(): test_file = test_env.get_test_loc('spdx/simple/test.txt') result_file = test_env.get_temp_file('rdf') expected_file = test_env.get_test_loc('spdx/simple/expected.rdf') - result = run_scan_click(['--format', 'spdx-rdf', test_file, result_file]) - assert result.exit_code == 0 + run_scan_click([test_file, '-clip', '--output-spdx-rdf', result_file]) check_rdf_scan(expected_file, result_file) @@ -154,8 +157,7 @@ def test_spdx_tv_basic(): test_dir = test_env.get_test_loc('spdx/simple/test.txt') result_file = test_env.get_temp_file('tv') expected_file = 
test_env.get_test_loc('spdx/simple/expected.tv') - result = run_scan_click(['--format', 'spdx-tv', test_dir, result_file]) - assert result.exit_code == 0 + run_scan_click([test_dir, '-clip', '--output-spdx-tv', result_file]) check_tv_scan(expected_file, result_file) @@ -163,8 +165,7 @@ def test_spdx_rdf_with_known_licenses(): test_dir = test_env.get_test_loc('spdx/license_known/scan') result_file = test_env.get_temp_file('rdf') expected_file = test_env.get_test_loc('spdx/license_known/expected.rdf') - result = run_scan_click(['--format', 'spdx-rdf', test_dir, result_file]) - assert result.exit_code == 0 + run_scan_click([test_dir, '-clip', '--output-spdx-rdf', result_file]) check_rdf_scan(expected_file, result_file) @@ -172,8 +173,7 @@ def test_spdx_rdf_with_license_ref(): test_dir = test_env.get_test_loc('spdx/license_ref/scan') result_file = test_env.get_temp_file('rdf') expected_file = test_env.get_test_loc('spdx/license_ref/expected.rdf') - result = run_scan_click(['--format', 'spdx-rdf', test_dir, result_file]) - assert result.exit_code == 0 + run_scan_click([test_dir, '-clip', '--output-spdx-rdf', result_file]) check_rdf_scan(expected_file, result_file) @@ -181,8 +181,7 @@ def test_spdx_tv_with_known_licenses(): test_dir = test_env.get_test_loc('spdx/license_known/scan') result_file = test_env.get_temp_file('tv') expected_file = test_env.get_test_loc('spdx/license_known/expected.tv') - result = run_scan_click(['--format', 'spdx-tv', test_dir, result_file]) - assert result.exit_code == 0 + run_scan_click([test_dir, '-clip', '--output-spdx-tv', result_file]) check_tv_scan(expected_file, result_file) @@ -190,8 +189,7 @@ def test_spdx_tv_with_license_ref(): test_dir = test_env.get_test_loc('spdx/license_ref/scan') result_file = test_env.get_temp_file('tv') expected_file = test_env.get_test_loc('spdx/license_ref/expected.tv') - result = run_scan_click(['--format', 'spdx-tv', test_dir, result_file]) - assert result.exit_code == 0 + run_scan_click([test_dir, 
'-clip', '--output-spdx-tv', result_file]) check_tv_scan(expected_file, result_file) @@ -199,8 +197,7 @@ def test_spdx_rdf_with_known_licenses_with_text(): test_dir = test_env.get_test_loc('spdx/license_known/scan') result_file = test_env.get_temp_file('rdf') expected_file = test_env.get_test_loc('spdx/license_known/expected_with_text.rdf') - result = run_scan_click(['--format', 'spdx-rdf', '--license-text', test_dir, result_file]) - assert result.exit_code == 0 + run_scan_click([ '-clip', '--license-text', test_dir, '--output-spdx-rdf', result_file]) check_rdf_scan(expected_file, result_file) @@ -208,8 +205,7 @@ def test_spdx_rdf_with_license_ref_with_text(): test_dir = test_env.get_test_loc('spdx/license_ref/scan') result_file = test_env.get_temp_file('rdf') expected_file = test_env.get_test_loc('spdx/license_ref/expected_with_text.rdf') - result = run_scan_click(['--format', 'spdx-rdf', '--license-text', test_dir, result_file]) - assert result.exit_code == 0 + run_scan_click(['-clip', '--license-text', test_dir, '--output-spdx-rdf', result_file]) check_rdf_scan(expected_file, result_file) @@ -217,8 +213,7 @@ def test_spdx_tv_with_known_licenses_with_text(): test_dir = test_env.get_test_loc('spdx/license_known/scan') result_file = test_env.get_temp_file('tv') expected_file = test_env.get_test_loc('spdx/license_known/expected_with_text.tv') - result = run_scan_click(['--format', 'spdx-tv', '--license-text', test_dir, result_file]) - assert result.exit_code == 0 + run_scan_click(['-clip', '--license-text', test_dir, '--output-spdx-tv', result_file]) check_tv_scan(expected_file, result_file) @@ -226,8 +221,7 @@ def test_spdx_tv_with_license_ref_with_text(): test_dir = test_env.get_test_loc('spdx/license_ref/scan') result_file = test_env.get_temp_file('tv') expected_file = test_env.get_test_loc('spdx/license_ref/expected_with_text.tv') - result = run_scan_click(['--format', 'spdx-tv', '--license-text', test_dir, result_file]) - assert result.exit_code == 0 + 
run_scan_click(['-clip', '--license-text', test_dir, '--output-spdx-tv', result_file]) check_tv_scan(expected_file, result_file) @@ -235,8 +229,7 @@ def test_spdx_tv_tree(): test_dir = test_env.get_test_loc('spdx/tree/scan') result_file = test_env.get_temp_file('tv') expected_file = test_env.get_test_loc('spdx/tree/expected.tv') - result = run_scan_click(['--format', 'spdx-tv', test_dir, result_file]) - assert result.exit_code == 0 + run_scan_click(['-clip', test_dir, '--output-spdx-tv', result_file]) check_tv_scan(expected_file, result_file) @@ -244,8 +237,7 @@ def test_spdx_rdf_tree(): test_dir = test_env.get_test_loc('spdx/tree/scan') result_file = test_env.get_temp_file('rdf') expected_file = test_env.get_test_loc('spdx/tree/expected.rdf') - result = run_scan_click(['--format', 'spdx-rdf', test_dir, result_file]) - assert result.exit_code == 0 + run_scan_click(['-clip', test_dir, '--output-spdx-rdf', result_file]) check_rdf_scan(expected_file, result_file) @@ -253,48 +245,27 @@ def test_spdx_tv_with_unicode_license_text_does_not_fail(): test_file = test_env.get_test_loc('spdx/unicode/et131x.h') result_file = test_env.get_temp_file('tv') expected_file = test_env.get_test_loc('spdx/unicode/expected.tv') - rc, stdout, stderr = run_scan_plain([ - '--license', '--copyright', '--info', - '--format', 'spdx-tv', '--strip-root', '--license-text', - '--diag', - test_file, result_file - ]) - if rc != 0: - print('stdout', stdout) - print('stderr', stderr) - assert rc == 0 - check_tv_scan(expected_file, result_file, regen=False) + args = ['--license', '--copyright', '--info', '--strip-root', '--license-text', + '--license-diag', test_file, '--output-spdx-tv', result_file] + run_scan_plain(args) + check_tv_scan(expected_file, result_file) def test_spdx_rdf_with_unicode_license_text_does_not_fail(): test_file = test_env.get_test_loc('spdx/unicode/et131x.h') result_file = test_env.get_temp_file('rdf') expected_file = test_env.get_test_loc('spdx/unicode/expected.rdf') - rc, 
stdout, stderr = run_scan_plain([ - '--license', '--copyright', '--info', - '--format', 'spdx-rdf', '--strip-root', '--license-text', - '--diag', - test_file, result_file - ]) - if rc != 0: - print('stdout', stdout) - print('stderr', stderr) - assert rc == 0 - check_rdf_scan(expected_file, result_file, regen=False) + args = ['--license', '--copyright', '--info', '--strip-root', + '--license-text', '--license-diag', test_file, '--output-spdx-rdf', result_file] + run_scan_plain(args) + check_rdf_scan(expected_file, result_file) def test_spdx_rdf_with_or_later_license_does_not_fail(): test_file = test_env.get_test_loc('spdx/or_later/test.java') result_file = test_env.get_temp_file('rdf') expected_file = test_env.get_test_loc('spdx/or_later/expected.rdf') - rc, stdout, stderr = run_scan_plain([ - '--license', '--copyright', '--info', - '--format', 'spdx-rdf', '--strip-root', '--license-text', - '--diag', - test_file, result_file - ]) - if rc != 0: - print('stdout', stdout) - print('stderr', stderr) - assert rc == 0 - check_rdf_scan(expected_file, result_file, regen=False) + args = ['--license', '--copyright', '--info', '--strip-root', '--license-text', + '--license-diag', test_file, '--output-spdx-rdf', result_file] + run_scan_plain(args) + check_rdf_scan(expected_file, result_file) diff --git a/tests/formattedcode/test_format_templated.py b/tests/formattedcode/test_output_templated.py similarity index 80% rename from tests/formattedcode/test_format_templated.py rename to tests/formattedcode/test_output_templated.py index edf1181e177..75b03f61210 100644 --- a/tests/formattedcode/test_format_templated.py +++ b/tests/formattedcode/test_output_templated.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. 
# Data generated with ScanCode require an acknowledgment. @@ -30,12 +30,12 @@ import os import re +from scancode_config import __version__ + from commoncode import fileutils from commoncode.testcase import FileDrivenTesting -from scancode import __version__ from scancode.cli_test_utils import run_scan_click - test_env = FileDrivenTesting() test_env.test_data_dir = os.path.join(os.path.dirname(__file__), 'data') @@ -43,11 +43,7 @@ def test_paths_are_posix_paths_in_html_app_format_output(): test_dir = test_env.get_test_loc('templated/simple') result_file = test_env.get_temp_file(extension='html', file_name='test_html') - - result = run_scan_click(['--copyright', '--format', 'html-app', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output - + run_scan_click(['--copyright', test_dir, '--output-html-app', result_file]) # the data we want to test is in the data.json file data_file = os.path.join(fileutils.parent_directory(result_file), 'test_html_files', 'data.json') assert '/copyright_acme_c-c.c' in open(data_file).read() @@ -58,10 +54,7 @@ def test_paths_are_posix_paths_in_html_app_format_output(): def test_paths_are_posix_in_html_format_output(): test_dir = test_env.get_test_loc('templated/simple') result_file = test_env.get_temp_file('html') - - result = run_scan_click(['--copyright', '--format', 'html', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + run_scan_click(['--copyright', test_dir, '--output-html', result_file]) results = open(result_file).read() assert '/copyright_acme_c-c.c' in results assert __version__ in results @@ -70,11 +63,7 @@ def test_paths_are_posix_in_html_format_output(): def test_scanned_path_is_present_in_html_app_output(): test_dir = test_env.get_test_loc('templated/html_app') result_file = test_env.get_temp_file('test.html') - - result = run_scan_click(['--copyright', '--format', 'html-app', test_dir, result_file]) - assert result.exit_code == 0 - 
assert 'Scanning done' in result.output - + run_scan_click(['--copyright', '--output-html-app', result_file, test_dir]) results = open(result_file).read() assert 'ScanCode scan results for: %(test_dir)s' % locals() in results assert '

' % locals() in results @@ -87,12 +76,9 @@ def test_scan_html_output_does_not_truncate_copyright_html(): test_dir = test_env.get_test_loc('templated/tree/scan/') result_file = test_env.get_temp_file('test.html') - result = run_scan_click( - ['-clip', '--strip-root', '--format', 'html', '-n', '3', - test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + args = ['-clip', '--strip-root', '--verbose', test_dir, '--output-html', result_file, '--verbose'] + run_scan_click(args) results = open(result_file).read() assert __version__ in results @@ -128,19 +114,17 @@ def test_scan_html_output_does_not_truncate_copyright_html(): def test_custom_format_with_custom_filename_fails_for_directory(): test_dir = test_env.get_temp_dir('html') result_file = test_env.get_temp_file('html') - - result = run_scan_click(['--format', test_dir, test_dir, result_file]) - assert result.exit_code != 0 - assert 'Unknwow or invalid template file path' in result.output + args = ['--info', '--custom-template', test_dir, '--output-custom', result_file, test_dir] + result = run_scan_click(args, expected_rc=2) + assert 'Invalid value for "--custom-template": Path' in result.output def test_custom_format_with_custom_filename(): test_dir = test_env.get_test_loc('templated/simple') custom_template = test_env.get_test_loc('templated/sample-template.html') result_file = test_env.get_temp_file('html') - - result = run_scan_click(['--format', custom_template, test_dir, result_file]) - assert result.exit_code == 0 + args = ['--info', '--custom-template', custom_template, '--output-custom', result_file, test_dir] + run_scan_click(args) results = open(result_file).read() assert 'Custom Template' in results assert __version__ in results diff --git a/tests/licensedcode/data/index/test__add_rules.json b/tests/licensedcode/data/index/test__add_rules.json index 70699ae6c09..014dd598949 100644 --- a/tests/licensedcode/data/index/test__add_rules.json +++ 
b/tests/licensedcode/data/index/test__add_rules.json @@ -1,403 +1,403 @@ { "bsd-no-mod_1": { "warranty": [ - 47, + 47, 120 - ], + ], "interruption": [ 194 - ], + ], "requirement": [ 81 - ], + ], "exemplary": [ 171 - ], + ], "negligence": [ 211 - ], + ], "caused": [ 196 - ], + ], "fitness": [ 151 - ], + ], "herein": [ 21 - ], + ], "conditioned": [ 74 - ], + ], "minimum": [ 61 - ], + ], "including": [ - 76, - 139, - 175, + 76, + 139, + 175, 210 - ], + ], "names": [ - 89, + 89, 98 - ], + ], "substantially": [ 78 - ], + ], "further": [ 83 - ], + ], "event": [ 160 - ], + ], "substitute": [ 182 - ], + ], "use": [ - 2, - 188, + 2, + 188, 221 - ], + ], "strict": [ 206 - ], + ], "unmodified": [ - 23, + 23, 27 - ], + ], "shall": [ 161 - ], + ], "merchantibility": [ 149 - ], + ], "forms": [ 7 - ], + ], "tort": [ 209 - ], + ], "contained": [ 20 - ], + ], "materials": [ 19 - ], + ], "damages": [ - 174, + 174, 233 - ], + ], "above": [ - 36, + 36, 92 - ], + ], "endorse": [ 106 - ], + ], "consequential": [ 173 - ], + ], "neither": [ 87 - ], + ], "warranties": [ - 138, + 138, 146 - ], + ], "particular": [ 154 - ], + ], "notice": [ 38 - ], + ], "used": [ - 26, + 26, 104 - ], + ], "form": [ 57 - ], + ], "permission": [ 118 - ], + ], "express": [ 135 - ], + ], "however": [ 195 - ], + ], "possibility": [ 230 - ], + ], "otherwise": [ 213 - ], + ], "liability": [ - 202, + 202, 207 - ], + ], "met": [ 16 - ], + ], "goods": [ 183 - ], + ], "purpose": [ 155 - ], + ], "noninfringement": [ 148 - ], + ], "advised": [ 227 - ], + ], "promote": [ 108 - ], + ], "nor": [ 96 - ], + ], "must": [ - 33, - 58, + 33, + 58, 72 - ], + ], "loss": [ 186 - ], + ], "redistributions": [ - 29, + 29, 54 - ], + ], "disclaimed": [ 157 - ], + ], "contract": [ 205 - ], + ], "reproduce": [ 59 - ], + ], "procurement": [ 180 - ], + ], "implied": [ - 137, + 137, 145 - ], + ], "liable": [ 168 - ], + ], "retain": [ 34 - ], + ], "redistribution": [ - 0, - 71, + 0, + 71, 85 - ], + ], "arising": [ 214 - ], + ], "modification": 
[ 52 - ], + ], "profits": [ 191 - ], + ], "disclaimer": [ - 48, - 50, - 63, - 67, + 48, + 50, + 63, + 67, 80 ] - }, + }, "bsd-new_0": { "interruption": [ 173 - ], + ], "exemplary": [ 150 - ], + ], "negligence": [ 190 - ], + ], "caused": [ 175 - ], + ], "fitness": [ 126 - ], + ], "direct": [ 146 - ], + ], "including": [ - 115, - 154, + 115, + 154, 189 - ], + ], "names": [ 76 - ], + ], "owner": [ 139 - ], + ], "indirect": [ 147 - ], + ], "event": [ 135 - ], + ], "substitute": [ 161 - ], + ], "use": [ - 2, - 167, + 2, + 167, 200 - ], + ], "strict": [ 185 - ], + ], "shall": [ 136 - ], + ], "damage": [ 212 - ], + ], "forms": [ 7 - ], + ], "tort": [ 188 - ], + ], "materials": [ 63 - ], + ], "damages": [ 153 - ], + ], "above": [ - 28, + 28, 46 - ], + ], "endorse": [ 84 - ], + ], "consequential": [ 152 - ], + ], "neither": [ 68 - ], + ], "warranties": [ - 114, + 114, 122 - ], + ], "particular": [ 129 - ], + ], "notice": [ - 30, + 30, 48 - ], + ], "used": [ 82 - ], + ], "form": [ 42 - ], + ], "permission": [ 96 - ], + ], "express": [ 111 - ], + ], "however": [ 174 - ], + ], "possibility": [ 209 - ], + ], "otherwise": [ 192 - ], + ], "liability": [ - 181, + 181, 186 - ], + ], "met": [ 20 - ], + ], "goods": [ 162 - ], + ], "purpose": [ 130 - ], + ], "advised": [ 206 - ], + ], "promote": [ 86 - ], + ], "nor": [ 74 - ], + ], "must": [ - 25, + 25, 43 - ], + ], "loss": [ 165 - ], + ], "redistributions": [ - 21, + 21, 39 - ], + ], "disclaimed": [ 132 - ], + ], "incidental": [ 148 - ], + ], "contract": [ 184 - ], + ], "reproduce": [ 44 - ], + ], "procurement": [ 159 - ], + ], "implied": [ - 113, + 113, 121 - ], + ], "liable": [ 143 - ], + ], "retain": [ 26 - ], + ], "merchantability": [ 124 - ], + ], "redistribution": [ 0 - ], + ], "arising": [ 193 - ], + ], "modification": [ 11 - ], + ], "profits": [ 170 - ], + ], "disclaimer": [ - 38, + 38, 56 ] } diff --git a/tests/licensedcode/data/index/test__add_rules_with_templates.json 
b/tests/licensedcode/data/index/test__add_rules_with_templates.json index 80df55df84a..6a1b178da6b 100644 --- a/tests/licensedcode/data/index/test__add_rules_with_templates.json +++ b/tests/licensedcode/data/index/test__add_rules_with_templates.json @@ -1,402 +1,402 @@ { "bsd-no-mod_1": { "warranty": [ - 45, + 45, 118 - ], + ], "interruption": [ 192 - ], + ], "requirement": [ 79 - ], + ], "exemplary": [ 169 - ], + ], "negligence": [ 209 - ], + ], "caused": [ 194 - ], + ], "fitness": [ 149 - ], + ], "herein": [ 19 - ], + ], "conditioned": [ 72 - ], + ], "minimum": [ 59 - ], + ], "including": [ - 74, - 137, - 173, + 74, + 137, + 173, 208 - ], + ], "names": [ - 87, + 87, 96 - ], + ], "substantially": [ 76 - ], + ], "further": [ 81 - ], + ], "event": [ 158 - ], + ], "substitute": [ 180 - ], + ], "use": [ - 186, + 186, 219 - ], + ], "strict": [ 204 - ], + ], "unmodified": [ - 21, + 21, 25 - ], + ], "shall": [ 159 - ], + ], "merchantibility": [ 147 - ], + ], "forms": [ 5 - ], + ], "tort": [ 207 - ], + ], "contained": [ 18 - ], + ], "materials": [ 17 - ], + ], "damages": [ - 172, + 172, 231 - ], + ], "above": [ - 34, + 34, 90 - ], + ], "endorse": [ 104 - ], + ], "consequential": [ 171 - ], + ], "neither": [ 85 - ], + ], "warranties": [ - 136, + 136, 144 - ], + ], "particular": [ 152 - ], + ], "notice": [ 36 - ], + ], "used": [ - 24, + 24, 102 - ], + ], "form": [ 55 - ], + ], "permission": [ 116 - ], + ], "express": [ 133 - ], + ], "however": [ 193 - ], + ], "possibility": [ 228 - ], + ], "otherwise": [ 211 - ], + ], "liability": [ - 200, + 200, 205 - ], + ], "met": [ 14 - ], + ], "goods": [ 181 - ], + ], "purpose": [ 153 - ], + ], "noninfringement": [ 146 - ], + ], "advised": [ 225 - ], + ], "promote": [ 106 - ], + ], "nor": [ 94 - ], + ], "must": [ - 31, - 56, + 31, + 56, 70 - ], + ], "loss": [ 184 - ], + ], "redistributions": [ - 27, + 27, 52 - ], + ], "disclaimed": [ 155 - ], + ], "contract": [ 203 - ], + ], "reproduce": [ 57 - ], + ], "procurement": [ 178 - ], + ], 
"implied": [ - 135, + 135, 143 - ], + ], "liable": [ 166 - ], + ], "retain": [ 32 - ], + ], "redistribution": [ - 0, - 69, + 0, + 69, 83 - ], + ], "arising": [ 212 - ], + ], "modification": [ 50 - ], + ], "profits": [ 189 - ], + ], "disclaimer": [ - 46, - 48, - 61, - 65, + 46, + 48, + 61, + 65, 78 ] - }, + }, "bsd-new_0": { "interruption": [ 171 - ], + ], "exemplary": [ 148 - ], + ], "negligence": [ 188 - ], + ], "caused": [ 173 - ], + ], "fitness": [ 124 - ], + ], "direct": [ 144 - ], + ], "including": [ - 113, - 152, + 113, + 152, 187 - ], + ], "names": [ 74 - ], + ], "owner": [ 137 - ], + ], "indirect": [ 145 - ], + ], "event": [ 133 - ], + ], "substitute": [ 159 - ], + ], "use": [ - 2, - 165, + 2, + 165, 198 - ], + ], "strict": [ 183 - ], + ], "shall": [ 134 - ], + ], "damage": [ 210 - ], + ], "forms": [ 7 - ], + ], "tort": [ 186 - ], + ], "materials": [ 63 - ], + ], "damages": [ 151 - ], + ], "above": [ - 28, + 28, 46 - ], + ], "endorse": [ 82 - ], + ], "consequential": [ 150 - ], + ], "neither": [ 68 - ], + ], "warranties": [ - 112, + 112, 120 - ], + ], "particular": [ 127 - ], + ], "notice": [ - 30, + 30, 48 - ], + ], "used": [ 80 - ], + ], "form": [ 42 - ], + ], "permission": [ 94 - ], + ], "express": [ 109 - ], + ], "however": [ 172 - ], + ], "possibility": [ 207 - ], + ], "otherwise": [ 190 - ], + ], "liability": [ - 179, + 179, 184 - ], + ], "met": [ 20 - ], + ], "goods": [ 160 - ], + ], "purpose": [ 128 - ], + ], "advised": [ 204 - ], + ], "promote": [ 84 - ], + ], "nor": [ 72 - ], + ], "must": [ - 25, + 25, 43 - ], + ], "loss": [ 163 - ], + ], "redistributions": [ - 21, + 21, 39 - ], + ], "disclaimed": [ 130 - ], + ], "incidental": [ 146 - ], + ], "contract": [ 182 - ], + ], "reproduce": [ 44 - ], + ], "procurement": [ 157 - ], + ], "implied": [ - 111, + 111, 119 - ], + ], "liable": [ 141 - ], + ], "retain": [ 26 - ], + ], "merchantability": [ 122 - ], + ], "redistribution": [ 0 - ], + ], "arising": [ 191 - ], + ], "modification": [ 11 - ], + ], 
"profits": [ 168 - ], + ], "disclaimer": [ - 38, + 38, 56 ] } diff --git a/tests/licensedcode/data/index/test_init_with_rules.json b/tests/licensedcode/data/index/test_init_with_rules.json index 70699ae6c09..014dd598949 100644 --- a/tests/licensedcode/data/index/test_init_with_rules.json +++ b/tests/licensedcode/data/index/test_init_with_rules.json @@ -1,403 +1,403 @@ { "bsd-no-mod_1": { "warranty": [ - 47, + 47, 120 - ], + ], "interruption": [ 194 - ], + ], "requirement": [ 81 - ], + ], "exemplary": [ 171 - ], + ], "negligence": [ 211 - ], + ], "caused": [ 196 - ], + ], "fitness": [ 151 - ], + ], "herein": [ 21 - ], + ], "conditioned": [ 74 - ], + ], "minimum": [ 61 - ], + ], "including": [ - 76, - 139, - 175, + 76, + 139, + 175, 210 - ], + ], "names": [ - 89, + 89, 98 - ], + ], "substantially": [ 78 - ], + ], "further": [ 83 - ], + ], "event": [ 160 - ], + ], "substitute": [ 182 - ], + ], "use": [ - 2, - 188, + 2, + 188, 221 - ], + ], "strict": [ 206 - ], + ], "unmodified": [ - 23, + 23, 27 - ], + ], "shall": [ 161 - ], + ], "merchantibility": [ 149 - ], + ], "forms": [ 7 - ], + ], "tort": [ 209 - ], + ], "contained": [ 20 - ], + ], "materials": [ 19 - ], + ], "damages": [ - 174, + 174, 233 - ], + ], "above": [ - 36, + 36, 92 - ], + ], "endorse": [ 106 - ], + ], "consequential": [ 173 - ], + ], "neither": [ 87 - ], + ], "warranties": [ - 138, + 138, 146 - ], + ], "particular": [ 154 - ], + ], "notice": [ 38 - ], + ], "used": [ - 26, + 26, 104 - ], + ], "form": [ 57 - ], + ], "permission": [ 118 - ], + ], "express": [ 135 - ], + ], "however": [ 195 - ], + ], "possibility": [ 230 - ], + ], "otherwise": [ 213 - ], + ], "liability": [ - 202, + 202, 207 - ], + ], "met": [ 16 - ], + ], "goods": [ 183 - ], + ], "purpose": [ 155 - ], + ], "noninfringement": [ 148 - ], + ], "advised": [ 227 - ], + ], "promote": [ 108 - ], + ], "nor": [ 96 - ], + ], "must": [ - 33, - 58, + 33, + 58, 72 - ], + ], "loss": [ 186 - ], + ], "redistributions": [ - 29, + 29, 54 - ], + ], 
"disclaimed": [ 157 - ], + ], "contract": [ 205 - ], + ], "reproduce": [ 59 - ], + ], "procurement": [ 180 - ], + ], "implied": [ - 137, + 137, 145 - ], + ], "liable": [ 168 - ], + ], "retain": [ 34 - ], + ], "redistribution": [ - 0, - 71, + 0, + 71, 85 - ], + ], "arising": [ 214 - ], + ], "modification": [ 52 - ], + ], "profits": [ 191 - ], + ], "disclaimer": [ - 48, - 50, - 63, - 67, + 48, + 50, + 63, + 67, 80 ] - }, + }, "bsd-new_0": { "interruption": [ 173 - ], + ], "exemplary": [ 150 - ], + ], "negligence": [ 190 - ], + ], "caused": [ 175 - ], + ], "fitness": [ 126 - ], + ], "direct": [ 146 - ], + ], "including": [ - 115, - 154, + 115, + 154, 189 - ], + ], "names": [ 76 - ], + ], "owner": [ 139 - ], + ], "indirect": [ 147 - ], + ], "event": [ 135 - ], + ], "substitute": [ 161 - ], + ], "use": [ - 2, - 167, + 2, + 167, 200 - ], + ], "strict": [ 185 - ], + ], "shall": [ 136 - ], + ], "damage": [ 212 - ], + ], "forms": [ 7 - ], + ], "tort": [ 188 - ], + ], "materials": [ 63 - ], + ], "damages": [ 153 - ], + ], "above": [ - 28, + 28, 46 - ], + ], "endorse": [ 84 - ], + ], "consequential": [ 152 - ], + ], "neither": [ 68 - ], + ], "warranties": [ - 114, + 114, 122 - ], + ], "particular": [ 129 - ], + ], "notice": [ - 30, + 30, 48 - ], + ], "used": [ 82 - ], + ], "form": [ 42 - ], + ], "permission": [ 96 - ], + ], "express": [ 111 - ], + ], "however": [ 174 - ], + ], "possibility": [ 209 - ], + ], "otherwise": [ 192 - ], + ], "liability": [ - 181, + 181, 186 - ], + ], "met": [ 20 - ], + ], "goods": [ 162 - ], + ], "purpose": [ 130 - ], + ], "advised": [ 206 - ], + ], "promote": [ 86 - ], + ], "nor": [ 74 - ], + ], "must": [ - 25, + 25, 43 - ], + ], "loss": [ 165 - ], + ], "redistributions": [ - 21, + 21, 39 - ], + ], "disclaimed": [ 132 - ], + ], "incidental": [ 148 - ], + ], "contract": [ 184 - ], + ], "reproduce": [ 44 - ], + ], "procurement": [ 159 - ], + ], "implied": [ - 113, + 113, 121 - ], + ], "liable": [ 143 - ], + ], "retain": [ 26 - ], + ], 
"merchantability": [ 124 - ], + ], "redistribution": [ 0 - ], + ], "arising": [ 193 - ], + ], "modification": [ 11 - ], + ], "profits": [ 170 - ], + ], "disclaimer": [ - 38, + 38, 56 ] } diff --git a/tests/licensedcode/data/models/licenses.expected.json b/tests/licensedcode/data/models/licenses.expected.json index dff77edc128..ff308c45cd4 100644 --- a/tests/licensedcode/data/models/licenses.expected.json +++ b/tests/licensedcode/data/models/licenses.expected.json @@ -1,141 +1,141 @@ [ { - "key": "w3c-docs-19990405", - "short_name": "W3C-DOCS-19990405", - "name": "W3C Document Notice and License (1999-04-05)", - "category": "Permissive Restricted", - "owner": "W3C - World Wide Web Consortium", + "key": "w3c-docs-19990405", + "short_name": "W3C-DOCS-19990405", + "name": "W3C Document Notice and License (1999-04-05)", + "category": "Permissive Restricted", + "owner": "W3C - World Wide Web Consortium", "homepage_url": "http://www.w3.org/Consortium/Legal/copyright-documents-19990405" - }, + }, { - "key": "gpl-2.0-library", - "short_name": "GPL 2.0 with Library exception", - "name": "GNU General Public License 2.0 with Library exception", - "category": "Copyleft Limited", - "owner": "Grammatica", - "is_exception": true, + "key": "gpl-2.0-library", + "short_name": "GPL 2.0 with Library exception", + "name": "GNU General Public License 2.0 with Library exception", + "category": "Copyleft Limited", + "owner": "Grammatica", + "is_exception": true, "other_urls": [ "http://grammatica.percederberg.net/index.html" ] - }, + }, { - "key": "bsd-ack-carrot2", - "short_name": "BSD Acknowledgment (Carrot2) License", - "name": "BSD Acknowledgment (Carrot2) License", - "category": "Permissive", - "owner": "Carrot2", - "homepage_url": "http://www.carrot2.org/carrot2.LICENSE", + "key": "bsd-ack-carrot2", + "short_name": "BSD Acknowledgment (Carrot2) License", + "name": "BSD Acknowledgment (Carrot2) License", + "category": "Permissive", + "owner": "Carrot2", + "homepage_url": 
"http://www.carrot2.org/carrot2.LICENSE", "minimum_coverage": 80 - }, + }, { - "key": "gpl-3.0-plus", - "short_name": "GPL 3.0 or later", - "name": "GNU General Public License 3.0 or later", - "category": "Copyleft", - "owner": "Free Software Foundation (FSF)", - "homepage_url": "http://www.gnu.org/licenses/gpl-3.0-standalone.html", - "notes": "notes from SPDX:\nThis license was released: 29 June 2007 This license is OSI certified.", - "is_or_later": true, - "base_license": "gpl-3.0", + "key": "gpl-3.0-plus", + "short_name": "GPL 3.0 or later", + "name": "GNU General Public License 3.0 or later", + "category": "Copyleft", + "owner": "Free Software Foundation (FSF)", + "homepage_url": "http://www.gnu.org/licenses/gpl-3.0-standalone.html", + "notes": "notes from SPDX:\nThis license was released: 29 June 2007 This license is OSI certified.", + "is_or_later": true, + "base_license": "gpl-3.0", "spdx_license_key": "GPL-3.0+" - }, + }, { - "key": "apache-2.0", - "short_name": "Apache 2.0", - "name": "Apache License 2.0", - "category": "Permissive", - "owner": "Apache Software Foundation", - "homepage_url": "http://www.apache.org/licenses/", - "spdx_license_key": "Apache-2.0", + "key": "apache-2.0", + "short_name": "Apache 2.0", + "name": "Apache License 2.0", + "category": "Permissive", + "owner": "Apache Software Foundation", + "homepage_url": "http://www.apache.org/licenses/", + "spdx_license_key": "Apache-2.0", "text_urls": [ "http://www.apache.org/licenses/LICENSE-2.0" - ], - "osi_url": "http://opensource.org/licenses/apache2.0.php", + ], + "osi_url": "http://opensource.org/licenses/apache2.0.php", "faq_url": "http://www.apache.org/foundation/licence-FAQ.html" - }, + }, { - "key": "gpl-1.0-plus", - "short_name": "GPL 1.0 or later", - "name": "GNU General Public License 1.0 or later", - "category": "Copyleft", - "owner": "Free Software Foundation (FSF)", - "homepage_url": "http://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html", - "notes": "notes from 
SPDX:\nThis license was released: February 1989.", - "next_version": "gpl-2.0-plus", - "is_or_later": true, - "base_license": "gpl-1.0", + "key": "gpl-1.0-plus", + "short_name": "GPL 1.0 or later", + "name": "GNU General Public License 1.0 or later", + "category": "Copyleft", + "owner": "Free Software Foundation (FSF)", + "homepage_url": "http://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html", + "notes": "notes from SPDX:\nThis license was released: February 1989.", + "next_version": "gpl-2.0-plus", + "is_or_later": true, + "base_license": "gpl-1.0", "spdx_license_key": "GPL-1.0+" - }, + }, { - "key": "gpl-2.0-plus", - "short_name": "GPL 2.0 or later", - "name": "GNU General Public License 2.0 or later", - "category": "Copyleft", - "owner": "Free Software Foundation (FSF)", - "homepage_url": "http://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html", - "notes": "notes from SPDX:\nThis license was released: June 1991 This license is OSI certified.", - "next_version": "gpl-3.0-plus", - "is_or_later": true, - "base_license": "gpl-2.0", + "key": "gpl-2.0-plus", + "short_name": "GPL 2.0 or later", + "name": "GNU General Public License 2.0 or later", + "category": "Copyleft", + "owner": "Free Software Foundation (FSF)", + "homepage_url": "http://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html", + "notes": "notes from SPDX:\nThis license was released: June 1991 This license is OSI certified.", + "next_version": "gpl-3.0-plus", + "is_or_later": true, + "base_license": "gpl-2.0", "spdx_license_key": "GPL-2.0+" - }, + }, { - "key": "gpl-1.0", - "short_name": "GPL 1.0", - "name": "GNU General Public License 1.0", - "category": "Copyleft", - "owner": "Free Software Foundation (FSF)", - "homepage_url": "http://www.gnu.org/licenses/gpl-1.0.html", - "notes": "notes from SPDX:\nThis license was released: February 1989.", - "next_version": "gpl-2.0", - "spdx_license_key": "GPL-1.0", + "key": "gpl-1.0", + "short_name": "GPL 1.0", + "name": "GNU General 
Public License 1.0", + "category": "Copyleft", + "owner": "Free Software Foundation (FSF)", + "homepage_url": "http://www.gnu.org/licenses/gpl-1.0.html", + "notes": "notes from SPDX:\nThis license was released: February 1989.", + "next_version": "gpl-2.0", + "spdx_license_key": "GPL-1.0", "text_urls": [ "http://www.gnu.org/licenses/gpl-1.0.txt" - ], - "faq_url": "http://www.gnu.org/licenses/gpl-faq.html", + ], + "faq_url": "http://www.gnu.org/licenses/gpl-faq.html", "other_urls": [ "http://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html" ] - }, + }, { - "key": "gpl-3.0", - "short_name": "GPL 3.0", - "name": "GNU General Public License 3.0", - "category": "Copyleft", - "owner": "Free Software Foundation (FSF)", - "homepage_url": "http://www.gnu.org/licenses/gpl-3.0.html", - "notes": "notes from SPDX:\nThis license was released: 29 June 2007 This license is OSI certified.", - "spdx_license_key": "GPL-3.0", + "key": "gpl-3.0", + "short_name": "GPL 3.0", + "name": "GNU General Public License 3.0", + "category": "Copyleft", + "owner": "Free Software Foundation (FSF)", + "homepage_url": "http://www.gnu.org/licenses/gpl-3.0.html", + "notes": "notes from SPDX:\nThis license was released: 29 June 2007 This license is OSI certified.", + "spdx_license_key": "GPL-3.0", "text_urls": [ - "http://www.gnu.org/licenses/gpl-3.0.txt", + "http://www.gnu.org/licenses/gpl-3.0.txt", "http://www.gnu.org/licenses/gpl-3.0-standalone.html" - ], - "osi_url": "http://opensource.org/licenses/gpl-3.0.html", - "faq_url": "http://www.gnu.org/licenses/gpl-faq.html", + ], + "osi_url": "http://opensource.org/licenses/gpl-3.0.html", + "faq_url": "http://www.gnu.org/licenses/gpl-faq.html", "other_urls": [ "http://www.gnu.org/licenses/quick-guide-gplv3.html" ] - }, + }, { - "key": "gpl-2.0", - "short_name": "GPL 2.0", - "name": "GNU General Public License 2.0", - "category": "Copyleft", - "owner": "Free Software Foundation (FSF)", - "homepage_url": "http://www.gnu.org/licenses/gpl-2.0.html", - 
"notes": "This is the last version of the GPL text as published by the FSF. This license was released: June 1991 This license is OSI certified.\n", - "next_version": "gpl-3.0", - "spdx_license_key": "GPL-2.0", + "key": "gpl-2.0", + "short_name": "GPL 2.0", + "name": "GNU General Public License 2.0", + "category": "Copyleft", + "owner": "Free Software Foundation (FSF)", + "homepage_url": "http://www.gnu.org/licenses/gpl-2.0.html", + "notes": "This is the last version of the GPL text as published by the FSF. This license was released: June 1991 This license is OSI certified.\n", + "next_version": "gpl-3.0", + "spdx_license_key": "GPL-2.0", "text_urls": [ - "http://www.gnu.org/licenses/gpl-2.0.txt", + "http://www.gnu.org/licenses/gpl-2.0.txt", "http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt" - ], - "osi_url": "http://opensource.org/licenses/gpl-license.php", - "faq_url": "http://www.gnu.org/licenses/old-licenses/gpl-2.0-faq.html", + ], + "osi_url": "http://opensource.org/licenses/gpl-license.php", + "faq_url": "http://www.gnu.org/licenses/old-licenses/gpl-2.0-faq.html", "other_urls": [ - "http://creativecommons.org/licenses/GPL/2.0/", - "http://creativecommons.org/choose/cc-gpl", - "http://creativecommons.org/images/public/cc-GPL-a.png", + "http://creativecommons.org/licenses/GPL/2.0/", + "http://creativecommons.org/choose/cc-gpl", + "http://creativecommons.org/images/public/cc-GPL-a.png", "http://creativecommons.org/licenses/GPL/2.0/legalcode.pt" ] } diff --git a/tests/licensedcode/data/models/rules.expected.json b/tests/licensedcode/data/models/rules.expected.json index a8e63110c3d..4f8e327f0cc 100644 --- a/tests/licensedcode/data/models/rules.expected.json +++ b/tests/licensedcode/data/models/rules.expected.json @@ -3,51 +3,51 @@ "licenses": [ "apache-2.0" ] - }, + }, { "licenses": [ "gpl-1.0" ] - }, + }, { "licenses": [ "gpl-1.0-plus" ] - }, + }, { "licenses": [ "gpl-2.0" ] - }, + }, { "licenses": [ "gpl-2.0-library" ] - }, + }, { "licenses": [ 
"gpl-2.0-plus" ] - }, + }, { "licenses": [ "gpl-3.0" ] - }, + }, { "licenses": [ "gpl-3.0-plus" ] - }, + }, { "licenses": [ "w3c-docs-19990405" ] - }, + }, { "licenses": [ "bsd-ack-carrot2" - ], + ], "minimum_coverage": 80 } ] \ No newline at end of file diff --git a/tests/licensedcode/data/perf/whatever.py b/tests/licensedcode/data/perf/whatever.py index 132cb692b3a..52e19fcc1e2 100644 --- a/tests/licensedcode/data/perf/whatever.py +++ b/tests/licensedcode/data/perf/whatever.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# flake8: noqa # # Stonith module for RILOE Stonith device # diff --git a/tests/licensedcode/license_test_utils.py b/tests/licensedcode/license_test_utils.py index 92be2126122..701a22fd176 100644 --- a/tests/licensedcode/license_test_utils.py +++ b/tests/licensedcode/license_test_utils.py @@ -40,13 +40,13 @@ unicode except NameError: # Python 3 - unicode = str - + unicode = str # NOQA """ License test utilities. """ + def make_license_test_function( expected_licenses, test_file, test_data_file, test_name, detect_negative=True, min_score=0, diff --git a/tests/licensedcode/test_cache.py b/tests/licensedcode/test_cache.py index 6dcaf2d0b4c..54b149b45d0 100644 --- a/tests/licensedcode/test_cache.py +++ b/tests/licensedcode/test_cache.py @@ -33,7 +33,7 @@ from commoncode import fileutils from commoncode import hash from licensedcode import cache - +from licensedcode.cache import get_license_cache_paths TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') @@ -113,197 +113,122 @@ def test_tree_checksum_is_different_when_file_is_removed(self): after = cache.tree_checksum(test_dir) assert before != after - def test_get_or_build_index_through_cache(self): + def test_build_index(self): # note: this is a rather complex test because caching involves some globals - license_index_cache_dir = self.get_temp_dir('index_cache') - _index_lock_file = os.path.join(license_index_cache_dir, 'lockfile') - _tree_checksum_file = os.path.join(license_index_cache_dir, 
'tree_checksums') - _index_cache_file = os.path.join(license_index_cache_dir, 'index_cache') - - _tree_base_dir = self.get_temp_dir('src_dir') - - _licenses_dir = self.get_test_loc('cache/data', copy=True) - _licenses_data_dir = os.path.join(_licenses_dir, 'licenses') - _rules_data_dir = os.path.join(_licenses_dir, 'rules') + cache_dir = self.get_temp_dir('index_cache') + lock_file, checksum_file, cache_file = get_license_cache_paths(cache_dir=cache_dir) + tree_base_dir = self.get_temp_dir('src_dir') + licenses_data_dir = self.get_test_loc('cache/data/licenses', copy=True) + rules_data_dir = self.get_test_loc('cache/data/rules', copy=True) - _timeout = 10 + timeout = 10 - assert not os.path.exists(_tree_checksum_file) - assert not os.path.exists(_index_cache_file) - assert not os.path.exists(_index_lock_file) + assert not os.path.exists(checksum_file) + assert not os.path.exists(cache_file) + assert not os.path.exists(lock_file) check_consistency = True - return_index = False # when a new index is built, new index files are created - cache.get_or_build_index_through_cache( - check_consistency, - return_index, - _tree_base_dir, - _tree_checksum_file, - _index_lock_file, - _index_cache_file, - _licenses_data_dir, - _rules_data_dir, - _timeout) - - assert os.path.exists(_tree_checksum_file) - assert os.path.exists(_index_cache_file) - assert not os.path.exists(_index_lock_file) + cache.get_cached_index(cache_dir, check_consistency, timeout, + tree_base_dir, licenses_data_dir, rules_data_dir) + + assert os.path.exists(checksum_file) + assert os.path.exists(cache_file) + assert not os.path.exists(lock_file) # when nothing changed a new index files is not created - tree_before = open(_tree_checksum_file).read() - idx_checksum_before = hash.sha1(_index_cache_file) - idx_date_before = date.get_file_mtime(_index_cache_file) - cache.get_or_build_index_through_cache( - check_consistency, - return_index, - _tree_base_dir, - _tree_checksum_file, - _index_lock_file, - 
_index_cache_file, - _licenses_data_dir, - _rules_data_dir, - _timeout) - assert tree_before == open(_tree_checksum_file).read() - assert idx_checksum_before == hash.sha1(_index_cache_file) - assert idx_date_before == date.get_file_mtime(_index_cache_file) + tree_before = open(checksum_file).read() + idx_checksum_before = hash.sha1(cache_file) + idx_date_before = date.get_file_mtime(cache_file) + cache.get_cached_index(cache_dir, check_consistency, timeout, + tree_base_dir, licenses_data_dir, rules_data_dir) + assert tree_before == open(checksum_file).read() + assert idx_checksum_before == hash.sha1(cache_file) + assert idx_date_before == date.get_file_mtime(cache_file) # now add some file in the source tree - new_file = os.path.join(_tree_base_dir, 'some file') + new_file = os.path.join(tree_base_dir, 'some file') with open(new_file, 'wb') as nf: nf.write('somthing') # when check_consistency is False, the index is not rebuild when # new files are added check_consistency = False - cache.get_or_build_index_through_cache( - check_consistency, - return_index, - _tree_base_dir, - _tree_checksum_file, - _index_lock_file, - _index_cache_file, - _licenses_data_dir, - _rules_data_dir, - _timeout) - assert tree_before == open(_tree_checksum_file).read() - assert idx_checksum_before == hash.sha1(_index_cache_file) - assert idx_date_before == date.get_file_mtime(_index_cache_file) + cache.get_cached_index(cache_dir, check_consistency, timeout, + tree_base_dir, licenses_data_dir, rules_data_dir) + assert tree_before == open(checksum_file).read() + assert idx_checksum_before == hash.sha1(cache_file) + assert idx_date_before == date.get_file_mtime(cache_file) # when check_consistency is True, the index is rebuilt when new # files are added check_consistency = True - cache.get_or_build_index_through_cache( - check_consistency, - return_index, - _tree_base_dir, - _tree_checksum_file, - _index_lock_file, - _index_cache_file, - _licenses_data_dir, - _rules_data_dir, - _timeout) - 
assert tree_before != open(_tree_checksum_file).read() - assert idx_date_before != date.get_file_mtime(_index_cache_file) + cache.get_cached_index(cache_dir, check_consistency, timeout, + tree_base_dir, licenses_data_dir, rules_data_dir) + assert tree_before != open(checksum_file).read() + assert idx_date_before != date.get_file_mtime(cache_file) # now add some ignored file in the source tree - tree_before = open(_tree_checksum_file).read() - idx_checksum_before = hash.sha1(_index_cache_file) - idx_date_before = date.get_file_mtime(_index_cache_file) - new_file = os.path.join(_tree_base_dir, 'some file.pyc') + tree_before = open(checksum_file).read() + idx_checksum_before = hash.sha1(cache_file) + idx_date_before = date.get_file_mtime(cache_file) + new_file = os.path.join(tree_base_dir, 'some file.pyc') with open(new_file, 'wb') as nf: nf.write('somthing') check_consistency = True - cache.get_or_build_index_through_cache( - check_consistency, - return_index, - _tree_base_dir, - _tree_checksum_file, - _index_lock_file, - _index_cache_file, - _licenses_data_dir, - _rules_data_dir, - _timeout) - - assert tree_before == open(_tree_checksum_file).read() - assert idx_checksum_before == hash.sha1(_index_cache_file) - assert idx_date_before == date.get_file_mtime(_index_cache_file) + cache.get_cached_index(cache_dir, check_consistency, timeout, + tree_base_dir, licenses_data_dir, rules_data_dir) + + assert tree_before == open(checksum_file).read() + assert idx_checksum_before == hash.sha1(cache_file) + assert idx_date_before == date.get_file_mtime(cache_file) # if the treechecksum file dies the index is rebuilt - fileutils.delete(_tree_checksum_file) - idx_checksum_before = hash.sha1(_index_cache_file) + fileutils.delete(checksum_file) + idx_checksum_before = hash.sha1(cache_file) check_consistency = False - cache.get_or_build_index_through_cache( - check_consistency, - return_index, - _tree_base_dir, - _tree_checksum_file, - _index_lock_file, - _index_cache_file, - 
_licenses_data_dir, - _rules_data_dir, - _timeout) - - assert tree_before == open(_tree_checksum_file).read() - assert idx_date_before != date.get_file_mtime(_index_cache_file) + cache.get_cached_index(cache_dir, check_consistency, timeout, + tree_base_dir, licenses_data_dir, rules_data_dir) + + assert tree_before == open(checksum_file).read() + assert idx_date_before != date.get_file_mtime(cache_file) # if the index cache file dies the index is rebuilt - fileutils.delete(_index_cache_file) + fileutils.delete(cache_file) check_consistency = False - cache.get_or_build_index_through_cache( - check_consistency, - return_index, - _tree_base_dir, - _tree_checksum_file, - _index_lock_file, - _index_cache_file, - _licenses_data_dir, - _rules_data_dir, - _timeout) - - assert tree_before == open(_tree_checksum_file).read() - assert os.path.exists(_index_cache_file) + cache.get_cached_index(cache_dir, check_consistency, timeout, + tree_base_dir, licenses_data_dir, rules_data_dir) - def test__load_index(self): - license_index_cache_dir = self.get_temp_dir('index_cache') - _index_lock_file = os.path.join(license_index_cache_dir, 'lockfile') - _tree_checksum_file = os.path.join(license_index_cache_dir, 'tree_checksums') - _index_cache_file = os.path.join(license_index_cache_dir, 'index_cache') + assert tree_before == open(checksum_file).read() + assert os.path.exists(cache_file) - _tree_base_dir = self.get_temp_dir('src_dir') + def test__load_index(self): + cache_dir = self.get_temp_dir('index_cache') - _licenses_dir = self.get_test_loc('cache/data', copy=True) - _licenses_data_dir = os.path.join(_licenses_dir, 'licenses') - _rules_data_dir = os.path.join(_licenses_dir, 'rules') + lock_file, checksum_file, cache_file = get_license_cache_paths(cache_dir=cache_dir) + tree_base_dir = self.get_temp_dir('src_dir') + licenses_data_dir = self.get_test_loc('cache/data/licenses', copy=True) + rules_data_dir = self.get_test_loc('cache/data/rules', copy=True) - _timeout = 10 + timeout = 
10 - assert not os.path.exists(_tree_checksum_file) - assert not os.path.exists(_index_cache_file) - assert not os.path.exists(_index_lock_file) + assert not os.path.exists(checksum_file) + assert not os.path.exists(cache_file) + assert not os.path.exists(lock_file) check_consistency = True - return_index = True # Create a basic index - idx1 = cache.get_or_build_index_through_cache( - check_consistency, - return_index, - _tree_base_dir, - _tree_checksum_file, - _index_lock_file, - _index_cache_file, - _licenses_data_dir, - _rules_data_dir, - _timeout) - - assert os.path.exists(_tree_checksum_file) - assert os.path.exists(_index_cache_file) - assert not os.path.exists(_index_lock_file) - - idx2 = cache._load_index(_index_cache_file) + idx1 = cache.get_cached_index(cache_dir, check_consistency, timeout, + tree_base_dir, licenses_data_dir, rules_data_dir) + + assert os.path.exists(checksum_file) + assert os.path.exists(cache_file) + assert not os.path.exists(lock_file) + + idx2 = cache.load_index(cache_file) assert idx1.to_dict(True) == idx2.to_dict(True) diff --git a/tests/licensedcode/test_detect.py b/tests/licensedcode/test_detect.py index 11c80141213..5581f85c111 100644 --- a/tests/licensedcode/test_detect.py +++ b/tests/licensedcode/test_detect.py @@ -45,11 +45,11 @@ TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') - """ Test the core license detection mechanics. 
""" + class TestIndexMatch(FileBasedTesting): test_data_dir = TEST_DATA_DIR @@ -1018,7 +1018,7 @@ def test_match_texts_with_short_lgpl_and_gpl_notices(self): matches = idx.match(location=test_loc) assert 6 == len(matches) results = [m.matched_text(whole_lines=False) for m in matches] - expected =[ + expected = [ 'GNU General Public License (GPL', 'GNU Lesser General Public License (LGPL', 'GNU General Public License (GPL', diff --git a/tests/licensedcode/test_detection_datadriven.py b/tests/licensedcode/test_detection_datadriven.py index 359990f649d..4e3c3782b29 100644 --- a/tests/licensedcode/test_detection_datadriven.py +++ b/tests/licensedcode/test_detection_datadriven.py @@ -39,17 +39,16 @@ from license_test_utils import make_license_test_function - TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data/licenses') # set to True to print matched texts on test failure. TRACE_TEXTS = True - """ Data-driven tests using expectations stored in YAML files. """ + class LicenseTest(object): """ A license detection test is used to verify that license detection works @@ -69,6 +68,7 @@ class LicenseTest(object): If the list of licenses is empty, then this test should not detect any license in the test file. """ + def __init__(self, data_file=None, test_file=None): self.data_file = data_file self.test_file = test_file diff --git a/tests/licensedcode/test_detection_validate.py b/tests/licensedcode/test_detection_validate.py index 364d21d1735..bfd83b9b1f3 100644 --- a/tests/licensedcode/test_detection_validate.py +++ b/tests/licensedcode/test_detection_validate.py @@ -35,11 +35,11 @@ from license_test_utils import make_license_test_function - """ Validate that each license text and each rule is properly detected. 
""" + def build_license_validation_tests(licenses_by_key, cls): """ Dynamically build an individual test method for each license texts in a licenses diff --git a/tests/licensedcode/test_index.py b/tests/licensedcode/test_index.py index c5f7a1536c0..efe1b3f53fc 100644 --- a/tests/licensedcode/test_index.py +++ b/tests/licensedcode/test_index.py @@ -39,7 +39,6 @@ from licensedcode.query import Query from licensedcode import match_seq - TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') @@ -477,14 +476,6 @@ def test_match_with_template_and_multiple_rules(self): NEGLIGENCE OR OTHERWISE ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE """.split() -# q = Query(query_string=querys, idx=idx) - -# print('######################') -# print('######################') -# print('q=', querys.lower().replace('*', ' ').replace('/', ' '). split()) -# print('q2=', [None if t is None else idx.tokens_by_tid[t] for t in q.tokens_with_unknowns()]) -# print('######################') - qtext, itext = get_texts(match, query_string=querys, idx=idx) assert exp_qtext == qtext.split() diff --git a/tests/licensedcode/test_legal.py b/tests/licensedcode/test_legal.py index 215e4f47ead..3a896e4a5bf 100644 --- a/tests/licensedcode/test_legal.py +++ b/tests/licensedcode/test_legal.py @@ -31,7 +31,6 @@ from commoncode.testcase import FileBasedTesting from licensedcode import legal - TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') diff --git a/tests/licensedcode/test_match.py b/tests/licensedcode/test_match.py index fdaee87f825..ff6109b86c0 100644 --- a/tests/licensedcode/test_match.py +++ b/tests/licensedcode/test_match.py @@ -39,7 +39,6 @@ from licensedcode.match import merge_matches from licensedcode.match import get_full_matched_text - TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') @@ -715,6 +714,7 @@ def test_LicenseMatch_score_100_non_contiguous(self): m1 = LicenseMatch(rule=r1, qspan=Span(0, 19) | 
Span(30, 51), ispan=Span(0, 41)) assert m1.score() == 80.77 + class TestCollectLicenseMatchTexts(FileBasedTesting): test_data_dir = TEST_DATA_DIR diff --git a/tests/licensedcode/test_match_aho.py b/tests/licensedcode/test_match_aho.py index 19111531921..24786e58c3f 100644 --- a/tests/licensedcode/test_match_aho.py +++ b/tests/licensedcode/test_match_aho.py @@ -35,9 +35,9 @@ from licensedcode import models from licensedcode import query - TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') + class TestMatchExact(FileBasedTesting): test_data_dir = TEST_DATA_DIR diff --git a/tests/licensedcode/test_match_hash.py b/tests/licensedcode/test_match_hash.py index 00476f919ec..69e41647b1b 100644 --- a/tests/licensedcode/test_match_hash.py +++ b/tests/licensedcode/test_match_hash.py @@ -36,11 +36,9 @@ from licensedcode import models from licensedcode import match_hash - TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') - class TestHashMatch(FileBasedTesting): test_data_dir = TEST_DATA_DIR diff --git a/tests/licensedcode/test_match_seq.py b/tests/licensedcode/test_match_seq.py index bebfd81c7c1..9781d3878b2 100644 --- a/tests/licensedcode/test_match_seq.py +++ b/tests/licensedcode/test_match_seq.py @@ -36,9 +36,9 @@ from licensedcode.models import load_rules from licensedcode import match_seq - TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') + class TestMatchSeq(FileBasedTesting): test_data_dir = TEST_DATA_DIR diff --git a/tests/licensedcode/test_models.py b/tests/licensedcode/test_models.py index 8bea4026ed5..95d57dd0e92 100644 --- a/tests/licensedcode/test_models.py +++ b/tests/licensedcode/test_models.py @@ -36,7 +36,6 @@ from licensedcode import index from licensedcode import models - TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') @@ -91,6 +90,7 @@ def test_create_template_rule(self): assert 6 == test_rule.length def test_create_plain_rule_with_text_file(self): + def create_test_file(text): tf = 
self.get_temp_file() with open(tf, 'wb') as of: diff --git a/tests/licensedcode/test_performance.py b/tests/licensedcode/test_performance.py index 48e4a9a17f9..45c2fa7433b 100644 --- a/tests/licensedcode/test_performance.py +++ b/tests/licensedcode/test_performance.py @@ -35,12 +35,11 @@ from licensedcode import index from licensedcode import models - TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') - # Instructions: Comment out the skip decorators to run a test. Do not commit without a skip + class TestMatchingPerf(FileBasedTesting): test_data_dir = TEST_DATA_DIR diff --git a/tests/licensedcode/test_query.py b/tests/licensedcode/test_query.py index 1b4fa3753eb..33f3ec9ec06 100644 --- a/tests/licensedcode/test_query.py +++ b/tests/licensedcode/test_query.py @@ -36,7 +36,6 @@ from licensedcode.models import Rule from licensedcode.query import Query - TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') @@ -554,7 +553,7 @@ def test_query_run_and_tokenizing_breaking_works__with_plus_as_expected(self): result = [qr.to_dict() for qr in q.query_runs] expected = [ {'end': 121, 'start': 0, - 'tokens': + 'tokens': 'this library is free software you can redistribute it ' 'and or modify it under the terms of the gnu library ' 'general public license as published by the free software ' diff --git a/tests/licensedcode/test_tokenize.py b/tests/licensedcode/test_tokenize.py index 95c40f94079..5bdb6f19dae 100644 --- a/tests/licensedcode/test_tokenize.py +++ b/tests/licensedcode/test_tokenize.py @@ -44,7 +44,6 @@ from licensedcode.tokenize import matched_query_text_tokenizer from licensedcode.tokenize import tokens_and_non_tokens - TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') diff --git a/tests/packagedcode/data/pypi/setup.py b/tests/packagedcode/data/pypi/setup.py index b66230b99dd..90274e96609 100644 --- a/tests/packagedcode/data/pypi/setup.py +++ b/tests/packagedcode/data/pypi/setup.py @@ -1,5 +1,6 @@ #!/usr/bin/env python # -*- 
encoding: utf-8 -*- +# flake8: noqa from __future__ import absolute_import, print_function import io diff --git a/tests/packagedcode/data/pypi/setup2.py b/tests/packagedcode/data/pypi/setup2.py index b66230b99dd..90274e96609 100644 --- a/tests/packagedcode/data/pypi/setup2.py +++ b/tests/packagedcode/data/pypi/setup2.py @@ -1,5 +1,6 @@ #!/usr/bin/env python # -*- encoding: utf-8 -*- +# flake8: noqa from __future__ import absolute_import, print_function import io diff --git a/tests/packagedcode/packages_test_utils.py b/tests/packagedcode/packages_test_utils.py index 4419172f624..2e61491f40e 100644 --- a/tests/packagedcode/packages_test_utils.py +++ b/tests/packagedcode/packages_test_utils.py @@ -22,7 +22,8 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. -from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function from collections import OrderedDict import os.path @@ -50,7 +51,6 @@ def make_locations_relative(self, package_dict): package_dict[key] = values return package_dict - def check_package(self, package, expected_loc, regen=False, fix_locations=True): """ Helper to test a package object against an expected JSON file. 
diff --git a/tests/packagedcode/test_maven.py b/tests/packagedcode/test_maven.py index 1ef082236a3..8652a77644b 100644 --- a/tests/packagedcode/test_maven.py +++ b/tests/packagedcode/test_maven.py @@ -57,7 +57,7 @@ def test_is_pom_non_pom(self): def test_is_pom_maven2(self): test_dir = self.get_test_loc('maven2') - for test_file in fileutils.file_iter(test_dir): + for test_file in fileutils.resource_iter(test_dir, with_dirs=False): if test_file.endswith('.json'): continue @@ -70,7 +70,7 @@ def test_is_pom_not_misc2(self): def test_is_pom_m2(self): test_dir = self.get_test_loc('m2') - for test_file in fileutils.file_iter(test_dir): + for test_file in fileutils.resource_iter(test_dir, with_dirs=False): if test_file.endswith('.json'): continue @@ -404,11 +404,15 @@ def create_test_function(test_pom_loc, test_name, check_pom=True, regen=False): """ # closure on the test params if check_pom: + def test_pom(self): self.check_parse_pom(test_pom_loc, regen) + else: + def test_pom(self): self.check_parse_to_package(test_pom_loc, regen) + # set a proper function name to display in reports and use in discovery # function names are best as bytes if isinstance(test_name, unicode): @@ -438,24 +442,28 @@ def build_tests(test_dir, clazz, prefix='test_maven2_parse_', check_pom=True, re class TestMavenDataDrivenParsePom(BaseMavenCase): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') + build_tests(test_dir='maven_misc/parse', clazz=TestMavenDataDrivenParsePom, prefix='test_maven2_parse_', check_pom=True, regen=False) class TestMavenDataDrivenParsePomBasic(BaseMavenCase): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') + build_tests(test_dir='maven2', clazz=TestMavenDataDrivenParsePomBasic, prefix='test_maven2_basic_parse_', check_pom=True, regen=False) class TestMavenDataDrivenCreatePackageBasic(BaseMavenCase): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') + build_tests(test_dir='maven2', clazz=TestMavenDataDrivenCreatePackageBasic, 
prefix='test_maven2_basic_package_', check_pom=False, regen=False) class TestMavenDataDrivenParsePomComprehensive(BaseMavenCase): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') + # note: we use short dir names to deal with Windows long paths limitations build_tests(test_dir='m2', clazz=TestMavenDataDrivenParsePomComprehensive, prefix='test_maven2_parse', check_pom=True, regen=False) @@ -463,6 +471,7 @@ class TestMavenDataDrivenParsePomComprehensive(BaseMavenCase): class TestMavenDataDrivenCreatePackageComprehensive(BaseMavenCase): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') + # note: we use short dir names to deal with Windows long paths limitations build_tests(test_dir='m2', clazz=TestMavenDataDrivenCreatePackageComprehensive, prefix='test_maven2_package', check_pom=False, regen=False) diff --git a/tests/packagedcode/test_nuget.py b/tests/packagedcode/test_nuget.py index 8d78c72783e..52c4681c0f0 100644 --- a/tests/packagedcode/test_nuget.py +++ b/tests/packagedcode/test_nuget.py @@ -22,14 +22,14 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
-from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function +from collections import OrderedDict import os.path from commoncode.testcase import FileBasedTesting - from packagedcode import nuget -from collections import OrderedDict class TestNuget(FileBasedTesting): @@ -138,5 +138,4 @@ def test_parse_creates_package_from_nuspec(self): ('legal_file_locations', []), ('license_expression', None), ('license_texts', []), ('notice_texts', []), ('dependencies', {}), ('related_packages', [])]) - assert expected == package.to_dict() diff --git a/tests/packagedcode/test_package_utils.py b/tests/packagedcode/test_package_utils.py index 34dd02a3473..ff1d5731f90 100644 --- a/tests/packagedcode/test_package_utils.py +++ b/tests/packagedcode/test_package_utils.py @@ -101,7 +101,7 @@ def test_parse_repo_url_13(self): test = 'git@gitlab.com:foo/private.git' expected = 'https://gitlab.com/foo/private.git' assert expected == parse_repo_url(test) - + def test_parse_git_repo_url_without_slash_slash(self): test = 'git@github.com/Filirom1/npm2aur.git' expected = 'https://github.com/Filirom1/npm2aur.git' diff --git a/tests/packagedcode/test_pypi.py b/tests/packagedcode/test_pypi.py index 4df781123f4..fbb4a3bd660 100644 --- a/tests/packagedcode/test_pypi.py +++ b/tests/packagedcode/test_pypi.py @@ -28,7 +28,6 @@ import os.path import shutil - from commoncode.testcase import FileBasedTesting from packagedcode import pypi diff --git a/tests/scancode/data/altpath/copyright.expected.json b/tests/scancode/data/altpath/copyright.expected.json index 38c437da23a..baf21fed85b 100644 --- a/tests/scancode/data/altpath/copyright.expected.json +++ b/tests/scancode/data/altpath/copyright.expected.json @@ -1,11 +1,11 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. 
No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--copyright": true, "--info": true, - "--license-score": 0, - "--strip-root": true, - "--format": "json" + "--json": "", + "--strip-root": true }, "files_count": 1, "files": [ @@ -18,7 +18,6 @@ "size": 55, "sha1": "e2466d5b764d27fb301ceb439ffb5da22e43ab1d", "md5": "bdf7c572beb4094c2059508fa73c05a4", - "files_count": null, "mime_type": "text/plain", "file_type": "UTF-8 Unicode text, with no line terminators", "programming_language": "C", @@ -28,7 +27,6 @@ "is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "copyrights": [ { "statements": [ @@ -41,7 +39,11 @@ "start_line": 1, "end_line": 1 } - ] + ], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] } ] } \ No newline at end of file diff --git a/tests/scancode/data/composer/composer.expected.json b/tests/scancode/data/composer/composer.expected.json index 13cdc7241b0..126dbc1d7d1 100644 --- a/tests/scancode/data/composer/composer.expected.json +++ b/tests/scancode/data/composer/composer.expected.json @@ -1,15 +1,14 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { - "--package": true, - "--license-score": 0, - "--format": "json" + "input": "", + "--json": "", + "--package": true }, "files_count": 1, "files": [ { - "path": "composer/composer.json", - "scan_errors": [], + "path": "composer.json", "packages": [ { "type": "phpcomposer", @@ -119,7 +118,8 @@ }, "related_packages": [] } - ] + ], + "scan_errors": [] } ] } \ No newline at end of file diff --git a/tests/scancode/data/failing/patchelf.expected.json b/tests/scancode/data/failing/patchelf.expected.json index 9d803185f32..68f8b43fa6b 100644 --- a/tests/scancode/data/failing/patchelf.expected.json +++ b/tests/scancode/data/failing/patchelf.expected.json @@ -1,19 +1,19 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--copyright": true, - "--license-score": 0, - "--strip-root": true, - "--format": "json" + "--json": "", + "--strip-root": true }, "files_count": 1, "files": [ { "path": "patchelf.pdf", + "copyrights": [], "scan_errors": [ - "ERROR: copyrights: unpack requires a string argument of length 8" - ], - "copyrights": [] + "ERROR: for scanner: copyrights:\nerror: unpack requires a string argument of length 8\n" + ] } ] } \ No newline at end of file diff --git a/tests/scancode/data/help/help.txt b/tests/scancode/data/help/help.txt index 9ffd2a0e6ad..8030e231f4d 100644 --- a/tests/scancode/data/help/help.txt +++ b/tests/scancode/data/help/help.txt @@ -1,88 +1,122 @@ -Usage: scancode [OPTIONS] +Usage: scancode [OPTIONS] - scan the file or directory for origin clues and license and save - results to the . + scan the file or directory for license, origin and packages and save + results to FILE(s) using one or more ouput format option. - The scan results are printed to stdout if is not provided. - Error and progress is printed to stderr. + Error and progress are printed to stderr. Options: - scans: - -c, --copyright Scan for copyrights. [default] - -l, --license Scan for licenses. [default] - -p, --package Scan for packages. [default] - -e, --email Scan for emails. - -u, --url Scan for urls. - -i, --info Include information such as size, type, etc. - --license-score INTEGER Do not return license matches with scores lower + primary scans: + -l, --license Scan for licenses. + -p, --package Scan for packages. + -c, --copyright Scan for copyrights. + + other scans: + -i, --info Scan for file information (size, type, checksums, etc). + -e, --email Scan for emails. + -u, --url Scan for urls. + + scan options: + --license-diag Include diagnostic information in license scan + results. 
+ --license-score INTEGER Do not return license matches with a score lower than this score. A number between 0 and 100. [default: 0] - --license-text Include the detected licenses matched text. Has - no effect unless --license is requested. + --license-text Include the detected licenses matched text. --license-url-template TEXT Set the template URL used for the license - reference URLs. In a template URL, curly braces - ({}) are replaced by the license key. [default: h - ttps://enterprise.dejacode.com/urn/urn:dje:licens - e:{}] - - output: - --strip-root Strip the root directory segment of all paths. The - default is to always include the last directory segment - of the scanned path such that all paths have a common - root directory. This cannot be combined with `--full- - root` option. - --full-root Report full, absolute paths. The default is to always - include the last directory segment of the scanned path - such that all paths have a common root directory. This - cannot be combined with the `--strip-root` option. - -f, --format Set format to one of: csv, html, html- - app, json, json-pp, jsonlines, spdx-rdf, spdx-tv or use - as the path to a custom template file - [default: json] - --verbose Print verbose file-by-file progress messages. - --quiet Do not print summary or progress messages. + reference URLs. Curly braces ({}) are replaced by + the license key. [default: https://enterprise.de + jacode.com/urn/urn:dje:license:{}] - pre-scan: - --ignore Ignore files matching . + output formats: + --json FILE Write scan output as compact JSON to FILE. + --json-pp FILE Write scan output as pretty-printed JSON to FILE. + --json-lines FILE Write scan output as JSON Lines to FILE. + --output-csv FILE Write scan output as CSV to FILE. + --output-html FILE Write scan output as HTML to FILE. + --output-custom FILE Write scan output to FILE formatted with the custom + Jinja template file. + --custom-template FILE Use this Jinja template FILE as a custom template. 
+ --output-html-app FILE Write scan output as a mini HTML application to FILE. + --output-spdx-rdf FILE Write scan output as SPDX RDF to FILE. + --output-spdx-tv FILE Write scan output as SPDX Tag/Value to FILE. - post-scan: - --mark-source Set the "is_source" flag to true for directories that contain - over 90% of source files as direct children. Has no effect - unless the --info scan is requested. + output filters: --only-findings Only return files or directories with findings for the requested scans. Files and directories without findings are - omitted (not considering basic file information as findings). + omitted (file information is not treated as findings). + + output control: + --full-root Report full, absolute paths. The default is to always include + the last directory segment of the scanned path such that all + paths have a common root directory. + --strip-root Strip the root directory segment of all paths. The default is to + always include the last directory segment of the scanned path + such that all paths have a common root directory. - misc: - --reindex-licenses Force a check and possible reindexing of the cached - license index. + pre-scan: + --ignore Ignore files matching . + + post-scan: + --mark-source Set the "is_source" to true for directories that contain over + 90% of source files as children and descendants. Count the + number of source files in a directory as a new + source_file_counts attribute core: - -h, --help Show this message and exit. - -n, --processes INTEGER Scan using n parallel processes. [default: - 1] - --examples Show command examples and exit. - --about Show information about ScanCode and licensing and - exit. - --version Show the version and exit. - --diag Include additional diagnostic information such as - error messages or result details. - --timeout FLOAT Stop scanning a file if scanning takes longer than a - timeout in seconds. [default: 120] + --timeout Stop an unfinished file scan after a timeout in + seconds. 
[default: 120 seconds] + -n, --processes INT Set the number of parallel processes to use. Disable + parallel processing if 0. Also disable threading if + -1. [default: 1] + --quiet Do not print summary or progress. + --verbose Print progress as file-by-file path instead of a + progress bar. Print a verbose scan summary. + --cache-dir DIR Set the path to an existing directory where ScanCode + can cache files available across runs.If not set, the + value of the `SCANCODE_CACHE` environment variable is + used if available. If `SCANCODE_CACHE` is not set, a + default sub-directory in the user home directory is + used instead. [default: ~/.cache/scancode-tk/version] + --temp-dir DIR Set the path to an existing directory where ScanCode + can create temporary files. If not set, the value of + the `SCANCODE_TMP` environment variable is used if + available. If `SCANCODE_TMP` is not set, a default + sub-directory in the system temp directory is used + instead. [default: TMP/scancode-tk-] + --timing Collect scan timing for each scan/scanned file. + --max-in-memory INTEGER Maximum number of files and directories scan details + kept in memory during a scan. Additional files and + directories scan details above this number are cached + on-disk rather than in memory. Use 0 to use unlimited + memory and disable on-disk caching. Use -1 to use + only on-disk caching. [default: 10000] + + miscellaneous: + --reindex-licenses Check the license index cache and reindex if needed and + exit. + --test-mode Run ScanCode in a special "test mode". Only for testing. + + documentation: + -h, --help Show this message and exit. + --about Show information about ScanCode and licensing and exit. + --version Show the version and exit. + --examples Show command examples and exit. + --plugins Show the list of available ScanCode plugins and exit. Examples (use --examples for more): Scan the 'samples' directory for licenses and copyrights. 
- Save scan results to a JSON file: + Save scan results to the 'scancode_result.json' JSON file: - scancode --format json samples scancode_result.json + scancode --license --copyright --json=scancode_result.json samples - Scan the 'samples' directory for licenses and copyrights. Save scan results to - an HTML app file for interactive web browser results navigation. Additional app - files are saved to the 'myscan_files' directory: + Scan the 'samples' directory for licenses and package manifests. Print scan + results on screen as pretty-formatted JSON (using the special '-' FILE to print + to on screen/to stdout): - scancode --format html-app samples myscan.html + scancode --json-pp - --license --package samples Note: when you run scancode, a progress bar is displayed with a counter of the number of files processed. Use --verbose to display file-by-file diff --git a/tests/scancode/data/info/all.expected.json b/tests/scancode/data/info/all.expected.json index 37abfe5779f..dc7b27cb980 100644 --- a/tests/scancode/data/info/all.expected.json +++ b/tests/scancode/data/info/all.expected.json @@ -1,14 +1,14 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--copyright": true, - "--license": true, "--info": true, - "--license-score": 0, - "--strip-root": true, - "--format": "json" + "--json": "", + "--license": true, + "--strip-root": true }, - "files_count": 11, + "files_count": 6, "files": [ { "path": "basic", @@ -16,11 +16,10 @@ "name": "basic", "base_name": "basic", "extension": "", + "size": 0, "date": null, - "size": 57066, "sha1": null, "md5": null, - "files_count": 6, "mime_type": null, "file_type": null, "programming_language": null, @@ -30,9 +29,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], - "copyrights": [] + "copyrights": [], + "files_count": 6, + "dirs_count": 4, + "size_count": 57066, + "scan_errors": [] }, { "path": "basic/dbase.fdt", @@ -40,11 +42,10 @@ "name": "dbase.fdt", "base_name": "dbase", "extension": ".fdt", - "date": "2015-06-19", "size": 183, + "date": "2015-06-19", "sha1": "94f059d6478d5f460e79b6b948a7fa9849d4dfdc", "md5": "15240737ec72b9e88b485a663bd045f9", - "files_count": null, "mime_type": "application/octet-stream", "file_type": "data", "programming_language": null, @@ -54,9 +55,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "basic/dir", @@ -64,11 +68,10 @@ "name": "dir", "base_name": "dir", "extension": "", + "size": 0, "date": null, - "size": 18486, "sha1": null, "md5": null, - "files_count": 2, "mime_type": null, "file_type": null, "programming_language": null, @@ -78,9 +81,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], - "copyrights": [] + "copyrights": [], + "files_count": 2, + "dirs_count": 1, + "size_count": 18486, + "scan_errors": [] }, { "path": 
"basic/dir/e.tar", @@ -88,11 +94,10 @@ "name": "e.tar", "base_name": "e", "extension": ".tar", - "date": "2015-06-19", "size": 10240, + "date": "2015-06-19", "sha1": "b655d4fc48e19910b9ab7a7102a8c051a39818f1", "md5": "393e789f4e4b2be93a46d0619380b445", - "files_count": null, "mime_type": "application/x-tar", "file_type": "POSIX tar archive (GNU)", "programming_language": null, @@ -102,9 +107,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "basic/dir/subdir", @@ -112,11 +120,10 @@ "name": "subdir", "base_name": "subdir", "extension": "", + "size": 0, "date": null, - "size": 8246, "sha1": null, "md5": null, - "files_count": 1, "mime_type": null, "file_type": null, "programming_language": null, @@ -126,9 +133,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], - "copyrights": [] + "copyrights": [], + "files_count": 1, + "dirs_count": 0, + "size_count": 8246, + "scan_errors": [] }, { "path": "basic/dir/subdir/a.aif", @@ -136,11 +146,10 @@ "name": "a.aif", "base_name": "a", "extension": ".aif", - "date": "2015-06-19", "size": 8246, + "date": "2015-06-19", "sha1": "03cfd28bb49d1dab105dcce1663630be1ddd821a", "md5": "b2b073a64e4d568ce7b641c1857a7116", - "files_count": null, "mime_type": "audio/x-aiff", "file_type": "IFF data, AIFF audio", "programming_language": null, @@ -150,9 +159,12 @@ "is_media": true, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "basic/dir2", @@ -160,11 +172,10 @@ "name": "dir2", "base_name": "dir2", "extension": "", + "size": 0, "date": null, - "size": 36457, "sha1": null, "md5": null, - "files_count": 2, "mime_type": null, "file_type": null, 
"programming_language": null, @@ -174,9 +185,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], - "copyrights": [] + "copyrights": [], + "files_count": 2, + "dirs_count": 1, + "size_count": 36457, + "scan_errors": [] }, { "path": "basic/dir2/subdir", @@ -184,11 +198,10 @@ "name": "subdir", "base_name": "subdir", "extension": "", + "size": 0, "date": null, - "size": 36457, "sha1": null, "md5": null, - "files_count": 2, "mime_type": null, "file_type": null, "programming_language": null, @@ -198,9 +211,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], - "copyrights": [] + "copyrights": [], + "files_count": 2, + "dirs_count": 0, + "size_count": 36457, + "scan_errors": [] }, { "path": "basic/dir2/subdir/bcopy.s", @@ -208,11 +224,10 @@ "name": "bcopy.s", "base_name": "bcopy", "extension": ".s", - "date": "2015-06-19", "size": 32452, + "date": "2015-06-19", "sha1": "99f20eafc7b7e1c8e21bced55c8cdd05339bec77", "md5": "e1c66adaf6b8aa90e348668ac4869a61", - "files_count": null, "mime_type": "text/x-c", "file_type": "C source, ASCII text, with CRLF line terminators", "programming_language": "GAS", @@ -222,7 +237,6 @@ "is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "licenses": [ { "key": "bsd-original-uc", @@ -267,7 +281,11 @@ "start_line": 34, "end_line": 37 } - ] + ], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "basic/dir2/subdir/config.conf", @@ -275,11 +293,10 @@ "name": "config.conf", "base_name": "config", "extension": ".conf", - "date": "2015-06-19", "size": 4005, + "date": "2015-06-19", "sha1": "4d567dc15d2117445389edfb64ced872329bceaa", "md5": "107dd38273ab10ce12058a3c8977e4ee", - "files_count": null, "mime_type": "text/plain", "file_type": "ASCII text, with CRLF line terminators", "programming_language": null, @@ -289,9 +306,12 @@ "is_media": false, "is_source": false, "is_script": false, - 
"scan_errors": [], "licenses": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "basic/main.c", @@ -299,11 +319,10 @@ "name": "main.c", "base_name": "main", "extension": ".c", - "date": "2015-06-19", "size": 1940, + "date": "2015-06-19", "sha1": "35017ed9762bdc9d16054a40ceefdda8b362083a", "md5": "8d0a3b3fe1c96a49af2a66040193291b", - "files_count": null, "mime_type": "text/x-c", "file_type": "C source, ASCII text", "programming_language": "C", @@ -313,7 +332,6 @@ "is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "licenses": [ { "key": "gpl-2.0", @@ -372,7 +390,11 @@ "start_line": 2, "end_line": 3 } - ] + ], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] } ] } \ No newline at end of file diff --git a/tests/scancode/data/info/all.rooted.expected.json b/tests/scancode/data/info/all.rooted.expected.json index 54aae79eab5..0552a6bfe29 100644 --- a/tests/scancode/data/info/all.rooted.expected.json +++ b/tests/scancode/data/info/all.rooted.expected.json @@ -1,82 +1,89 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--copyright": true, - "--license": true, "--email": true, - "--url": true, - "--license-score": 0, - "--format": "json" + "--json": "", + "--license": true, + "--url": true }, - "files_count": 11, + "files_count": 6, "files": [ + { + "path": "basic.tgz", + "licenses": [], + "copyrights": [], + "emails": [], + "urls": [], + "scan_errors": [] + }, { "path": "basic.tgz/basic", - "scan_errors": [], "licenses": [], "copyrights": [], "emails": [], - "urls": [] + "urls": [], + "scan_errors": [] }, { "path": "basic.tgz/basic/dbase.fdt", - "scan_errors": [], "licenses": [], "copyrights": [], "emails": [], - "urls": [] + "urls": [], + "scan_errors": [] }, { "path": "basic.tgz/basic/dir", - "scan_errors": [], "licenses": [], "copyrights": [], "emails": [], - "urls": [] + "urls": [], + "scan_errors": [] }, { "path": "basic.tgz/basic/dir/e.tar", - "scan_errors": [], "licenses": [], "copyrights": [], "emails": [], - "urls": [] + "urls": [], + "scan_errors": [] }, { "path": "basic.tgz/basic/dir/subdir", - "scan_errors": [], "licenses": [], "copyrights": [], "emails": [], - "urls": [] + "urls": [], + "scan_errors": [] }, { "path": "basic.tgz/basic/dir/subdir/a.aif", - "scan_errors": [], "licenses": [], "copyrights": [], "emails": [], - "urls": [] + "urls": [], + "scan_errors": [] }, { "path": "basic.tgz/basic/dir2", - "scan_errors": [], "licenses": [], "copyrights": [], "emails": [], - "urls": [] + "urls": [], + "scan_errors": [] }, { "path": "basic.tgz/basic/dir2/subdir", - "scan_errors": [], "licenses": [], "copyrights": [], "emails": [], - "urls": [] + "urls": [], + "scan_errors": [] }, { "path": "basic.tgz/basic/dir2/subdir/bcopy.s", - "scan_errors": [], "licenses": [ { "key": "bsd-original-uc", @@ -135,11 +142,11 @@ "start_line": 17, "end_line": 17 } - ] + ], + "scan_errors": [] }, { "path": "basic.tgz/basic/dir2/subdir/config.conf", - "scan_errors": 
[], "licenses": [], "copyrights": [], "emails": [], @@ -149,11 +156,11 @@ "start_line": 2, "end_line": 2 } - ] + ], + "scan_errors": [] }, { "path": "basic.tgz/basic/main.c", - "scan_errors": [], "licenses": [ { "key": "gpl-2.0", @@ -220,7 +227,8 @@ "end_line": 3 } ], - "urls": [] + "urls": [], + "scan_errors": [] } ] } \ No newline at end of file diff --git a/tests/scancode/data/info/basic.expected.json b/tests/scancode/data/info/basic.expected.json index 3b419812f9d..bc955859f59 100644 --- a/tests/scancode/data/info/basic.expected.json +++ b/tests/scancode/data/info/basic.expected.json @@ -1,12 +1,12 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--info": true, - "--license-score": 0, - "--strip-root": true, - "--format": "json" + "--json": "", + "--strip-root": true }, - "files_count": 11, + "files_count": 6, "files": [ { "path": "basic", @@ -14,11 +14,10 @@ "name": "basic", "base_name": "basic", "extension": "", + "size": 0, "date": null, - "size": 57066, "sha1": null, "md5": null, - "files_count": 6, "mime_type": null, "file_type": null, "programming_language": null, @@ -28,6 +27,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 6, + "dirs_count": 4, + "size_count": 57066, "scan_errors": [] }, { @@ -36,11 +38,10 @@ "name": "dbase.fdt", "base_name": "dbase", "extension": ".fdt", - "date": "2015-06-19", "size": 183, + "date": "2015-06-19", "sha1": "94f059d6478d5f460e79b6b948a7fa9849d4dfdc", "md5": "15240737ec72b9e88b485a663bd045f9", - "files_count": null, "mime_type": "application/octet-stream", "file_type": 
"data", "programming_language": null, @@ -50,6 +51,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -58,11 +62,10 @@ "name": "dir", "base_name": "dir", "extension": "", + "size": 0, "date": null, - "size": 18486, "sha1": null, "md5": null, - "files_count": 2, "mime_type": null, "file_type": null, "programming_language": null, @@ -72,6 +75,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 2, + "dirs_count": 1, + "size_count": 18486, "scan_errors": [] }, { @@ -80,11 +86,10 @@ "name": "e.tar", "base_name": "e", "extension": ".tar", - "date": "2015-06-19", "size": 10240, + "date": "2015-06-19", "sha1": "b655d4fc48e19910b9ab7a7102a8c051a39818f1", "md5": "393e789f4e4b2be93a46d0619380b445", - "files_count": null, "mime_type": "application/x-tar", "file_type": "POSIX tar archive (GNU)", "programming_language": null, @@ -94,6 +99,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -102,11 +110,10 @@ "name": "subdir", "base_name": "subdir", "extension": "", + "size": 0, "date": null, - "size": 8246, "sha1": null, "md5": null, - "files_count": 1, "mime_type": null, "file_type": null, "programming_language": null, @@ -116,6 +123,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 1, + "dirs_count": 0, + "size_count": 8246, "scan_errors": [] }, { @@ -124,11 +134,10 @@ "name": "a.aif", "base_name": "a", "extension": ".aif", - "date": "2015-06-19", "size": 8246, + "date": "2015-06-19", "sha1": "03cfd28bb49d1dab105dcce1663630be1ddd821a", "md5": "b2b073a64e4d568ce7b641c1857a7116", - "files_count": null, "mime_type": "audio/x-aiff", "file_type": "IFF data, AIFF audio", "programming_language": null, @@ -138,6 +147,9 @@ "is_media": true, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, 
"scan_errors": [] }, { @@ -146,11 +158,10 @@ "name": "dir2", "base_name": "dir2", "extension": "", + "size": 0, "date": null, - "size": 36457, "sha1": null, "md5": null, - "files_count": 2, "mime_type": null, "file_type": null, "programming_language": null, @@ -160,6 +171,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 2, + "dirs_count": 1, + "size_count": 36457, "scan_errors": [] }, { @@ -168,11 +182,10 @@ "name": "subdir", "base_name": "subdir", "extension": "", + "size": 0, "date": null, - "size": 36457, "sha1": null, "md5": null, - "files_count": 2, "mime_type": null, "file_type": null, "programming_language": null, @@ -182,6 +195,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 2, + "dirs_count": 0, + "size_count": 36457, "scan_errors": [] }, { @@ -190,11 +206,10 @@ "name": "bcopy.s", "base_name": "bcopy", "extension": ".s", - "date": "2015-06-19", "size": 32452, + "date": "2015-06-19", "sha1": "99f20eafc7b7e1c8e21bced55c8cdd05339bec77", "md5": "e1c66adaf6b8aa90e348668ac4869a61", - "files_count": null, "mime_type": "text/x-c", "file_type": "C source, ASCII text, with CRLF line terminators", "programming_language": "GAS", @@ -204,6 +219,9 @@ "is_media": false, "is_source": true, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -212,11 +230,10 @@ "name": "config.conf", "base_name": "config", "extension": ".conf", - "date": "2015-06-19", "size": 4005, + "date": "2015-06-19", "sha1": "4d567dc15d2117445389edfb64ced872329bceaa", "md5": "107dd38273ab10ce12058a3c8977e4ee", - "files_count": null, "mime_type": "text/plain", "file_type": "ASCII text, with CRLF line terminators", "programming_language": null, @@ -226,6 +243,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -234,11 +254,10 @@ "name": "main.c", "base_name": "main", "extension": ".c", - "date": 
"2015-06-19", "size": 1940, + "date": "2015-06-19", "sha1": "35017ed9762bdc9d16054a40ceefdda8b362083a", "md5": "8d0a3b3fe1c96a49af2a66040193291b", - "files_count": null, "mime_type": "text/x-c", "file_type": "C source, ASCII text", "programming_language": "C", @@ -248,6 +267,9 @@ "is_media": false, "is_source": true, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] } ] diff --git a/tests/scancode/data/info/basic.rooted.expected.json b/tests/scancode/data/info/basic.rooted.expected.json index 677472d2bb0..cbb8333b1d3 100644 --- a/tests/scancode/data/info/basic.rooted.expected.json +++ b/tests/scancode/data/info/basic.rooted.expected.json @@ -1,23 +1,46 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--info": true, - "--license-score": 0, - "--format": "json" + "--json": "" }, - "files_count": 11, + "files_count": 6, "files": [ + { + "path": "basic.tgz", + "type": "directory", + "name": "basic.tgz", + "base_name": "basic.tgz", + "extension": "", + "size": 0, + "date": null, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "files_count": 6, + "dirs_count": 5, + "size_count": 57066, + "scan_errors": [] + }, { "path": "basic.tgz/basic", "type": "directory", "name": "basic", "base_name": "basic", "extension": "", + "size": 0, "date": null, - "size": 57066, "sha1": null, "md5": null, - "files_count": 6, "mime_type": null, "file_type": null, "programming_language": null, @@ -27,6 +50,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 6, + "dirs_count": 4, + "size_count": 57066, "scan_errors": [] }, { @@ -35,11 +61,10 @@ "name": "dbase.fdt", "base_name": "dbase", "extension": ".fdt", - "date": "2015-06-19", "size": 183, + "date": "2015-06-19", "sha1": "94f059d6478d5f460e79b6b948a7fa9849d4dfdc", "md5": "15240737ec72b9e88b485a663bd045f9", - "files_count": null, "mime_type": "application/octet-stream", "file_type": "data", "programming_language": null, @@ -49,6 +74,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -57,11 +85,10 @@ "name": "dir", "base_name": "dir", "extension": "", + "size": 0, "date": null, - "size": 18486, "sha1": null, "md5": null, - "files_count": 2, "mime_type": null, "file_type": null, "programming_language": null, @@ -71,6 +98,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 2, + 
"dirs_count": 1, + "size_count": 18486, "scan_errors": [] }, { @@ -79,11 +109,10 @@ "name": "e.tar", "base_name": "e", "extension": ".tar", - "date": "2015-06-19", "size": 10240, + "date": "2015-06-19", "sha1": "b655d4fc48e19910b9ab7a7102a8c051a39818f1", "md5": "393e789f4e4b2be93a46d0619380b445", - "files_count": null, "mime_type": "application/x-tar", "file_type": "POSIX tar archive (GNU)", "programming_language": null, @@ -93,6 +122,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -101,11 +133,10 @@ "name": "subdir", "base_name": "subdir", "extension": "", + "size": 0, "date": null, - "size": 8246, "sha1": null, "md5": null, - "files_count": 1, "mime_type": null, "file_type": null, "programming_language": null, @@ -115,6 +146,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 1, + "dirs_count": 0, + "size_count": 8246, "scan_errors": [] }, { @@ -123,11 +157,10 @@ "name": "a.aif", "base_name": "a", "extension": ".aif", - "date": "2015-06-19", "size": 8246, + "date": "2015-06-19", "sha1": "03cfd28bb49d1dab105dcce1663630be1ddd821a", "md5": "b2b073a64e4d568ce7b641c1857a7116", - "files_count": null, "mime_type": "audio/x-aiff", "file_type": "IFF data, AIFF audio", "programming_language": null, @@ -137,6 +170,9 @@ "is_media": true, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -145,11 +181,10 @@ "name": "dir2", "base_name": "dir2", "extension": "", + "size": 0, "date": null, - "size": 36457, "sha1": null, "md5": null, - "files_count": 2, "mime_type": null, "file_type": null, "programming_language": null, @@ -159,6 +194,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 2, + "dirs_count": 1, + "size_count": 36457, "scan_errors": [] }, { @@ -167,11 +205,10 @@ "name": "subdir", "base_name": "subdir", "extension": "", + "size": 0, "date": 
null, - "size": 36457, "sha1": null, "md5": null, - "files_count": 2, "mime_type": null, "file_type": null, "programming_language": null, @@ -181,6 +218,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 2, + "dirs_count": 0, + "size_count": 36457, "scan_errors": [] }, { @@ -189,11 +229,10 @@ "name": "bcopy.s", "base_name": "bcopy", "extension": ".s", - "date": "2015-06-19", "size": 32452, + "date": "2015-06-19", "sha1": "99f20eafc7b7e1c8e21bced55c8cdd05339bec77", "md5": "e1c66adaf6b8aa90e348668ac4869a61", - "files_count": null, "mime_type": "text/x-c", "file_type": "C source, ASCII text, with CRLF line terminators", "programming_language": "GAS", @@ -203,6 +242,9 @@ "is_media": false, "is_source": true, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -211,11 +253,10 @@ "name": "config.conf", "base_name": "config", "extension": ".conf", - "date": "2015-06-19", "size": 4005, + "date": "2015-06-19", "sha1": "4d567dc15d2117445389edfb64ced872329bceaa", "md5": "107dd38273ab10ce12058a3c8977e4ee", - "files_count": null, "mime_type": "text/plain", "file_type": "ASCII text, with CRLF line terminators", "programming_language": null, @@ -225,6 +266,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -233,11 +277,10 @@ "name": "main.c", "base_name": "main", "extension": ".c", - "date": "2015-06-19", "size": 1940, + "date": "2015-06-19", "sha1": "35017ed9762bdc9d16054a40ceefdda8b362083a", "md5": "8d0a3b3fe1c96a49af2a66040193291b", - "files_count": null, "mime_type": "text/x-c", "file_type": "C source, ASCII text", "programming_language": "C", @@ -247,6 +290,9 @@ "is_media": false, "is_source": true, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] } ] diff --git a/tests/scancode/data/info/email_url_info.expected.json 
b/tests/scancode/data/info/email_url_info.expected.json index 0a200dae894..bbc1ac28045 100644 --- a/tests/scancode/data/info/email_url_info.expected.json +++ b/tests/scancode/data/info/email_url_info.expected.json @@ -1,14 +1,14 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--email": true, - "--url": true, "--info": true, - "--license-score": 0, + "--json": "", "--strip-root": true, - "--format": "json" + "--url": true }, - "files_count": 11, + "files_count": 6, "files": [ { "path": "basic", @@ -16,11 +16,10 @@ "name": "basic", "base_name": "basic", "extension": "", + "size": 0, "date": null, - "size": 57066, "sha1": null, "md5": null, - "files_count": 6, "mime_type": null, "file_type": null, "programming_language": null, @@ -30,9 +29,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 6, + "dirs_count": 4, + "size_count": 57066, + "scan_errors": [] }, { "path": "basic/dbase.fdt", @@ -40,11 +42,10 @@ "name": "dbase.fdt", "base_name": "dbase", "extension": ".fdt", - "date": "2015-06-19", "size": 183, + "date": "2015-06-19", "sha1": "94f059d6478d5f460e79b6b948a7fa9849d4dfdc", "md5": "15240737ec72b9e88b485a663bd045f9", - "files_count": null, "mime_type": "application/octet-stream", "file_type": "data", "programming_language": null, @@ -54,9 +55,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": 
"basic/dir", @@ -64,11 +68,10 @@ "name": "dir", "base_name": "dir", "extension": "", + "size": 0, "date": null, - "size": 18486, "sha1": null, "md5": null, - "files_count": 2, "mime_type": null, "file_type": null, "programming_language": null, @@ -78,9 +81,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 2, + "dirs_count": 1, + "size_count": 18486, + "scan_errors": [] }, { "path": "basic/dir/e.tar", @@ -88,11 +94,10 @@ "name": "e.tar", "base_name": "e", "extension": ".tar", - "date": "2015-06-19", "size": 10240, + "date": "2015-06-19", "sha1": "b655d4fc48e19910b9ab7a7102a8c051a39818f1", "md5": "393e789f4e4b2be93a46d0619380b445", - "files_count": null, "mime_type": "application/x-tar", "file_type": "POSIX tar archive (GNU)", "programming_language": null, @@ -102,9 +107,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "basic/dir/subdir", @@ -112,11 +120,10 @@ "name": "subdir", "base_name": "subdir", "extension": "", + "size": 0, "date": null, - "size": 8246, "sha1": null, "md5": null, - "files_count": 1, "mime_type": null, "file_type": null, "programming_language": null, @@ -126,9 +133,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 1, + "dirs_count": 0, + "size_count": 8246, + "scan_errors": [] }, { "path": "basic/dir/subdir/a.aif", @@ -136,11 +146,10 @@ "name": "a.aif", "base_name": "a", "extension": ".aif", - "date": "2015-06-19", "size": 8246, + "date": "2015-06-19", "sha1": "03cfd28bb49d1dab105dcce1663630be1ddd821a", "md5": "b2b073a64e4d568ce7b641c1857a7116", - "files_count": null, "mime_type": "audio/x-aiff", "file_type": "IFF data, AIFF audio", "programming_language": null, @@ -150,9 +159,12 @@ "is_media": 
true, "is_source": false, "is_script": false, - "scan_errors": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "basic/dir2", @@ -160,11 +172,10 @@ "name": "dir2", "base_name": "dir2", "extension": "", + "size": 0, "date": null, - "size": 36457, "sha1": null, "md5": null, - "files_count": 2, "mime_type": null, "file_type": null, "programming_language": null, @@ -174,9 +185,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 2, + "dirs_count": 1, + "size_count": 36457, + "scan_errors": [] }, { "path": "basic/dir2/subdir", @@ -184,11 +198,10 @@ "name": "subdir", "base_name": "subdir", "extension": "", + "size": 0, "date": null, - "size": 36457, "sha1": null, "md5": null, - "files_count": 2, "mime_type": null, "file_type": null, "programming_language": null, @@ -198,9 +211,12 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 2, + "dirs_count": 0, + "size_count": 36457, + "scan_errors": [] }, { "path": "basic/dir2/subdir/bcopy.s", @@ -208,11 +224,10 @@ "name": "bcopy.s", "base_name": "bcopy", "extension": ".s", - "date": "2015-06-19", "size": 32452, + "date": "2015-06-19", "sha1": "99f20eafc7b7e1c8e21bced55c8cdd05339bec77", "md5": "e1c66adaf6b8aa90e348668ac4869a61", - "files_count": null, "mime_type": "text/x-c", "file_type": "C source, ASCII text, with CRLF line terminators", "programming_language": "GAS", @@ -222,7 +237,6 @@ "is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "emails": [ { "email": "ws@tools.de", @@ -236,7 +250,11 @@ "start_line": 17, "end_line": 17 } - ] + ], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "basic/dir2/subdir/config.conf", @@ -244,11 +262,10 @@ "name": "config.conf", "base_name": "config", 
"extension": ".conf", - "date": "2015-06-19", "size": 4005, + "date": "2015-06-19", "sha1": "4d567dc15d2117445389edfb64ced872329bceaa", "md5": "107dd38273ab10ce12058a3c8977e4ee", - "files_count": null, "mime_type": "text/plain", "file_type": "ASCII text, with CRLF line terminators", "programming_language": null, @@ -258,7 +275,6 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "emails": [], "urls": [ { @@ -266,7 +282,11 @@ "start_line": 2, "end_line": 2 } - ] + ], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "basic/main.c", @@ -274,11 +294,10 @@ "name": "main.c", "base_name": "main", "extension": ".c", - "date": "2015-06-19", "size": 1940, + "date": "2015-06-19", "sha1": "35017ed9762bdc9d16054a40ceefdda8b362083a", "md5": "8d0a3b3fe1c96a49af2a66040193291b", - "files_count": null, "mime_type": "text/x-c", "file_type": "C source, ASCII text", "programming_language": "C", @@ -288,7 +307,6 @@ "is_media": false, "is_source": true, "is_script": false, - "scan_errors": [], "emails": [ { "email": "j@w1.fi", @@ -296,7 +314,11 @@ "end_line": 3 } ], - "urls": [] + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] } ] } \ No newline at end of file diff --git a/tests/scancode/data/license_text/test.expected b/tests/scancode/data/license_text/test.expected index 73635377faa..12eda46c42e 100644 --- a/tests/scancode/data/license_text/test.expected +++ b/tests/scancode/data/license_text/test.expected @@ -1,17 +1,16 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", + "--json": "", "--license": true, - "--license-score": 0, "--license-text": true, - "--strip-root": true, - "--format": "json" + "--strip-root": true }, "files_count": 1, "files": [ { "path": "test.txt", - "scan_errors": [], "licenses": [ { "key": "lgpl-2.1", @@ -35,7 +34,8 @@ }, "matched_text": "license: LGPL-2.1" } - ] + ], + "scan_errors": [] } ] } \ No newline at end of file diff --git a/tests/scancode/data/mark_source/with_info.expected.json b/tests/scancode/data/mark_source/with_info.expected.json deleted file mode 100644 index 20479ac7cb1..00000000000 --- a/tests/scancode/data/mark_source/with_info.expected.json +++ /dev/null @@ -1,343 +0,0 @@ -{ - "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", - "scancode_version": "2.0.1.post147.63a9004c0.dirty.20170806064809", - "scancode_options": { - "--info": true, - "--license-score": 0, - "--format": "json", - "--mark-source": true - }, - "files_count": 15, - "files": [ - { - "path": "JGroups.tgz/JGroups", - "type": "directory", - "name": "JGroups", - "base_name": "JGroups", - "extension": "", - "date": null, - "size": 206642, - "sha1": null, - "md5": null, - "files_count": 12, - "mime_type": null, - "file_type": null, - "programming_language": null, - "is_binary": false, - "is_text": false, - "is_archive": false, - "is_media": false, - "is_source": false, - "is_script": false, - "scan_errors": [] - }, - { - "path": "JGroups.tgz/JGroups/src", - "type": "directory", - "name": "src", - "base_name": "src", - "extension": "", - "date": null, - "size": 152090, - "sha1": null, - "md5": null, - "files_count": 7, - "mime_type": null, - "file_type": null, - "programming_language": null, - "is_binary": false, - "is_text": false, - "is_archive": false, - "is_media": false, - "is_source": true, - "is_script": false, - "scan_errors": [] - }, - { - "path": "JGroups.tgz/JGroups/licenses", - "type": "directory", - "name": "licenses", - "base_name": "licenses", - "extension": "", - "date": null, - "size": 54552, - "sha1": null, - "md5": null, - "files_count": 5, - "mime_type": null, - "file_type": null, - "programming_language": null, - "is_binary": false, - "is_text": false, - "is_archive": false, - "is_media": false, - "is_source": false, - "is_script": false, - "scan_errors": [] - }, - { - "path": "JGroups.tgz/JGroups/src/S3_PING.java", - "type": "file", - "name": "S3_PING.java", - "base_name": "S3_PING", - "extension": ".java", - "date": "2017-08-05", - "size": 122528, - "sha1": "08dba9986f69719970ead3592dc565465164df0d", - "md5": "83d8324f37d0e3f120bc89865cf0bd39", - "files_count": null, - "mime_type": "text/plain", - "file_type": "ASCII 
text", - "programming_language": "Java", - "is_binary": false, - "is_text": true, - "is_archive": false, - "is_media": false, - "is_source": true, - "is_script": false, - "scan_errors": [] - }, - { - "path": "JGroups.tgz/JGroups/src/RouterStubManager.java", - "type": "file", - "name": "RouterStubManager.java", - "base_name": "RouterStubManager", - "extension": ".java", - "date": "2017-08-05", - "size": 8162, - "sha1": "eb419dc94cfe11ca318a3e743a7f9f080e70c751", - "md5": "20bee9631b7c82a45c250e095352aec7", - "files_count": null, - "mime_type": "text/plain", - "file_type": "ASCII text", - "programming_language": "Java", - "is_binary": false, - "is_text": true, - "is_archive": false, - "is_media": false, - "is_source": true, - "is_script": false, - "scan_errors": [] - }, - { - "path": "JGroups.tgz/JGroups/src/RouterStub.java", - "type": "file", - "name": "RouterStub.java", - "base_name": "RouterStub", - "extension": ".java", - "date": "2017-08-05", - "size": 9913, - "sha1": "c1f6818f8ee7bddcc9f444bc94c099729d716d52", - "md5": "eecfe23494acbcd8088c93bc1e83c7f2", - "files_count": null, - "mime_type": "text/plain", - "file_type": "ASCII text", - "programming_language": "Java", - "is_binary": false, - "is_text": true, - "is_archive": false, - "is_media": false, - "is_source": true, - "is_script": false, - "scan_errors": [] - }, - { - "path": "JGroups.tgz/JGroups/src/RATE_LIMITER.java", - "type": "file", - "name": "RATE_LIMITER.java", - "base_name": "RATE_LIMITER", - "extension": ".java", - "date": "2017-08-05", - "size": 3692, - "sha1": "a8087e5d50da3273536ebda9b87b77aa4ff55deb", - "md5": "4626bdbc48871b55513e1a12991c61a8", - "files_count": null, - "mime_type": "text/plain", - "file_type": "ASCII text", - "programming_language": "Java", - "is_binary": false, - "is_text": true, - "is_archive": false, - "is_media": false, - "is_source": true, - "is_script": false, - "scan_errors": [] - }, - { - "path": "JGroups.tgz/JGroups/src/ImmutableReference.java", - "type": "file", - 
"name": "ImmutableReference.java", - "base_name": "ImmutableReference", - "extension": ".java", - "date": "2017-08-05", - "size": 1838, - "sha1": "30f56b876d5576d9869e2c5c509b08db57110592", - "md5": "48ca3c72fb9a65c771a321222f118b88", - "files_count": null, - "mime_type": "text/plain", - "file_type": "ASCII text", - "programming_language": "Java", - "is_binary": false, - "is_text": true, - "is_archive": false, - "is_media": false, - "is_source": true, - "is_script": false, - "scan_errors": [] - }, - { - "path": "JGroups.tgz/JGroups/src/GuardedBy.java", - "type": "file", - "name": "GuardedBy.java", - "base_name": "GuardedBy", - "extension": ".java", - "date": "2017-08-05", - "size": 813, - "sha1": "981d67087e65e9a44957c026d4b10817cf77d966", - "md5": "c5064400f759d3e81771005051d17dc1", - "files_count": null, - "mime_type": "text/plain", - "file_type": "ASCII text", - "programming_language": "Java", - "is_binary": false, - "is_text": true, - "is_archive": false, - "is_media": false, - "is_source": true, - "is_script": false, - "scan_errors": [] - }, - { - "path": "JGroups.tgz/JGroups/src/FixedMembershipToken.java", - "type": "file", - "name": "FixedMembershipToken.java", - "base_name": "FixedMembershipToken", - "extension": ".java", - "date": "2017-08-05", - "size": 5144, - "sha1": "5901f73dcc78155a1a2c7b5663a3a11fba400b19", - "md5": "aca9640ec8beee21b098bcf8ecc91442", - "files_count": null, - "mime_type": "text/plain", - "file_type": "ASCII text", - "programming_language": "Java", - "is_binary": false, - "is_text": true, - "is_archive": false, - "is_media": false, - "is_source": true, - "is_script": false, - "scan_errors": [] - }, - { - "path": "JGroups.tgz/JGroups/licenses/lgpl.txt", - "type": "file", - "name": "lgpl.txt", - "base_name": "lgpl", - "extension": ".txt", - "date": "2017-08-05", - "size": 26934, - "sha1": "8f1a637d2e2ed1bdb9eb01a7dccb5c12cc0557e1", - "md5": "f14599a2f089f6ff8c97e2baa4e3d575", - "files_count": null, - "mime_type": "text/plain", - 
"file_type": "ASCII text, with CRLF line terminators", - "programming_language": null, - "is_binary": false, - "is_text": true, - "is_archive": false, - "is_media": false, - "is_source": false, - "is_script": false, - "scan_errors": [] - }, - { - "path": "JGroups.tgz/JGroups/licenses/cpl-1.0.txt", - "type": "file", - "name": "cpl-1.0.txt", - "base_name": "cpl-1.0", - "extension": ".txt", - "date": "2017-08-05", - "size": 11987, - "sha1": "681cf776bcd79752543d42490ec7ed22a29fd888", - "md5": "9a6d2c9ae73d59eb3dd38e3909750d14", - "files_count": null, - "mime_type": "text/plain", - "file_type": "ASCII text, with CRLF line terminators", - "programming_language": null, - "is_binary": false, - "is_text": true, - "is_archive": false, - "is_media": false, - "is_source": false, - "is_script": false, - "scan_errors": [] - }, - { - "path": "JGroups.tgz/JGroups/licenses/bouncycastle.txt", - "type": "file", - "name": "bouncycastle.txt", - "base_name": "bouncycastle", - "extension": ".txt", - "date": "2017-08-05", - "size": 1186, - "sha1": "74facb0e9a734479f9cd893b5be3fe1bf651b760", - "md5": "9fffd8de865a5705969f62b128381f85", - "files_count": null, - "mime_type": "text/plain", - "file_type": "ASCII text", - "programming_language": null, - "is_binary": false, - "is_text": true, - "is_archive": false, - "is_media": false, - "is_source": false, - "is_script": false, - "scan_errors": [] - }, - { - "path": "JGroups.tgz/JGroups/licenses/apache-2.0.txt", - "type": "file", - "name": "apache-2.0.txt", - "base_name": "apache-2.0", - "extension": ".txt", - "date": "2017-08-05", - "size": 11560, - "sha1": "47b573e3824cd5e02a1a3ae99e2735b49e0256e4", - "md5": "d273d63619c9aeaf15cdaf76422c4f87", - "files_count": null, - "mime_type": "text/plain", - "file_type": "ASCII text, with CRLF line terminators", - "programming_language": null, - "is_binary": false, - "is_text": true, - "is_archive": false, - "is_media": false, - "is_source": false, - "is_script": false, - "scan_errors": [] - }, - { - 
"path": "JGroups.tgz/JGroups/licenses/apache-1.1.txt", - "type": "file", - "name": "apache-1.1.txt", - "base_name": "apache-1.1", - "extension": ".txt", - "date": "2017-08-05", - "size": 2885, - "sha1": "6b5608d35c3e304532af43db8bbfc5947bef46a6", - "md5": "276982197c941f4cbf3d218546e17ae2", - "files_count": null, - "mime_type": "text/plain", - "file_type": "ASCII text, with CRLF line terminators", - "programming_language": null, - "is_binary": false, - "is_text": true, - "is_archive": false, - "is_media": false, - "is_source": false, - "is_script": false, - "scan_errors": [] - } - ] -} diff --git a/tests/scancode/data/mark_source/without_info.expected.json b/tests/scancode/data/mark_source/without_info.expected.json deleted file mode 100644 index 718179f0c2e..00000000000 --- a/tests/scancode/data/mark_source/without_info.expected.json +++ /dev/null @@ -1,540 +0,0 @@ -{ - "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", - "scancode_options": { - "--copyright": true, - "--license": true, - "--package": true, - "--license-score": 0, - "--format": "json", - "--mark-source": true - }, - "files_count": 15, - "files": [ - { - "path": "JGroups.tgz/JGroups", - "scan_errors": [], - "licenses": [], - "copyrights": [], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/licenses", - "scan_errors": [], - "licenses": [], - "copyrights": [], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/licenses/apache-1.1.txt", - "scan_errors": [], - "licenses": [ - { - "key": "apache-1.1", - "score": 100.0, - "short_name": "Apache 1.1", - "category": "Permissive", - "owner": "Apache Software Foundation", - "homepage_url": "http://www.apache.org/licenses/", - "text_url": "http://apache.org/licenses/LICENSE-1.1", - "reference_url": "https://enterprise.dejacode.com/urn/urn:dje:license:apache-1.1", - "spdx_license_key": "Apache-1.1", - "spdx_url": "https://spdx.org/licenses/Apache-1.1", - "start_line": 2, - "end_line": 56, - "matched_rule": { - "identifier": "apache-1.1.SPDX.RULE", - "license_choice": false, - "licenses": [ - "apache-1.1" - ] - } - } - ], - "copyrights": [ - { - "statements": [ - "Copyright (c) 2000 The Apache Software Foundation." - ], - "holders": [ - "The Apache Software Foundation." 
- ], - "authors": [], - "start_line": 4, - "end_line": 5 - }, - { - "statements": [], - "holders": [], - "authors": [ - "the Apache Software Foundation" - ], - "start_line": 20, - "end_line": 23 - } - ], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/licenses/apache-2.0.txt", - "scan_errors": [], - "licenses": [ - { - "key": "apache-2.0", - "score": 100.0, - "short_name": "Apache 2.0", - "category": "Permissive", - "owner": "Apache Software Foundation", - "homepage_url": "http://www.apache.org/licenses/", - "text_url": "http://www.apache.org/licenses/LICENSE-2.0", - "reference_url": "https://enterprise.dejacode.com/urn/urn:dje:license:apache-2.0", - "spdx_license_key": "Apache-2.0", - "spdx_url": "https://spdx.org/licenses/Apache-2.0", - "start_line": 2, - "end_line": 202, - "matched_rule": { - "identifier": "apache-2.0_easyeclipse.RULE", - "license_choice": false, - "licenses": [ - "apache-2.0" - ] - } - } - ], - "copyrights": [], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/licenses/bouncycastle.txt", - "scan_errors": [], - "licenses": [ - { - "key": "mit", - "score": 100.0, - "short_name": "MIT License", - "category": "Permissive", - "owner": "MIT", - "homepage_url": "http://opensource.org/licenses/mit-license.php", - "text_url": "http://opensource.org/licenses/mit-license.php", - "reference_url": "https://enterprise.dejacode.com/urn/urn:dje:license:mit", - "spdx_license_key": "MIT", - "spdx_url": "https://spdx.org/licenses/MIT", - "start_line": 7, - "end_line": 18, - "matched_rule": { - "identifier": "mit.LICENSE", - "license_choice": false, - "licenses": [ - "mit" - ] - } - } - ], - "copyrights": [ - { - "statements": [ - "Copyright (c) 2000 - 2006 The Legion Of The Bouncy Castle (http://www.bouncycastle.org)" - ], - "holders": [ - "Legion Of The Bouncy Castle (http://www.bouncycastle.org)" - ], - "authors": [], - "start_line": 5, - "end_line": 5 - } - ], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/licenses/cpl-1.0.txt", - 
"scan_errors": [], - "licenses": [ - { - "key": "cpl-1.0", - "score": 99.94, - "short_name": "CPL 1.0", - "category": "Copyleft Limited", - "owner": "IBM", - "homepage_url": "http://www.eclipse.org/legal/cpl-v10.html", - "text_url": "http://www.eclipse.org/legal/cpl-v10.html", - "reference_url": "https://enterprise.dejacode.com/urn/urn:dje:license:cpl-1.0", - "spdx_license_key": "CPL-1.0", - "spdx_url": "https://spdx.org/licenses/CPL-1.0", - "start_line": 1, - "end_line": 212, - "matched_rule": { - "identifier": "cpl-1.0.SPDX.RULE", - "license_choice": false, - "licenses": [ - "cpl-1.0" - ] - } - } - ], - "copyrights": [], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/licenses/lgpl.txt", - "scan_errors": [], - "licenses": [ - { - "key": "lgpl-2.1-plus", - "score": 100.0, - "short_name": "LGPL 2.1 or later", - "category": "Copyleft Limited", - "owner": "Free Software Foundation (FSF)", - "homepage_url": "http://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html", - "text_url": "http://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html", - "reference_url": "https://enterprise.dejacode.com/urn/urn:dje:license:lgpl-2.1-plus", - "spdx_license_key": "LGPL-2.1+", - "spdx_url": "https://spdx.org/licenses/LGPL-2.1", - "start_line": 1, - "end_line": 502, - "matched_rule": { - "identifier": "lgpl-2.1-plus_2.RULE", - "license_choice": false, - "licenses": [ - "lgpl-2.1-plus" - ] - } - } - ], - "copyrights": [ - { - "statements": [ - "Copyright (c) 1991, 1999 Free Software Foundation, Inc." - ], - "holders": [ - "Free Software Foundation, Inc." - ], - "authors": [], - "start_line": 4, - "end_line": 7 - }, - { - "statements": [ - "copyrighted by the Free Software Foundation" - ], - "holders": [ - "the Free Software Foundation" - ], - "authors": [], - "start_line": 427, - "end_line": 433 - }, - { - "statements": [], - "holders": [], - "authors": [ - "James Random Hacker." 
- ], - "start_line": 496, - "end_line": 497 - } - ], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/src", - "scan_errors": [], - "licenses": [], - "copyrights": [], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/src/FixedMembershipToken.java", - "scan_errors": [], - "licenses": [ - { - "key": "lgpl-2.1-plus", - "score": 100.0, - "short_name": "LGPL 2.1 or later", - "category": "Copyleft Limited", - "owner": "Free Software Foundation (FSF)", - "homepage_url": "http://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html", - "text_url": "http://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html", - "reference_url": "https://enterprise.dejacode.com/urn/urn:dje:license:lgpl-2.1-plus", - "spdx_license_key": "LGPL-2.1+", - "spdx_url": "https://spdx.org/licenses/LGPL-2.1", - "start_line": 7, - "end_line": 20, - "matched_rule": { - "identifier": "lgpl-2.1-plus_59.RULE", - "license_choice": false, - "licenses": [ - "lgpl-2.1-plus" - ] - } - } - ], - "copyrights": [ - { - "statements": [ - "Copyright 2005, JBoss Inc." - ], - "holders": [ - "JBoss Inc." 
- ], - "authors": [], - "start_line": 2, - "end_line": 5 - }, - { - "statements": [], - "holders": [], - "authors": [ - "Chris Mills (millsy@jboss.com)" - ], - "start_line": 51, - "end_line": 51 - } - ], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/src/GuardedBy.java", - "scan_errors": [], - "licenses": [ - { - "key": "cc-by-2.5", - "score": 70.0, - "short_name": "CC-BY-2.5", - "category": "Permissive", - "owner": "Creative Commons", - "homepage_url": "http://creativecommons.org/licenses/by/2.5/", - "text_url": "http://creativecommons.org/licenses/by/2.5/legalcode", - "reference_url": "https://enterprise.dejacode.com/urn/urn:dje:license:cc-by-2.5", - "spdx_license_key": "CC-BY-2.5", - "spdx_url": "https://spdx.org/licenses/CC-BY-2.5", - "start_line": 10, - "end_line": 11, - "matched_rule": { - "identifier": "cc-by-2.5_4.RULE", - "license_choice": false, - "licenses": [ - "cc-by-2.5" - ] - } - } - ], - "copyrights": [ - { - "statements": [ - "Copyright (c) 2005 Brian Goetz and Tim Peierls" - ], - "holders": [ - "Brian Goetz and Tim Peierls" - ], - "authors": [], - "start_line": 9, - "end_line": 12 - }, - { - "statements": [], - "holders": [], - "authors": [ - "Bela Ban" - ], - "start_line": 14, - "end_line": 17 - } - ], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/src/ImmutableReference.java", - "scan_errors": [], - "licenses": [ - { - "key": "lgpl-2.1-plus", - "score": 100.0, - "short_name": "LGPL 2.1 or later", - "category": "Copyleft Limited", - "owner": "Free Software Foundation (FSF)", - "homepage_url": "http://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html", - "text_url": "http://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html", - "reference_url": "https://enterprise.dejacode.com/urn/urn:dje:license:lgpl-2.1-plus", - "spdx_license_key": "LGPL-2.1+", - "spdx_url": "https://spdx.org/licenses/LGPL-2.1", - "start_line": 7, - "end_line": 20, - "matched_rule": { - "identifier": "lgpl-2.1-plus_59.RULE", - 
"license_choice": false, - "licenses": [ - "lgpl-2.1-plus" - ] - } - } - ], - "copyrights": [ - { - "statements": [ - "Copyright 2010, Red Hat, Inc." - ], - "holders": [ - "Red Hat, Inc." - ], - "authors": [], - "start_line": 2, - "end_line": 5 - }, - { - "statements": [], - "holders": [], - "authors": [ - "Brian Stansberry" - ], - "start_line": 29, - "end_line": 29 - } - ], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/src/RATE_LIMITER.java", - "scan_errors": [], - "licenses": [], - "copyrights": [ - { - "statements": [], - "holders": [], - "authors": [ - "Bela Ban" - ], - "start_line": 14, - "end_line": 17 - } - ], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/src/RouterStub.java", - "scan_errors": [], - "licenses": [], - "copyrights": [ - { - "statements": [], - "holders": [], - "authors": [ - "Bela Ban" - ], - "start_line": 22, - "end_line": 24 - } - ], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/src/RouterStubManager.java", - "scan_errors": [], - "licenses": [ - { - "key": "lgpl-2.1-plus", - "score": 100.0, - "short_name": "LGPL 2.1 or later", - "category": "Copyleft Limited", - "owner": "Free Software Foundation (FSF)", - "homepage_url": "http://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html", - "text_url": "http://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html", - "reference_url": "https://enterprise.dejacode.com/urn/urn:dje:license:lgpl-2.1-plus", - "spdx_license_key": "LGPL-2.1+", - "spdx_url": "https://spdx.org/licenses/LGPL-2.1", - "start_line": 7, - "end_line": 20, - "matched_rule": { - "identifier": "lgpl-2.1-plus_59.RULE", - "license_choice": false, - "licenses": [ - "lgpl-2.1-plus" - ] - } - } - ], - "copyrights": [ - { - "statements": [ - "Copyright 2009, Red Hat Middleware LLC" - ], - "holders": [ - "Red Hat Middleware LLC" - ], - "authors": [], - "start_line": 2, - "end_line": 5 - } - ], - "packages": [] - }, - { - "path": "JGroups.tgz/JGroups/src/S3_PING.java", - "scan_errors": [], - 
"licenses": [ - { - "key": "public-domain", - "score": 10.0, - "short_name": "Public Domain", - "category": "Public Domain", - "owner": "Unspecified", - "homepage_url": "http://www.linfo.org/publicdomain.html", - "text_url": "", - "reference_url": "https://enterprise.dejacode.com/urn/urn:dje:license:public-domain", - "spdx_license_key": "", - "spdx_url": "", - "start_line": 1649, - "end_line": 1649, - "matched_rule": { - "identifier": "public-domain.LICENSE", - "license_choice": false, - "licenses": [ - "public-domain" - ] - } - }, - { - "key": "public-domain", - "score": 10.0, - "short_name": "Public Domain", - "category": "Public Domain", - "owner": "Unspecified", - "homepage_url": "http://www.linfo.org/publicdomain.html", - "text_url": "", - "reference_url": "https://enterprise.dejacode.com/urn/urn:dje:license:public-domain", - "spdx_license_key": "", - "spdx_url": "", - "start_line": 1692, - "end_line": 1692, - "matched_rule": { - "identifier": "public-domain.LICENSE", - "license_choice": false, - "licenses": [ - "public-domain" - ] - } - } - ], - "copyrights": [ - { - "statements": [], - "holders": [], - "authors": [ - "Bela Ban" - ], - "start_line": 35, - "end_line": 38 - }, - { - "statements": [], - "holders": [], - "authors": [ - "Robert Harder", - "rob@iharder.net" - ], - "start_line": 1697, - "end_line": 1700 - } - ], - "packages": [] - } - ] -} \ No newline at end of file diff --git a/tests/scancode/data/non_utf8/expected-linux.json b/tests/scancode/data/non_utf8/expected-linux.json index b6ab979a358..1c936f53042 100644 --- a/tests/scancode/data/non_utf8/expected-linux.json +++ b/tests/scancode/data/non_utf8/expected-linux.json @@ -1,12 +1,12 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. 
Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--info": true, - "--license-score": 0, - "--strip-root": true, - "--format": "json" + "--json": "", + "--strip-root": true }, - "files_count": 19, + "files_count": 18, "files": [ { "path": "non_unicode", @@ -14,11 +14,10 @@ "name": "non_unicode", "base_name": "non_unicode", "extension": "", - "date": null, "size": 0, + "date": null, "sha1": null, "md5": null, - "files_count": 18, "mime_type": null, "file_type": null, "programming_language": null, @@ -28,6 +27,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 18, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -36,11 +38,10 @@ "name": "foo\udcb1bar", "base_name": "foo\udcb1bar", "extension": "", - "date": "2017-07-14", "size": 0, + "date": "2017-07-14", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -50,6 +51,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -58,11 +62,10 @@ "name": "non_ascii_-\u00e0\u00f2\u0258\u0141\u011f", "base_name": "non_ascii_-\u00e0\u00f2\u0258\u0141\u011f", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -72,6 +75,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -80,11 +86,10 @@ "name": "non_ascii_10_\u0e01", "base_name": "non_ascii_10_\u0e01", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, 
"mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -94,6 +99,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -102,11 +110,10 @@ "name": "non_ascii_11_\u00a0", "base_name": "non_ascii_11_\u00a0", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -116,6 +123,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -124,11 +134,10 @@ "name": "non_ascii_12_\u20ac", "base_name": "non_ascii_12_\u20ac", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -138,6 +147,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -146,11 +158,10 @@ "name": "non_ascii_2_\u00e6", "base_name": "non_ascii_2_\u00e6", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -160,6 +171,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -168,11 +182,10 @@ "name": "non_ascii_3_\u0130", "base_name": "non_ascii_3_\u0130", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -182,6 +195,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 
0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -190,11 +206,10 @@ "name": "non_ascii_4_\u0141", "base_name": "non_ascii_4_\u0141", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -204,6 +219,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -212,11 +230,10 @@ "name": "non_ascii_5_\u03c6", "base_name": "non_ascii_5_\u03c6", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -226,6 +243,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -234,11 +254,10 @@ "name": "non_ascii_6_\u041a", "base_name": "non_ascii_6_\u041a", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -248,6 +267,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -256,11 +278,10 @@ "name": "non_ascii_7_\u05d0", "base_name": "non_ascii_7_\u05d0", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -270,6 +291,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -278,11 +302,10 @@ "name": "non_ascii_8_\u060c", "base_name": "non_ascii_8_\u060c", "extension": "", - "date": 
"2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -292,6 +315,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -300,11 +326,10 @@ "name": "non_ascii_9_\u062a", "base_name": "non_ascii_9_\u062a", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -314,6 +339,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -322,11 +350,10 @@ "name": "non_cp12_decodable_\udc81\udc98", "base_name": "non_cp12_decodable_\udc81\udc98", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -336,6 +363,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -344,11 +374,10 @@ "name": "non_cp932_decodable_\udce7w\udcf0", "base_name": "non_cp932_decodable_\udce7w\udcf0", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -358,6 +387,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -366,11 +398,10 @@ "name": "non_utf8_decodable_2_\udced\udcb2\udc80", "base_name": "non_utf8_decodable_2_\udced\udcb2\udc80", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - 
"files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -380,6 +411,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -388,11 +422,10 @@ "name": "non_utf8_decodable_3_\udced\udcb4\udc80", "base_name": "non_utf8_decodable_3_\udced\udcb4\udc80", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -402,6 +435,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] }, { @@ -410,11 +446,10 @@ "name": "non_utf8_decodable_\udcff", "base_name": "non_utf8_decodable_\udcff", "extension": "", - "date": "2017-08-27", "size": 0, + "date": "2017-08-27", "sha1": null, "md5": null, - "files_count": null, "mime_type": "inode/x-empty", "file_type": "empty", "programming_language": null, @@ -424,6 +459,9 @@ "is_media": false, "is_source": false, "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "scan_errors": [] } ] diff --git a/tests/scancode/data/non_utf8/expected-mac.json b/tests/scancode/data/non_utf8/expected-mac.json index 9ea7d26c4ef..6dbfabdabeb 100644 --- a/tests/scancode/data/non_utf8/expected-mac.json +++ b/tests/scancode/data/non_utf8/expected-mac.json @@ -1,34 +1,36 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--info": true, - "--format": "json", - "--license-score": 0, + "--json": "", "--strip-root": true }, - "files_count": 19, + "files_count": 18, "files": [ { "path": "non_unicode", + "type": "directory", + "name": "non_unicode", "base_name": "non_unicode", - "date": null, "extension": "", - "file_type": null, + "date": null, + "size": 0, + "sha1": null, + "md5": null, "files_count": 18, - "is_archive": false, + "dirs_count": 0, + "size_count": 0, + "mime_type": null, + "file_type": null, + "programming_language": null, "is_binary": false, + "is_text": false, + "is_archive": false, "is_media": false, - "is_script": false, "is_source": false, - "is_text": false, - "md5": null, - "mime_type": null, - "name": "non_unicode", - "programming_language": null, - "scan_errors": [], - "sha1": null, - "size": 0, - "type": "directory" + "is_script": false, + "scan_errors": [] }, { "path": "non_unicode/foo%B1bar", @@ -36,7 +38,9 @@ "date": "2017-07-14", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -57,7 +61,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -79,7 +85,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -101,7 +109,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -123,7 +133,9 @@ "date": "2017-08-27", "extension": "", "file_type": 
"empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -145,7 +157,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -167,7 +181,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -189,7 +205,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -211,7 +229,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -233,7 +253,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -255,7 +277,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -277,7 +301,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -299,7 +325,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -321,7 +349,9 @@ "date": "2017-08-27", "extension": "", "file_type": 
"empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -343,7 +373,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -365,7 +397,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -387,7 +421,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -409,7 +445,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, diff --git a/tests/scancode/data/non_utf8/expected-win.json b/tests/scancode/data/non_utf8/expected-win.json index b5e12bcd430..7b58f6536d0 100644 --- a/tests/scancode/data/non_utf8/expected-win.json +++ b/tests/scancode/data/non_utf8/expected-win.json @@ -1,12 +1,12 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { - "--format": "json", + "input": "", "--info": true, - "--license-score": 0, + "--json": "", "--strip-root": true }, - "files_count": 19, + "files_count": 18, "files": [ { "path": "non_unicode", @@ -15,6 +15,8 @@ "extension": "", "file_type": null, "files_count": 18, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -36,7 +38,9 @@ "date": "2017-07-14", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -57,7 +61,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -79,7 +85,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -101,7 +109,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -123,7 +133,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -145,7 +157,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -167,7 +181,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": 
false, "is_media": false, @@ -189,7 +205,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -211,7 +229,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -233,7 +253,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -255,7 +277,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -277,7 +301,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -299,7 +325,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -321,7 +349,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -343,7 +373,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -365,7 +397,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": 
false, "is_media": false, @@ -387,7 +421,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, @@ -409,7 +445,9 @@ "date": "2017-08-27", "extension": "", "file_type": "empty", - "files_count": null, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, "is_archive": false, "is_binary": false, "is_media": false, diff --git a/tests/scancode/data/ignore/user.tgz b/tests/scancode/data/plugin_ignore/user.tgz similarity index 100% rename from tests/scancode/data/ignore/user.tgz rename to tests/scancode/data/plugin_ignore/user.tgz diff --git a/tests/scancode/data/ignore/vcs.tgz b/tests/scancode/data/plugin_ignore/vcs.tgz similarity index 100% rename from tests/scancode/data/ignore/vcs.tgz rename to tests/scancode/data/plugin_ignore/vcs.tgz diff --git a/tests/scancode/data/plugin_license/license_url.expected.json b/tests/scancode/data/plugin_license/license_url.expected.json new file mode 100644 index 00000000000..8d8dcf7366d --- /dev/null +++ b/tests/scancode/data/plugin_license/license_url.expected.json @@ -0,0 +1,67 @@ +{ + "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", + "scancode_options": { + "input": "", + "--json-pp": "", + "--license": true, + "--license-url-template": "https://example.com/urn:{}" + }, + "files_count": 1, + "files": [ + { + "path": "license_url", + "licenses": [], + "scan_errors": [] + }, + { + "path": "license_url/apache-1.0.txt", + "licenses": [ + { + "key": "apache-1.0", + "score": 100.0, + "short_name": "Apache 1.0", + "category": "Permissive", + "owner": "Apache Software Foundation", + "homepage_url": "http://www.apache.org/licenses/", + "text_url": "http://www.apache.org/licenses/LICENSE-1.0", + "reference_url": "https://example.com/urn:apache-1.0", + "spdx_license_key": "Apache-1.0", + "spdx_url": "https://spdx.org/licenses/Apache-1.0", + "start_line": 2, + "end_line": 54, + "matched_rule": { + "identifier": "apache-1.0_group_template2.RULE", + "license_choice": false, + "licenses": [ + "apache-1.0", + "public-domain" + ] + } + }, + { + "key": "public-domain", + "score": 100.0, + "short_name": "Public Domain", + "category": "Public Domain", + "owner": "Unspecified", + "homepage_url": "http://www.linfo.org/publicdomain.html", + "text_url": "", + "reference_url": "https://example.com/urn:public-domain", + "spdx_license_key": "", + "spdx_url": "", + "start_line": 2, + "end_line": 54, + "matched_rule": { + "identifier": "apache-1.0_group_template2.RULE", + "license_choice": false, + "licenses": [ + "apache-1.0", + "public-domain" + ] + } + } + ], + "scan_errors": [] + } + ] +} \ No newline at end of file diff --git a/tests/scancode/data/license_url/apache-1.0.txt b/tests/scancode/data/plugin_license/license_url/apache-1.0.txt similarity index 100% rename from tests/scancode/data/license_url/apache-1.0.txt rename to tests/scancode/data/plugin_license/license_url/apache-1.0.txt diff --git a/tests/scancode/data/mark_source/JGroups.tgz b/tests/scancode/data/plugin_mark_source/JGroups.tgz similarity index 100% rename from 
tests/scancode/data/mark_source/JGroups.tgz rename to tests/scancode/data/plugin_mark_source/JGroups.tgz diff --git a/tests/scancode/data/plugin_mark_source/with_info.expected.json b/tests/scancode/data/plugin_mark_source/with_info.expected.json new file mode 100644 index 00000000000..ab0b6745412 --- /dev/null +++ b/tests/scancode/data/plugin_mark_source/with_info.expected.json @@ -0,0 +1,412 @@ +{ + "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", + "scancode_options": { + "input": "", + "--info": true, + "--json": "", + "--mark-source": true + }, + "files_count": 12, + "files": [ + { + "path": "JGroups.tgz", + "type": "directory", + "name": "JGroups.tgz", + "base_name": "JGroups.tgz", + "extension": "", + "size": 0, + "date": null, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "source_count": 0, + "files_count": 12, + "dirs_count": 3, + "size_count": 206642, + "scan_errors": [] + }, + { + "path": "JGroups.tgz/JGroups", + "type": "directory", + "name": "JGroups", + "base_name": "JGroups", + "extension": "", + "size": 0, + "date": null, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "source_count": 0, + "files_count": 12, + "dirs_count": 2, + "size_count": 206642, + "scan_errors": [] + }, + { + "path": 
"JGroups.tgz/JGroups/licenses", + "type": "directory", + "name": "licenses", + "base_name": "licenses", + "extension": "", + "size": 0, + "date": null, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "source_count": 0, + "files_count": 5, + "dirs_count": 0, + "size_count": 54552, + "scan_errors": [] + }, + { + "path": "JGroups.tgz/JGroups/licenses/apache-1.1.txt", + "type": "file", + "name": "apache-1.1.txt", + "base_name": "apache-1.1", + "extension": ".txt", + "size": 2885, + "date": "2017-08-05", + "sha1": "6b5608d35c3e304532af43db8bbfc5947bef46a6", + "md5": "276982197c941f4cbf3d218546e17ae2", + "mime_type": "text/plain", + "file_type": "ASCII text, with CRLF line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "source_count": 0, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "JGroups.tgz/JGroups/licenses/apache-2.0.txt", + "type": "file", + "name": "apache-2.0.txt", + "base_name": "apache-2.0", + "extension": ".txt", + "size": 11560, + "date": "2017-08-05", + "sha1": "47b573e3824cd5e02a1a3ae99e2735b49e0256e4", + "md5": "d273d63619c9aeaf15cdaf76422c4f87", + "mime_type": "text/plain", + "file_type": "ASCII text, with CRLF line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "source_count": 0, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "JGroups.tgz/JGroups/licenses/bouncycastle.txt", + "type": "file", + "name": "bouncycastle.txt", + "base_name": "bouncycastle", + "extension": ".txt", + "size": 1186, + "date": "2017-08-05", 
+ "sha1": "74facb0e9a734479f9cd893b5be3fe1bf651b760", + "md5": "9fffd8de865a5705969f62b128381f85", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "source_count": 0, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "JGroups.tgz/JGroups/licenses/cpl-1.0.txt", + "type": "file", + "name": "cpl-1.0.txt", + "base_name": "cpl-1.0", + "extension": ".txt", + "size": 11987, + "date": "2017-08-05", + "sha1": "681cf776bcd79752543d42490ec7ed22a29fd888", + "md5": "9a6d2c9ae73d59eb3dd38e3909750d14", + "mime_type": "text/plain", + "file_type": "ASCII text, with CRLF line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "source_count": 0, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "JGroups.tgz/JGroups/licenses/lgpl.txt", + "type": "file", + "name": "lgpl.txt", + "base_name": "lgpl", + "extension": ".txt", + "size": 26934, + "date": "2017-08-05", + "sha1": "8f1a637d2e2ed1bdb9eb01a7dccb5c12cc0557e1", + "md5": "f14599a2f089f6ff8c97e2baa4e3d575", + "mime_type": "text/plain", + "file_type": "ASCII text, with CRLF line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "source_count": 0, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "JGroups.tgz/JGroups/src", + "type": "directory", + "name": "src", + "base_name": "src", + "extension": "", + "size": 0, + "date": null, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + 
"is_archive": false, + "is_media": false, + "is_source": true, + "is_script": false, + "source_count": 7, + "files_count": 7, + "dirs_count": 0, + "size_count": 152090, + "scan_errors": [] + }, + { + "path": "JGroups.tgz/JGroups/src/FixedMembershipToken.java", + "type": "file", + "name": "FixedMembershipToken.java", + "base_name": "FixedMembershipToken", + "extension": ".java", + "size": 5144, + "date": "2017-08-05", + "sha1": "5901f73dcc78155a1a2c7b5663a3a11fba400b19", + "md5": "aca9640ec8beee21b098bcf8ecc91442", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": "Java", + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": true, + "is_script": false, + "source_count": 0, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "JGroups.tgz/JGroups/src/GuardedBy.java", + "type": "file", + "name": "GuardedBy.java", + "base_name": "GuardedBy", + "extension": ".java", + "size": 813, + "date": "2017-08-05", + "sha1": "981d67087e65e9a44957c026d4b10817cf77d966", + "md5": "c5064400f759d3e81771005051d17dc1", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": "Java", + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": true, + "is_script": false, + "source_count": 0, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "JGroups.tgz/JGroups/src/ImmutableReference.java", + "type": "file", + "name": "ImmutableReference.java", + "base_name": "ImmutableReference", + "extension": ".java", + "size": 1838, + "date": "2017-08-05", + "sha1": "30f56b876d5576d9869e2c5c509b08db57110592", + "md5": "48ca3c72fb9a65c771a321222f118b88", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": "Java", + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": true, + "is_script": false, 
+ "source_count": 0, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "JGroups.tgz/JGroups/src/RATE_LIMITER.java", + "type": "file", + "name": "RATE_LIMITER.java", + "base_name": "RATE_LIMITER", + "extension": ".java", + "size": 3692, + "date": "2017-08-05", + "sha1": "a8087e5d50da3273536ebda9b87b77aa4ff55deb", + "md5": "4626bdbc48871b55513e1a12991c61a8", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": "Java", + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": true, + "is_script": false, + "source_count": 0, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "JGroups.tgz/JGroups/src/RouterStub.java", + "type": "file", + "name": "RouterStub.java", + "base_name": "RouterStub", + "extension": ".java", + "size": 9913, + "date": "2017-08-05", + "sha1": "c1f6818f8ee7bddcc9f444bc94c099729d716d52", + "md5": "eecfe23494acbcd8088c93bc1e83c7f2", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": "Java", + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": true, + "is_script": false, + "source_count": 0, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "JGroups.tgz/JGroups/src/RouterStubManager.java", + "type": "file", + "name": "RouterStubManager.java", + "base_name": "RouterStubManager", + "extension": ".java", + "size": 8162, + "date": "2017-08-05", + "sha1": "eb419dc94cfe11ca318a3e743a7f9f080e70c751", + "md5": "20bee9631b7c82a45c250e095352aec7", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": "Java", + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": true, + "is_script": false, + "source_count": 0, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + 
"path": "JGroups.tgz/JGroups/src/S3_PING.java", + "type": "file", + "name": "S3_PING.java", + "base_name": "S3_PING", + "extension": ".java", + "size": 122528, + "date": "2017-08-05", + "sha1": "08dba9986f69719970ead3592dc565465164df0d", + "md5": "83d8324f37d0e3f120bc89865cf0bd39", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": "Java", + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": true, + "is_script": false, + "source_count": 0, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + } + ] +} \ No newline at end of file diff --git a/tests/scancode/data/plugin_only_findings/basic.tgz b/tests/scancode/data/plugin_only_findings/basic.tgz new file mode 100644 index 00000000000..e9a24f937b4 Binary files /dev/null and b/tests/scancode/data/plugin_only_findings/basic.tgz differ diff --git a/tests/scancode/data/plugin_only_findings/errors.expected.json b/tests/scancode/data/plugin_only_findings/errors.expected.json new file mode 100644 index 00000000000..224f1684a03 --- /dev/null +++ b/tests/scancode/data/plugin_only_findings/errors.expected.json @@ -0,0 +1,39 @@ +{ + "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", + "scancode_options": { + "input": "", + "--info": true, + "--json-pp": "", + "--only-findings": true, + "--package": true + }, + "files_count": 1, + "files": [ + { + "path": "errors/package.json", + "type": "file", + "name": "package.json", + "base_name": "package", + "extension": ".json", + "size": 2264, + "sha1": "a749017a2d1b53aeb780dc0f66292e37bb6c0d25", + "md5": "ed579407b7aa99bcf4b12e1a6ea1c4ae", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": "JSON", + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": true, + "is_script": false, + "packages": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [ + "ERROR: for scanner: packages:\nValueError: Expecting ':' delimiter: line 5 column 12 (char 143)\n" + ] + } + ] +} \ No newline at end of file diff --git a/tests/scancode/data/plugin_only_findings/errors/illegal.pom.xml b/tests/scancode/data/plugin_only_findings/errors/illegal.pom.xml new file mode 100644 index 00000000000..c9ad800ffce --- /dev/null +++ b/tests/scancode/data/plugin_only_findings/errors/illegal.pom.xml @@ -0,0 +1,21 @@ + + + 4.0.0 + + + + net.bytebuddy + byte-buddy-maven-plugin + + + + net.bytebuddy.test.IllegalTransformPlugin + + + + + + + diff --git a/tests/scancode/data/plugin_only_findings/errors/origin.ABOUT b/tests/scancode/data/plugin_only_findings/errors/origin.ABOUT new file mode 100644 index 00000000000..a583e2a11c5 --- /dev/null +++ b/tests/scancode/data/plugin_only_findings/errors/origin.ABOUT @@ -0,0 +1,3 @@ +date: 2017-05-24 +notes: malformed test Maven POMs from byte-buddy/byte-buddy-maven-plugin/src/test/resources/net/bytebuddy/test/ +download_url: https://github.com/raphw/byte-buddy/archive/838148dd9b735651720094e59f7ce10c1fe7880f.zip diff --git a/tests/scancode/data/plugin_only_findings/errors/package.json 
b/tests/scancode/data/plugin_only_findings/errors/package.json new file mode 100644 index 00000000000..fa4a53965a6 --- /dev/null +++ b/tests/scancode/data/plugin_only_findings/errors/package.json @@ -0,0 +1,96 @@ +{ + "name": "async", + "description": "Higher-order functions and common patterns for asynchronous code", + "main": "lib/async.js", + "author" { + "name": "Caolan McMahon" + }, + "version": "1.2.1", + "keywords": [ + "async", + "callback", + "utility", + "module" + ], + "repository": { + "type": "git", + "url": "git+https://github.com/caolan/async.git" + }, + "bugs": { + "url": "https://github.com/caolan/async/issues" + }, + "license": "MIT", + "devDependencies": { + "benchmark": "github:bestiejs/benchmark.js", + "coveralls": "^2.11.2", + "jshint": "~2.7.0", + "lodash": ">=2.4.1", + "mkdirp": "~0.5.1", + "nodeunit": ">0.0.0", + "nyc": "^2.1.0", + "uglify-js": "1.2.x", + "yargs": "~3.9.1" + }, + "jam": { + "main": "lib/async.js", + "include": [ + "lib/async.js", + "README.md", + "LICENSE" + ], + "categories": [ + "Utilities" + ] + }, + "scripts": { + "test": "npm run-script lint && nodeunit test/test-async.js", + "lint": "jshint lib/*.js test/*.js perf/*.js", + "coverage": "nyc npm test && nyc report", + "coveralls": "nyc npm test && nyc report --reporter=text-lcov | coveralls" + }, + "spm": { + "main": "lib/async.js" + }, + "volo": { + "main": "lib/async.js", + "ignore": [ + "**/.*", + "node_modules", + "bower_components", + "test", + "tests" + ] + }, + "gitHead": "b66e85d1cca8c8056313253f22d18f571e7001d2", + "homepage": "https://github.com/caolan/async#readme", + "_id": "async@1.2.1", + "_shasum": "a4816a17cd5ff516dfa2c7698a453369b9790de0", + "_from": "async@*", + "_npmVersion": "2.9.0", + "_nodeVersion": "2.0.2", + "_npmUser": { + "name": "aearly", + "email": "alexander.early@gmail.com" + }, + "maintainers": [ + { + "name": "caolan", + "email": "caolan.mcmahon@gmail.com" + }, + { + "name": "beaugunderson", + "email": "beau@beaugunderson.com" + }, + { + 
"name": "aearly", + "email": "alexander.early@gmail.com" + } + ], + "dist": { + "shasum": "a4816a17cd5ff516dfa2c7698a453369b9790de0", + "tarball": "http://registry.npmjs.org/async/-/async-1.2.1.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/async/-/async-1.2.1.tgz", + "readme": "ERROR: No README data found!" +} diff --git a/tests/scancode/data/only_findings/expected.json b/tests/scancode/data/plugin_only_findings/expected.json similarity index 73% rename from tests/scancode/data/only_findings/expected.json rename to tests/scancode/data/plugin_only_findings/expected.json index 864a30631c0..3b9bbcdc4b9 100644 --- a/tests/scancode/data/only_findings/expected.json +++ b/tests/scancode/data/plugin_only_findings/expected.json @@ -1,19 +1,174 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", - "scancode_version": "2.0.1.post147.5eab12f53", "scancode_options": { + "input": "", "--copyright": true, + "--info": true, + "--json": "", "--license": true, - "--package": true, - "--license-score": 0, - "--format": "json", - "--only-findings": true + "--only-findings": true, + "--package": true }, "files_count": 3, "files": [ + { + "path": "basic.tgz/basic/dir/e.tar", + "type": "file", + "name": "e.tar", + "base_name": "e", + "extension": ".tar", + "size": 10240, + "sha1": "b655d4fc48e19910b9ab7a7102a8c051a39818f1", + "md5": "393e789f4e4b2be93a46d0619380b445", + "mime_type": "application/x-tar", + "file_type": "POSIX tar archive (GNU)", + "programming_language": null, + "is_binary": true, + "is_text": false, + "is_archive": true, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [ + { + "type": "plain tarball", + "name": null, + "version": null, + "primary_language": null, + "packaging": "archive", + "summary": null, + "description": null, + "payload_type": null, + "size": null, + "release_date": null, + "authors": [], + "maintainers": [], + "contributors": [], + "owners": [], + "packagers": [], + "distributors": [], + "vendors": [], + "keywords": [], + "keywords_doc_url": null, + "metafile_locations": [], + "metafile_urls": [], + "homepage_url": null, + "notes": null, + "download_urls": [], + "download_sha1": null, + "download_sha256": null, + "download_md5": null, + "bug_tracking_url": null, + "support_contacts": [], + "code_view_url": null, + "vcs_tool": null, + "vcs_repository": null, + "vcs_revision": null, + "copyright_top_level": null, + "copyrights": [], + "asserted_licenses": [], + "legal_file_locations": [], + "license_expression": null, + "license_texts": [], + "notice_texts": [], + "dependencies": {}, + "related_packages": [] + } + ], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + 
"scan_errors": [] + }, + { + "path": "basic.tgz/basic/dir2/subdir/bcopy.s", + "type": "file", + "name": "bcopy.s", + "base_name": "bcopy", + "extension": ".s", + "size": 32452, + "sha1": "99f20eafc7b7e1c8e21bced55c8cdd05339bec77", + "md5": "e1c66adaf6b8aa90e348668ac4869a61", + "mime_type": "text/x-c", + "file_type": "C source, ASCII text, with CRLF line terminators", + "programming_language": "GAS", + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": true, + "is_script": false, + "licenses": [ + { + "key": "bsd-original-uc", + "score": 100.0, + "short_name": "BSD-Original-UC", + "category": "Permissive", + "owner": "Regents of the University of California", + "homepage_url": "ftp://ftp.cs.berkeley.edu/pub/4bsd/README.Impt.License.Change", + "text_url": "http://www.xfree86.org/3.3.6/COPYRIGHT2.html", + "reference_url": "https://enterprise.dejacode.com/urn/urn:dje:license:bsd-original-uc", + "spdx_license_key": "BSD-4-Clause-UC", + "spdx_url": "https://spdx.org/licenses/BSD-4-Clause-UC", + "start_line": 25, + "end_line": 51, + "matched_rule": { + "identifier": "bsd-original-uc_4.RULE", + "license_choice": false, + "licenses": [ + "bsd-original-uc" + ] + } + } + ], + "copyrights": [ + { + "statements": [ + "Copyright (c) 1993 The Regents of the University of California." + ], + "holders": [ + "The Regents of the University of California." + ], + "authors": [], + "start_line": 22, + "end_line": 23 + }, + { + "statements": [], + "holders": [], + "authors": [ + "the University of California, Berkeley and its contributors." 
+ ], + "start_line": 34, + "end_line": 37 + } + ], + "packages": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, { "path": "basic.tgz/basic/main.c", - "scan_errors": [], + "type": "file", + "name": "main.c", + "base_name": "main", + "extension": ".c", + "size": 1940, + "sha1": "35017ed9762bdc9d16054a40ceefdda8b362083a", + "md5": "8d0a3b3fe1c96a49af2a66040193291b", + "mime_type": "text/x-c", + "file_type": "C source, ASCII text", + "programming_language": "C", + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": true, + "is_script": false, "licenses": [ { "key": "gpl-2.0", @@ -73,109 +228,11 @@ "end_line": 3 } ], - "packages": [] - }, - { - "path": "basic.tgz/basic/dir2/subdir/bcopy.s", - "scan_errors": [], - "licenses": [ - { - "key": "bsd-original-uc", - "score": 100.0, - "short_name": "BSD-Original-UC", - "category": "Permissive", - "owner": "Regents of the University of California", - "homepage_url": "ftp://ftp.cs.berkeley.edu/pub/4bsd/README.Impt.License.Change", - "text_url": "http://www.xfree86.org/3.3.6/COPYRIGHT2.html", - "reference_url": "https://enterprise.dejacode.com/urn/urn:dje:license:bsd-original-uc", - "spdx_license_key": "BSD-4-Clause-UC", - "spdx_url": "https://spdx.org/licenses/BSD-4-Clause-UC", - "start_line": 25, - "end_line": 51, - "matched_rule": { - "identifier": "bsd-original-uc_4.RULE", - "license_choice": false, - "licenses": [ - "bsd-original-uc" - ] - } - } - ], - "copyrights": [ - { - "statements": [ - "Copyright (c) 1993 The Regents of the University of California." - ], - "holders": [ - "The Regents of the University of California." - ], - "authors": [], - "start_line": 22, - "end_line": 23 - }, - { - "statements": [], - "holders": [], - "authors": [ - "the University of California, Berkeley and its contributors." 
- ], - "start_line": 34, - "end_line": 37 - } - ], - "packages": [] - }, - { - "path": "basic.tgz/basic/dir/e.tar", - "scan_errors": [], - "licenses": [], - "copyrights": [], - "packages": [ - { - "type": "plain tarball", - "name": null, - "version": null, - "primary_language": null, - "packaging": "archive", - "summary": null, - "description": null, - "payload_type": null, - "size": null, - "release_date": null, - "authors": [], - "maintainers": [], - "contributors": [], - "owners": [], - "packagers": [], - "distributors": [], - "vendors": [], - "keywords": [], - "keywords_doc_url": null, - "metafile_locations": [], - "metafile_urls": [], - "homepage_url": null, - "notes": null, - "download_urls": [], - "download_sha1": null, - "download_sha256": null, - "download_md5": null, - "bug_tracking_url": null, - "support_contacts": [], - "code_view_url": null, - "vcs_tool": null, - "vcs_repository": null, - "vcs_revision": null, - "copyright_top_level": null, - "copyrights": [], - "asserted_licenses": [], - "legal_file_locations": [], - "license_expression": null, - "license_texts": [], - "notice_texts": [], - "dependencies": {}, - "related_packages": [] - } - ] + "packages": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] } ] -} +} \ No newline at end of file diff --git a/tests/scancode/data/plugin_only_findings/info.expected.json b/tests/scancode/data/plugin_only_findings/info.expected.json new file mode 100644 index 00000000000..2072a2ada24 --- /dev/null +++ b/tests/scancode/data/plugin_only_findings/info.expected.json @@ -0,0 +1,11 @@ +{ + "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", + "scancode_options": { + "input": "", + "--info": true, + "--json": "", + "--only-findings": true + }, + "files_count": 0, + "files": [] +} \ No newline at end of file diff --git a/tests/scancode/data/cache/package/package.json b/tests/scancode/data/resource/cache/package/package.json similarity index 100% rename from tests/scancode/data/cache/package/package.json rename to tests/scancode/data/resource/cache/package/package.json diff --git a/tests/scancode/data/resource/cache2/abc b/tests/scancode/data/resource/cache2/abc new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/scancode/data/resource/cache2/dir/that b/tests/scancode/data/resource/cache2/dir/that new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/scancode/data/resource/cache2/dir/this b/tests/scancode/data/resource/cache2/dir/this new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/scancode/data/resource/cache2/et131x.h b/tests/scancode/data/resource/cache2/et131x.h new file mode 100644 index 00000000000..4ffb839292b --- /dev/null +++ b/tests/scancode/data/resource/cache2/et131x.h @@ -0,0 +1,47 @@ +/* Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * http://www.agere.com + * + * SOFTWARE LICENSE + * + * This software is provided subject to the following terms and conditions, + * which you should read carefully before using the software. Using this + * software indicates your acceptance of these terms and conditions. If you do + * not agree with these terms and conditions, do not use the software. + * + * Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * + * Redistribution and use in source or binary forms, with or without + * modifications, are permitted provided that the following conditions are met: + * + * . 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following Disclaimer as comments in the code as + * well as in the documentation and/or other materials provided with the + * distribution. + * + * . Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following Disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * . Neither the name of Agere Systems Inc. nor the names of the contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * Disclaimer + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY + * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN + * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. 
+ * + */ + diff --git a/tests/scancode/data/resource/cache2/other dir/file b/tests/scancode/data/resource/cache2/other dir/file new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/scancode/data/resource/codebase/abc b/tests/scancode/data/resource/codebase/abc new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/scancode/data/resource/codebase/dir/that b/tests/scancode/data/resource/codebase/dir/that new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/scancode/data/resource/codebase/dir/this b/tests/scancode/data/resource/codebase/dir/this new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/scancode/data/resource/codebase/et131x.h b/tests/scancode/data/resource/codebase/et131x.h new file mode 100644 index 00000000000..4ffb839292b --- /dev/null +++ b/tests/scancode/data/resource/codebase/et131x.h @@ -0,0 +1,47 @@ +/* Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * http://www.agere.com + * + * SOFTWARE LICENSE + * + * This software is provided subject to the following terms and conditions, + * which you should read carefully before using the software. Using this + * software indicates your acceptance of these terms and conditions. If you do + * not agree with these terms and conditions, do not use the software. + * + * Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * + * Redistribution and use in source or binary forms, with or without + * modifications, are permitted provided that the following conditions are met: + * + * . Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following Disclaimer as comments in the code as + * well as in the documentation and/or other materials provided with the + * distribution. + * + * . 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following Disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * . Neither the name of Agere Systems Inc. nor the names of the contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * Disclaimer + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY + * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN + * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + */ + diff --git a/tests/scancode/data/resource/codebase/other dir/file b/tests/scancode/data/resource/codebase/other dir/file new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/scancode/data/resource/samples/JGroups/EULA b/tests/scancode/data/resource/samples/JGroups/EULA new file mode 100644 index 00000000000..0dcb788ede5 --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/EULA @@ -0,0 +1,109 @@ +// $Id: EULA,v 1.1 2006/11/02 08:04:26 belaban Exp $ + +LICENSE AGREEMENT +JBOSS(r) + +This License Agreement governs the use of the Software Packages and any updates to the Software +Packages, regardless of the delivery mechanism. 
Each Software Package is a collective work +under U.S. Copyright Law. Subject to the following terms, Red Hat, Inc. ("Red Hat") grants to +the user ("Client") a license to the applicable collective work(s) pursuant to the +GNU Lesser General Public License v. 2.1 except for the following Software Packages: +(a) JBoss Portal Forums and JBoss Transactions JTS, each of which is licensed pursuant to the +GNU General Public License v.2; + +(b) JBoss Rules, which is licensed pursuant to the Apache License v.2.0; + +(c) an optional download for JBoss Cache for the Berkeley DB for Java database, which is licensed under the +(open source) Sleepycat License (if Client does not wish to use the open source version of this database, +it may purchase a license from Sleepycat Software); + +and (d) the BPEL extension for JBoss jBPM, which is licensed under the Common Public License v.1, +and, pursuant to the OASIS BPEL4WS standard, requires parties wishing to redistribute to enter various +royalty-free patent licenses. + +Each of the foregoing licenses is available at http://www.opensource.org/licenses/index.php. + +1. The Software. "Software Packages" refer to the various software modules that are created and made available +for distribution by the JBoss.org open source community at http://www.jboss.org. Each of the Software Packages +may be comprised of hundreds of software components. The end user license agreement for each component is located in +the component's source code. With the exception of certain image files identified in Section 2 below, +the license terms for the components permit Client to copy, modify, and redistribute the component, +in both source code and binary code forms. This agreement does not limit Client's rights under, +or grant Client rights that supersede, the license terms of any particular component. + +2. Intellectual Property Rights. The Software Packages are owned by Red Hat and others and are protected under copyright +and other laws. 
Title to the Software Packages and any component, or to any copy, modification, or merged portion shall +remain with the aforementioned, subject to the applicable license. The "JBoss" trademark, "Red Hat" trademark, the +individual Software Package trademarks, and the "Shadowman" logo are registered trademarks of Red Hat and its affiliates +in the U.S. and other countries. This agreement permits Client to distribute unmodified copies of the Software Packages +using the Red Hat trademarks that Red Hat has inserted in the Software Packages on the condition that Client follows Red Hat's +trademark guidelines for those trademarks located at http://www.redhat.com/about/corporate/trademark/. Client must abide by +these trademark guidelines when distributing the Software Packages, regardless of whether the Software Packages have been modified. +If Client modifies the Software Packages, then Client must replace all Red Hat trademarks and logos identified at +http://www.jboss.com/company/logos, unless a separate agreement with Red Hat is executed or other permission granted. +Merely deleting the files containing the Red Hat trademarks may corrupt the Software Packages. + +3. Limited Warranty. Except as specifically stated in this Paragraph 3 or a license for a particular +component, to the maximum extent permitted under applicable law, the Software Packages and the +components are provided and licensed "as is" without warranty of any kind, expressed or implied, +including the implied warranties of merchantability, non-infringement or fitness for a particular purpose. +Red Hat warrants that the media on which Software Packages may be furnished will be free from defects in +materials and manufacture under normal use for a period of 30 days from the date of delivery to Client. 
+Red Hat does not warrant that the functions contained in the Software Packages will meet Client's requirements +or that the operation of the Software Packages will be entirely error free or appear precisely as described +in the accompanying documentation. This warranty extends only to the party that purchases the Services +pertaining to the Software Packages from Red Hat or a Red Hat authorized distributor. + +4. Limitation of Remedies and Liability. To the maximum extent permitted by applicable law, the remedies +described below are accepted by Client as its only remedies. Red Hat's entire liability, and Client's +exclusive remedies, shall be: If the Software media is defective, Client may return it within 30 days of +delivery along with a copy of Client's payment receipt and Red Hat, at its option, will replace it or +refund the money paid by Client for the Software. To the maximum extent permitted by applicable law, +Red Hat or any Red Hat authorized dealer will not be liable to Client for any incidental or consequential +damages, including lost profits or lost savings arising out of the use or inability to use the Software, +even if Red Hat or such dealer has been advised of the possibility of such damages. In no event shall +Red Hat's liability under this agreement exceed the amount that Client paid to Red Hat under this +Agreement during the twelve months preceding the action. + +5. Export Control. As required by U.S. law, Client represents and warrants that it: +(a) understands that the Software Packages are subject to export controls under the +U.S. Commerce Department's Export Administration Regulations ("EAR"); + +(b) is not located in a prohibited destination country under the EAR or U.S. 
sanctions regulations +(currently Cuba, Iran, Iraq, Libya, North Korea, Sudan and Syria); + +(c) will not export, re-export, or transfer the Software Packages to any prohibited destination, entity, +or individual without the necessary export license(s) or authorizations(s) from the U.S. Government; + +(d) will not use or transfer the Software Packages for use in any sensitive nuclear, chemical or +biological weapons, or missile technology end-uses unless authorized by the U.S. Government by +regulation or specific license; + +(e) understands and agrees that if it is in the United States and exports or transfers the Software +Packages to eligible end users, it will, as required by EAR Section 740.17(e), submit semi-annual +reports to the Commerce Department's Bureau of Industry & Security (BIS), which include the name and +address (including country) of each transferee; + +and (f) understands that countries other than the United States may restrict the import, use, or +export of encryption products and that it shall be solely responsible for compliance with any such +import, use, or export restrictions. + +6. Third Party Programs. Red Hat may distribute third party software programs with the Software Packages +that are not part of the Software Packages and which Client must install separately. These third party +programs are subject to their own license terms. The license terms either accompany the programs or +can be viewed at http://www.redhat.com/licenses/. If Client does not agree to abide by the applicable +license terms for such programs, then Client may not install them. If Client wishes to install the programs +on more than one system or transfer the programs to another party, then Client must contact the licensor +of the programs. + +7. General. If any provision of this agreement is held to be unenforceable, that shall not affect the +enforceability of the remaining provisions. 
This License Agreement shall be governed by the laws of the +State of North Carolina and of the United States, without regard to any conflict of laws provisions, +except that the United Nations Convention on the International Sale of Goods shall not apply. + +Copyright 2006 Red Hat, Inc. All rights reserved. +"JBoss" and the JBoss logo are registered trademarks of Red Hat, Inc. +All other trademarks are the property of their respective owners. + + Page 1 of 1 18 October 2006 + diff --git a/tests/scancode/data/resource/samples/JGroups/LICENSE b/tests/scancode/data/resource/samples/JGroups/LICENSE new file mode 100644 index 00000000000..b1e3f5a2638 --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/LICENSE @@ -0,0 +1,504 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. 
+ + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. 
We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. 
In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. 
A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. 
You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + diff --git a/tests/scancode/data/resource/samples/JGroups/licenses/apache-1.1.txt b/tests/scancode/data/resource/samples/JGroups/licenses/apache-1.1.txt new file mode 100644 index 00000000000..dae2270c2c0 --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/licenses/apache-1.1.txt @@ -0,0 +1,58 @@ +/* ==================================================================== + * The Apache Software License, Version 1.1 + * + * Copyright (c) 2000 The Apache Software Foundation. All rights + * reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The end-user documentation included with the redistribution, + * if any, must include the following acknowledgment: + * "This product includes software developed by the + * Apache Software Foundation (http://www.apache.org/)." + * Alternately, this acknowledgment may appear in the software itself, + * if and wherever such third-party acknowledgments normally appear. + * + * 4. The names "Apache" and "Apache Software Foundation" must + * not be used to endorse or promote products derived from this + * software without prior written permission. For written + * permission, please contact apache@apache.org. + * + * 5. Products derived from this software may not be called "Apache", + * nor may "Apache" appear in their name, without prior written + * permission of the Apache Software Foundation. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * ==================================================================== + * + * This software consists of voluntary contributions made by many + * individuals on behalf of the Apache Software Foundation. For more + * information on the Apache Software Foundation, please see + * . + * + * Portions of this software are based upon public domain software + * originally written at the National Center for Supercomputing Applications, + * University of Illinois, Urbana-Champaign. + */ + diff --git a/tests/scancode/data/resource/samples/JGroups/licenses/apache-2.0.txt b/tests/scancode/data/resource/samples/JGroups/licenses/apache-2.0.txt new file mode 100644 index 00000000000..75b52484ea4 --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/licenses/apache-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/tests/scancode/data/resource/samples/JGroups/licenses/bouncycastle.txt b/tests/scancode/data/resource/samples/JGroups/licenses/bouncycastle.txt new file mode 100644 index 00000000000..3cf73c2f032 --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/licenses/bouncycastle.txt @@ -0,0 +1,18 @@ +// $Id: bouncycastle.txt,v 1.1 2006/07/07 16:09:48 belaban Exp $ + +License + +Copyright (c) 2000 - 2006 The Legion Of The Bouncy Castle (http://www.bouncycastle.org) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS +OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/tests/scancode/data/resource/samples/JGroups/licenses/cpl-1.0.txt b/tests/scancode/data/resource/samples/JGroups/licenses/cpl-1.0.txt new file mode 100644 index 00000000000..2243be15b29 --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/licenses/cpl-1.0.txt @@ -0,0 +1,213 @@ +Common Public License Version 1.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON PUBLIC +LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM +CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + + a) in the case of the initial Contributor, the initial code and documentation + distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + + i) changes to the Program, and + + ii) additions to the Program; + + where such changes and/or additions to the Program originate from and are + distributed by that particular Contributor. A Contribution 'originates' from + a Contributor if it was added to the Program by such Contributor itself or + anyone acting on such Contributor's behalf. Contributions do not include + additions to the Program which: (i) are separate modules of software + distributed in conjunction with the Program under their own license + agreement, and (ii) are not derivative works of the Program. + +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents " mean patent claims licensable by a Contributor which are +necessarily infringed by the use or sale of its Contribution alone or when +combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. 
+ +"Recipient" means anyone who receives the Program under this Agreement, including +all Contributors. + +2. GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby grants + Recipient a non-exclusive, worldwide, royalty-free copyright license to + reproduce, prepare derivative works of, publicly display, publicly perform, + distribute and sublicense the Contribution of such Contributor, if any, and + such derivative works, in source code and object code form. + + b) Subject to the terms of this Agreement, each Contributor hereby grants + Recipient a non-exclusive, worldwide, royalty-free patent license under + Licensed Patents to make, use, sell, offer to sell, import and otherwise + transfer the Contribution of such Contributor, if any, in source code and + object code form. This patent license shall apply to the combination of the + Contribution and the Program if, at the time the Contribution is added by + the Contributor, such addition of the Contribution causes such combination + to be covered by the Licensed Patents. The patent license shall not apply to + any other combinations which include the Contribution. No hardware per se is + licensed hereunder. + + c) Recipient understands that although each Contributor grants the licenses + to its Contributions set forth herein, no assurances are provided by any + Contributor that the Program does not infringe the patent or other + intellectual property rights of any other entity. Each Contributor disclaims + any liability to Recipient for claims brought by any other entity based on + infringement of intellectual property rights or otherwise. As a condition to + exercising the rights and licenses granted hereunder, each Recipient hereby + assumes sole responsibility to secure any other intellectual property rights + needed, if any. 
For example, if a third party patent license is required to + allow Recipient to distribute the Program, it is Recipient's responsibility + to acquire that license before distributing the Program. + + d) Each Contributor represents that to its knowledge it has sufficient + copyright rights in its Contribution, if any, to grant the copyright license + set forth in this Agreement. + +3. REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its +own license agreement, provided that: + + a) it complies with the terms and conditions of this Agreement; and + + b) its license agreement: + + i) effectively disclaims on behalf of all Contributors all warranties and + conditions, express and implied, including warranties or conditions of title + and non-infringement, and implied warranties or conditions of merchantability + and fitness for a particular purpose; + + ii) effectively excludes on behalf of all Contributors all liability for + damages, including direct, indirect, special, incidental and consequential + damages, such as lost profits; + + iii) states that any provisions which differ from this Agreement are offered + by that Contributor alone and not by any other party; and + + iv) states that source code for the Program is available from such Contributor, + and informs licensees how to obtain it in a reasonable manner on or through + a medium customarily used for software exchange. + +When the Program is made available in source code form: + + a) it must be made available under this Agreement; and + + b) a copy of this Agreement must be included with each copy of the Program. + +Contributors may not remove or alter any copyright notices contained within the Program. + +Each Contributor must identify itself as the originator of its Contribution, if +any, in a manner that reasonably allows subsequent Recipients to identify the +originator of the Contribution. + +4. 
COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with +respect to end users, business partners and the like. While this license is +intended to facilitate the commercial use of the Program, the Contributor who +includes the Program in a commercial product offering should do so in a manner +which does not create potential liability for other Contributors. Therefore, if +a Contributor includes the Program in a commercial product offering, such +Contributor ("Commercial Contributor") hereby agrees to defend and indemnify +every other Contributor ("Indemnified Contributor") against any losses, damages +and costs (collectively "Losses") arising from claims, lawsuits and other legal +actions brought by a third party against the Indemnified Contributor to the +extent caused by the acts or omissions of such Commercial Contributor in +connection with its distribution of the Program in a commercial product offering. +The obligations in this section do not apply to any claims or Losses relating to +any actual or alleged intellectual property infringement. In order to qualify, +an Indemnified Contributor must: a) promptly notify the Commercial Contributor +n writing of such claim, and b) allow the Commercial Contributor to control, +and cooperate with the Commercial Contributor in, the defense and any related +settlement negotiations. The Indemnified Contributor may participate in any such +claim at its own expense. + +For example, a Contributor might include the Program in a commercial product +offering, Product X. That Contributor is then a Commercial Contributor. If that +Commercial Contributor then makes performance claims, or offers warranties +related to Product X, those performance claims and warranties are such Commercial +Contributor's responsibility alone. 
Under this section, the Commercial +Contributor would have to defend claims against the other Contributors related +to those performance claims and warranties, and if a court requires any other +Contributor to pay any damages as a result, the Commercial Contributor must pay +those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR +IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each +Recipient is solely responsible for determining the appropriateness of using +and distributing the Program and assumes all risks associated with its exercise +of rights under this Agreement, including but not limited to the risks and costs +of program errors, compliance with applicable laws, damage to or loss of data, +programs or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY +CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST +PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS +GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable +law, it shall not affect the validity or enforceability of the remainder of the +terms of this Agreement, and without further action by the parties hereto, such +provision shall be reformed to the minimum extent necessary to make such +provision valid and enforceable. 
+ +If Recipient institutes patent litigation against a Contributor with respect to +a patent applicable to software (including a cross-claim or counterclaim in a +lawsuit), then any patent licenses granted by that Contributor to such Recipient +under this Agreement shall terminate as of the date such litigation is filed. +In addition, if Recipient institutes patent litigation against any entity +(including a cross-claim or counterclaim in a lawsuit) alleging that the Program +itself (excluding combinations of the Program with other software or hardware) +infringes such Recipient's patent(s), then such Recipient's rights granted under +Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to comply +with any of the material terms or conditions of this Agreement and does not cure +such failure in a reasonable period of time after becoming aware of such +noncompliance. If all Recipient's rights under this Agreement terminate, Recipient +agrees to cease use and distribution of the Program as soon as reasonably +practicable. However, Recipient's obligations under this Agreement and any +licenses granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in +order to avoid inconsistency the Agreement is copyrighted and may only be modified +in the following manner. The Agreement Steward reserves the right to publish new +versions (including revisions) of this Agreement from time to time. No one other +than the Agreement Steward has the right to modify this Agreement. IBM is the +initial Agreement Steward. IBM may assign the responsibility to serve as the +Agreement Steward to a suitable separate entity. Each new version of the Agreement +will be given a distinguishing version number. 
The Program (including Contributions) +may always be distributed subject to the version of the Agreement under which it +was received. In addition, after a new version of the Agreement is published, +Contributor may elect to distribute the Program (including its Contributions) +under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, +Recipient receives no rights or licenses to the intellectual property of any +Contributor under this Agreement, whether expressly, by implication, estoppel or +otherwise. All rights in the Program not expressly granted under this Agreement +are reserved. + +This Agreement is governed by the laws of the State of New York and the +intellectual property laws of the United States of America. No party to this +Agreement will bring a legal action under this Agreement more than one year after +the cause of action arose. Each party waives its rights to a jury trial in any +resulting litigation. + diff --git a/tests/scancode/data/resource/samples/JGroups/licenses/lgpl.txt b/tests/scancode/data/resource/samples/JGroups/licenses/lgpl.txt new file mode 100644 index 00000000000..cbee875ba6d --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/licenses/lgpl.txt @@ -0,0 +1,504 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. 
By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. 
+ + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. 
However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". 
+ + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. 
+ + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + diff --git a/tests/scancode/data/resource/samples/JGroups/src/FixedMembershipToken.java b/tests/scancode/data/resource/samples/JGroups/src/FixedMembershipToken.java new file mode 100644 index 00000000000..46cf578d6de --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/src/FixedMembershipToken.java @@ -0,0 +1,150 @@ +/* + * JBoss, Home of Professional Open Source + * Copyright 2005, JBoss Inc., and individual contributors as indicated + * by the @authors tag. 
See the copyright.txt in the distribution for a + * full listing of individual contributors. + * + * This is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * This software is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this software; if not, write to the Free + * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA, or see the FSF site: http://www.fsf.org. + */ +package org.jgroups.auth; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.StringTokenizer; + +import org.jgroups.Event; +import org.jgroups.Message; +import org.jgroups.PhysicalAddress; +import org.jgroups.annotations.Property; +import org.jgroups.util.Util; + +/** + *

+ * The FixedMemberShipToken object predefines a list of IP addresses and ports that can join the + * group. + *

+ *

+ * Configuration parameters for this example are shown below: + *

+ *
    + *
  • fixed_members_value (required) = List of IP addresses & ports (optionally) - ports must be + * seperated by a '/' e.g. 127.0.0.1/1010*127.0.0.1/4567
  • + *
  • fixed_members_seperator (required) = The seperator used between IP addresses - e.g. *
  • + *
+ * + * @author Chris Mills (millsy@jboss.com) + */ +public class FixedMembershipToken extends AuthToken { + private List memberList = null; + private String token = "emptyToken"; + + @Property + private String fixed_members_seperator = ","; + private static final long serialVersionUID = 4717069536900221681L; + + public FixedMembershipToken() { + } + + public String getName() { + return "org.jgroups.auth.FixedMembershipToken"; + } + + @Property + public void setFixedMembersSeparator(String value) { + fixed_members_seperator = value; + } + + public boolean authenticate(AuthToken token, Message msg) { + if ((token != null) && (token instanceof FixedMembershipToken) && (this.memberList != null)) { + PhysicalAddress src = (PhysicalAddress) auth.down(new Event(Event.GET_PHYSICAL_ADDRESS, + msg.getSrc())); + if (src == null) { + if (log.isErrorEnabled()) + log.error("didn't find physical address for " + msg.getSrc()); + return false; + } + + String sourceAddressWithPort = src.toString(); + String sourceAddressWithoutPort = sourceAddressWithPort.substring(0, + sourceAddressWithPort.indexOf(":")); + + if (log.isDebugEnabled()) { + log.debug("AUTHToken received from " + sourceAddressWithPort); + } + + for (String member : memberList) { + if (hasPort(member)) { + if (member.equals(sourceAddressWithPort)) + return true; + } else { + if (member.equals(sourceAddressWithoutPort)) + return true; + } + } + return false; + } + + if (log.isWarnEnabled()) { + log.warn("Invalid AuthToken instance - wrong type or null"); + } + return false; + } + + private static boolean hasPort(String member) { + return member.contains(":"); + } + + @Property(name = "fixed_members_value") + public void setMemberList(String list) { + memberList = new ArrayList(); + StringTokenizer memberListTokenizer = new StringTokenizer(list, fixed_members_seperator); + while (memberListTokenizer.hasMoreTokens()) { + memberList.add(memberListTokenizer.nextToken().replace('/', ':')); + } + } + + /** + * Required to 
serialize the object to pass across the wire + * + * @param out + * @throws java.io.IOException + */ + public void writeTo(DataOutputStream out) throws IOException { + if (log.isDebugEnabled()) { + log.debug("SimpleToken writeTo()"); + } + Util.writeString(this.token, out); + } + + /** + * Required to deserialize the object when read in from the wire + * + * @param in + * @throws IOException + * @throws IllegalAccessException + * @throws InstantiationException + */ + public void readFrom(DataInputStream in) throws IOException, IllegalAccessException, + InstantiationException { + if (log.isDebugEnabled()) { + log.debug("SimpleToken readFrom()"); + } + this.token = Util.readString(in); + } +} diff --git a/tests/scancode/data/resource/samples/JGroups/src/GuardedBy.java b/tests/scancode/data/resource/samples/JGroups/src/GuardedBy.java new file mode 100644 index 00000000000..6d9a9ec4a3f --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/src/GuardedBy.java @@ -0,0 +1,23 @@ +package org.jgroups.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Copyright (c) 2005 Brian Goetz and Tim Peierls + * Released under the Creative Commons Attribution License + * (http://creativecommons.org/licenses/by/2.5) + * Official home: http://www.jcip.net + * + * Adopted from Java Concurrency in Practice. This annotation defines the monitor that protects the variable + * annotated by @GuardedBy, e.g. 
@GuardedBy("lock") or @GuardedBy("this") + * @author Bela Ban + * @version $Id: GuardedBy.java,v 1.3 2007/02/27 14:49:40 belaban Exp $ + */ +@Target({ElementType.FIELD, ElementType.METHOD}) +@Retention(RetentionPolicy.SOURCE) +public @interface GuardedBy { + String value(); +} diff --git a/tests/scancode/data/resource/samples/JGroups/src/ImmutableReference.java b/tests/scancode/data/resource/samples/JGroups/src/ImmutableReference.java new file mode 100644 index 00000000000..50c720e0bf0 --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/src/ImmutableReference.java @@ -0,0 +1,55 @@ +/* + * JBoss, Home of Professional Open Source. + * Copyright 2010, Red Hat, Inc. and individual contributors + * as indicated by the @author tags. See the copyright.txt file in the + * distribution for a full listing of individual contributors. + * + * This is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * This software is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this software; if not, write to the Free + * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA, or see the FSF site: http://www.fsf.org. + */ + +package org.jgroups.util; + +/** + * Simple class that holds an immutable reference to another object (or to + * null). 
+ * + * @author Brian Stansberry + * + * @version $Id: ImmutableReference.java,v 1.1 2010/06/19 02:24:46 bstansberry Exp $ + */ +public class ImmutableReference { + + private final T referent; + + /** + * Create a new ImmutableReference. + * + * @param referent the object to refer to, or null + */ + public ImmutableReference(T referent) { + this.referent = referent; + } + + /** + * Gets the wrapped object, if there is one. + * + * @return the object passed to the constructor, or null if + * null was passed to the constructor + */ + public T get() { + return referent; + } +} diff --git a/tests/scancode/data/resource/samples/JGroups/src/RATE_LIMITER.java b/tests/scancode/data/resource/samples/JGroups/src/RATE_LIMITER.java new file mode 100644 index 00000000000..d0765aa5f29 --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/src/RATE_LIMITER.java @@ -0,0 +1,120 @@ +package org.jgroups.protocols; + +import org.jgroups.Event; +import org.jgroups.Message; +import org.jgroups.annotations.*; +import org.jgroups.stack.Protocol; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +/** + * Protocol which sends at most max_bytes in time_period milliseconds. Can be used instead of a flow control protocol, + * e.g. FC or SFC (same position in the stack) + * @author Bela Ban + * @version $Id: RATE_LIMITER.java,v 1.3 2009/12/11 13:08:03 belaban Exp $ + */ +@Experimental @Unsupported +public class RATE_LIMITER extends Protocol { + + @Property(description="Max number of bytes to be sent in time_period ms. 
Blocks the sender if exceeded until a new " + + "time period has started") + protected long max_bytes=500000; + + @Property(description="Number of milliseconds during which max_bytes bytes can be sent") + protected long time_period=1000L; + + + /** Keeps track of the number of bytes sent in the current time period */ + @GuardedBy("lock") + @ManagedAttribute + protected long num_bytes_sent=0L; + + @GuardedBy("lock") + protected long end_of_current_period=0L; + + protected final Lock lock=new ReentrantLock(); + protected final Condition block=lock.newCondition(); + + @ManagedAttribute + protected int num_blockings=0; + + @ManagedAttribute + protected long total_block_time=0L; + + + + public Object down(Event evt) { + if(evt.getType() == Event.MSG) { + Message msg=(Message)evt.getArg(); + int len=msg.getLength(); + + lock.lock(); + try { + if(len > max_bytes) { + log.error("message length (" + len + " bytes) exceeded max_bytes (" + max_bytes + "); " + + "adjusting max_bytes to " + len); + max_bytes=len; + } + + while(true) { + boolean size_exceeded=num_bytes_sent + len >= max_bytes, + time_exceeded=System.currentTimeMillis() > end_of_current_period; + if(!size_exceeded && !time_exceeded) + break; + + if(time_exceeded) { + reset(); + } + else { // size exceeded + long block_time=end_of_current_period - System.currentTimeMillis(); + if(block_time > 0) { + try { + block.await(block_time, TimeUnit.MILLISECONDS); + num_blockings++; + total_block_time+=block_time; + } + catch(InterruptedException e) { + } + } + } + } + } + finally { + num_bytes_sent+=len; + lock.unlock(); + } + + return down_prot.down(evt); + } + + return down_prot.down(evt); + } + + + public void init() throws Exception { + super.init(); + if(time_period <= 0) + throw new IllegalArgumentException("time_period needs to be positive"); + } + + public void stop() { + super.stop(); + reset(); + } + + protected void reset() { + lock.lock(); + try { + // blocking=false; + num_bytes_sent=0L; + 
end_of_current_period=System.currentTimeMillis() + time_period; + block.signalAll(); + } + finally { + lock.unlock(); + } + } +} diff --git a/tests/scancode/data/resource/samples/JGroups/src/RouterStub.java b/tests/scancode/data/resource/samples/JGroups/src/RouterStub.java new file mode 100644 index 00000000000..1e0b9f9ef4c --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/src/RouterStub.java @@ -0,0 +1,295 @@ +package org.jgroups.stack; + +import org.jgroups.Address; +import org.jgroups.PhysicalAddress; +import org.jgroups.logging.Log; +import org.jgroups.logging.LogFactory; +import org.jgroups.protocols.PingData; +import org.jgroups.protocols.TUNNEL.StubReceiver; +import org.jgroups.util.Util; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.SocketException; +import java.util.ArrayList; +import java.util.List; + +/** + * Client stub that talks to a remote GossipRouter + * @author Bela Ban + * @version $Id: RouterStub.java,v 1.62 2010/06/09 14:22:00 belaban Exp $ + */ +public class RouterStub { + + public static enum ConnectionStatus {INITIAL, CONNECTION_BROKEN, CONNECTION_ESTABLISHED, CONNECTED,DISCONNECTED}; + + private final String router_host; // name of the router host + + private final int router_port; // port on which router listens on + + private Socket sock=null; // socket connecting to the router + + private DataOutputStream output=null; + + private DataInputStream input=null; + + private volatile ConnectionStatus connectionState=ConnectionStatus.INITIAL; + + private static final Log log=LogFactory.getLog(RouterStub.class); + + private final ConnectionListener conn_listener; + + private final InetAddress bind_addr; + + private int sock_conn_timeout=3000; // max number of ms to wait for socket establishment to + // GossipRouter + + private int sock_read_timeout=3000; // max number of ms 
to wait for socket reads (0 means block + // forever, or until the sock is closed) + + private boolean tcp_nodelay=true; + + private StubReceiver receiver; + + public interface ConnectionListener { + void connectionStatusChange(RouterStub stub, ConnectionStatus state); + } + + /** + * Creates a stub for a remote Router object. + * @param routerHost The name of the router's host + * @param routerPort The router's port + * @throws SocketException + */ + public RouterStub(String routerHost, int routerPort, InetAddress bindAddress, ConnectionListener l) { + router_host=routerHost != null? routerHost : "localhost"; + router_port=routerPort; + bind_addr=bindAddress; + conn_listener=l; + } + + public synchronized void setReceiver(StubReceiver receiver) { + this.receiver = receiver; + } + + public synchronized StubReceiver getReceiver() { + return receiver; + } + + public boolean isTcpNoDelay() { + return tcp_nodelay; + } + + public void setTcpNoDelay(boolean tcp_nodelay) { + this.tcp_nodelay=tcp_nodelay; + } + + public synchronized void interrupt() { + if(receiver != null) { + Thread thread = receiver.getThread(); + if(thread != null) + thread.interrupt(); + } + } + + public synchronized void join(long wait) throws InterruptedException { + if(receiver != null) { + Thread thread = receiver.getThread(); + if(thread != null) + thread.join(wait); + } + } + + + public int getSocketConnectionTimeout() { + return sock_conn_timeout; + } + + public void setSocketConnectionTimeout(int sock_conn_timeout) { + this.sock_conn_timeout=sock_conn_timeout; + } + + public int getSocketReadTimeout() { + return sock_read_timeout; + } + + public void setSocketReadTimeout(int sock_read_timeout) { + this.sock_read_timeout=sock_read_timeout; + } + + public boolean isConnected() { + return !(connectionState == ConnectionStatus.CONNECTION_BROKEN || connectionState == ConnectionStatus.INITIAL); + } + + public ConnectionStatus getConnectionStatus() { + return connectionState; + } + + + /** + * 
Register this process with the router under group. + * @param group The name of the group under which to register + */ + public synchronized void connect(String group, Address addr, String logical_name, List phys_addrs) throws Exception { + doConnect(); + GossipData request=new GossipData(GossipRouter.CONNECT, group, addr, logical_name, phys_addrs); + request.writeTo(output); + output.flush(); + byte result = input.readByte(); + if(result == GossipRouter.CONNECT_OK) { + connectionStateChanged(ConnectionStatus.CONNECTED); + } else { + connectionStateChanged(ConnectionStatus.DISCONNECTED); + throw new Exception("Connect failed received from GR " + getGossipRouterAddress()); + } + } + + public synchronized void doConnect() throws Exception { + if(!isConnected()) { + try { + sock=new Socket(); + sock.bind(new InetSocketAddress(bind_addr, 0)); + sock.setSoTimeout(sock_read_timeout); + sock.setSoLinger(true, 2); + sock.setTcpNoDelay(tcp_nodelay); + sock.setKeepAlive(true); + Util.connect(sock, new InetSocketAddress(router_host, router_port), sock_conn_timeout); + output=new DataOutputStream(sock.getOutputStream()); + input=new DataInputStream(sock.getInputStream()); + connectionStateChanged(ConnectionStatus.CONNECTION_ESTABLISHED); + } + catch(Exception e) { + Util.close(sock); + Util.close(input); + Util.close(output); + connectionStateChanged(ConnectionStatus.CONNECTION_BROKEN); + throw new Exception("Could not connect to " + getGossipRouterAddress() , e); + } + } + } + + /** + * Checks whether the connection is open + * @return + */ + public synchronized void checkConnection() { + GossipData request=new GossipData(GossipRouter.PING); + try { + request.writeTo(output); + output.flush(); + } + catch(IOException e) { + connectionStateChanged(ConnectionStatus.CONNECTION_BROKEN); + } + } + + + public synchronized void disconnect(String group, Address addr) { + try { + GossipData request=new GossipData(GossipRouter.DISCONNECT, group, addr); + request.writeTo(output); + 
output.flush(); + } + catch(Exception e) { + } finally { + connectionStateChanged(ConnectionStatus.DISCONNECTED); + } + } + + public synchronized void destroy() { + try { + GossipData request = new GossipData(GossipRouter.CLOSE); + request.writeTo(output); + output.flush(); + } catch (Exception e) { + } finally { + Util.close(output); + Util.close(input); + Util.close(sock); + } + } + + + /* + * Used only in testing, never access socket directly + * + */ + public Socket getSocket() { + return sock; + } + + + public synchronized List getMembers(final String group) throws Exception { + List retval=new ArrayList(); + try { + + if(!isConnected() || input == null) throw new Exception ("not connected"); + // we might get a spurious SUSPECT message from the router, just ignore it + if(input.available() > 0) // fixes https://jira.jboss.org/jira/browse/JGRP-1151 + input.skipBytes(input.available()); + + GossipData request=new GossipData(GossipRouter.GOSSIP_GET, group, null); + request.writeTo(output); + output.flush(); + + short num_rsps=input.readShort(); + for(int i=0; i < num_rsps; i++) { + PingData rsp=new PingData(); + rsp.readFrom(input); + retval.add(rsp); + } + } + catch(Exception e) { + connectionStateChanged(ConnectionStatus.CONNECTION_BROKEN); + throw new Exception("Connection to " + getGossipRouterAddress() + " broken. Could not send GOSSIP_GET request", e); + } + return retval; + } + + public InetSocketAddress getGossipRouterAddress() { + return new InetSocketAddress(router_host, router_port); + } + + public String toString() { + return "RouterStub[localsocket=" + ((sock != null) ? 
sock.getLocalSocketAddress().toString() + : "null")+ ",router_host=" + router_host + "::" + router_port + + ",connected=" + isConnected() + "]"; + } + + public void sendToAllMembers(String group, byte[] data, int offset, int length) throws Exception { + sendToMember(group, null, data, offset, length); // null destination represents mcast + } + + public synchronized void sendToMember(String group, Address dest, byte[] data, int offset, int length) throws Exception { + try { + GossipData request = new GossipData(GossipRouter.MESSAGE, group, dest, data, offset, length); + request.writeTo(output); + output.flush(); + } catch (Exception e) { + connectionStateChanged(ConnectionStatus.CONNECTION_BROKEN); + throw new Exception("Connection to " + getGossipRouterAddress() + + " broken. Could not send message to " + dest, e); + } + } + + public DataInputStream getInputStream() { + return input; + } + + private void connectionStateChanged(ConnectionStatus newState) { + boolean notify=connectionState != newState; + connectionState=newState; + if(notify && conn_listener != null) { + try { + conn_listener.connectionStatusChange(this, newState); + } + catch(Throwable t) { + log.error("failed notifying ConnectionListener " + conn_listener, t); + } + } + } +} diff --git a/tests/scancode/data/resource/samples/JGroups/src/RouterStubManager.java b/tests/scancode/data/resource/samples/JGroups/src/RouterStubManager.java new file mode 100644 index 00000000000..47153252434 --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/src/RouterStubManager.java @@ -0,0 +1,213 @@ +/* + * JBoss, Home of Professional Open Source. + * Copyright 2009, Red Hat Middleware LLC, and individual contributors + * as indicated by the @author tags. See the copyright.txt file in the + * distribution for a full listing of individual contributors. 
+ * + * This is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * This software is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this software; if not, write to the Free + * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA, or see the FSF site: http://www.fsf.org. + */ +package org.jgroups.stack; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import org.jgroups.Address; +import org.jgroups.Event; +import org.jgroups.PhysicalAddress; +import org.jgroups.annotations.GuardedBy; +import org.jgroups.logging.Log; +import org.jgroups.logging.LogFactory; +import org.jgroups.util.TimeScheduler; + +public class RouterStubManager implements RouterStub.ConnectionListener { + + @GuardedBy("reconnectorLock") + private final Map> futures = new HashMap>(); + private final Lock reconnectorLock = new ReentrantLock(); + private final List stubs; + + private final Protocol owner; + private final TimeScheduler timer; + private final String channelName; + private final Address logicalAddress; + private final long interval; + + protected final Log log; + + public RouterStubManager(Protocol owner, String channelName, Address 
logicalAddress, long interval) { + this.owner = owner; + this.stubs = new CopyOnWriteArrayList(); + this.log = LogFactory.getLog(owner.getClass()); + this.timer = owner.getTransport().getTimer(); + this.channelName = channelName; + this.logicalAddress = logicalAddress; + this.interval = interval; + } + + private RouterStubManager(Protocol p) { + this(p,null,null,0L); + } + + public List getStubs(){ + return stubs; + } + + public RouterStub createAndRegisterStub(String routerHost, int routerPort, InetAddress bindAddress) { + RouterStub s = new RouterStub(routerHost,routerPort,bindAddress,this); + unregisterAndDestroyStub(s.getGossipRouterAddress()); + stubs.add(s); + return s; + } + + public void registerStub(RouterStub s) { + unregisterAndDestroyStub(s.getGossipRouterAddress()); + stubs.add(s); + } + + public boolean unregisterStub(final RouterStub s) { + return stubs.remove(s); + } + + public RouterStub unregisterStub(final InetSocketAddress address) { + if(address == null) + throw new IllegalArgumentException("Cannot remove null address"); + for (RouterStub s : stubs) { + if (s.getGossipRouterAddress().equals(address)) { + stubs.remove(address); + return s; + } + } + return null; + } + + public boolean unregisterAndDestroyStub(final InetSocketAddress address) { + RouterStub unregisteredStub = unregisterStub(address); + if(unregisteredStub !=null) { + unregisteredStub.destroy(); + return true; + } + return false; + } + + public void disconnectStubs() { + for (RouterStub stub : stubs) { + try { + stub.disconnect(channelName, logicalAddress); + } catch (Exception e) { + } + } + } + + public void destroyStubs() { + for (RouterStub s : stubs) { + stopReconnecting(s); + s.destroy(); + } + stubs.clear(); + } + + public void startReconnecting(final RouterStub stub) { + reconnectorLock.lock(); + try { + InetSocketAddress routerAddress = stub.getGossipRouterAddress(); + Future f = futures.get(routerAddress); + if (f != null) { + f.cancel(true); + 
futures.remove(routerAddress); + } + + final Runnable reconnector = new Runnable() { + public void run() { + try { + if (log.isTraceEnabled()) log.trace("Reconnecting " + stub); + String logical_name = org.jgroups.util.UUID.get(logicalAddress); + PhysicalAddress physical_addr = (PhysicalAddress) owner.down(new Event( + Event.GET_PHYSICAL_ADDRESS, logicalAddress)); + List physical_addrs = Arrays.asList(physical_addr); + stub.connect(channelName, logicalAddress, logical_name, physical_addrs); + if (log.isTraceEnabled()) log.trace("Reconnected " + stub); + } catch (Throwable ex) { + if (log.isWarnEnabled()) + log.warn("failed reconnecting stub to GR at "+ stub.getGossipRouterAddress() + ": " + ex); + } + } + }; + f = timer.scheduleWithFixedDelay(reconnector, 0, interval, TimeUnit.MILLISECONDS); + futures.put(stub.getGossipRouterAddress(), f); + } finally { + reconnectorLock.unlock(); + } + } + + public void stopReconnecting(final RouterStub stub) { + reconnectorLock.lock(); + try { + InetSocketAddress routerAddress = stub.getGossipRouterAddress(); + Future f = futures.get(stub.getGossipRouterAddress()); + if (f != null) { + f.cancel(true); + futures.remove(routerAddress); + } + + final Runnable pinger = new Runnable() { + public void run() { + try { + if(log.isTraceEnabled()) log.trace("Pinging " + stub); + stub.checkConnection(); + if(log.isTraceEnabled()) log.trace("Pinged " + stub); + } catch (Throwable ex) { + if (log.isWarnEnabled()) + log.warn("failed pinging stub, GR at " + stub.getGossipRouterAddress()+ ": " + ex); + } + } + }; + f = timer.scheduleWithFixedDelay(pinger, 0, interval, TimeUnit.MILLISECONDS); + futures.put(stub.getGossipRouterAddress(), f); + } finally { + reconnectorLock.unlock(); + } + } + + + public void connectionStatusChange(RouterStub stub, RouterStub.ConnectionStatus newState) { + if (newState == RouterStub.ConnectionStatus.CONNECTION_BROKEN) { + stub.interrupt(); + stub.destroy(); + startReconnecting(stub); + } else if (newState == 
RouterStub.ConnectionStatus.CONNECTED) { + stopReconnecting(stub); + } else if (newState == RouterStub.ConnectionStatus.DISCONNECTED) { + // wait for disconnect ack; + try { + stub.join(interval); + } catch (InterruptedException e) { + } + } + } + + public static RouterStubManager emptyGossipClientStubManager(Protocol p) { + return new RouterStubManager(p); + } +} diff --git a/tests/scancode/data/resource/samples/JGroups/src/S3_PING.java b/tests/scancode/data/resource/samples/JGroups/src/S3_PING.java new file mode 100644 index 00000000000..2f93ec6cc9c --- /dev/null +++ b/tests/scancode/data/resource/samples/JGroups/src/S3_PING.java @@ -0,0 +1,3025 @@ +package org.jgroups.protocols; + +import org.jgroups.Address; +import org.jgroups.annotations.Experimental; +import org.jgroups.annotations.Property; +import org.jgroups.annotations.Unsupported; +import org.jgroups.util.Util; +import org.xml.sax.Attributes; +import org.xml.sax.InputSource; +import org.xml.sax.SAXException; +import org.xml.sax.XMLReader; +import org.xml.sax.helpers.DefaultHandler; +import org.xml.sax.helpers.XMLReaderFactory; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.UnsupportedEncodingException; +import java.net.HttpURLConnection; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLEncoder; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.*; + +import static java.lang.String.valueOf; + + +/** + * Discovery protocol using Amazon's S3 storage. The S3 access code reuses the example shipped by Amazon. + * This protocol is unsupported and experimental ! 
+ * @author Bela Ban + * @version $Id: S3_PING.java,v 1.11 2010/06/18 04:39:08 belaban Exp $ + */ +@Experimental +public class S3_PING extends FILE_PING { + + @Property(description="The access key to AWS (S3)") + protected String access_key=null; + + @Property(description="The secret access key to AWS (S3)") + protected String secret_access_key=null; + + @Property(description="When non-null, we set location to prefix-UUID") + protected String prefix=null; + + protected AWSAuthConnection conn=null; + + + + public void init() throws Exception { + super.init(); + if(access_key == null || secret_access_key == null) + throw new IllegalArgumentException("access_key and secret_access_key must be non-null"); + + conn=new AWSAuthConnection(access_key, secret_access_key); + + if(prefix != null && prefix.length() > 0) { + ListAllMyBucketsResponse bucket_list=conn.listAllMyBuckets(null); + List buckets=bucket_list.entries; + if(buckets != null) { + boolean found=false; + for(Object tmp: buckets) { + if(tmp instanceof Bucket) { + Bucket bucket=(Bucket)tmp; + if(bucket.name.startsWith(prefix)) { + location=bucket.name; + found=true; + } + } + } + if(!found) { + location=prefix + "-" + java.util.UUID.randomUUID().toString(); + } + } + } + + + if(!conn.checkBucketExists(location)) { + conn.createBucket(location, AWSAuthConnection.LOCATION_DEFAULT, null).connection.getResponseMessage(); + } + + Runtime.getRuntime().addShutdownHook(new Thread() { + public void run() { + remove(group_addr, local_addr); + } + }); + } + + protected void createRootDir() { + ; // do *not* create root file system (don't remove !) 
+ } + + protected List readAll(String clustername) { + if(clustername == null) + return null; + + List retval=new ArrayList(); + try { + ListBucketResponse rsp=conn.listBucket(location, clustername, null, null, null); + if(rsp.entries != null) { + for(Iterator it=rsp.entries.iterator(); it.hasNext();) { + ListEntry key=it.next(); + GetResponse val=conn.get(location, key.key, null); + if(val.object != null) { + byte[] buf=val.object.data; + if(buf != null) { + try { + PingData data=(PingData)Util.objectFromByteBuffer(buf); + retval.add(data); + } + catch(Exception e) { + log.error("failed marshalling buffer to address", e); + } + } + } + } + } + + return retval; + } + catch(IOException ex) { + log.error("failed reading addresses", ex); + return retval; + } + } + + + protected void writeToFile(PingData data, String clustername) { + if(clustername == null || data == null) + return; + String filename=local_addr instanceof org.jgroups.util.UUID? ((org.jgroups.util.UUID)local_addr).toStringLong() : local_addr.toString(); + String key=clustername + "/" + filename; + try { + Map headers=new TreeMap(); + headers.put("Content-Type", Arrays.asList("text/plain")); + byte[] buf=Util.objectToByteBuffer(data); + S3Object val=new S3Object(buf, null); + conn.put(location, key, val, headers).connection.getResponseMessage(); + } + catch(Exception e) { + log.error("failed marshalling " + data + " to buffer", e); + } + } + + + protected void remove(String clustername, Address addr) { + if(clustername == null || addr == null) + return; + String filename=addr instanceof org.jgroups.util.UUID? 
((org.jgroups.util.UUID)addr).toStringLong() : addr.toString(); + String key=clustername + "/" + filename; + try { + Map headers=new TreeMap(); + headers.put("Content-Type", Arrays.asList("text/plain")); + conn.delete(location, key, headers).connection.getResponseMessage(); + if(log.isTraceEnabled()) + log.trace("removing " + location + "/" + key); + } + catch(Exception e) { + log.error("failure removing data", e); + } + } + + + + + + + + /** + * The following classes have been copied from Amazon's sample code + */ + static class AWSAuthConnection { + public static final String LOCATION_DEFAULT=null; + public static final String LOCATION_EU="EU"; + + private String awsAccessKeyId; + private String awsSecretAccessKey; + private boolean isSecure; + private String server; + private int port; + private CallingFormat callingFormat; + + public AWSAuthConnection(String awsAccessKeyId, String awsSecretAccessKey) { + this(awsAccessKeyId, awsSecretAccessKey, true); + } + + public AWSAuthConnection(String awsAccessKeyId, String awsSecretAccessKey, boolean isSecure) { + this(awsAccessKeyId, awsSecretAccessKey, isSecure, Utils.DEFAULT_HOST); + } + + public AWSAuthConnection(String awsAccessKeyId, String awsSecretAccessKey, boolean isSecure, + String server) { + this(awsAccessKeyId, awsSecretAccessKey, isSecure, server, + isSecure? Utils.SECURE_PORT : Utils.INSECURE_PORT); + } + + public AWSAuthConnection(String awsAccessKeyId, String awsSecretAccessKey, boolean isSecure, + String server, int port) { + this(awsAccessKeyId, awsSecretAccessKey, isSecure, server, port, CallingFormat.getSubdomainCallingFormat()); + + } + + public AWSAuthConnection(String awsAccessKeyId, String awsSecretAccessKey, boolean isSecure, + String server, CallingFormat format) { + this(awsAccessKeyId, awsSecretAccessKey, isSecure, server, + isSecure? 
Utils.SECURE_PORT : Utils.INSECURE_PORT, + format); + } + + /** + * Create a new interface to interact with S3 with the given credential and connection + * parameters + * @param awsAccessKeyId Your user key into AWS + * @param awsSecretAccessKey The secret string used to generate signatures for authentication. + * @param isSecure use SSL encryption + * @param server Which host to connect to. Usually, this will be s3.amazonaws.com + * @param port Which port to use. + * @param format Type of request Regular/Vanity or Pure Vanity domain + */ + public AWSAuthConnection(String awsAccessKeyId, String awsSecretAccessKey, boolean isSecure, + String server, int port, CallingFormat format) { + this.awsAccessKeyId=awsAccessKeyId; + this.awsSecretAccessKey=awsSecretAccessKey; + this.isSecure=isSecure; + this.server=server; + this.port=port; + this.callingFormat=format; + } + + /** + * Creates a new bucket. + * @param bucket The name of the bucket to create. + * @param headers A Map of String to List of Strings representing the http headers to pass (can be null). + */ + public Response createBucket(String bucket, Map headers) throws IOException { + return createBucket(bucket, null, headers); + } + + /** + * Creates a new bucket. + * @param bucket The name of the bucket to create. + * @param location Desired location ("EU") (or null for default). + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). 
+ * @throws IllegalArgumentException on invalid location + */ + public Response createBucket(String bucket, String location, Map headers) throws IOException { + String body; + if(location == null) { + body=null; + } + else if(LOCATION_EU.equals(location)) { + if(!callingFormat.supportsLocatedBuckets()) + throw new IllegalArgumentException("Creating location-constrained bucket with unsupported calling-format"); + body="" + location + ""; + } + else + throw new IllegalArgumentException("Invalid Location: " + location); + + // validate bucket name + if(!Utils.validateBucketName(bucket, callingFormat)) + throw new IllegalArgumentException("Invalid Bucket Name: " + bucket); + + HttpURLConnection request=makeRequest("PUT", bucket, "", null, headers); + if(body != null) { + request.setDoOutput(true); + request.getOutputStream().write(body.getBytes("UTF-8")); + } + return new Response(request); + } + + /** + * Check if the specified bucket exists (via a HEAD request) + * @param bucket The name of the bucket to check + * @return true if HEAD access returned success + */ + public boolean checkBucketExists(String bucket) throws IOException { + HttpURLConnection response=makeRequest("HEAD", bucket, "", null, null); + int httpCode=response.getResponseCode(); + + if(httpCode >= 200 && httpCode < 300) + return true; + if(httpCode == HttpURLConnection.HTTP_NOT_FOUND) // bucket doesn't exist + return false; + throw new IOException("bucket '" + bucket + "' could not be accessed (rsp=" + + httpCode + " (" + response.getResponseMessage() + "). Maybe the bucket is owned by somebody else or " + + "the authentication failed"); + + } + + /** + * Lists the contents of a bucket. + * @param bucket The name of the bucket to create. + * @param prefix All returned keys will start with this string (can be null). + * @param marker All returned keys will be lexographically greater than + * this string (can be null). + * @param maxKeys The maximum number of keys to return (can be null). 
+ * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). + */ + public ListBucketResponse listBucket(String bucket, String prefix, String marker, + Integer maxKeys, Map headers) throws IOException { + return listBucket(bucket, prefix, marker, maxKeys, null, headers); + } + + /** + * Lists the contents of a bucket. + * @param bucket The name of the bucket to list. + * @param prefix All returned keys will start with this string (can be null). + * @param marker All returned keys will be lexographically greater than + * this string (can be null). + * @param maxKeys The maximum number of keys to return (can be null). + * @param delimiter Keys that contain a string between the prefix and the first + * occurrence of the delimiter will be rolled up into a single element. + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). + */ + public ListBucketResponse listBucket(String bucket, String prefix, String marker, + Integer maxKeys, String delimiter, Map headers) throws IOException { + + Map pathArgs=Utils.paramsForListOptions(prefix, marker, maxKeys, delimiter); + return new ListBucketResponse(makeRequest("GET", bucket, "", pathArgs, headers)); + } + + /** + * Deletes a bucket. + * @param bucket The name of the bucket to delete. + * @param headers A Map of String to List of Strings representing the http headers to pass (can be null). + */ + public Response deleteBucket(String bucket, Map headers) throws IOException { + return new Response(makeRequest("DELETE", bucket, "", null, headers)); + } + + /** + * Writes an object to S3. + * @param bucket The name of the bucket to which the object will be added. + * @param key The name of the key to use. + * @param object An S3Object containing the data to write. + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). 
+ */ + public Response put(String bucket, String key, S3Object object, Map headers) throws IOException { + HttpURLConnection request= + makeRequest("PUT", bucket, Utils.urlencode(key), null, headers, object); + + request.setDoOutput(true); + request.getOutputStream().write(object.data == null? new byte[]{} : object.data); + + return new Response(request); + } + + /** + * Creates a copy of an existing S3 Object. In this signature, we will copy the + * existing metadata. The default access control policy is private; if you want + * to override it, please use x-amz-acl in the headers. + * @param sourceBucket The name of the bucket where the source object lives. + * @param sourceKey The name of the key to copy. + * @param destinationBucket The name of the bucket to which the object will be added. + * @param destinationKey The name of the key to use. + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). You may wish to set the x-amz-acl header appropriately. + */ + public Response copy(String sourceBucket, String sourceKey, String destinationBucket, String destinationKey, Map headers) + throws IOException { + S3Object object=new S3Object(new byte[]{}, new HashMap()); + headers=headers == null? new HashMap() : new HashMap(headers); + headers.put("x-amz-copy-source", Arrays.asList(sourceBucket + "/" + sourceKey)); + headers.put("x-amz-metadata-directive", Arrays.asList("COPY")); + return verifyCopy(put(destinationBucket, destinationKey, object, headers)); + } + + /** + * Creates a copy of an existing S3 Object. In this signature, we will replace the + * existing metadata. The default access control policy is private; if you want + * to override it, please use x-amz-acl in the headers. + * @param sourceBucket The name of the bucket where the source object lives. + * @param sourceKey The name of the key to copy. + * @param destinationBucket The name of the bucket to which the object will be added. 
+ * @param destinationKey The name of the key to use. + * @param metadata A Map of String to List of Strings representing the S3 metadata + * for the new object. + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). You may wish to set the x-amz-acl header appropriately. + */ + public Response copy(String sourceBucket, String sourceKey, String destinationBucket, String destinationKey, Map metadata, Map headers) + throws IOException { + S3Object object=new S3Object(new byte[]{}, metadata); + headers=headers == null? new HashMap() : new HashMap(headers); + headers.put("x-amz-copy-source", Arrays.asList(sourceBucket + "/" + sourceKey)); + headers.put("x-amz-metadata-directive", Arrays.asList("REPLACE")); + return verifyCopy(put(destinationBucket, destinationKey, object, headers)); + } + + /** + * Copy sometimes returns a successful response and starts to send whitespace + * characters to us. This method processes those whitespace characters and + * will throw an exception if the response is either unknown or an error. + * @param response Response object from the PUT request. + * @return The response with the input stream drained. + * @throws IOException If anything goes wrong. + */ + private static Response verifyCopy(Response response) throws IOException { + if(response.connection.getResponseCode() < 400) { + byte[] body=GetResponse.slurpInputStream(response.connection.getInputStream()); + String message=new String(body); + if(message.contains("")) { + // It worked! + } + else { + throw new IOException("Unexpected response: " + message); + } + } + return response; + } + + /** + * Reads an object from S3. + * @param bucket The name of the bucket where the object lives. + * @param key The name of the key to use. + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). 
+ */ + public GetResponse get(String bucket, String key, Map headers) throws IOException { + return new GetResponse(makeRequest("GET", bucket, Utils.urlencode(key), null, headers)); + } + + /** + * Deletes an object from S3. + * @param bucket The name of the bucket where the object lives. + * @param key The name of the key to use. + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). + */ + public Response delete(String bucket, String key, Map headers) throws IOException { + return new Response(makeRequest("DELETE", bucket, Utils.urlencode(key), null, headers)); + } + + /** + * Get the requestPayment xml document for a given bucket + * @param bucket The name of the bucket + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). + */ + public GetResponse getBucketRequestPayment(String bucket, Map headers) throws IOException { + Map pathArgs=new HashMap(); + pathArgs.put("requestPayment", null); + return new GetResponse(makeRequest("GET", bucket, "", pathArgs, headers)); + } + + /** + * Write a new requestPayment xml document for a given bucket + * @param bucket The name of the bucket + * @param requestPaymentXMLDoc + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). + */ + public Response putBucketRequestPayment(String bucket, String requestPaymentXMLDoc, Map headers) + throws IOException { + Map pathArgs=new HashMap(); + pathArgs.put("requestPayment", null); + S3Object object=new S3Object(requestPaymentXMLDoc.getBytes(), null); + HttpURLConnection request=makeRequest("PUT", bucket, "", pathArgs, headers, object); + + request.setDoOutput(true); + request.getOutputStream().write(object.data == null? 
new byte[]{} : object.data); + + return new Response(request); + } + + /** + * Get the logging xml document for a given bucket + * @param bucket The name of the bucket + * @param headers A Map of String to List of Strings representing the http headers to pass (can be null). + */ + public GetResponse getBucketLogging(String bucket, Map headers) throws IOException { + Map pathArgs=new HashMap(); + pathArgs.put("logging", null); + return new GetResponse(makeRequest("GET", bucket, "", pathArgs, headers)); + } + + /** + * Write a new logging xml document for a given bucket + * @param loggingXMLDoc The xml representation of the logging configuration as a String + * @param bucket The name of the bucket + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). + */ + public Response putBucketLogging(String bucket, String loggingXMLDoc, Map headers) throws IOException { + Map pathArgs=new HashMap(); + pathArgs.put("logging", null); + S3Object object=new S3Object(loggingXMLDoc.getBytes(), null); + HttpURLConnection request=makeRequest("PUT", bucket, "", pathArgs, headers, object); + + request.setDoOutput(true); + request.getOutputStream().write(object.data == null? new byte[]{} : object.data); + + return new Response(request); + } + + /** + * Get the ACL for a given bucket + * @param bucket The name of the bucket where the object lives. + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). + */ + public GetResponse getBucketACL(String bucket, Map headers) throws IOException { + return getACL(bucket, "", headers); + } + + /** + * Get the ACL for a given object (or bucket, if key is null). + * @param bucket The name of the bucket where the object lives. + * @param key The name of the key to use. + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). 
+ */ + public GetResponse getACL(String bucket, String key, Map headers) throws IOException { + if(key == null) key=""; + + Map pathArgs=new HashMap(); + pathArgs.put("acl", null); + + return new GetResponse( + makeRequest("GET", bucket, Utils.urlencode(key), pathArgs, headers) + ); + } + + /** + * Write a new ACL for a given bucket + * @param aclXMLDoc The xml representation of the ACL as a String + * @param bucket The name of the bucket where the object lives. + * @param headers A Map of String to List of Strings representing the http headers to pass (can be null). + */ + public Response putBucketACL(String bucket, String aclXMLDoc, Map headers) throws IOException { + return putACL(bucket, "", aclXMLDoc, headers); + } + + /** + * Write a new ACL for a given object + * @param aclXMLDoc The xml representation of the ACL as a String + * @param bucket The name of the bucket where the object lives. + * @param key The name of the key to use. + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). + */ + public Response putACL(String bucket, String key, String aclXMLDoc, Map headers) + throws IOException { + S3Object object=new S3Object(aclXMLDoc.getBytes(), null); + + Map pathArgs=new HashMap(); + pathArgs.put("acl", null); + + HttpURLConnection request= + makeRequest("PUT", bucket, Utils.urlencode(key), pathArgs, headers, object); + + request.setDoOutput(true); + request.getOutputStream().write(object.data == null? new byte[]{} : object.data); + + return new Response(request); + } + + public LocationResponse getBucketLocation(String bucket) + throws IOException { + Map pathArgs=new HashMap(); + pathArgs.put("location", null); + return new LocationResponse(makeRequest("GET", bucket, "", pathArgs, null)); + } + + + /** + * List all the buckets created by this account. + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). 
+ */ + public ListAllMyBucketsResponse listAllMyBuckets(Map headers) + throws IOException { + return new ListAllMyBucketsResponse(makeRequest("GET", "", "", null, headers)); + } + + + /** + * Make a new HttpURLConnection without passing an S3Object parameter. + * Use this method for key operations that do require arguments + * @param method The method to invoke + * @param bucketName the bucket this request is for + * @param key the key this request is for + * @param pathArgs the + * @param headers + * @return + * @throws MalformedURLException + * @throws IOException + */ + private HttpURLConnection makeRequest(String method, String bucketName, String key, Map pathArgs, Map headers) + throws IOException { + return makeRequest(method, bucketName, key, pathArgs, headers, null); + } + + + /** + * Make a new HttpURLConnection. + * @param method The HTTP method to use (GET, PUT, DELETE) + * @param bucket The bucket name this request affects + * @param key The key this request is for + * @param pathArgs parameters if any to be sent along this request + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). + * @param object The S3Object that is to be written (can be null). + */ + private HttpURLConnection makeRequest(String method, String bucket, String key, Map pathArgs, Map headers, + S3Object object) + throws IOException { + CallingFormat format=Utils.getCallingFormatForBucket(this.callingFormat, bucket); + if(isSecure && format != CallingFormat.getPathCallingFormat() && bucket.contains(".")) { + System.err.println("You are making an SSL connection, however, the bucket contains periods and the wildcard certificate will not match by default. 
Please consider using HTTP."); + } + + // build the domain based on the calling format + URL url=format.getURL(isSecure, server, this.port, bucket, key, pathArgs); + + HttpURLConnection connection=(HttpURLConnection)url.openConnection(); + connection.setRequestMethod(method); + + // subdomain-style urls may encounter http redirects. + // Ensure that redirects are supported. + if(!connection.getInstanceFollowRedirects() + && format.supportsLocatedBuckets()) + throw new RuntimeException("HTTP redirect support required."); + + addHeaders(connection, headers); + if(object != null) addMetadataHeaders(connection, object.metadata); + addAuthHeader(connection, method, bucket, key, pathArgs); + + return connection; + } + + /** + * Add the given headers to the HttpURLConnection. + * @param connection The HttpURLConnection to which the headers will be added. + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). + */ + private static void addHeaders(HttpURLConnection connection, Map headers) { + addHeaders(connection, headers, ""); + } + + /** + * Add the given metadata fields to the HttpURLConnection. + * @param connection The HttpURLConnection to which the headers will be added. + * @param metadata A Map of String to List of Strings representing the s3 + * metadata for this resource. + */ + private static void addMetadataHeaders(HttpURLConnection connection, Map metadata) { + addHeaders(connection, metadata, Utils.METADATA_PREFIX); + } + + /** + * Add the given headers to the HttpURLConnection with a prefix before the keys. + * @param connection The HttpURLConnection to which the headers will be added. + * @param headers A Map of String to List of Strings representing the http + * headers to pass (can be null). + * @param prefix The string to prepend to each key before adding it to the connection. 
+ */ + private static void addHeaders(HttpURLConnection connection, Map headers, String prefix) { + if(headers != null) { + for(Iterator i=headers.keySet().iterator(); i.hasNext();) { + String key=(String)i.next(); + for(Iterator j=((List)headers.get(key)).iterator(); j.hasNext();) { + String value=(String)j.next(); + connection.addRequestProperty(prefix + key, value); + } + } + } + } + + /** + * Add the appropriate Authorization header to the HttpURLConnection. + * @param connection The HttpURLConnection to which the header will be added. + * @param method The HTTP method to use (GET, PUT, DELETE) + * @param bucket the bucket name this request is for + * @param key the key this request is for + * @param pathArgs path arguments which are part of this request + */ + private void addAuthHeader(HttpURLConnection connection, String method, String bucket, String key, Map pathArgs) { + if(connection.getRequestProperty("Date") == null) { + connection.setRequestProperty("Date", httpDate()); + } + if(connection.getRequestProperty("Content-Type") == null) { + connection.setRequestProperty("Content-Type", ""); + } + + String canonicalString= + Utils.makeCanonicalString(method, bucket, key, pathArgs, connection.getRequestProperties()); + String encodedCanonical=Utils.encode(this.awsSecretAccessKey, canonicalString, false); + connection.setRequestProperty("Authorization", + "AWS " + this.awsAccessKeyId + ":" + encodedCanonical); + } + + + /** + * Generate an rfc822 date for use in the Date HTTP header. + */ + public static String httpDate() { + final String DateFormat="EEE, dd MMM yyyy HH:mm:ss "; + SimpleDateFormat format=new SimpleDateFormat(DateFormat, Locale.US); + format.setTimeZone(TimeZone.getTimeZone("GMT")); + return format.format(new Date()) + "GMT"; + } + } + + static class ListEntry { + /** + * The name of the object + */ + public String key; + + /** + * The date at which the object was last modified. 
+ */ + public Date lastModified; + + /** + * The object's ETag, which can be used for conditional GETs. + */ + public String eTag; + + /** + * The size of the object in bytes. + */ + public long size; + + /** + * The object's storage class + */ + public String storageClass; + + /** + * The object's owner + */ + public Owner owner; + + public String toString() { + return key; + } + } + + static class Owner { + public String id; + public String displayName; + } + + + static class Response { + public HttpURLConnection connection; + + public Response(HttpURLConnection connection) throws IOException { + this.connection=connection; + } + } + + + static class GetResponse extends Response { + public S3Object object; + + /** + * Pulls a representation of an S3Object out of the HttpURLConnection response. + */ + public GetResponse(HttpURLConnection connection) throws IOException { + super(connection); + if(connection.getResponseCode() < 400) { + Map metadata=extractMetadata(connection); + byte[] body=slurpInputStream(connection.getInputStream()); + this.object=new S3Object(body, metadata); + } + } + + /** + * Examines the response's header fields and returns a Map from String to List of Strings + * representing the object's metadata. 
+ */ + private static Map extractMetadata(HttpURLConnection connection) { + TreeMap metadata=new TreeMap(); + Map headers=connection.getHeaderFields(); + for(Iterator i=headers.keySet().iterator(); i.hasNext();) { + String key=(String)i.next(); + if(key == null) continue; + if(key.startsWith(Utils.METADATA_PREFIX)) { + metadata.put(key.substring(Utils.METADATA_PREFIX.length()), headers.get(key)); + } + } + + return metadata; + } + + /** + * Read the input stream and dump it all into a big byte array + */ + static byte[] slurpInputStream(InputStream stream) throws IOException { + final int chunkSize=2048; + byte[] buf=new byte[chunkSize]; + ByteArrayOutputStream byteStream=new ByteArrayOutputStream(chunkSize); + int count; + + while((count=stream.read(buf)) != -1) byteStream.write(buf, 0, count); + + return byteStream.toByteArray(); + } + } + + static class LocationResponse extends Response { + String location; + + /** + * Parse the response to a ?location query. + */ + public LocationResponse(HttpURLConnection connection) throws IOException { + super(connection); + if(connection.getResponseCode() < 400) { + try { + XMLReader xr=Utils.createXMLReader(); + ; + LocationResponseHandler handler=new LocationResponseHandler(); + xr.setContentHandler(handler); + xr.setErrorHandler(handler); + + xr.parse(new InputSource(connection.getInputStream())); + this.location=handler.loc; + } + catch(SAXException e) { + throw new RuntimeException("Unexpected error parsing ListAllMyBuckets xml", e); + } + } + else { + this.location=""; + } + } + + /** + * Report the location-constraint for a bucket. + * A value of null indicates an error; + * the empty string indicates no constraint; + * and any other value is an actual location constraint value. 
+ */ + public String getLocation() { + return location; + } + + /** + * Helper class to parse LocationConstraint response XML + */ + static class LocationResponseHandler extends DefaultHandler { + String loc=null; + private StringBuffer currText=null; + + public void startDocument() { + } + + public void startElement(String uri, String name, String qName, Attributes attrs) { + if(name.equals("LocationConstraint")) { + this.currText=new StringBuffer(); + } + } + + public void endElement(String uri, String name, String qName) { + if(name.equals("LocationConstraint")) { + loc=this.currText.toString(); + this.currText=null; + } + } + + public void characters(char ch[], int start, int length) { + if(currText != null) + this.currText.append(ch, start, length); + } + } + } + + + static class Bucket { + /** + * The name of the bucket. + */ + public String name; + + /** + * The bucket's creation date. + */ + public Date creationDate; + + public Bucket() { + this.name=null; + this.creationDate=null; + } + + public Bucket(String name, Date creationDate) { + this.name=name; + this.creationDate=creationDate; + } + + public String toString() { + return this.name; + } + } + + static class ListBucketResponse extends Response { + + /** + * The name of the bucket being listed. Null if request fails. + */ + public String name=null; + + /** + * The prefix echoed back from the request. Null if request fails. + */ + public String prefix=null; + + /** + * The marker echoed back from the request. Null if request fails. + */ + public String marker=null; + + /** + * The delimiter echoed back from the request. Null if not specified in + * the request, or if it fails. + */ + public String delimiter=null; + + /** + * The maxKeys echoed back from the request if specified. 0 if request fails. + */ + public int maxKeys=0; + + /** + * Indicates if there are more results to the list. True if the current + * list results have been truncated. false if request fails. 
+ */ + public boolean isTruncated=false; + + /** + * Indicates what to use as a marker for subsequent list requests in the event + * that the results are truncated. Present only when a delimiter is specified. + * Null if request fails. + */ + public String nextMarker=null; + + /** + * A List of ListEntry objects representing the objects in the given bucket. + * Null if the request fails. + */ + public List entries=null; + + /** + * A List of CommonPrefixEntry objects representing the common prefixes of the + * keys that matched up to the delimiter. Null if the request fails. + */ + public List commonPrefixEntries=null; + + public ListBucketResponse(HttpURLConnection connection) throws IOException { + super(connection); + if(connection.getResponseCode() < 400) { + try { + XMLReader xr=Utils.createXMLReader(); + ListBucketHandler handler=new ListBucketHandler(); + xr.setContentHandler(handler); + xr.setErrorHandler(handler); + + xr.parse(new InputSource(connection.getInputStream())); + + this.name=handler.getName(); + this.prefix=handler.getPrefix(); + this.marker=handler.getMarker(); + this.delimiter=handler.getDelimiter(); + this.maxKeys=handler.getMaxKeys(); + this.isTruncated=handler.getIsTruncated(); + this.nextMarker=handler.getNextMarker(); + this.entries=handler.getKeyEntries(); + this.commonPrefixEntries=handler.getCommonPrefixEntries(); + + } + catch(SAXException e) { + throw new RuntimeException("Unexpected error parsing ListBucket xml", e); + } + } + } + + static class ListBucketHandler extends DefaultHandler { + + private String name=null; + private String prefix=null; + private String marker=null; + private String delimiter=null; + private int maxKeys=0; + private boolean isTruncated=false; + private String nextMarker=null; + private boolean isEchoedPrefix=false; + private List keyEntries=null; + private ListEntry keyEntry=null; + private List commonPrefixEntries=null; + private CommonPrefixEntry commonPrefixEntry=null; + private StringBuffer 
currText=null; + private SimpleDateFormat iso8601Parser=null; + + public ListBucketHandler() { + super(); + keyEntries=new ArrayList(); + commonPrefixEntries=new ArrayList(); + this.iso8601Parser=new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); + this.iso8601Parser.setTimeZone(new SimpleTimeZone(0, "GMT")); + this.currText=new StringBuffer(); + } + + public void startDocument() { + this.isEchoedPrefix=true; + } + + public void endDocument() { + // ignore + } + + public void startElement(String uri, String name, String qName, Attributes attrs) { + if(name.equals("Contents")) { + this.keyEntry=new ListEntry(); + } + else if(name.equals("Owner")) { + this.keyEntry.owner=new Owner(); + } + else if(name.equals("CommonPrefixes")) { + this.commonPrefixEntry=new CommonPrefixEntry(); + } + } + + public void endElement(String uri, String name, String qName) { + if(name.equals("Name")) { + this.name=this.currText.toString(); + } + // this prefix is the one we echo back from the request + else if(name.equals("Prefix") && this.isEchoedPrefix) { + this.prefix=this.currText.toString(); + this.isEchoedPrefix=false; + } + else if(name.equals("Marker")) { + this.marker=this.currText.toString(); + } + else if(name.equals("MaxKeys")) { + this.maxKeys=Integer.parseInt(this.currText.toString()); + } + else if(name.equals("Delimiter")) { + this.delimiter=this.currText.toString(); + } + else if(name.equals("IsTruncated")) { + this.isTruncated=Boolean.valueOf(this.currText.toString()); + } + else if(name.equals("NextMarker")) { + this.nextMarker=this.currText.toString(); + } + else if(name.equals("Contents")) { + this.keyEntries.add(this.keyEntry); + } + else if(name.equals("Key")) { + this.keyEntry.key=this.currText.toString(); + } + else if(name.equals("LastModified")) { + try { + this.keyEntry.lastModified=this.iso8601Parser.parse(this.currText.toString()); + } + catch(ParseException e) { + throw new RuntimeException("Unexpected date format in list bucket output", e); + } + } + 
else if(name.equals("ETag")) { + this.keyEntry.eTag=this.currText.toString(); + } + else if(name.equals("Size")) { + this.keyEntry.size=Long.parseLong(this.currText.toString()); + } + else if(name.equals("StorageClass")) { + this.keyEntry.storageClass=this.currText.toString(); + } + else if(name.equals("ID")) { + this.keyEntry.owner.id=this.currText.toString(); + } + else if(name.equals("DisplayName")) { + this.keyEntry.owner.displayName=this.currText.toString(); + } + else if(name.equals("CommonPrefixes")) { + this.commonPrefixEntries.add(this.commonPrefixEntry); + } + // this is the common prefix for keys that match up to the delimiter + else if(name.equals("Prefix")) { + this.commonPrefixEntry.prefix=this.currText.toString(); + } + if(this.currText.length() != 0) + this.currText=new StringBuffer(); + } + + public void characters(char ch[], int start, int length) { + this.currText.append(ch, start, length); + } + + public String getName() { + return this.name; + } + + public String getPrefix() { + return this.prefix; + } + + public String getMarker() { + return this.marker; + } + + public String getDelimiter() { + return this.delimiter; + } + + public int getMaxKeys() { + return this.maxKeys; + } + + public boolean getIsTruncated() { + return this.isTruncated; + } + + public String getNextMarker() { + return this.nextMarker; + } + + public List getKeyEntries() { + return this.keyEntries; + } + + public List getCommonPrefixEntries() { + return this.commonPrefixEntries; + } + } + } + + + static class CommonPrefixEntry { + /** + * The prefix common to the delimited keys it represents + */ + public String prefix; + } + + + static class ListAllMyBucketsResponse extends Response { + /** + * A list of Bucket objects, one for each of this account's buckets. Will be null if + * the request fails. 
+ */ + public List entries; + + public ListAllMyBucketsResponse(HttpURLConnection connection) throws IOException { + super(connection); + if(connection.getResponseCode() < 400) { + try { + XMLReader xr=Utils.createXMLReader(); + ; + ListAllMyBucketsHandler handler=new ListAllMyBucketsHandler(); + xr.setContentHandler(handler); + xr.setErrorHandler(handler); + + xr.parse(new InputSource(connection.getInputStream())); + this.entries=handler.getEntries(); + } + catch(SAXException e) { + throw new RuntimeException("Unexpected error parsing ListAllMyBuckets xml", e); + } + } + } + + static class ListAllMyBucketsHandler extends DefaultHandler { + + private List entries=null; + private Bucket currBucket=null; + private StringBuffer currText=null; + private SimpleDateFormat iso8601Parser=null; + + public ListAllMyBucketsHandler() { + super(); + entries=new ArrayList(); + this.iso8601Parser=new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); + this.iso8601Parser.setTimeZone(new SimpleTimeZone(0, "GMT")); + this.currText=new StringBuffer(); + } + + public void startDocument() { + // ignore + } + + public void endDocument() { + // ignore + } + + public void startElement(String uri, String name, String qName, Attributes attrs) { + if(name.equals("Bucket")) { + this.currBucket=new Bucket(); + } + } + + public void endElement(String uri, String name, String qName) { + if(name.equals("Bucket")) { + this.entries.add(this.currBucket); + } + else if(name.equals("Name")) { + this.currBucket.name=this.currText.toString(); + } + else if(name.equals("CreationDate")) { + try { + this.currBucket.creationDate=this.iso8601Parser.parse(this.currText.toString()); + } + catch(ParseException e) { + throw new RuntimeException("Unexpected date format in list bucket output", e); + } + } + this.currText=new StringBuffer(); + } + + public void characters(char ch[], int start, int length) { + this.currText.append(ch, start, length); + } + + public List getEntries() { + return this.entries; + } + } 
+ } + + + static class S3Object { + + public byte[] data; + + /** + * A Map from String to List of Strings representing the object's metadata + */ + public Map metadata; + + public S3Object(byte[] data, Map metadata) { + this.data=data; + this.metadata=metadata; + } + } + + + abstract static class CallingFormat { + + protected static CallingFormat pathCallingFormat=new PathCallingFormat(); + protected static CallingFormat subdomainCallingFormat=new SubdomainCallingFormat(); + protected static CallingFormat vanityCallingFormat=new VanityCallingFormat(); + + public abstract boolean supportsLocatedBuckets(); + + public abstract String getEndpoint(String server, int port, String bucket); + + public abstract String getPathBase(String bucket, String key); + + public abstract URL getURL(boolean isSecure, String server, int port, String bucket, String key, Map pathArgs) + throws MalformedURLException; + + public static CallingFormat getPathCallingFormat() { + return pathCallingFormat; + } + + public static CallingFormat getSubdomainCallingFormat() { + return subdomainCallingFormat; + } + + public static CallingFormat getVanityCallingFormat() { + return vanityCallingFormat; + } + + private static class PathCallingFormat extends CallingFormat { + public boolean supportsLocatedBuckets() { + return false; + } + + public String getPathBase(String bucket, String key) { + return isBucketSpecified(bucket)? "/" + bucket + "/" + key : "/"; + } + + public String getEndpoint(String server, int port, String bucket) { + return server + ":" + port; + } + + public URL getURL(boolean isSecure, String server, int port, String bucket, String key, Map pathArgs) + throws MalformedURLException { + String pathBase=isBucketSpecified(bucket)? "/" + bucket + "/" + key : "/"; + String pathArguments=Utils.convertPathArgsHashToString(pathArgs); + return new URL(isSecure? 
"https" : "http", server, port, pathBase + pathArguments); + } + + private static boolean isBucketSpecified(String bucket) { + return bucket != null && bucket.length() != 0; + } + } + + private static class SubdomainCallingFormat extends CallingFormat { + public boolean supportsLocatedBuckets() { + return true; + } + + public String getServer(String server, String bucket) { + return bucket + "." + server; + } + + public String getEndpoint(String server, int port, String bucket) { + return getServer(server, bucket) + ":" + port; + } + + public String getPathBase(String bucket, String key) { + return "/" + key; + } + + public URL getURL(boolean isSecure, String server, int port, String bucket, String key, Map pathArgs) + throws MalformedURLException { + if(bucket == null || bucket.length() == 0) { + //The bucket is null, this is listAllBuckets request + String pathArguments=Utils.convertPathArgsHashToString(pathArgs); + return new URL(isSecure? "https" : "http", server, port, "/" + pathArguments); + } + else { + String serverToUse=getServer(server, bucket); + String pathBase=getPathBase(bucket, key); + String pathArguments=Utils.convertPathArgsHashToString(pathArgs); + return new URL(isSecure? "https" : "http", serverToUse, port, pathBase + pathArguments); + } + } + } + + private static class VanityCallingFormat extends SubdomainCallingFormat { + public String getServer(String server, String bucket) { + return bucket; + } + } + } + + static class Utils { + static final String METADATA_PREFIX="x-amz-meta-"; + static final String AMAZON_HEADER_PREFIX="x-amz-"; + static final String ALTERNATIVE_DATE_HEADER="x-amz-date"; + public static final String DEFAULT_HOST="s3.amazonaws.com"; + + public static final int SECURE_PORT=443; + public static final int INSECURE_PORT=80; + + + /** + * HMAC/SHA1 Algorithm per RFC 2104. 
+ */ + private static final String HMAC_SHA1_ALGORITHM="HmacSHA1"; + + static String makeCanonicalString(String method, String bucket, String key, Map pathArgs, Map headers) { + return makeCanonicalString(method, bucket, key, pathArgs, headers, null); + } + + /** + * Calculate the canonical string. When expires is non-null, it will be + * used instead of the Date header. + */ + static String makeCanonicalString(String method, String bucketName, String key, Map pathArgs, + Map headers, String expires) { + StringBuilder buf=new StringBuilder(); + buf.append(method + "\n"); + + // Add all interesting headers to a list, then sort them. "Interesting" + // is defined as Content-MD5, Content-Type, Date, and x-amz- + SortedMap interestingHeaders=new TreeMap(); + if(headers != null) { + for(Iterator i=headers.keySet().iterator(); i.hasNext();) { + String hashKey=(String)i.next(); + if(hashKey == null) continue; + String lk=hashKey.toLowerCase(); + + // Ignore any headers that are not particularly interesting. + if(lk.equals("content-type") || lk.equals("content-md5") || lk.equals("date") || + lk.startsWith(AMAZON_HEADER_PREFIX)) { + List s=(List)headers.get(hashKey); + interestingHeaders.put(lk, concatenateList(s)); + } + } + } + + if(interestingHeaders.containsKey(ALTERNATIVE_DATE_HEADER)) { + interestingHeaders.put("date", ""); + } + + // if the expires is non-null, use that for the date field. this + // trumps the x-amz-date behavior. + if(expires != null) { + interestingHeaders.put("date", expires); + } + + // these headers require that we still put a new line in after them, + // even if they don't exist. 
+ if(!interestingHeaders.containsKey("content-type")) { + interestingHeaders.put("content-type", ""); + } + if(!interestingHeaders.containsKey("content-md5")) { + interestingHeaders.put("content-md5", ""); + } + + // Finally, add all the interesting headers (i.e.: all that startwith x-amz- ;-)) + for(Iterator i=interestingHeaders.keySet().iterator(); i.hasNext();) { + String headerKey=(String)i.next(); + if(headerKey.startsWith(AMAZON_HEADER_PREFIX)) { + buf.append(headerKey).append(':').append(interestingHeaders.get(headerKey)); + } + else { + buf.append(interestingHeaders.get(headerKey)); + } + buf.append("\n"); + } + + // build the path using the bucket and key + if(bucketName != null && bucketName.length() != 0) { + buf.append("/" + bucketName); + } + + // append the key (it might be an empty string) + // append a slash regardless + buf.append("/"); + if(key != null) { + buf.append(key); + } + + // if there is an acl, logging or torrent parameter + // add them to the string + if(pathArgs != null) { + if(pathArgs.containsKey("acl")) { + buf.append("?acl"); + } + else if(pathArgs.containsKey("torrent")) { + buf.append("?torrent"); + } + else if(pathArgs.containsKey("logging")) { + buf.append("?logging"); + } + else if(pathArgs.containsKey("location")) { + buf.append("?location"); + } + } + + return buf.toString(); + + } + + /** + * Calculate the HMAC/SHA1 on a string. + * @return Signature + * @throws java.security.NoSuchAlgorithmException + * If the algorithm does not exist. Unlikely + * @throws java.security.InvalidKeyException + * If the key is invalid. + */ + static String encode(String awsSecretAccessKey, String canonicalString, + boolean urlencode) { + // The following HMAC/SHA1 code for the signature is taken from the + // AWS Platform's implementation of RFC2104 (amazon.webservices.common.Signature) + // + // Acquire an HMAC/SHA1 from the raw key bytes. 
+ SecretKeySpec signingKey= + new SecretKeySpec(awsSecretAccessKey.getBytes(), HMAC_SHA1_ALGORITHM); + + // Acquire the MAC instance and initialize with the signing key. + Mac mac=null; + try { + mac=Mac.getInstance(HMAC_SHA1_ALGORITHM); + } + catch(NoSuchAlgorithmException e) { + // should not happen + throw new RuntimeException("Could not find sha1 algorithm", e); + } + try { + mac.init(signingKey); + } + catch(InvalidKeyException e) { + // also should not happen + throw new RuntimeException("Could not initialize the MAC algorithm", e); + } + + // Compute the HMAC on the digest, and set it. + String b64=Base64.encodeBytes(mac.doFinal(canonicalString.getBytes())); + + if(urlencode) { + return urlencode(b64); + } + else { + return b64; + } + } + + static Map paramsForListOptions(String prefix, String marker, Integer maxKeys) { + return paramsForListOptions(prefix, marker, maxKeys, null); + } + + static Map paramsForListOptions(String prefix, String marker, Integer maxKeys, String delimiter) { + + Map argParams=new HashMap(); + // these three params must be url encoded + if(prefix != null) + argParams.put("prefix", urlencode(prefix)); + if(marker != null) + argParams.put("marker", urlencode(marker)); + if(delimiter != null) + argParams.put("delimiter", urlencode(delimiter)); + + if(maxKeys != null) + argParams.put("max-keys", Integer.toString(maxKeys.intValue())); + + return argParams; + + } + + /** + * Converts the Path Arguments from a map to String which can be used in url construction + * @param pathArgs a map of arguments + * @return a string representation of pathArgs + */ + public static String convertPathArgsHashToString(Map pathArgs) { + StringBuilder pathArgsString=new StringBuilder(); + String argumentValue; + boolean firstRun=true; + if(pathArgs != null) { + for(Iterator argumentIterator=pathArgs.keySet().iterator(); argumentIterator.hasNext();) { + String argument=(String)argumentIterator.next(); + if(firstRun) { + firstRun=false; + 
pathArgsString.append("?"); + } + else { + pathArgsString.append("&"); + } + + argumentValue=(String)pathArgs.get(argument); + pathArgsString.append(argument); + if(argumentValue != null) { + pathArgsString.append("="); + pathArgsString.append(argumentValue); + } + } + } + + return pathArgsString.toString(); + } + + + static String urlencode(String unencoded) { + try { + return URLEncoder.encode(unencoded, "UTF-8"); + } + catch(UnsupportedEncodingException e) { + // should never happen + throw new RuntimeException("Could not url encode to UTF-8", e); + } + } + + static XMLReader createXMLReader() { + try { + return XMLReaderFactory.createXMLReader(); + } + catch(SAXException e) { + // oops, lets try doing this (needed in 1.4) + System.setProperty("org.xml.sax.driver", "org.apache.crimson.parser.XMLReaderImpl"); + } + try { + // try once more + return XMLReaderFactory.createXMLReader(); + } + catch(SAXException e) { + throw new RuntimeException("Couldn't initialize a sax driver for the XMLReader"); + } + } + + /** + * Concatenates a bunch of header values, seperating them with a comma. + * @param values List of header values. + * @return String of all headers, with commas. 
+ */ + private static String concatenateList(List values) { + StringBuilder buf=new StringBuilder(); + for(int i=0, size=values.size(); i < size; ++i) { + buf.append(((String)values.get(i)).replaceAll("\n", "").trim()); + if(i != (size - 1)) { + buf.append(","); + } + } + return buf.toString(); + } + + /** + * Validate bucket-name + */ + static boolean validateBucketName(String bucketName, CallingFormat callingFormat) { + if(callingFormat == CallingFormat.getPathCallingFormat()) { + final int MIN_BUCKET_LENGTH=3; + final int MAX_BUCKET_LENGTH=255; + final String BUCKET_NAME_REGEX="^[0-9A-Za-z\\.\\-_]*$"; + + return null != bucketName && + bucketName.length() >= MIN_BUCKET_LENGTH && + bucketName.length() <= MAX_BUCKET_LENGTH && + bucketName.matches(BUCKET_NAME_REGEX); + } + else { + return isValidSubdomainBucketName(bucketName); + } + } + + static boolean isValidSubdomainBucketName(String bucketName) { + final int MIN_BUCKET_LENGTH=3; + final int MAX_BUCKET_LENGTH=63; + // don't allow names that look like 127.0.0.1 + final String IPv4_REGEX="^[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+$"; + // dns sub-name restrictions + final String BUCKET_NAME_REGEX="^[a-z0-9]([a-z0-9\\-\\_]*[a-z0-9])?(\\.[a-z0-9]([a-z0-9\\-\\_]*[a-z0-9])?)*$"; + + // If there wasn't a location-constraint, then the current actual + // restriction is just that no 'part' of the name (i.e. sequence + // of characters between any 2 '.'s has to be 63) but the recommendation + // is to keep the entire bucket name under 63. 
+ return null != bucketName && + bucketName.length() >= MIN_BUCKET_LENGTH && + bucketName.length() <= MAX_BUCKET_LENGTH && + !bucketName.matches(IPv4_REGEX) && + bucketName.matches(BUCKET_NAME_REGEX); + } + + static CallingFormat getCallingFormatForBucket(CallingFormat desiredFormat, String bucketName) { + CallingFormat callingFormat=desiredFormat; + if(callingFormat == CallingFormat.getSubdomainCallingFormat() && !Utils.isValidSubdomainBucketName(bucketName)) { + callingFormat=CallingFormat.getPathCallingFormat(); + } + return callingFormat; + } + } + + +// +// NOTE: The following source code is the iHarder.net public domain +// Base64 library and is provided here as a convenience. For updates, +// problems, questions, etc. regarding this code, please visit: +// http://iharder.sourceforge.net/current/java/base64/ +// + + + /** + * Encodes and decodes to and from Base64 notation. + *

+ *

+ * Change Log: + *

+ *
    + *
  • v2.1 - Cleaned up javadoc comments and unused variables and methods. Added + * some convenience methods for reading and writing to and from files.
  • + *
  • v2.0.2 - Now specifies UTF-8 encoding in places where the code fails on systems + * with other encodings (like EBCDIC).
  • + *
  • v2.0.1 - Fixed an error when decoding a single byte, that is, when the + * encoded data was a single byte.
  • + *
  • v2.0 - I got rid of methods that used booleans to set options. + * Now everything is more consolidated and cleaner. The code now detects + * when data that's being decoded is gzip-compressed and will decompress it + * automatically. Generally things are cleaner. You'll probably have to + * change some method calls that you were making to support the new + * options format (ints that you "OR" together).
  • + *
  • v1.5.1 - Fixed bug when decompressing and decoding to a + * byte[] using decode( String s, boolean gzipCompressed ). + * Added the ability to "suspend" encoding in the Output Stream so + * you can turn on and off the encoding if you need to embed base64 + * data in an otherwise "normal" stream (like an XML file).
  • + *
  • v1.5 - Output stream passes on flush() command but doesn't do anything itself. + * This helps when using GZIP streams. + * Added the ability to GZip-compress objects before encoding them.
  • + *
  • v1.4 - Added helper methods to read/write files.
  • + *
  • v1.3.6 - Fixed OutputStream.flush() so that 'position' is reset.
  • + *
  • v1.3.5 - Added flag to turn on and off line breaks. Fixed bug in input stream + * where last buffer being read, if not completely full, was not returned.
  • + *
  • v1.3.4 - Fixed when "improperly padded stream" error was thrown at the wrong time.
  • + *
  • v1.3.3 - Fixed I/O streams which were totally messed up.
  • + *
+ *

+ *

+ * I am placing this code in the Public Domain. Do with it as you will. + * This software comes with no guarantees or warranties but with + * plenty of well-wishing instead! + * Please visit http://iharder.net/base64 + * periodically to check for updates or to contribute improvements. + *

+ * @author Robert Harder + * @author rob@iharder.net + * @version 2.1 + */ + static class Base64 { + +/* ******** P U B L I C F I E L D S ******** */ + + + /** + * No options specified. Value is zero. + */ + public final static int NO_OPTIONS=0; + + /** + * Specify encoding. + */ + public final static int ENCODE=1; + + + /** + * Specify decoding. + */ + public final static int DECODE=0; + + + /** + * Specify that data should be gzip-compressed. + */ + public final static int GZIP=2; + + + /** + * Don't break lines when encoding (violates strict Base64 specification) + */ + public final static int DONT_BREAK_LINES=8; + + +/* ******** P R I V A T E F I E L D S ******** */ + + + /** + * Maximum line length (76) of Base64 output. + */ + private final static int MAX_LINE_LENGTH=76; + + + /** + * The equals sign (=) as a byte. + */ + private final static byte EQUALS_SIGN=(byte)'='; + + + /** + * The new line character (\n) as a byte. + */ + private final static byte NEW_LINE=(byte)'\n'; + + + /** + * Preferred encoding. + */ + private final static String PREFERRED_ENCODING="UTF-8"; + + + /** + * The 64 valid Base64 values. 
+ */ + private static final byte[] ALPHABET; + private static final byte[] _NATIVE_ALPHABET= /* May be something funny like EBCDIC */ + { + (byte)'A', (byte)'B', (byte)'C', (byte)'D', (byte)'E', (byte)'F', (byte)'G', + (byte)'H', (byte)'I', (byte)'J', (byte)'K', (byte)'L', (byte)'M', (byte)'N', + (byte)'O', (byte)'P', (byte)'Q', (byte)'R', (byte)'S', (byte)'T', (byte)'U', + (byte)'V', (byte)'W', (byte)'X', (byte)'Y', (byte)'Z', + (byte)'a', (byte)'b', (byte)'c', (byte)'d', (byte)'e', (byte)'f', (byte)'g', + (byte)'h', (byte)'i', (byte)'j', (byte)'k', (byte)'l', (byte)'m', (byte)'n', + (byte)'o', (byte)'p', (byte)'q', (byte)'r', (byte)'s', (byte)'t', (byte)'u', + (byte)'v', (byte)'w', (byte)'x', (byte)'y', (byte)'z', + (byte)'0', (byte)'1', (byte)'2', (byte)'3', (byte)'4', (byte)'5', + (byte)'6', (byte)'7', (byte)'8', (byte)'9', (byte)'+', (byte)'/' + }; + + /** Determine which ALPHABET to use. */ + static { + byte[] __bytes; + try { + __bytes="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".getBytes(PREFERRED_ENCODING); + } // end try + catch(java.io.UnsupportedEncodingException use) { + __bytes=_NATIVE_ALPHABET; // Fall back to native encoding + } // end catch + ALPHABET=__bytes; + } // end static + + + /** + * Translates a Base64 value to either its 6-bit reconstruction value + * or a negative number indicating some other meaning. 
+ */ + private final static byte[] DECODABET= + { + -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 0 - 8 + -5, -5, // Whitespace: Tab and Linefeed + -9, -9, // Decimal 11 - 12 + -5, // Whitespace: Carriage Return + -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 14 - 26 + -9, -9, -9, -9, -9, // Decimal 27 - 31 + -5, // Whitespace: Space + -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 33 - 42 + 62, // Plus sign at decimal 43 + -9, -9, -9, // Decimal 44 - 46 + 63, // Slash at decimal 47 + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // Numbers zero through nine + -9, -9, -9, // Decimal 58 - 60 + -1, // Equals sign at decimal 61 + -9, -9, -9, // Decimal 62 - 64 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, // Letters 'A' through 'N' + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // Letters 'O' through 'Z' + -9, -9, -9, -9, -9, -9, // Decimal 91 - 96 + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, // Letters 'a' through 'm' + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // Letters 'n' through 'z' + -9, -9, -9, -9 // Decimal 123 - 126 + /*,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 127 - 139 + -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 140 - 152 + -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 153 - 165 + -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 166 - 178 + -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 179 - 191 + -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 192 - 204 + -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 205 - 217 + -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 218 - 230 + -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 231 - 243 + -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9 // Decimal 244 - 255 */ + }; + + // I think I end up not using the BAD_ENCODING indicator. 
+ //private final static byte BAD_ENCODING = -9; // Indicates error in encoding + private final static byte WHITE_SPACE_ENC=-5; // Indicates white space in encoding + private final static byte EQUALS_SIGN_ENC=-1; // Indicates equals sign in encoding + + + /** + * Defeats instantiation. + */ + private Base64() { + } + + +/* ******** E N C O D I N G M E T H O D S ******** */ + + + /** + * Encodes up to the first three bytes of array threeBytes + * and returns a four-byte array in Base64 notation. + * The actual number of significant bytes in your array is + * given by numSigBytes. + * The array threeBytes needs only be as big as + * numSigBytes. + * Code can reuse a byte array by passing a four-byte array as b4. + * @param b4 A reusable byte array to reduce array instantiation + * @param threeBytes the array to convert + * @param numSigBytes the number of significant bytes in your array + * @return four byte array in Base64 notation. + * @since 1.5.1 + */ + private static byte[] encode3to4(byte[] b4, byte[] threeBytes, int numSigBytes) { + encode3to4(threeBytes, 0, numSigBytes, b4, 0); + return b4; + } // end encode3to4 + + + /** + * Encodes up to three bytes of the array source + * and writes the resulting four Base64 bytes to destination. + * The source and destination arrays can be manipulated + * anywhere along their length by specifying + * srcOffset and destOffset. + * This method does not check to make sure your arrays + * are large enough to accomodate srcOffset + 3 for + * the source array or destOffset + 4 for + * the destination array. + * The actual number of significant bytes in your array is + * given by numSigBytes. 
+ * @param source the array to convert + * @param srcOffset the index where conversion begins + * @param numSigBytes the number of significant bytes in your array + * @param destination the array to hold the conversion + * @param destOffset the index where output will be put + * @return the destination array + * @since 1.3 + */ + private static byte[] encode3to4( + byte[] source, int srcOffset, int numSigBytes, + byte[] destination, int destOffset) { + // 1 2 3 + // 01234567890123456789012345678901 Bit position + // --------000000001111111122222222 Array position from threeBytes + // --------| || || || | Six bit groups to index ALPHABET + // >>18 >>12 >> 6 >> 0 Right shift necessary + // 0x3f 0x3f 0x3f Additional AND + + // Create buffer with zero-padding if there are only one or two + // significant bytes passed in the array. + // We have to shift left 24 in order to flush out the 1's that appear + // when Java treats a value as negative that is cast from a byte to an int. + int inBuff=(numSigBytes > 0? ((source[srcOffset] << 24) >>> 8) : 0) + | (numSigBytes > 1? ((source[srcOffset + 1] << 24) >>> 16) : 0) + | (numSigBytes > 2? 
((source[srcOffset + 2] << 24) >>> 24) : 0); + + switch(numSigBytes) { + case 3: + destination[destOffset]=ALPHABET[(inBuff >>> 18)]; + destination[destOffset + 1]=ALPHABET[(inBuff >>> 12) & 0x3f]; + destination[destOffset + 2]=ALPHABET[(inBuff >>> 6) & 0x3f]; + destination[destOffset + 3]=ALPHABET[(inBuff) & 0x3f]; + return destination; + + case 2: + destination[destOffset]=ALPHABET[(inBuff >>> 18)]; + destination[destOffset + 1]=ALPHABET[(inBuff >>> 12) & 0x3f]; + destination[destOffset + 2]=ALPHABET[(inBuff >>> 6) & 0x3f]; + destination[destOffset + 3]=EQUALS_SIGN; + return destination; + + case 1: + destination[destOffset]=ALPHABET[(inBuff >>> 18)]; + destination[destOffset + 1]=ALPHABET[(inBuff >>> 12) & 0x3f]; + destination[destOffset + 2]=EQUALS_SIGN; + destination[destOffset + 3]=EQUALS_SIGN; + return destination; + + default: + return destination; + } // end switch + } // end encode3to4 + + + /** + * Serializes an object and returns the Base64-encoded + * version of that serialized object. If the object + * cannot be serialized or there is another error, + * the method will return null. + * The object is not GZip-compressed before being encoded. + * @param serializableObject The object to encode + * @return The Base64-encoded object + * @since 1.4 + */ + public static String encodeObject(java.io.Serializable serializableObject) { + return encodeObject(serializableObject, NO_OPTIONS); + } // end encodeObject + + + /** + * Serializes an object and returns the Base64-encoded + * version of that serialized object. If the object + * cannot be serialized or there is another error, + * the method will return null. + *

+ * Valid options:

+         *   GZIP: gzip-compresses object before encoding it.
+         *   DONT_BREAK_LINES: don't break lines at 76 characters
+         *     Note: Technically, this makes your encoding non-compliant.
+         * 
+ *

+ * Example: encodeObject( myObj, Base64.GZIP ) or + *

+ * Example: encodeObject( myObj, Base64.GZIP | Base64.DONT_BREAK_LINES ) + * @param serializableObject The object to encode + * @param options Specified options + * @return The Base64-encoded object + * @see Base64#GZIP + * @see Base64#DONT_BREAK_LINES + * @since 2.0 + */ + public static String encodeObject(java.io.Serializable serializableObject, int options) { + // Streams + java.io.ByteArrayOutputStream baos=null; + java.io.OutputStream b64os=null; + java.io.ObjectOutputStream oos=null; + java.util.zip.GZIPOutputStream gzos=null; + + // Isolate options + int gzip=(options & GZIP); + int dontBreakLines=(options & DONT_BREAK_LINES); + + try { + // ObjectOutputStream -> (GZIP) -> Base64 -> ByteArrayOutputStream + baos=new java.io.ByteArrayOutputStream(); + b64os=new Base64.OutputStream(baos, ENCODE | dontBreakLines); + + // GZip? + if(gzip == GZIP) { + gzos=new java.util.zip.GZIPOutputStream(b64os); + oos=new java.io.ObjectOutputStream(gzos); + } // end if: gzip + else + oos=new java.io.ObjectOutputStream(b64os); + + oos.writeObject(serializableObject); + } // end try + catch(java.io.IOException e) { + e.printStackTrace(); + return null; + } // end catch + finally { + try { + oos.close(); + } + catch(Exception e) { + } + try { + gzos.close(); + } + catch(Exception e) { + } + try { + b64os.close(); + } + catch(Exception e) { + } + try { + baos.close(); + } + catch(Exception e) { + } + } // end finally + + // Return value according to relevant encoding. + try { + return new String(baos.toByteArray(), PREFERRED_ENCODING); + } // end try + catch(java.io.UnsupportedEncodingException uue) { + return new String(baos.toByteArray()); + } // end catch + + } // end encode + + + /** + * Encodes a byte array into Base64 notation. + * Does not GZip-compress data. 
+ * @param source The data to convert + * @since 1.4 + */ + public static String encodeBytes(byte[] source) { + return encodeBytes(source, 0, source.length, NO_OPTIONS); + } // end encodeBytes + + + /** + * Encodes a byte array into Base64 notation. + *

+ * Valid options:

+         *   GZIP: gzip-compresses object before encoding it.
+         *   DONT_BREAK_LINES: don't break lines at 76 characters
+         *     Note: Technically, this makes your encoding non-compliant.
+         * 
+ *

+ * Example: encodeBytes( myData, Base64.GZIP ) or + *

+ * Example: encodeBytes( myData, Base64.GZIP | Base64.DONT_BREAK_LINES ) + * @param source The data to convert + * @param options Specified options + * @see Base64#GZIP + * @see Base64#DONT_BREAK_LINES + * @since 2.0 + */ + public static String encodeBytes(byte[] source, int options) { + return encodeBytes(source, 0, source.length, options); + } // end encodeBytes + + + /** + * Encodes a byte array into Base64 notation. + * Does not GZip-compress data. + * @param source The data to convert + * @param off Offset in array where conversion should begin + * @param len Length of data to convert + * @since 1.4 + */ + public static String encodeBytes(byte[] source, int off, int len) { + return encodeBytes(source, off, len, NO_OPTIONS); + } // end encodeBytes + + + /** + * Encodes a byte array into Base64 notation. + *

+ * Valid options:

+         *   GZIP: gzip-compresses object before encoding it.
+         *   DONT_BREAK_LINES: don't break lines at 76 characters
+         *     Note: Technically, this makes your encoding non-compliant.
+         * 
+ *

+ * Example: encodeBytes( myData, Base64.GZIP ) or + *

+ * Example: encodeBytes( myData, Base64.GZIP | Base64.DONT_BREAK_LINES ) + * @param source The data to convert + * @param off Offset in array where conversion should begin + * @param len Length of data to convert + * @param options Specified options + * @see Base64#GZIP + * @see Base64#DONT_BREAK_LINES + * @since 2.0 + */ + public static String encodeBytes(byte[] source, int off, int len, int options) { + // Isolate options + int dontBreakLines=(options & DONT_BREAK_LINES); + int gzip=(options & GZIP); + + // Compress? + if(gzip == GZIP) { + java.io.ByteArrayOutputStream baos=null; + java.util.zip.GZIPOutputStream gzos=null; + Base64.OutputStream b64os=null; + + + try { + // GZip -> Base64 -> ByteArray + baos=new java.io.ByteArrayOutputStream(); + b64os=new Base64.OutputStream(baos, ENCODE | dontBreakLines); + gzos=new java.util.zip.GZIPOutputStream(b64os); + + gzos.write(source, off, len); + gzos.close(); + } // end try + catch(java.io.IOException e) { + e.printStackTrace(); + return null; + } // end catch + finally { + try { + gzos.close(); + } + catch(Exception e) { + } + try { + b64os.close(); + } + catch(Exception e) { + } + try { + baos.close(); + } + catch(Exception e) { + } + } // end finally + + // Return value according to relevant encoding. + try { + return new String(baos.toByteArray(), PREFERRED_ENCODING); + } // end try + catch(java.io.UnsupportedEncodingException uue) { + return new String(baos.toByteArray()); + } // end catch + } // end if: compress + + // Else, don't compress. Better not to use streams at all then. + else { + // Convert option to boolean in way that code likes it. + boolean breakLines=dontBreakLines == 0; + + int len43=len * 4 / 3; + byte[] outBuff=new byte[(len43) // Main 4:3 + + ((len % 3) > 0? 4 : 0) // Account for padding + + (breakLines? 
(len43 / MAX_LINE_LENGTH) : 0)]; // New lines + int d=0; + int e=0; + int len2=len - 2; + int lineLength=0; + for(; d < len2; d+=3, e+=4) { + encode3to4(source, d + off, 3, outBuff, e); + + lineLength+=4; + if(breakLines && lineLength == MAX_LINE_LENGTH) { + outBuff[e + 4]=NEW_LINE; + e++; + lineLength=0; + } // end if: end of line + } // en dfor: each piece of array + + if(d < len) { + encode3to4(source, d + off, len - d, outBuff, e); + e+=4; + } // end if: some padding needed + + + // Return value according to relevant encoding. + try { + return new String(outBuff, 0, e, PREFERRED_ENCODING); + } // end try + catch(java.io.UnsupportedEncodingException uue) { + return new String(outBuff, 0, e); + } // end catch + + } // end else: don't compress + + } // end encodeBytes + + +/* ******** D E C O D I N G M E T H O D S ******** */ + + + /** + * Decodes four bytes from array source + * and writes the resulting bytes (up to three of them) + * to destination. + * The source and destination arrays can be manipulated + * anywhere along their length by specifying + * srcOffset and destOffset. + * This method does not check to make sure your arrays + * are large enough to accomodate srcOffset + 4 for + * the source array or destOffset + 3 for + * the destination array. + * This method returns the actual number of bytes that + * were converted from the Base64 encoding. + * @param source the array to convert + * @param srcOffset the index where conversion begins + * @param destination the array to hold the conversion + * @param destOffset the index where output will be put + * @return the number of decoded bytes converted + * @since 1.3 + */ + private static int decode4to3(byte[] source, int srcOffset, byte[] destination, int destOffset) { + // Example: Dk== + if(source[srcOffset + 2] == EQUALS_SIGN) { + // Two ways to do the same thing. Don't know which way I like best. 
+ //int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 ) + // | ( ( DECODABET[ source[ srcOffset + 1] ] << 24 ) >>> 12 ); + int outBuff=((DECODABET[source[srcOffset]] & 0xFF) << 18) + | ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12); + + destination[destOffset]=(byte)(outBuff >>> 16); + return 1; + } + + // Example: DkL= + else if(source[srcOffset + 3] == EQUALS_SIGN) { + // Two ways to do the same thing. Don't know which way I like best. + //int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 ) + // | ( ( DECODABET[ source[ srcOffset + 1 ] ] << 24 ) >>> 12 ) + // | ( ( DECODABET[ source[ srcOffset + 2 ] ] << 24 ) >>> 18 ); + int outBuff=((DECODABET[source[srcOffset]] & 0xFF) << 18) + | ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12) + | ((DECODABET[source[srcOffset + 2]] & 0xFF) << 6); + + destination[destOffset]=(byte)(outBuff >>> 16); + destination[destOffset + 1]=(byte)(outBuff >>> 8); + return 2; + } + + // Example: DkLE + else { + try { + // Two ways to do the same thing. Don't know which way I like best. 
+ //int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 ) + // | ( ( DECODABET[ source[ srcOffset + 1 ] ] << 24 ) >>> 12 ) + // | ( ( DECODABET[ source[ srcOffset + 2 ] ] << 24 ) >>> 18 ) + // | ( ( DECODABET[ source[ srcOffset + 3 ] ] << 24 ) >>> 24 ); + int outBuff=((DECODABET[source[srcOffset]] & 0xFF) << 18) + | ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12) + | ((DECODABET[source[srcOffset + 2]] & 0xFF) << 6) + | ((DECODABET[source[srcOffset + 3]] & 0xFF)); + + + destination[destOffset]=(byte)(outBuff >> 16); + destination[destOffset + 1]=(byte)(outBuff >> 8); + destination[destOffset + 2]=(byte)(outBuff); + + return 3; + } + catch(Exception e) { + System.out.println(valueOf(source[srcOffset]) + ": " + (DECODABET[source[srcOffset]])); + System.out.println(valueOf(source[srcOffset + 1]) + ": " + (DECODABET[source[srcOffset + 1]])); + System.out.println(valueOf(source[srcOffset + 2]) + ": " + (DECODABET[source[srcOffset + 2]])); + System.out.println(String.valueOf(source[srcOffset + 3]) + ": " + (DECODABET[source[srcOffset + 3]])); + return -1; + } //e nd catch + } + } // end decodeToBytes + + + /** + * Very low-level access to decoding ASCII characters in + * the form of a byte array. Does not support automatically + * gunzipping or any other "fancy" features. 
+ * @param source The Base64 encoded data + * @param off The offset of where to begin decoding + * @param len The length of characters to decode + * @return decoded data + * @since 1.3 + */ + public static byte[] decode(byte[] source, int off, int len) { + int len34=len * 3 / 4; + byte[] outBuff=new byte[len34]; // Upper limit on size of output + int outBuffPosn=0; + + byte[] b4=new byte[4]; + int b4Posn=0; + int i=0; + byte sbiCrop=0; + byte sbiDecode=0; + for(i=off; i < off + len; i++) { + sbiCrop=(byte)(source[i] & 0x7f); // Only the low seven bits + sbiDecode=DECODABET[sbiCrop]; + + if(sbiDecode >= WHITE_SPACE_ENC) // White space, Equals sign or better + { + if(sbiDecode >= EQUALS_SIGN_ENC) { + b4[b4Posn++]=sbiCrop; + if(b4Posn > 3) { + outBuffPosn+=decode4to3(b4, 0, outBuff, outBuffPosn); + b4Posn=0; + + // If that was the equals sign, break out of 'for' loop + if(sbiCrop == EQUALS_SIGN) + break; + } // end if: quartet built + + } // end if: equals sign or better + + } // end if: white space, equals sign or better + else { + System.err.println("Bad Base64 input character at " + i + ": " + source[i] + "(decimal)"); + return null; + } // end else: + } // each input character + + byte[] out=new byte[outBuffPosn]; + System.arraycopy(outBuff, 0, out, 0, outBuffPosn); + return out; + } // end decode + + + /** + * Decodes data from Base64 notation, automatically + * detecting gzip-compressed data and decompressing it. 
+ * @param s the string to decode + * @return the decoded data + * @since 1.4 + */ + public static byte[] decode(String s) { + byte[] bytes; + try { + bytes=s.getBytes(PREFERRED_ENCODING); + } // end try + catch(java.io.UnsupportedEncodingException uee) { + bytes=s.getBytes(); + } // end catch + // + + // Decode + bytes=decode(bytes, 0, bytes.length); + + + // Check to see if it's gzip-compressed + // GZIP Magic Two-Byte Number: 0x8b1f (35615) + if(bytes != null && bytes.length >= 4) { + + int head=((int)bytes[0] & 0xff) | ((bytes[1] << 8) & 0xff00); + if(java.util.zip.GZIPInputStream.GZIP_MAGIC == head) { + java.io.ByteArrayInputStream bais=null; + java.util.zip.GZIPInputStream gzis=null; + java.io.ByteArrayOutputStream baos=null; + byte[] buffer=new byte[2048]; + int length=0; + + try { + baos=new java.io.ByteArrayOutputStream(); + bais=new java.io.ByteArrayInputStream(bytes); + gzis=new java.util.zip.GZIPInputStream(bais); + + while((length=gzis.read(buffer)) >= 0) { + baos.write(buffer, 0, length); + } // end while: reading input + + // No error? Get new bytes. + bytes=baos.toByteArray(); + + } // end try + catch(java.io.IOException e) { + // Just return originally-decoded bytes + } // end catch + finally { + try { + baos.close(); + } + catch(Exception e) { + } + try { + gzis.close(); + } + catch(Exception e) { + } + try { + bais.close(); + } + catch(Exception e) { + } + } // end finally + + } // end if: gzipped + } // end if: bytes.length >= 2 + + return bytes; + } // end decode + + + /** + * Attempts to decode Base64 data and deserialize a Java + * Object within. Returns null if there was an error. 
+ * @param encodedObject The Base64 data to decode + * @return The decoded and deserialized object + * @since 1.5 + */ + public static Object decodeToObject(String encodedObject) { + // Decode and gunzip if necessary + byte[] objBytes=decode(encodedObject); + + java.io.ByteArrayInputStream bais=null; + java.io.ObjectInputStream ois=null; + Object obj=null; + + try { + bais=new java.io.ByteArrayInputStream(objBytes); + ois=new java.io.ObjectInputStream(bais); + + obj=ois.readObject(); + } // end try + catch(java.io.IOException e) { + e.printStackTrace(); + obj=null; + } // end catch + catch(java.lang.ClassNotFoundException e) { + e.printStackTrace(); + obj=null; + } // end catch + finally { + try { + if(bais != null) + bais.close(); + } + catch(Exception e) { + } + try { + if(ois != null) + ois.close(); + } + catch(Exception e) { + } + } // end finally + + return obj; + } // end decodeObject + + + /** + * Convenience method for encoding data to a file. + * @param dataToEncode byte array of data to encode in base64 form + * @param filename Filename for saving encoded data + * @return true if successful, false otherwise + * @since 2.1 + */ + public static boolean encodeToFile(byte[] dataToEncode, String filename) { + boolean success=false; + Base64.OutputStream bos=null; + try { + bos=new Base64.OutputStream( + new java.io.FileOutputStream(filename), Base64.ENCODE); + bos.write(dataToEncode); + success=true; + } // end try + catch(java.io.IOException e) { + + success=false; + } // end catch: IOException + finally { + try { + if(bos != null) + bos.close(); + } + catch(Exception e) { + } + } // end finally + + return success; + } // end encodeToFile + + + /** + * Convenience method for decoding data to a file. 
+ * @param dataToDecode Base64-encoded data as a string + * @param filename Filename for saving decoded data + * @return true if successful, false otherwise + * @since 2.1 + */ + public static boolean decodeToFile(String dataToDecode, String filename) { + boolean success=false; + Base64.OutputStream bos=null; + try { + bos=new Base64.OutputStream( + new java.io.FileOutputStream(filename), Base64.DECODE); + bos.write(dataToDecode.getBytes(PREFERRED_ENCODING)); + success=true; + } // end try + catch(java.io.IOException e) { + success=false; + } // end catch: IOException + finally { + try { + if(bos != null) + bos.close(); + } + catch(Exception e) { + } + } // end finally + + return success; + } // end decodeToFile + + + /** + * Convenience method for reading a base64-encoded + * file and decoding it. + * @param filename Filename for reading encoded data + * @return decoded byte array or null if unsuccessful + * @since 2.1 + */ + public static byte[] decodeFromFile(String filename) { + byte[] decodedData=null; + Base64.InputStream bis=null; + try { + // Set up some useful variables + java.io.File file=new java.io.File(filename); + byte[] buffer=null; + int length=0; + int numBytes=0; + + // Check for size of file + if(file.length() > Integer.MAX_VALUE) { + System.err.println("File is too big for this convenience method (" + file.length() + " bytes)."); + return null; + } // end if: file too big for int index + buffer=new byte[(int)file.length()]; + + // Open a stream + bis=new Base64.InputStream( + new java.io.BufferedInputStream( + new java.io.FileInputStream(file)), Base64.DECODE); + + // Read until done + while((numBytes=bis.read(buffer, length, 4096)) >= 0) + length+=numBytes; + + // Save in a variable to return + decodedData=new byte[length]; + System.arraycopy(buffer, 0, decodedData, 0, length); + + } // end try + catch(java.io.IOException e) { + System.err.println("Error decoding from file " + filename); + } // end catch: IOException + finally { + try { + 
if(bis != null) + bis.close(); + } + catch(Exception e) { + } + } // end finally + + return decodedData; + } // end decodeFromFile + + + /** + * Convenience method for reading a binary file + * and base64-encoding it. + * @param filename Filename for reading binary data + * @return base64-encoded string or null if unsuccessful + * @since 2.1 + */ + public static String encodeFromFile(String filename) { + String encodedData=null; + Base64.InputStream bis=null; + try { + // Set up some useful variables + java.io.File file=new java.io.File(filename); + byte[] buffer=new byte[(int)(file.length() * 1.4)]; + int length=0; + int numBytes=0; + + // Open a stream + bis=new Base64.InputStream( + new java.io.BufferedInputStream( + new java.io.FileInputStream(file)), Base64.ENCODE); + + // Read until done + while((numBytes=bis.read(buffer, length, 4096)) >= 0) + length+=numBytes; + + // Save in a variable to return + encodedData=new String(buffer, 0, length, Base64.PREFERRED_ENCODING); + + } // end try + catch(java.io.IOException e) { + System.err.println("Error encoding from file " + filename); + } // end catch: IOException + finally { + try { + if(bis != null) + bis.close(); + } + catch(Exception e) { + } + } // end finally + + return encodedData; + } // end encodeFromFile + + + /* ******** I N N E R C L A S S I N P U T S T R E A M ******** */ + + + /** + * A {@link Base64.InputStream} will read data from another + * java.io.InputStream, given in the constructor, + * and encode/decode to/from Base64 notation on the fly. 
+ * @see Base64 + * @since 1.3 + */ + public static class InputStream extends java.io.FilterInputStream { + private boolean encode; // Encoding or decoding + private int position; // Current position in the buffer + private byte[] buffer; // Small buffer holding converted data + private int bufferLength; // Length of buffer (3 or 4) + private int numSigBytes; // Number of meaningful bytes in the buffer + private int lineLength; + private boolean breakLines; // Break lines at less than 80 characters + + + /** + * Constructs a {@link Base64.InputStream} in DECODE mode. + * @param in the java.io.InputStream from which to read data. + * @since 1.3 + */ + public InputStream(java.io.InputStream in) { + this(in, DECODE); + } // end constructor + + + /** + * Constructs a {@link Base64.InputStream} in + * either ENCODE or DECODE mode. + *

+ * Valid options:

+             *   ENCODE or DECODE: Encode or Decode as data is read.
+             *   DONT_BREAK_LINES: don't break lines at 76 characters
+             *     (only meaningful when encoding)
+             *     Note: Technically, this makes your encoding non-compliant.
+             * 
+ *

+ * Example: new Base64.InputStream( in, Base64.DECODE ) + * @param in the java.io.InputStream from which to read data. + * @param options Specified options + * @see Base64#ENCODE + * @see Base64#DECODE + * @see Base64#DONT_BREAK_LINES + * @since 2.0 + */ + public InputStream(java.io.InputStream in, int options) { + super(in); + this.breakLines=(options & DONT_BREAK_LINES) != DONT_BREAK_LINES; + this.encode=(options & ENCODE) == ENCODE; + this.bufferLength=encode? 4 : 3; + this.buffer=new byte[bufferLength]; + this.position=-1; + this.lineLength=0; + } // end constructor + + /** + * Reads enough of the input stream to convert + * to/from Base64 and returns the next byte. + * @return next byte + * @since 1.3 + */ + public int read() throws java.io.IOException { + // Do we need to get data? + if(position < 0) { + if(encode) { + byte[] b3=new byte[3]; + int numBinaryBytes=0; + for(int i=0; i < 3; i++) { + try { + int b=in.read(); + + // If end of stream, b is -1. + if(b >= 0) { + b3[i]=(byte)b; + numBinaryBytes++; + } // end if: not end of stream + + } // end try: read + catch(java.io.IOException e) { + // Only a problem if we got no data at all. 
+ if(i == 0) + throw e; + + } // end catch + } // end for: each needed input byte + + if(numBinaryBytes > 0) { + encode3to4(b3, 0, numBinaryBytes, buffer, 0); + position=0; + numSigBytes=4; + } // end if: got data + else { + return -1; + } // end else + } // end if: encoding + + // Else decoding + else { + byte[] b4=new byte[4]; + int i=0; + for(i=0; i < 4; i++) { + // Read four "meaningful" bytes: + int b=0; + do { + b=in.read(); + } + while(b >= 0 && DECODABET[b & 0x7f] <= WHITE_SPACE_ENC); + + if(b < 0) + break; // Reads a -1 if end of stream + + b4[i]=(byte)b; + } // end for: each needed input byte + + if(i == 4) { + numSigBytes=decode4to3(b4, 0, buffer, 0); + position=0; + } // end if: got four characters + else if(i == 0) { + return -1; + } // end else if: also padded correctly + else { + // Must have broken out from above. + throw new java.io.IOException("Improperly padded Base64 input."); + } // end + + } // end else: decode + } // end else: get data + + // Got data? + if(position >= 0) { + // End of relevant data? + if( /*!encode &&*/ position >= numSigBytes) + return -1; + + if(encode && breakLines && lineLength >= MAX_LINE_LENGTH) { + lineLength=0; + return '\n'; + } // end if + else { + lineLength++; // This isn't important when decoding + // but throwing an extra "if" seems + // just as wasteful. + + int b=buffer[position++]; + + if(position >= bufferLength) + position=-1; + + return b & 0xFF; // This is how you "cast" a byte that's + // intended to be unsigned. + } // end else + } // end if: position >= 0 + + // Else error + else { + // When JDK1.4 is more accepted, use an assertion here. + throw new java.io.IOException("Error in Base64 code reading stream."); + } // end else + } // end read + + + /** + * Calls {@link #read()} repeatedly until the end of stream + * is reached or len bytes are read. + * Returns number of bytes read into array or -1 if + * end of stream is encountered. 
+ * @param dest array to hold values + * @param off offset for array + * @param len max number of bytes to read into array + * @return bytes read into array or -1 if end of stream is encountered. + * @since 1.3 + */ + public int read(byte[] dest, int off, int len) throws java.io.IOException { + int i; + int b; + for(i=0; i < len; i++) { + b=read(); + + //if( b < 0 && i == 0 ) + // return -1; + + if(b >= 0) + dest[off + i]=(byte)b; + else if(i == 0) + return -1; + else + break; // Out of 'for' loop + } // end for: each byte read + return i; + } // end read + + } // end inner class InputStream + + + /* ******** I N N E R C L A S S O U T P U T S T R E A M ******** */ + + + /** + * A {@link Base64.OutputStream} will write data to another + * java.io.OutputStream, given in the constructor, + * and encode/decode to/from Base64 notation on the fly. + * @see Base64 + * @since 1.3 + */ + public static class OutputStream extends java.io.FilterOutputStream { + private boolean encode; + private int position; + private byte[] buffer; + private int bufferLength; + private int lineLength; + private boolean breakLines; + private byte[] b4; // Scratch used in a few places + private boolean suspendEncoding; + + /** + * Constructs a {@link Base64.OutputStream} in ENCODE mode. + * @param out the java.io.OutputStream to which data will be written. + * @since 1.3 + */ + public OutputStream(java.io.OutputStream out) { + this(out, ENCODE); + } // end constructor + + + /** + * Constructs a {@link Base64.OutputStream} in + * either ENCODE or DECODE mode. + *

+ * Valid options:

+             *   ENCODE or DECODE: Encode or Decode as data is read.
+             *   DONT_BREAK_LINES: don't break lines at 76 characters
+             *     (only meaningful when encoding)
+             *     Note: Technically, this makes your encoding non-compliant.
+             * 
+ *

+ * Example: new Base64.OutputStream( out, Base64.ENCODE ) + * @param out the java.io.OutputStream to which data will be written. + * @param options Specified options. + * @see Base64#ENCODE + * @see Base64#DECODE + * @see Base64#DONT_BREAK_LINES + * @since 1.3 + */ + public OutputStream(java.io.OutputStream out, int options) { + super(out); + this.breakLines=(options & DONT_BREAK_LINES) != DONT_BREAK_LINES; + this.encode=(options & ENCODE) == ENCODE; + this.bufferLength=encode? 3 : 4; + this.buffer=new byte[bufferLength]; + this.position=0; + this.lineLength=0; + this.suspendEncoding=false; + this.b4=new byte[4]; + } // end constructor + + + /** + * Writes the byte to the output stream after + * converting to/from Base64 notation. + * When encoding, bytes are buffered three + * at a time before the output stream actually + * gets a write() call. + * When decoding, bytes are buffered four + * at a time. + * @param theByte the byte to write + * @since 1.3 + */ + public void write(int theByte) throws java.io.IOException { + // Encoding suspended? + if(suspendEncoding) { + super.out.write(theByte); + return; + } // end if: supsended + + // Encode? + if(encode) { + buffer[position++]=(byte)theByte; + if(position >= bufferLength) // Enough to encode. + { + out.write(encode3to4(b4, buffer, bufferLength)); + + lineLength+=4; + if(breakLines && lineLength >= MAX_LINE_LENGTH) { + out.write(NEW_LINE); + lineLength=0; + } // end if: end of line + + position=0; + } // end if: enough to output + } // end if: encoding + + // Else, Decoding + else { + // Meaningful Base64 character? + if(DECODABET[theByte & 0x7f] > WHITE_SPACE_ENC) { + buffer[position++]=(byte)theByte; + if(position >= bufferLength) // Enough to output. 
+ { + int len=Base64.decode4to3(buffer, 0, b4, 0); + out.write(b4, 0, len); + //out.write( Base64.decode4to3( buffer ) ); + position=0; + } // end if: enough to output + } // end if: meaningful base64 character + else if(DECODABET[theByte & 0x7f] != WHITE_SPACE_ENC) { + throw new java.io.IOException("Invalid character in Base64 data."); + } // end else: not white space either + } // end else: decoding + } // end write + + + /** + * Calls {@link #write(int)} repeatedly until len + * bytes are written. + * @param theBytes array from which to read bytes + * @param off offset for array + * @param len max number of bytes to read into array + * @since 1.3 + */ + public void write(byte[] theBytes, int off, int len) throws java.io.IOException { + // Encoding suspended? + if(suspendEncoding) { + super.out.write(theBytes, off, len); + return; + } // end if: supsended + + for(int i=0; i < len; i++) { + write(theBytes[off + i]); + } // end for: each byte written + + } // end write + + + /** + * Method added by PHIL. [Thanks, PHIL. -Rob] + * This pads the buffer without closing the stream. + */ + public void flushBase64() throws java.io.IOException { + if(position > 0) { + if(encode) { + out.write(encode3to4(b4, buffer, position)); + position=0; + } // end if: encoding + else { + throw new java.io.IOException("Base64 input not properly padded."); + } // end else: decoding + } // end if: buffer partially full + + } // end flush + + + /** + * Flushes and closes (I think, in the superclass) the stream. + * @since 1.3 + */ + public void close() throws java.io.IOException { + // 1. Ensure that pending characters are written + flushBase64(); + + // 2. Actually close the stream + // Base class both flushes and closes. + super.close(); + + buffer=null; + out=null; + } // end close + + + /** + * Suspends encoding of the stream. + * May be helpful if you need to embed a piece of + * base640-encoded data in a stream. 
+ * @since 1.5.1 + */ + public void suspendEncoding() throws java.io.IOException { + flushBase64(); + this.suspendEncoding=true; + } // end suspendEncoding + + + /** + * Resumes encoding of the stream. + * May be helpful if you need to embed a piece of + * base640-encoded data in a stream. + * @since 1.5.1 + */ + public void resumeEncoding() { + this.suspendEncoding=false; + } // end resumeEncoding + + + } // end inner class OutputStream + + + } // end class Base64 + +} + + + + + diff --git a/tests/scancode/data/resource/samples/README b/tests/scancode/data/resource/samples/README new file mode 100644 index 00000000000..1d61df81ffb --- /dev/null +++ b/tests/scancode/data/resource/samples/README @@ -0,0 +1,4 @@ +This directory contains a few sample files extracted from these two archives: + +download_url: http://zlib.net/zlib-1.2.8.tar.gz +download_url: http://master.dl.sourceforge.net/project/javagroups/JGroups/2.10.0.GA/JGroups-2.10.0.GA.src.zip \ No newline at end of file diff --git a/tests/scancode/data/resource/samples/arch/zlib.tar.gz b/tests/scancode/data/resource/samples/arch/zlib.tar.gz new file mode 100644 index 00000000000..b57920bb555 Binary files /dev/null and b/tests/scancode/data/resource/samples/arch/zlib.tar.gz differ diff --git a/tests/scancode/data/resource/samples/screenshot.png b/tests/scancode/data/resource/samples/screenshot.png new file mode 100644 index 00000000000..97155e4a9b9 Binary files /dev/null and b/tests/scancode/data/resource/samples/screenshot.png differ diff --git a/tests/scancode/data/resource/samples/zlib/ada/zlib.ads b/tests/scancode/data/resource/samples/zlib/ada/zlib.ads new file mode 100644 index 00000000000..79ffc4095cf --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/ada/zlib.ads @@ -0,0 +1,328 @@ +------------------------------------------------------------------------------ +-- ZLib for Ada thick binding. 
-- +-- -- +-- Copyright (C) 2002-2004 Dmitriy Anisimkov -- +-- -- +-- This library is free software; you can redistribute it and/or modify -- +-- it under the terms of the GNU General Public License as published by -- +-- the Free Software Foundation; either version 2 of the License, or (at -- +-- your option) any later version. -- +-- -- +-- This library is distributed in the hope that it will be useful, but -- +-- WITHOUT ANY WARRANTY; without even the implied warranty of -- +-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -- +-- General Public License for more details. -- +-- -- +-- You should have received a copy of the GNU General Public License -- +-- along with this library; if not, write to the Free Software Foundation, -- +-- Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -- +-- -- +-- As a special exception, if other files instantiate generics from this -- +-- unit, or you link this unit with other files to produce an executable, -- +-- this unit does not by itself cause the resulting executable to be -- +-- covered by the GNU General Public License. This exception does not -- +-- however invalidate any other reasons why the executable file might be -- +-- covered by the GNU Public License. -- +------------------------------------------------------------------------------ + +-- $Id: zlib.ads,v 1.26 2004/09/06 06:53:19 vagul Exp $ + +with Ada.Streams; + +with Interfaces; + +package ZLib is + + ZLib_Error : exception; + Status_Error : exception; + + type Compression_Level is new Integer range -1 .. 9; + + type Flush_Mode is private; + + type Compression_Method is private; + + type Window_Bits_Type is new Integer range 8 .. 15; + + type Memory_Level_Type is new Integer range 1 .. 9; + + type Unsigned_32 is new Interfaces.Unsigned_32; + + type Strategy_Type is private; + + type Header_Type is (None, Auto, Default, GZip); + -- Header type usage have a some limitation for inflate. + -- See comment for Inflate_Init. 
+ + subtype Count is Ada.Streams.Stream_Element_Count; + + Default_Memory_Level : constant Memory_Level_Type := 8; + Default_Window_Bits : constant Window_Bits_Type := 15; + + ---------------------------------- + -- Compression method constants -- + ---------------------------------- + + Deflated : constant Compression_Method; + -- Only one method allowed in this ZLib version + + --------------------------------- + -- Compression level constants -- + --------------------------------- + + No_Compression : constant Compression_Level := 0; + Best_Speed : constant Compression_Level := 1; + Best_Compression : constant Compression_Level := 9; + Default_Compression : constant Compression_Level := -1; + + -------------------------- + -- Flush mode constants -- + -------------------------- + + No_Flush : constant Flush_Mode; + -- Regular way for compression, no flush + + Partial_Flush : constant Flush_Mode; + -- Will be removed, use Z_SYNC_FLUSH instead + + Sync_Flush : constant Flush_Mode; + -- All pending output is flushed to the output buffer and the output + -- is aligned on a byte boundary, so that the decompressor can get all + -- input data available so far. (In particular avail_in is zero after the + -- call if enough output space has been provided before the call.) + -- Flushing may degrade compression for some compression algorithms and so + -- it should be used only when necessary. + + Block_Flush : constant Flush_Mode; + -- Z_BLOCK requests that inflate() stop + -- if and when it get to the next deflate block boundary. When decoding the + -- zlib or gzip format, this will cause inflate() to return immediately + -- after the header and before the first block. When doing a raw inflate, + -- inflate() will go ahead and process the first block, and will return + -- when it gets to the end of that block, or when it runs out of data. 
+ + Full_Flush : constant Flush_Mode; + -- All output is flushed as with SYNC_FLUSH, and the compression state + -- is reset so that decompression can restart from this point if previous + -- compressed data has been damaged or if random access is desired. Using + -- Full_Flush too often can seriously degrade the compression. + + Finish : constant Flush_Mode; + -- Just for tell the compressor that input data is complete. + + ------------------------------------ + -- Compression strategy constants -- + ------------------------------------ + + -- RLE stategy could be used only in version 1.2.0 and later. + + Filtered : constant Strategy_Type; + Huffman_Only : constant Strategy_Type; + RLE : constant Strategy_Type; + Default_Strategy : constant Strategy_Type; + + Default_Buffer_Size : constant := 4096; + + type Filter_Type is tagged limited private; + -- The filter is for compression and for decompression. + -- The usage of the type is depend of its initialization. + + function Version return String; + pragma Inline (Version); + -- Return string representation of the ZLib version. + + procedure Deflate_Init + (Filter : in out Filter_Type; + Level : in Compression_Level := Default_Compression; + Strategy : in Strategy_Type := Default_Strategy; + Method : in Compression_Method := Deflated; + Window_Bits : in Window_Bits_Type := Default_Window_Bits; + Memory_Level : in Memory_Level_Type := Default_Memory_Level; + Header : in Header_Type := Default); + -- Compressor initialization. + -- When Header parameter is Auto or Default, then default zlib header + -- would be provided for compressed data. + -- When Header is GZip, then gzip header would be set instead of + -- default header. + -- When Header is None, no header would be set for compressed data. + + procedure Inflate_Init + (Filter : in out Filter_Type; + Window_Bits : in Window_Bits_Type := Default_Window_Bits; + Header : in Header_Type := Default); + -- Decompressor initialization. 
+ -- Default header type mean that ZLib default header is expecting in the + -- input compressed stream. + -- Header type None mean that no header is expecting in the input stream. + -- GZip header type mean that GZip header is expecting in the + -- input compressed stream. + -- Auto header type mean that header type (GZip or Native) would be + -- detected automatically in the input stream. + -- Note that header types parameter values None, GZip and Auto are + -- supported for inflate routine only in ZLib versions 1.2.0.2 and later. + -- Deflate_Init is supporting all header types. + + function Is_Open (Filter : in Filter_Type) return Boolean; + pragma Inline (Is_Open); + -- Is the filter opened for compression or decompression. + + procedure Close + (Filter : in out Filter_Type; + Ignore_Error : in Boolean := False); + -- Closing the compression or decompressor. + -- If stream is closing before the complete and Ignore_Error is False, + -- The exception would be raised. + + generic + with procedure Data_In + (Item : out Ada.Streams.Stream_Element_Array; + Last : out Ada.Streams.Stream_Element_Offset); + with procedure Data_Out + (Item : in Ada.Streams.Stream_Element_Array); + procedure Generic_Translate + (Filter : in out Filter_Type; + In_Buffer_Size : in Integer := Default_Buffer_Size; + Out_Buffer_Size : in Integer := Default_Buffer_Size); + -- Compress/decompress data fetch from Data_In routine and pass the result + -- to the Data_Out routine. User should provide Data_In and Data_Out + -- for compression/decompression data flow. + -- Compression or decompression depend on Filter initialization. 
+ + function Total_In (Filter : in Filter_Type) return Count; + pragma Inline (Total_In); + -- Returns total number of input bytes read so far + + function Total_Out (Filter : in Filter_Type) return Count; + pragma Inline (Total_Out); + -- Returns total number of bytes output so far + + function CRC32 + (CRC : in Unsigned_32; + Data : in Ada.Streams.Stream_Element_Array) + return Unsigned_32; + pragma Inline (CRC32); + -- Compute CRC32, it could be necessary for make gzip format + + procedure CRC32 + (CRC : in out Unsigned_32; + Data : in Ada.Streams.Stream_Element_Array); + pragma Inline (CRC32); + -- Compute CRC32, it could be necessary for make gzip format + + ------------------------------------------------- + -- Below is more complex low level routines. -- + ------------------------------------------------- + + procedure Translate + (Filter : in out Filter_Type; + In_Data : in Ada.Streams.Stream_Element_Array; + In_Last : out Ada.Streams.Stream_Element_Offset; + Out_Data : out Ada.Streams.Stream_Element_Array; + Out_Last : out Ada.Streams.Stream_Element_Offset; + Flush : in Flush_Mode); + -- Compress/decompress the In_Data buffer and place the result into + -- Out_Data. In_Last is the index of last element from In_Data accepted by + -- the Filter. Out_Last is the last element of the received data from + -- Filter. To tell the filter that incoming data are complete put the + -- Flush parameter to Finish. + + function Stream_End (Filter : in Filter_Type) return Boolean; + pragma Inline (Stream_End); + -- Return the true when the stream is complete. + + procedure Flush + (Filter : in out Filter_Type; + Out_Data : out Ada.Streams.Stream_Element_Array; + Out_Last : out Ada.Streams.Stream_Element_Offset; + Flush : in Flush_Mode); + pragma Inline (Flush); + -- Flushing the data from the compressor. + + generic + with procedure Write + (Item : in Ada.Streams.Stream_Element_Array); + -- User should provide this routine for accept + -- compressed/decompressed data. 
+ + Buffer_Size : in Ada.Streams.Stream_Element_Offset + := Default_Buffer_Size; + -- Buffer size for Write user routine. + + procedure Write + (Filter : in out Filter_Type; + Item : in Ada.Streams.Stream_Element_Array; + Flush : in Flush_Mode := No_Flush); + -- Compress/Decompress data from Item to the generic parameter procedure + -- Write. Output buffer size could be set in Buffer_Size generic parameter. + + generic + with procedure Read + (Item : out Ada.Streams.Stream_Element_Array; + Last : out Ada.Streams.Stream_Element_Offset); + -- User should provide data for compression/decompression + -- thru this routine. + + Buffer : in out Ada.Streams.Stream_Element_Array; + -- Buffer for keep remaining data from the previous + -- back read. + + Rest_First, Rest_Last : in out Ada.Streams.Stream_Element_Offset; + -- Rest_First have to be initialized to Buffer'Last + 1 + -- Rest_Last have to be initialized to Buffer'Last + -- before usage. + + Allow_Read_Some : in Boolean := False; + -- Is it allowed to return Last < Item'Last before end of data. + + procedure Read + (Filter : in out Filter_Type; + Item : out Ada.Streams.Stream_Element_Array; + Last : out Ada.Streams.Stream_Element_Offset; + Flush : in Flush_Mode := No_Flush); + -- Compress/Decompress data from generic parameter procedure Read to the + -- Item. User should provide Buffer and initialized Rest_First, Rest_Last + -- indicators. If Allow_Read_Some is True, Read routines could return + -- Last < Item'Last only at end of stream. + +private + + use Ada.Streams; + + pragma Assert (Ada.Streams.Stream_Element'Size = 8); + pragma Assert (Ada.Streams.Stream_Element'Modulus = 2**8); + + type Flush_Mode is new Integer range 0 .. 5; + + type Compression_Method is new Integer range 8 .. 8; + + type Strategy_Type is new Integer range 0 .. 
3; + + No_Flush : constant Flush_Mode := 0; + Partial_Flush : constant Flush_Mode := 1; + Sync_Flush : constant Flush_Mode := 2; + Full_Flush : constant Flush_Mode := 3; + Finish : constant Flush_Mode := 4; + Block_Flush : constant Flush_Mode := 5; + + Filtered : constant Strategy_Type := 1; + Huffman_Only : constant Strategy_Type := 2; + RLE : constant Strategy_Type := 3; + Default_Strategy : constant Strategy_Type := 0; + + Deflated : constant Compression_Method := 8; + + type Z_Stream; + + type Z_Stream_Access is access all Z_Stream; + + type Filter_Type is tagged limited record + Strm : Z_Stream_Access; + Compression : Boolean; + Stream_End : Boolean; + Header : Header_Type; + CRC : Unsigned_32; + Offset : Stream_Element_Offset; + -- Offset for gzip header/footer output. + end record; + +end ZLib; diff --git a/tests/scancode/data/resource/samples/zlib/adler32.c b/tests/scancode/data/resource/samples/zlib/adler32.c new file mode 100644 index 00000000000..a868f073d8a --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/adler32.c @@ -0,0 +1,179 @@ +/* adler32.c -- compute the Adler-32 checksum of a data stream + * Copyright (C) 1995-2011 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#include "zutil.h" + +#define local static + +local uLong adler32_combine_ OF((uLong adler1, uLong adler2, z_off64_t len2)); + +#define BASE 65521 /* largest prime smaller than 65536 */ +#define NMAX 5552 +/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */ + +#define DO1(buf,i) {adler += (buf)[i]; sum2 += adler;} +#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1); +#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2); +#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4); +#define DO16(buf) DO8(buf,0); DO8(buf,8); + +/* use NO_DIVIDE if your processor does not do division in hardware -- + try it both ways to see which is faster */ +#ifdef NO_DIVIDE +/* note that this assumes BASE is 65521, where 65536 % 65521 
== 15 + (thank you to John Reiser for pointing this out) */ +# define CHOP(a) \ + do { \ + unsigned long tmp = a >> 16; \ + a &= 0xffffUL; \ + a += (tmp << 4) - tmp; \ + } while (0) +# define MOD28(a) \ + do { \ + CHOP(a); \ + if (a >= BASE) a -= BASE; \ + } while (0) +# define MOD(a) \ + do { \ + CHOP(a); \ + MOD28(a); \ + } while (0) +# define MOD63(a) \ + do { /* this assumes a is not negative */ \ + z_off64_t tmp = a >> 32; \ + a &= 0xffffffffL; \ + a += (tmp << 8) - (tmp << 5) + tmp; \ + tmp = a >> 16; \ + a &= 0xffffL; \ + a += (tmp << 4) - tmp; \ + tmp = a >> 16; \ + a &= 0xffffL; \ + a += (tmp << 4) - tmp; \ + if (a >= BASE) a -= BASE; \ + } while (0) +#else +# define MOD(a) a %= BASE +# define MOD28(a) a %= BASE +# define MOD63(a) a %= BASE +#endif + +/* ========================================================================= */ +uLong ZEXPORT adler32(adler, buf, len) + uLong adler; + const Bytef *buf; + uInt len; +{ + unsigned long sum2; + unsigned n; + + /* split Adler-32 into component sums */ + sum2 = (adler >> 16) & 0xffff; + adler &= 0xffff; + + /* in case user likes doing a byte at a time, keep it fast */ + if (len == 1) { + adler += buf[0]; + if (adler >= BASE) + adler -= BASE; + sum2 += adler; + if (sum2 >= BASE) + sum2 -= BASE; + return adler | (sum2 << 16); + } + + /* initial Adler-32 value (deferred check for len == 1 speed) */ + if (buf == Z_NULL) + return 1L; + + /* in case short lengths are provided, keep it somewhat fast */ + if (len < 16) { + while (len--) { + adler += *buf++; + sum2 += adler; + } + if (adler >= BASE) + adler -= BASE; + MOD28(sum2); /* only added so many BASE's */ + return adler | (sum2 << 16); + } + + /* do length NMAX blocks -- requires just one modulo operation */ + while (len >= NMAX) { + len -= NMAX; + n = NMAX / 16; /* NMAX is divisible by 16 */ + do { + DO16(buf); /* 16 sums unrolled */ + buf += 16; + } while (--n); + MOD(adler); + MOD(sum2); + } + + /* do remaining bytes (less than NMAX, still just one modulo) */ 
+ if (len) { /* avoid modulos if none remaining */ + while (len >= 16) { + len -= 16; + DO16(buf); + buf += 16; + } + while (len--) { + adler += *buf++; + sum2 += adler; + } + MOD(adler); + MOD(sum2); + } + + /* return recombined sums */ + return adler | (sum2 << 16); +} + +/* ========================================================================= */ +local uLong adler32_combine_(adler1, adler2, len2) + uLong adler1; + uLong adler2; + z_off64_t len2; +{ + unsigned long sum1; + unsigned long sum2; + unsigned rem; + + /* for negative len, return invalid adler32 as a clue for debugging */ + if (len2 < 0) + return 0xffffffffUL; + + /* the derivation of this formula is left as an exercise for the reader */ + MOD63(len2); /* assumes len2 >= 0 */ + rem = (unsigned)len2; + sum1 = adler1 & 0xffff; + sum2 = rem * sum1; + MOD(sum2); + sum1 += (adler2 & 0xffff) + BASE - 1; + sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem; + if (sum1 >= BASE) sum1 -= BASE; + if (sum1 >= BASE) sum1 -= BASE; + if (sum2 >= (BASE << 1)) sum2 -= (BASE << 1); + if (sum2 >= BASE) sum2 -= BASE; + return sum1 | (sum2 << 16); +} + +/* ========================================================================= */ +uLong ZEXPORT adler32_combine(adler1, adler2, len2) + uLong adler1; + uLong adler2; + z_off_t len2; +{ + return adler32_combine_(adler1, adler2, len2); +} + +uLong ZEXPORT adler32_combine64(adler1, adler2, len2) + uLong adler1; + uLong adler2; + z_off64_t len2; +{ + return adler32_combine_(adler1, adler2, len2); +} diff --git a/tests/scancode/data/resource/samples/zlib/deflate.c b/tests/scancode/data/resource/samples/zlib/deflate.c new file mode 100644 index 00000000000..696957705b7 --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/deflate.c @@ -0,0 +1,1967 @@ +/* deflate.c -- compress data using the deflation algorithm + * Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + 
*/ + +/* + * ALGORITHM + * + * The "deflation" process depends on being able to identify portions + * of the input text which are identical to earlier input (within a + * sliding window trailing behind the input currently being processed). + * + * The most straightforward technique turns out to be the fastest for + * most input files: try all possible matches and select the longest. + * The key feature of this algorithm is that insertions into the string + * dictionary are very simple and thus fast, and deletions are avoided + * completely. Insertions are performed at each input character, whereas + * string matches are performed only when the previous match ends. So it + * is preferable to spend more time in matches to allow very fast string + * insertions and avoid deletions. The matching algorithm for small + * strings is inspired from that of Rabin & Karp. A brute force approach + * is used to find longer strings when a small match has been found. + * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze + * (by Leonid Broukhis). + * A previous version of this file used a more sophisticated algorithm + * (by Fiala and Greene) which is guaranteed to run in linear amortized + * time, but has a larger average cost, uses more memory and is patented. + * However the F&G algorithm may be faster for some highly redundant + * files if the parameter max_chain_length (described below) is too large. + * + * ACKNOWLEDGEMENTS + * + * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and + * I found it in 'freeze' written by Leonid Broukhis. + * Thanks to many people for bug reports and testing. + * + * REFERENCES + * + * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". + * Available in http://tools.ietf.org/html/rfc1951 + * + * A description of the Rabin and Karp algorithm is given in the book + * "Algorithms" by R. Sedgewick, Addison-Wesley, p252. + * + * Fiala,E.R., and Greene,D.H. 
+ * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595 + * + */ + +/* @(#) $Id$ */ + +#include "deflate.h" + +const char deflate_copyright[] = + " deflate 1.2.8 Copyright 1995-2013 Jean-loup Gailly and Mark Adler "; +/* + If you use the zlib library in a product, an acknowledgment is welcome + in the documentation of your product. If for some reason you cannot + include such an acknowledgment, I would appreciate that you keep this + copyright string in the executable of your product. + */ + +/* =========================================================================== + * Function prototypes. + */ +typedef enum { + need_more, /* block not completed, need more input or more output */ + block_done, /* block flush performed */ + finish_started, /* finish started, need only more output at next deflate */ + finish_done /* finish done, accept no more input or output */ +} block_state; + +typedef block_state (*compress_func) OF((deflate_state *s, int flush)); +/* Compression function. Returns the block state after the call. 
*/ + +local void fill_window OF((deflate_state *s)); +local block_state deflate_stored OF((deflate_state *s, int flush)); +local block_state deflate_fast OF((deflate_state *s, int flush)); +#ifndef FASTEST +local block_state deflate_slow OF((deflate_state *s, int flush)); +#endif +local block_state deflate_rle OF((deflate_state *s, int flush)); +local block_state deflate_huff OF((deflate_state *s, int flush)); +local void lm_init OF((deflate_state *s)); +local void putShortMSB OF((deflate_state *s, uInt b)); +local void flush_pending OF((z_streamp strm)); +local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size)); +#ifdef ASMV + void match_init OF((void)); /* asm code initialization */ + uInt longest_match OF((deflate_state *s, IPos cur_match)); +#else +local uInt longest_match OF((deflate_state *s, IPos cur_match)); +#endif + +#ifdef DEBUG +local void check_match OF((deflate_state *s, IPos start, IPos match, + int length)); +#endif + +/* =========================================================================== + * Local data + */ + +#define NIL 0 +/* Tail of hash chains */ + +#ifndef TOO_FAR +# define TOO_FAR 4096 +#endif +/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */ + +/* Values for max_lazy_match, good_match and max_chain_length, depending on + * the desired pack level (0..9). The values given below have been tuned to + * exclude worst case performance for pathological files. Better values may be + * found for specific files. 
+ */ +typedef struct config_s { + ush good_length; /* reduce lazy search above this match length */ + ush max_lazy; /* do not perform lazy search above this match length */ + ush nice_length; /* quit search above this match length */ + ush max_chain; + compress_func func; +} config; + +#ifdef FASTEST +local const config configuration_table[2] = { +/* good lazy nice chain */ +/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ +/* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */ +#else +local const config configuration_table[10] = { +/* good lazy nice chain */ +/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ +/* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */ +/* 2 */ {4, 5, 16, 8, deflate_fast}, +/* 3 */ {4, 6, 32, 32, deflate_fast}, + +/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */ +/* 5 */ {8, 16, 32, 32, deflate_slow}, +/* 6 */ {8, 16, 128, 128, deflate_slow}, +/* 7 */ {8, 32, 128, 256, deflate_slow}, +/* 8 */ {32, 128, 258, 1024, deflate_slow}, +/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */ +#endif + +/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4 + * For deflate_fast() (levels <= 3) good is ignored and lazy has a different + * meaning. + */ + +#define EQUAL 0 +/* result of memcmp for equal strings */ + +#ifndef NO_DUMMY_DECL +struct static_tree_desc_s {int dummy;}; /* for buggy compilers */ +#endif + +/* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */ +#define RANK(f) (((f) << 1) - ((f) > 4 ? 9 : 0)) + +/* =========================================================================== + * Update a hash value with the given input byte + * IN assertion: all calls to to UPDATE_HASH are made with consecutive + * input characters, so that a running hash key can be computed from the + * previous key instead of complete recalculation each time. 
+ */ +#define UPDATE_HASH(s,h,c) (h = (((h)<hash_shift) ^ (c)) & s->hash_mask) + + +/* =========================================================================== + * Insert string str in the dictionary and set match_head to the previous head + * of the hash chain (the most recent string with same hash key). Return + * the previous length of the hash chain. + * If this file is compiled with -DFASTEST, the compression level is forced + * to 1, and no hash chains are maintained. + * IN assertion: all calls to to INSERT_STRING are made with consecutive + * input characters and the first MIN_MATCH bytes of str are valid + * (except for the last MIN_MATCH-1 bytes of the input file). + */ +#ifdef FASTEST +#define INSERT_STRING(s, str, match_head) \ + (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ + match_head = s->head[s->ins_h], \ + s->head[s->ins_h] = (Pos)(str)) +#else +#define INSERT_STRING(s, str, match_head) \ + (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ + match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \ + s->head[s->ins_h] = (Pos)(str)) +#endif + +/* =========================================================================== + * Initialize the hash table (avoiding 64K overflow for 16 bit systems). + * prev[] will be initialized on the fly. 
+ */ +#define CLEAR_HASH(s) \ + s->head[s->hash_size-1] = NIL; \ + zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head)); + +/* ========================================================================= */ +int ZEXPORT deflateInit_(strm, level, version, stream_size) + z_streamp strm; + int level; + const char *version; + int stream_size; +{ + return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, + Z_DEFAULT_STRATEGY, version, stream_size); + /* To do: ignore strm->next_in if we use it as window */ +} + +/* ========================================================================= */ +int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, + version, stream_size) + z_streamp strm; + int level; + int method; + int windowBits; + int memLevel; + int strategy; + const char *version; + int stream_size; +{ + deflate_state *s; + int wrap = 1; + static const char my_version[] = ZLIB_VERSION; + + ushf *overlay; + /* We overlay pending_buf and d_buf+l_buf. This works since the average + * output size for (length,distance) codes is <= 24 bits. 
+ */ + + if (version == Z_NULL || version[0] != my_version[0] || + stream_size != sizeof(z_stream)) { + return Z_VERSION_ERROR; + } + if (strm == Z_NULL) return Z_STREAM_ERROR; + + strm->msg = Z_NULL; + if (strm->zalloc == (alloc_func)0) { +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zalloc = zcalloc; + strm->opaque = (voidpf)0; +#endif + } + if (strm->zfree == (free_func)0) +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zfree = zcfree; +#endif + +#ifdef FASTEST + if (level != 0) level = 1; +#else + if (level == Z_DEFAULT_COMPRESSION) level = 6; +#endif + + if (windowBits < 0) { /* suppress zlib wrapper */ + wrap = 0; + windowBits = -windowBits; + } +#ifdef GZIP + else if (windowBits > 15) { + wrap = 2; /* write gzip wrapper instead */ + windowBits -= 16; + } +#endif + if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || + windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || + strategy < 0 || strategy > Z_FIXED) { + return Z_STREAM_ERROR; + } + if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */ + s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state)); + if (s == Z_NULL) return Z_MEM_ERROR; + strm->state = (struct internal_state FAR *)s; + s->strm = strm; + + s->wrap = wrap; + s->gzhead = Z_NULL; + s->w_bits = windowBits; + s->w_size = 1 << s->w_bits; + s->w_mask = s->w_size - 1; + + s->hash_bits = memLevel + 7; + s->hash_size = 1 << s->hash_bits; + s->hash_mask = s->hash_size - 1; + s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH); + + s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte)); + s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); + s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); + + s->high_water = 0; /* nothing written to s->window yet */ + + s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ + + overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); + s->pending_buf = (uchf *) overlay; + s->pending_buf_size = 
(ulg)s->lit_bufsize * (sizeof(ush)+2L); + + if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || + s->pending_buf == Z_NULL) { + s->status = FINISH_STATE; + strm->msg = ERR_MSG(Z_MEM_ERROR); + deflateEnd (strm); + return Z_MEM_ERROR; + } + s->d_buf = overlay + s->lit_bufsize/sizeof(ush); + s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; + + s->level = level; + s->strategy = strategy; + s->method = (Byte)method; + + return deflateReset(strm); +} + +/* ========================================================================= */ +int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength) + z_streamp strm; + const Bytef *dictionary; + uInt dictLength; +{ + deflate_state *s; + uInt str, n; + int wrap; + unsigned avail; + z_const unsigned char *next; + + if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL) + return Z_STREAM_ERROR; + s = strm->state; + wrap = s->wrap; + if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead) + return Z_STREAM_ERROR; + + /* when using zlib wrappers, compute Adler-32 for provided dictionary */ + if (wrap == 1) + strm->adler = adler32(strm->adler, dictionary, dictLength); + s->wrap = 0; /* avoid computing Adler-32 in read_buf */ + + /* if dictionary would fill window, just replace the history */ + if (dictLength >= s->w_size) { + if (wrap == 0) { /* already empty otherwise */ + CLEAR_HASH(s); + s->strstart = 0; + s->block_start = 0L; + s->insert = 0; + } + dictionary += dictLength - s->w_size; /* use the tail */ + dictLength = s->w_size; + } + + /* insert dictionary into window and hash */ + avail = strm->avail_in; + next = strm->next_in; + strm->avail_in = dictLength; + strm->next_in = (z_const Bytef *)dictionary; + fill_window(s); + while (s->lookahead >= MIN_MATCH) { + str = s->strstart; + n = s->lookahead - (MIN_MATCH-1); + do { + UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); +#ifndef FASTEST + s->prev[str & s->w_mask] = s->head[s->ins_h]; +#endif + 
s->head[s->ins_h] = (Pos)str; + str++; + } while (--n); + s->strstart = str; + s->lookahead = MIN_MATCH-1; + fill_window(s); + } + s->strstart += s->lookahead; + s->block_start = (long)s->strstart; + s->insert = s->lookahead; + s->lookahead = 0; + s->match_length = s->prev_length = MIN_MATCH-1; + s->match_available = 0; + strm->next_in = next; + strm->avail_in = avail; + s->wrap = wrap; + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflateResetKeep (strm) + z_streamp strm; +{ + deflate_state *s; + + if (strm == Z_NULL || strm->state == Z_NULL || + strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) { + return Z_STREAM_ERROR; + } + + strm->total_in = strm->total_out = 0; + strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */ + strm->data_type = Z_UNKNOWN; + + s = (deflate_state *)strm->state; + s->pending = 0; + s->pending_out = s->pending_buf; + + if (s->wrap < 0) { + s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */ + } + s->status = s->wrap ? INIT_STATE : BUSY_STATE; + strm->adler = +#ifdef GZIP + s->wrap == 2 ? 
crc32(0L, Z_NULL, 0) : +#endif + adler32(0L, Z_NULL, 0); + s->last_flush = Z_NO_FLUSH; + + _tr_init(s); + + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflateReset (strm) + z_streamp strm; +{ + int ret; + + ret = deflateResetKeep(strm); + if (ret == Z_OK) + lm_init(strm->state); + return ret; +} + +/* ========================================================================= */ +int ZEXPORT deflateSetHeader (strm, head) + z_streamp strm; + gz_headerp head; +{ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + if (strm->state->wrap != 2) return Z_STREAM_ERROR; + strm->state->gzhead = head; + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflatePending (strm, pending, bits) + unsigned *pending; + int *bits; + z_streamp strm; +{ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + if (pending != Z_NULL) + *pending = strm->state->pending; + if (bits != Z_NULL) + *bits = strm->state->bi_valid; + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflatePrime (strm, bits, value) + z_streamp strm; + int bits; + int value; +{ + deflate_state *s; + int put; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + s = strm->state; + if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3)) + return Z_BUF_ERROR; + do { + put = Buf_size - s->bi_valid; + if (put > bits) + put = bits; + s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid); + s->bi_valid += put; + _tr_flush_bits(s); + value >>= put; + bits -= put; + } while (bits); + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflateParams(strm, level, strategy) + z_streamp strm; + int level; + int strategy; +{ + deflate_state *s; + compress_func func; + int err = Z_OK; + + if (strm == Z_NULL || 
strm->state == Z_NULL) return Z_STREAM_ERROR; + s = strm->state; + +#ifdef FASTEST + if (level != 0) level = 1; +#else + if (level == Z_DEFAULT_COMPRESSION) level = 6; +#endif + if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) { + return Z_STREAM_ERROR; + } + func = configuration_table[s->level].func; + + if ((strategy != s->strategy || func != configuration_table[level].func) && + strm->total_in != 0) { + /* Flush the last buffer: */ + err = deflate(strm, Z_BLOCK); + if (err == Z_BUF_ERROR && s->pending == 0) + err = Z_OK; + } + if (s->level != level) { + s->level = level; + s->max_lazy_match = configuration_table[level].max_lazy; + s->good_match = configuration_table[level].good_length; + s->nice_match = configuration_table[level].nice_length; + s->max_chain_length = configuration_table[level].max_chain; + } + s->strategy = strategy; + return err; +} + +/* ========================================================================= */ +int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain) + z_streamp strm; + int good_length; + int max_lazy; + int nice_length; + int max_chain; +{ + deflate_state *s; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + s = strm->state; + s->good_match = good_length; + s->max_lazy_match = max_lazy; + s->nice_match = nice_length; + s->max_chain_length = max_chain; + return Z_OK; +} + +/* ========================================================================= + * For the default windowBits of 15 and memLevel of 8, this function returns + * a close to exact, as well as small, upper bound on the compressed size. + * They are coded as constants here for a reason--if the #define's are + * changed, then this function needs to be changed as well. The return + * value for 15 and 8 only works for those exact settings. 
+ * + * For any setting other than those defaults for windowBits and memLevel, + * the value returned is a conservative worst case for the maximum expansion + * resulting from using fixed blocks instead of stored blocks, which deflate + * can emit on compressed data for some combinations of the parameters. + * + * This function could be more sophisticated to provide closer upper bounds for + * every combination of windowBits and memLevel. But even the conservative + * upper bound of about 14% expansion does not seem onerous for output buffer + * allocation. + */ +uLong ZEXPORT deflateBound(strm, sourceLen) + z_streamp strm; + uLong sourceLen; +{ + deflate_state *s; + uLong complen, wraplen; + Bytef *str; + + /* conservative upper bound for compressed data */ + complen = sourceLen + + ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5; + + /* if can't get parameters, return conservative bound plus zlib wrapper */ + if (strm == Z_NULL || strm->state == Z_NULL) + return complen + 6; + + /* compute wrapper length */ + s = strm->state; + switch (s->wrap) { + case 0: /* raw deflate */ + wraplen = 0; + break; + case 1: /* zlib wrapper */ + wraplen = 6 + (s->strstart ? 
4 : 0); + break; + case 2: /* gzip wrapper */ + wraplen = 18; + if (s->gzhead != Z_NULL) { /* user-supplied gzip header */ + if (s->gzhead->extra != Z_NULL) + wraplen += 2 + s->gzhead->extra_len; + str = s->gzhead->name; + if (str != Z_NULL) + do { + wraplen++; + } while (*str++); + str = s->gzhead->comment; + if (str != Z_NULL) + do { + wraplen++; + } while (*str++); + if (s->gzhead->hcrc) + wraplen += 2; + } + break; + default: /* for compiler happiness */ + wraplen = 6; + } + + /* if not default parameters, return conservative bound */ + if (s->w_bits != 15 || s->hash_bits != 8 + 7) + return complen + wraplen; + + /* default settings: return tight bound for that case */ + return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + + (sourceLen >> 25) + 13 - 6 + wraplen; +} + +/* ========================================================================= + * Put a short in the pending buffer. The 16-bit value is put in MSB order. + * IN assertion: the stream state is correct and there is enough room in + * pending_buf. + */ +local void putShortMSB (s, b) + deflate_state *s; + uInt b; +{ + put_byte(s, (Byte)(b >> 8)); + put_byte(s, (Byte)(b & 0xff)); +} + +/* ========================================================================= + * Flush as much pending output as possible. All deflate() output goes + * through this function so some applications may wish to modify it + * to avoid allocating a large strm->next_out buffer and copying into it. + * (See also read_buf()). 
+ */ +local void flush_pending(strm) + z_streamp strm; +{ + unsigned len; + deflate_state *s = strm->state; + + _tr_flush_bits(s); + len = s->pending; + if (len > strm->avail_out) len = strm->avail_out; + if (len == 0) return; + + zmemcpy(strm->next_out, s->pending_out, len); + strm->next_out += len; + s->pending_out += len; + strm->total_out += len; + strm->avail_out -= len; + s->pending -= len; + if (s->pending == 0) { + s->pending_out = s->pending_buf; + } +} + +/* ========================================================================= */ +int ZEXPORT deflate (strm, flush) + z_streamp strm; + int flush; +{ + int old_flush; /* value of flush param for previous deflate call */ + deflate_state *s; + + if (strm == Z_NULL || strm->state == Z_NULL || + flush > Z_BLOCK || flush < 0) { + return Z_STREAM_ERROR; + } + s = strm->state; + + if (strm->next_out == Z_NULL || + (strm->next_in == Z_NULL && strm->avail_in != 0) || + (s->status == FINISH_STATE && flush != Z_FINISH)) { + ERR_RETURN(strm, Z_STREAM_ERROR); + } + if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR); + + s->strm = strm; /* just in case */ + old_flush = s->last_flush; + s->last_flush = flush; + + /* Write the header */ + if (s->status == INIT_STATE) { +#ifdef GZIP + if (s->wrap == 2) { + strm->adler = crc32(0L, Z_NULL, 0); + put_byte(s, 31); + put_byte(s, 139); + put_byte(s, 8); + if (s->gzhead == Z_NULL) { + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, s->level == 9 ? 2 : + (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? + 4 : 0)); + put_byte(s, OS_CODE); + s->status = BUSY_STATE; + } + else { + put_byte(s, (s->gzhead->text ? 1 : 0) + + (s->gzhead->hcrc ? 2 : 0) + + (s->gzhead->extra == Z_NULL ? 0 : 4) + + (s->gzhead->name == Z_NULL ? 0 : 8) + + (s->gzhead->comment == Z_NULL ? 
0 : 16) + ); + put_byte(s, (Byte)(s->gzhead->time & 0xff)); + put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff)); + put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff)); + put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff)); + put_byte(s, s->level == 9 ? 2 : + (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? + 4 : 0)); + put_byte(s, s->gzhead->os & 0xff); + if (s->gzhead->extra != Z_NULL) { + put_byte(s, s->gzhead->extra_len & 0xff); + put_byte(s, (s->gzhead->extra_len >> 8) & 0xff); + } + if (s->gzhead->hcrc) + strm->adler = crc32(strm->adler, s->pending_buf, + s->pending); + s->gzindex = 0; + s->status = EXTRA_STATE; + } + } + else +#endif + { + uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8; + uInt level_flags; + + if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2) + level_flags = 0; + else if (s->level < 6) + level_flags = 1; + else if (s->level == 6) + level_flags = 2; + else + level_flags = 3; + header |= (level_flags << 6); + if (s->strstart != 0) header |= PRESET_DICT; + header += 31 - (header % 31); + + s->status = BUSY_STATE; + putShortMSB(s, header); + + /* Save the adler32 of the preset dictionary: */ + if (s->strstart != 0) { + putShortMSB(s, (uInt)(strm->adler >> 16)); + putShortMSB(s, (uInt)(strm->adler & 0xffff)); + } + strm->adler = adler32(0L, Z_NULL, 0); + } + } +#ifdef GZIP + if (s->status == EXTRA_STATE) { + if (s->gzhead->extra != Z_NULL) { + uInt beg = s->pending; /* start of bytes to update crc */ + + while (s->gzindex < (s->gzhead->extra_len & 0xffff)) { + if (s->pending == s->pending_buf_size) { + if (s->gzhead->hcrc && s->pending > beg) + strm->adler = crc32(strm->adler, s->pending_buf + beg, + s->pending - beg); + flush_pending(strm); + beg = s->pending; + if (s->pending == s->pending_buf_size) + break; + } + put_byte(s, s->gzhead->extra[s->gzindex]); + s->gzindex++; + } + if (s->gzhead->hcrc && s->pending > beg) + strm->adler = crc32(strm->adler, s->pending_buf + beg, + s->pending - beg); + if (s->gzindex == s->gzhead->extra_len) { 
+ s->gzindex = 0; + s->status = NAME_STATE; + } + } + else + s->status = NAME_STATE; + } + if (s->status == NAME_STATE) { + if (s->gzhead->name != Z_NULL) { + uInt beg = s->pending; /* start of bytes to update crc */ + int val; + + do { + if (s->pending == s->pending_buf_size) { + if (s->gzhead->hcrc && s->pending > beg) + strm->adler = crc32(strm->adler, s->pending_buf + beg, + s->pending - beg); + flush_pending(strm); + beg = s->pending; + if (s->pending == s->pending_buf_size) { + val = 1; + break; + } + } + val = s->gzhead->name[s->gzindex++]; + put_byte(s, val); + } while (val != 0); + if (s->gzhead->hcrc && s->pending > beg) + strm->adler = crc32(strm->adler, s->pending_buf + beg, + s->pending - beg); + if (val == 0) { + s->gzindex = 0; + s->status = COMMENT_STATE; + } + } + else + s->status = COMMENT_STATE; + } + if (s->status == COMMENT_STATE) { + if (s->gzhead->comment != Z_NULL) { + uInt beg = s->pending; /* start of bytes to update crc */ + int val; + + do { + if (s->pending == s->pending_buf_size) { + if (s->gzhead->hcrc && s->pending > beg) + strm->adler = crc32(strm->adler, s->pending_buf + beg, + s->pending - beg); + flush_pending(strm); + beg = s->pending; + if (s->pending == s->pending_buf_size) { + val = 1; + break; + } + } + val = s->gzhead->comment[s->gzindex++]; + put_byte(s, val); + } while (val != 0); + if (s->gzhead->hcrc && s->pending > beg) + strm->adler = crc32(strm->adler, s->pending_buf + beg, + s->pending - beg); + if (val == 0) + s->status = HCRC_STATE; + } + else + s->status = HCRC_STATE; + } + if (s->status == HCRC_STATE) { + if (s->gzhead->hcrc) { + if (s->pending + 2 > s->pending_buf_size) + flush_pending(strm); + if (s->pending + 2 <= s->pending_buf_size) { + put_byte(s, (Byte)(strm->adler & 0xff)); + put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); + strm->adler = crc32(0L, Z_NULL, 0); + s->status = BUSY_STATE; + } + } + else + s->status = BUSY_STATE; + } +#endif + + /* Flush as much pending output as possible */ + if (s->pending 
!= 0) { + flush_pending(strm); + if (strm->avail_out == 0) { + /* Since avail_out is 0, deflate will be called again with + * more output space, but possibly with both pending and + * avail_in equal to zero. There won't be anything to do, + * but this is not an error situation so make sure we + * return OK instead of BUF_ERROR at next call of deflate: + */ + s->last_flush = -1; + return Z_OK; + } + + /* Make sure there is something to do and avoid duplicate consecutive + * flushes. For repeated and useless calls with Z_FINISH, we keep + * returning Z_STREAM_END instead of Z_BUF_ERROR. + */ + } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) && + flush != Z_FINISH) { + ERR_RETURN(strm, Z_BUF_ERROR); + } + + /* User must not provide more input after the first FINISH: */ + if (s->status == FINISH_STATE && strm->avail_in != 0) { + ERR_RETURN(strm, Z_BUF_ERROR); + } + + /* Start a new block or continue the current one. + */ + if (strm->avail_in != 0 || s->lookahead != 0 || + (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { + block_state bstate; + + bstate = s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) : + (s->strategy == Z_RLE ? deflate_rle(s, flush) : + (*(configuration_table[s->level].func))(s, flush)); + + if (bstate == finish_started || bstate == finish_done) { + s->status = FINISH_STATE; + } + if (bstate == need_more || bstate == finish_started) { + if (strm->avail_out == 0) { + s->last_flush = -1; /* avoid BUF_ERROR next call, see above */ + } + return Z_OK; + /* If flush != Z_NO_FLUSH && avail_out == 0, the next call + * of deflate should use the same flush parameter to make sure + * that the flush is complete. So we don't have to output an + * empty block here, this will be done at next call. This also + * ensures that for a very small output buffer, we emit at most + * one empty block. 
+ */ + } + if (bstate == block_done) { + if (flush == Z_PARTIAL_FLUSH) { + _tr_align(s); + } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ + _tr_stored_block(s, (char*)0, 0L, 0); + /* For a full flush, this empty block will be recognized + * as a special marker by inflate_sync(). + */ + if (flush == Z_FULL_FLUSH) { + CLEAR_HASH(s); /* forget history */ + if (s->lookahead == 0) { + s->strstart = 0; + s->block_start = 0L; + s->insert = 0; + } + } + } + flush_pending(strm); + if (strm->avail_out == 0) { + s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */ + return Z_OK; + } + } + } + Assert(strm->avail_out > 0, "bug2"); + + if (flush != Z_FINISH) return Z_OK; + if (s->wrap <= 0) return Z_STREAM_END; + + /* Write the trailer */ +#ifdef GZIP + if (s->wrap == 2) { + put_byte(s, (Byte)(strm->adler & 0xff)); + put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); + put_byte(s, (Byte)((strm->adler >> 16) & 0xff)); + put_byte(s, (Byte)((strm->adler >> 24) & 0xff)); + put_byte(s, (Byte)(strm->total_in & 0xff)); + put_byte(s, (Byte)((strm->total_in >> 8) & 0xff)); + put_byte(s, (Byte)((strm->total_in >> 16) & 0xff)); + put_byte(s, (Byte)((strm->total_in >> 24) & 0xff)); + } + else +#endif + { + putShortMSB(s, (uInt)(strm->adler >> 16)); + putShortMSB(s, (uInt)(strm->adler & 0xffff)); + } + flush_pending(strm); + /* If avail_out is zero, the application will call deflate again + * to flush the rest. + */ + if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */ + return s->pending != 0 ? 
Z_OK : Z_STREAM_END; +} + +/* ========================================================================= */ +int ZEXPORT deflateEnd (strm) + z_streamp strm; +{ + int status; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + + status = strm->state->status; + if (status != INIT_STATE && + status != EXTRA_STATE && + status != NAME_STATE && + status != COMMENT_STATE && + status != HCRC_STATE && + status != BUSY_STATE && + status != FINISH_STATE) { + return Z_STREAM_ERROR; + } + + /* Deallocate in reverse order of allocations: */ + TRY_FREE(strm, strm->state->pending_buf); + TRY_FREE(strm, strm->state->head); + TRY_FREE(strm, strm->state->prev); + TRY_FREE(strm, strm->state->window); + + ZFREE(strm, strm->state); + strm->state = Z_NULL; + + return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; +} + +/* ========================================================================= + * Copy the source state to the destination state. + * To simplify the source, this is not supported for 16-bit MSDOS (which + * doesn't have enough memory anyway to duplicate compression states). 
+ */ +int ZEXPORT deflateCopy (dest, source) + z_streamp dest; + z_streamp source; +{ +#ifdef MAXSEG_64K + return Z_STREAM_ERROR; +#else + deflate_state *ds; + deflate_state *ss; + ushf *overlay; + + + if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) { + return Z_STREAM_ERROR; + } + + ss = source->state; + + zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream)); + + ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state)); + if (ds == Z_NULL) return Z_MEM_ERROR; + dest->state = (struct internal_state FAR *) ds; + zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state)); + ds->strm = dest; + + ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); + ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); + ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); + overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); + ds->pending_buf = (uchf *) overlay; + + if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || + ds->pending_buf == Z_NULL) { + deflateEnd (dest); + return Z_MEM_ERROR; + } + /* following zmemcpy do not work for 16-bit MSDOS */ + zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); + zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos)); + zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos)); + zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); + + ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); + ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); + ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; + + ds->l_desc.dyn_tree = ds->dyn_ltree; + ds->d_desc.dyn_tree = ds->dyn_dtree; + ds->bl_desc.dyn_tree = ds->bl_tree; + + return Z_OK; +#endif /* MAXSEG_64K */ +} + +/* =========================================================================== + * Read a new buffer from the current input stream, update the adler32 + * and total number of bytes read. 
All deflate() input goes through + * this function so some applications may wish to modify it to avoid + * allocating a large strm->next_in buffer and copying from it. + * (See also flush_pending()). + */ +local int read_buf(strm, buf, size) + z_streamp strm; + Bytef *buf; + unsigned size; +{ + unsigned len = strm->avail_in; + + if (len > size) len = size; + if (len == 0) return 0; + + strm->avail_in -= len; + + zmemcpy(buf, strm->next_in, len); + if (strm->state->wrap == 1) { + strm->adler = adler32(strm->adler, buf, len); + } +#ifdef GZIP + else if (strm->state->wrap == 2) { + strm->adler = crc32(strm->adler, buf, len); + } +#endif + strm->next_in += len; + strm->total_in += len; + + return (int)len; +} + +/* =========================================================================== + * Initialize the "longest match" routines for a new zlib stream + */ +local void lm_init (s) + deflate_state *s; +{ + s->window_size = (ulg)2L*s->w_size; + + CLEAR_HASH(s); + + /* Set the default configuration parameters: + */ + s->max_lazy_match = configuration_table[s->level].max_lazy; + s->good_match = configuration_table[s->level].good_length; + s->nice_match = configuration_table[s->level].nice_length; + s->max_chain_length = configuration_table[s->level].max_chain; + + s->strstart = 0; + s->block_start = 0L; + s->lookahead = 0; + s->insert = 0; + s->match_length = s->prev_length = MIN_MATCH-1; + s->match_available = 0; + s->ins_h = 0; +#ifndef FASTEST +#ifdef ASMV + match_init(); /* initialize the asm code */ +#endif +#endif +} + +#ifndef FASTEST +/* =========================================================================== + * Set match_start to the longest match starting at the given string and + * return its length. Matches shorter or equal to prev_length are discarded, + * in which case the result is equal to prev_length and match_start is + * garbage. 
+ * IN assertions: cur_match is the head of the hash chain for the current + * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 + * OUT assertion: the match length is not greater than s->lookahead. + */ +#ifndef ASMV +/* For 80x86 and 680x0, an optimized version will be provided in match.asm or + * match.S. The code will be functionally equivalent. + */ +local uInt longest_match(s, cur_match) + deflate_state *s; + IPos cur_match; /* current match */ +{ + unsigned chain_length = s->max_chain_length;/* max hash chain length */ + register Bytef *scan = s->window + s->strstart; /* current string */ + register Bytef *match; /* matched string */ + register int len; /* length of current match */ + int best_len = s->prev_length; /* best match length so far */ + int nice_match = s->nice_match; /* stop if match long enough */ + IPos limit = s->strstart > (IPos)MAX_DIST(s) ? + s->strstart - (IPos)MAX_DIST(s) : NIL; + /* Stop when cur_match becomes <= limit. To simplify the code, + * we prevent matches with the string of window index 0. + */ + Posf *prev = s->prev; + uInt wmask = s->w_mask; + +#ifdef UNALIGNED_OK + /* Compare two bytes at a time. Note: this is not always beneficial. + * Try with and without -DUNALIGNED_OK to check. + */ + register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1; + register ush scan_start = *(ushf*)scan; + register ush scan_end = *(ushf*)(scan+best_len-1); +#else + register Bytef *strend = s->window + s->strstart + MAX_MATCH; + register Byte scan_end1 = scan[best_len-1]; + register Byte scan_end = scan[best_len]; +#endif + + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + * It is easy to get rid of this optimization if necessary. 
+ */ + Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); + + /* Do not waste too much time if we already have a good match: */ + if (s->prev_length >= s->good_match) { + chain_length >>= 2; + } + /* Do not look for matches beyond the end of the input. This is necessary + * to make deflate deterministic. + */ + if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; + + Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); + + do { + Assert(cur_match < s->strstart, "no future"); + match = s->window + cur_match; + + /* Skip to next match if the match length cannot increase + * or if the match length is less than 2. Note that the checks below + * for insufficient lookahead only occur occasionally for performance + * reasons. Therefore uninitialized memory will be accessed, and + * conditional jumps will be made that depend on those values. + * However the length of the match is limited to the lookahead, so + * the output of deflate is not affected by the uninitialized values. + */ +#if (defined(UNALIGNED_OK) && MAX_MATCH == 258) + /* This code assumes sizeof(unsigned short) == 2. Do not use + * UNALIGNED_OK if your compiler uses a different size. + */ + if (*(ushf*)(match+best_len-1) != scan_end || + *(ushf*)match != scan_start) continue; + + /* It is not necessary to compare scan[2] and match[2] since they are + * always equal when the other bytes match, given that the hash keys + * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at + * strstart+3, +5, ... up to strstart+257. We check for insufficient + * lookahead only every 4th comparison; the 128th check will be made + * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is + * necessary to put more guard bytes at the end of the window, or + * to check more often for insufficient lookahead. 
+ */ + Assert(scan[2] == match[2], "scan[2]?"); + scan++, match++; + do { + } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) && + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && + scan < strend); + /* The funny "do {}" generates better code on most compilers */ + + /* Here, scan <= window+strstart+257 */ + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + if (*scan == *match) scan++; + + len = (MAX_MATCH - 1) - (int)(strend-scan); + scan = strend - (MAX_MATCH-1); + +#else /* UNALIGNED_OK */ + + if (match[best_len] != scan_end || + match[best_len-1] != scan_end1 || + *match != *scan || + *++match != scan[1]) continue; + + /* The check at best_len-1 can be removed because it will be made + * again later. (This heuristic is not always a win.) + * It is not necessary to compare scan[2] and match[2] since they + * are always equal when the other bytes match, given that + * the hash keys are equal and that HASH_BITS >= 8. + */ + scan += 2, match++; + Assert(*scan == *match, "match[2]?"); + + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart+258. 
+ */ + do { + } while (*++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + scan < strend); + + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + + len = MAX_MATCH - (int)(strend - scan); + scan = strend - MAX_MATCH; + +#endif /* UNALIGNED_OK */ + + if (len > best_len) { + s->match_start = cur_match; + best_len = len; + if (len >= nice_match) break; +#ifdef UNALIGNED_OK + scan_end = *(ushf*)(scan+best_len-1); +#else + scan_end1 = scan[best_len-1]; + scan_end = scan[best_len]; +#endif + } + } while ((cur_match = prev[cur_match & wmask]) > limit + && --chain_length != 0); + + if ((uInt)best_len <= s->lookahead) return (uInt)best_len; + return s->lookahead; +} +#endif /* ASMV */ + +#else /* FASTEST */ + +/* --------------------------------------------------------------------------- + * Optimized version for FASTEST only + */ +local uInt longest_match(s, cur_match) + deflate_state *s; + IPos cur_match; /* current match */ +{ + register Bytef *scan = s->window + s->strstart; /* current string */ + register Bytef *match; /* matched string */ + register int len; /* length of current match */ + register Bytef *strend = s->window + s->strstart + MAX_MATCH; + + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + * It is easy to get rid of this optimization if necessary. + */ + Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); + + Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); + + Assert(cur_match < s->strstart, "no future"); + + match = s->window + cur_match; + + /* Return failure if the match length is less than 2: + */ + if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1; + + /* The check at best_len-1 can be removed because it will be made + * again later. (This heuristic is not always a win.) 
+ * It is not necessary to compare scan[2] and match[2] since they + * are always equal when the other bytes match, given that + * the hash keys are equal and that HASH_BITS >= 8. + */ + scan += 2, match += 2; + Assert(*scan == *match, "match[2]?"); + + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart+258. + */ + do { + } while (*++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + scan < strend); + + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + + len = MAX_MATCH - (int)(strend - scan); + + if (len < MIN_MATCH) return MIN_MATCH - 1; + + s->match_start = cur_match; + return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead; +} + +#endif /* FASTEST */ + +#ifdef DEBUG +/* =========================================================================== + * Check that the match at match_start is indeed a match. + */ +local void check_match(s, start, match, length) + deflate_state *s; + IPos start, match; + int length; +{ + /* check that the match is indeed a match */ + if (zmemcmp(s->window + match, + s->window + start, length) != EQUAL) { + fprintf(stderr, " start %u, match %u, length %d\n", + start, match, length); + do { + fprintf(stderr, "%c%c", s->window[match++], s->window[start++]); + } while (--length != 0); + z_error("invalid match"); + } + if (z_verbose > 1) { + fprintf(stderr,"\\[%d,%d]", start-match, length); + do { putc(s->window[start++], stderr); } while (--length != 0); + } +} +#else +# define check_match(s, start, match, length) +#endif /* DEBUG */ + +/* =========================================================================== + * Fill the window when the lookahead becomes insufficient. + * Updates strstart and lookahead. 
+ * + * IN assertion: lookahead < MIN_LOOKAHEAD + * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD + * At least one byte has been read, or avail_in == 0; reads are + * performed for at least two bytes (required for the zip translate_eol + * option -- not supported here). + */ +local void fill_window(s) + deflate_state *s; +{ + register unsigned n, m; + register Posf *p; + unsigned more; /* Amount of free space at the end of the window. */ + uInt wsize = s->w_size; + + Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead"); + + do { + more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); + + /* Deal with !@#$% 64K limit: */ + if (sizeof(int) <= 2) { + if (more == 0 && s->strstart == 0 && s->lookahead == 0) { + more = wsize; + + } else if (more == (unsigned)(-1)) { + /* Very unlikely, but possible on 16 bit machine if + * strstart == 0 && lookahead == 1 (input done a byte at time) + */ + more--; + } + } + + /* If the window is almost full and there is insufficient lookahead, + * move the upper half to the lower one to make room in the upper half. + */ + if (s->strstart >= wsize+MAX_DIST(s)) { + + zmemcpy(s->window, s->window+wsize, (unsigned)wsize); + s->match_start -= wsize; + s->strstart -= wsize; /* we now have strstart >= MAX_DIST */ + s->block_start -= (long) wsize; + + /* Slide the hash table (could be avoided with 32 bit values + at the expense of memory usage). We slide even when level == 0 + to keep the hash table consistent if we switch back to level > 0 + later. (Using level 0 permanently is not an optimal usage of + zlib, so we don't care about this pathological case.) + */ + n = s->hash_size; + p = &s->head[n]; + do { + m = *--p; + *p = (Pos)(m >= wsize ? m-wsize : NIL); + } while (--n); + + n = wsize; +#ifndef FASTEST + p = &s->prev[n]; + do { + m = *--p; + *p = (Pos)(m >= wsize ? m-wsize : NIL); + /* If n is not on any hash chain, prev[n] is garbage but + * its value will never be used. 
+ */ + } while (--n); +#endif + more += wsize; + } + if (s->strm->avail_in == 0) break; + + /* If there was no sliding: + * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && + * more == window_size - lookahead - strstart + * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) + * => more >= window_size - 2*WSIZE + 2 + * In the BIG_MEM or MMAP case (not yet supported), + * window_size == input_size + MIN_LOOKAHEAD && + * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. + * Otherwise, window_size == 2*WSIZE so more >= 2. + * If there was sliding, more >= WSIZE. So in all cases, more >= 2. + */ + Assert(more >= 2, "more < 2"); + + n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more); + s->lookahead += n; + + /* Initialize the hash value now that we have some input: */ + if (s->lookahead + s->insert >= MIN_MATCH) { + uInt str = s->strstart - s->insert; + s->ins_h = s->window[str]; + UPDATE_HASH(s, s->ins_h, s->window[str + 1]); +#if MIN_MATCH != 3 + Call UPDATE_HASH() MIN_MATCH-3 more times +#endif + while (s->insert) { + UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); +#ifndef FASTEST + s->prev[str & s->w_mask] = s->head[s->ins_h]; +#endif + s->head[s->ins_h] = (Pos)str; + str++; + s->insert--; + if (s->lookahead + s->insert < MIN_MATCH) + break; + } + } + /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, + * but this is not important since only literal bytes will be emitted. + */ + + } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); + + /* If the WIN_INIT bytes after the end of the current data have never been + * written, then zero those bytes in order to avoid memory check reports of + * the use of uninitialized (or uninitialised as Julian writes) bytes by + * the longest match routines. Update the high water mark for the next + * time through here. 
WIN_INIT is set to MAX_MATCH since the longest match + * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. + */ + if (s->high_water < s->window_size) { + ulg curr = s->strstart + (ulg)(s->lookahead); + ulg init; + + if (s->high_water < curr) { + /* Previous high water mark below current data -- zero WIN_INIT + * bytes or up to end of window, whichever is less. + */ + init = s->window_size - curr; + if (init > WIN_INIT) + init = WIN_INIT; + zmemzero(s->window + curr, (unsigned)init); + s->high_water = curr + init; + } + else if (s->high_water < (ulg)curr + WIN_INIT) { + /* High water mark at or above current data, but below current data + * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up + * to end of window, whichever is less. + */ + init = (ulg)curr + WIN_INIT - s->high_water; + if (init > s->window_size - s->high_water) + init = s->window_size - s->high_water; + zmemzero(s->window + s->high_water, (unsigned)init); + s->high_water += init; + } + } + + Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, + "not enough room for search"); +} + +/* =========================================================================== + * Flush the current block, with given end-of-file flag. + * IN assertion: strstart is set to the end of the current match. + */ +#define FLUSH_BLOCK_ONLY(s, last) { \ + _tr_flush_block(s, (s->block_start >= 0L ? \ + (charf *)&s->window[(unsigned)s->block_start] : \ + (charf *)Z_NULL), \ + (ulg)((long)s->strstart - s->block_start), \ + (last)); \ + s->block_start = s->strstart; \ + flush_pending(s->strm); \ + Tracev((stderr,"[FLUSH]")); \ +} + +/* Same but force premature exit if necessary. */ +#define FLUSH_BLOCK(s, last) { \ + FLUSH_BLOCK_ONLY(s, last); \ + if (s->strm->avail_out == 0) return (last) ? 
finish_started : need_more; \ +} + +/* =========================================================================== + * Copy without compression as much as possible from the input stream, return + * the current block state. + * This function does not insert new strings in the dictionary since + * uncompressible data is probably not useful. This function is used + * only for the level=0 compression option. + * NOTE: this function should be optimized to avoid extra copying from + * window to pending_buf. + */ +local block_state deflate_stored(s, flush) + deflate_state *s; + int flush; +{ + /* Stored blocks are limited to 0xffff bytes, pending_buf is limited + * to pending_buf_size, and each stored block has a 5 byte header: + */ + ulg max_block_size = 0xffff; + ulg max_start; + + if (max_block_size > s->pending_buf_size - 5) { + max_block_size = s->pending_buf_size - 5; + } + + /* Copy as much as possible from input to output: */ + for (;;) { + /* Fill the window as much as possible: */ + if (s->lookahead <= 1) { + + Assert(s->strstart < s->w_size+MAX_DIST(s) || + s->block_start >= (long)s->w_size, "slide too late"); + + fill_window(s); + if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more; + + if (s->lookahead == 0) break; /* flush the current block */ + } + Assert(s->block_start >= 0L, "block gone"); + + s->strstart += s->lookahead; + s->lookahead = 0; + + /* Emit a stored block if pending_buf will be full: */ + max_start = s->block_start + max_block_size; + if (s->strstart == 0 || (ulg)s->strstart >= max_start) { + /* strstart == 0 is possible when wraparound on 16-bit machine */ + s->lookahead = (uInt)(s->strstart - max_start); + s->strstart = (uInt)max_start; + FLUSH_BLOCK(s, 0); + } + /* Flush if we may have to slide, otherwise block_start may become + * negative and the data will be gone: + */ + if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) { + FLUSH_BLOCK(s, 0); + } + } + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + 
return finish_done; + } + if ((long)s->strstart > s->block_start) + FLUSH_BLOCK(s, 0); + return block_done; +} + +/* =========================================================================== + * Compress as much as possible from the input stream, return the current + * block state. + * This function does not perform lazy evaluation of matches and inserts + * new strings in the dictionary only for unmatched strings or for short + * matches. It is used only for the fast compression options. + */ +local block_state deflate_fast(s, flush) + deflate_state *s; + int flush; +{ + IPos hash_head; /* head of the hash chain */ + int bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s->lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { + return need_more; + } + if (s->lookahead == 0) break; /* flush the current block */ + } + + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = NIL; + if (s->lookahead >= MIN_MATCH) { + INSERT_STRING(s, s->strstart, hash_head); + } + + /* Find the longest match, discarding those <= prev_length. + * At this point we have always match_length < MIN_MATCH + */ + if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). 
+ */ + s->match_length = longest_match (s, hash_head); + /* longest_match() sets match_start */ + } + if (s->match_length >= MIN_MATCH) { + check_match(s, s->strstart, s->match_start, s->match_length); + + _tr_tally_dist(s, s->strstart - s->match_start, + s->match_length - MIN_MATCH, bflush); + + s->lookahead -= s->match_length; + + /* Insert new strings in the hash table only if the match length + * is not too large. This saves time but degrades compression. + */ +#ifndef FASTEST + if (s->match_length <= s->max_insert_length && + s->lookahead >= MIN_MATCH) { + s->match_length--; /* string at strstart already in table */ + do { + s->strstart++; + INSERT_STRING(s, s->strstart, hash_head); + /* strstart never exceeds WSIZE-MAX_MATCH, so there are + * always MIN_MATCH bytes ahead. + */ + } while (--s->match_length != 0); + s->strstart++; + } else +#endif + { + s->strstart += s->match_length; + s->match_length = 0; + s->ins_h = s->window[s->strstart]; + UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); +#if MIN_MATCH != 3 + Call UPDATE_HASH() MIN_MATCH-3 more times +#endif + /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not + * matter since it will be recomputed at next deflate call. + */ + } + } else { + /* No match, output a literal byte */ + Tracevv((stderr,"%c", s->window[s->strstart])); + _tr_tally_lit (s, s->window[s->strstart], bflush); + s->lookahead--; + s->strstart++; + } + if (bflush) FLUSH_BLOCK(s, 0); + } + s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} + +#ifndef FASTEST +/* =========================================================================== + * Same as above, but achieves better compression. We use a lazy + * evaluation for matches: a match is finally adopted only if there is + * no better match at the next window position. 
+ */ +local block_state deflate_slow(s, flush) + deflate_state *s; + int flush; +{ + IPos hash_head; /* head of hash chain */ + int bflush; /* set if current block must be flushed */ + + /* Process the input block. */ + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s->lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { + return need_more; + } + if (s->lookahead == 0) break; /* flush the current block */ + } + + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = NIL; + if (s->lookahead >= MIN_MATCH) { + INSERT_STRING(s, s->strstart, hash_head); + } + + /* Find the longest match, discarding those <= prev_length. + */ + s->prev_length = s->match_length, s->prev_match = s->match_start; + s->match_length = MIN_MATCH-1; + + if (hash_head != NIL && s->prev_length < s->max_lazy_match && + s->strstart - hash_head <= MAX_DIST(s)) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + s->match_length = longest_match (s, hash_head); + /* longest_match() sets match_start */ + + if (s->match_length <= 5 && (s->strategy == Z_FILTERED +#if TOO_FAR <= 32767 + || (s->match_length == MIN_MATCH && + s->strstart - s->match_start > TOO_FAR) +#endif + )) { + + /* If prev_match is also MIN_MATCH, match_start is garbage + * but we will ignore the current match anyway. 
+ */ + s->match_length = MIN_MATCH-1; + } + } + /* If there was a match at the previous step and the current + * match is not better, output the previous match: + */ + if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) { + uInt max_insert = s->strstart + s->lookahead - MIN_MATCH; + /* Do not insert strings in hash table beyond this. */ + + check_match(s, s->strstart-1, s->prev_match, s->prev_length); + + _tr_tally_dist(s, s->strstart -1 - s->prev_match, + s->prev_length - MIN_MATCH, bflush); + + /* Insert in hash table all strings up to the end of the match. + * strstart-1 and strstart are already inserted. If there is not + * enough lookahead, the last two strings are not inserted in + * the hash table. + */ + s->lookahead -= s->prev_length-1; + s->prev_length -= 2; + do { + if (++s->strstart <= max_insert) { + INSERT_STRING(s, s->strstart, hash_head); + } + } while (--s->prev_length != 0); + s->match_available = 0; + s->match_length = MIN_MATCH-1; + s->strstart++; + + if (bflush) FLUSH_BLOCK(s, 0); + + } else if (s->match_available) { + /* If there was no match at the previous position, output a + * single literal. If there was a match but the current match + * is longer, truncate the previous match to a single literal. + */ + Tracevv((stderr,"%c", s->window[s->strstart-1])); + _tr_tally_lit(s, s->window[s->strstart-1], bflush); + if (bflush) { + FLUSH_BLOCK_ONLY(s, 0); + } + s->strstart++; + s->lookahead--; + if (s->strm->avail_out == 0) return need_more; + } else { + /* There is no previous match to compare with, wait for + * the next step to decide. + */ + s->match_available = 1; + s->strstart++; + s->lookahead--; + } + } + Assert (flush != Z_NO_FLUSH, "no flush?"); + if (s->match_available) { + Tracevv((stderr,"%c", s->window[s->strstart-1])); + _tr_tally_lit(s, s->window[s->strstart-1], bflush); + s->match_available = 0; + } + s->insert = s->strstart < MIN_MATCH-1 ? 
s->strstart : MIN_MATCH-1; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} +#endif /* FASTEST */ + +/* =========================================================================== + * For Z_RLE, simply look for runs of bytes, generate matches only of distance + * one. Do not maintain a hash table. (It will be regenerated if this run of + * deflate switches away from Z_RLE.) + */ +local block_state deflate_rle(s, flush) + deflate_state *s; + int flush; +{ + int bflush; /* set if current block must be flushed */ + uInt prev; /* byte at distance one to match */ + Bytef *scan, *strend; /* scan goes up to strend for length of run */ + + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the longest run, plus one for the unrolled loop. + */ + if (s->lookahead <= MAX_MATCH) { + fill_window(s); + if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) { + return need_more; + } + if (s->lookahead == 0) break; /* flush the current block */ + } + + /* See how many times the previous byte repeats */ + s->match_length = 0; + if (s->lookahead >= MIN_MATCH && s->strstart > 0) { + scan = s->window + s->strstart - 1; + prev = *scan; + if (prev == *++scan && prev == *++scan && prev == *++scan) { + strend = s->window + s->strstart + MAX_MATCH; + do { + } while (prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + scan < strend); + s->match_length = MAX_MATCH - (int)(strend - scan); + if (s->match_length > s->lookahead) + s->match_length = s->lookahead; + } + Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); + } + + /* Emit match if have run of MIN_MATCH or longer, else emit literal */ + if (s->match_length >= MIN_MATCH) { + check_match(s, s->strstart, s->strstart - 1, s->match_length); + + 
_tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush); + + s->lookahead -= s->match_length; + s->strstart += s->match_length; + s->match_length = 0; + } else { + /* No match, output a literal byte */ + Tracevv((stderr,"%c", s->window[s->strstart])); + _tr_tally_lit (s, s->window[s->strstart], bflush); + s->lookahead--; + s->strstart++; + } + if (bflush) FLUSH_BLOCK(s, 0); + } + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} + +/* =========================================================================== + * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. + * (It will be regenerated if this run of deflate switches away from Huffman.) + */ +local block_state deflate_huff(s, flush) + deflate_state *s; + int flush; +{ + int bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we have a literal to write. */ + if (s->lookahead == 0) { + fill_window(s); + if (s->lookahead == 0) { + if (flush == Z_NO_FLUSH) + return need_more; + break; /* flush the current block */ + } + } + + /* Output a literal byte */ + s->match_length = 0; + Tracevv((stderr,"%c", s->window[s->strstart])); + _tr_tally_lit (s, s->window[s->strstart], bflush); + s->lookahead--; + s->strstart++; + if (bflush) FLUSH_BLOCK(s, 0); + } + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} diff --git a/tests/scancode/data/resource/samples/zlib/deflate.h b/tests/scancode/data/resource/samples/zlib/deflate.h new file mode 100644 index 00000000000..ce0299edd19 --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/deflate.h @@ -0,0 +1,346 @@ +/* deflate.h -- internal compression state + * Copyright (C) 1995-2012 Jean-loup Gailly + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should 
*not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +/* @(#) $Id$ */ + +#ifndef DEFLATE_H +#define DEFLATE_H + +#include "zutil.h" + +/* define NO_GZIP when compiling if you want to disable gzip header and + trailer creation by deflate(). NO_GZIP would be used to avoid linking in + the crc code when it is not needed. For shared libraries, gzip encoding + should be left enabled. */ +#ifndef NO_GZIP +# define GZIP +#endif + +/* =========================================================================== + * Internal compression state. + */ + +#define LENGTH_CODES 29 +/* number of length codes, not counting the special END_BLOCK code */ + +#define LITERALS 256 +/* number of literal bytes 0..255 */ + +#define L_CODES (LITERALS+1+LENGTH_CODES) +/* number of Literal or Length codes, including the END_BLOCK code */ + +#define D_CODES 30 +/* number of distance codes */ + +#define BL_CODES 19 +/* number of codes used to transfer the bit lengths */ + +#define HEAP_SIZE (2*L_CODES+1) +/* maximum heap size */ + +#define MAX_BITS 15 +/* All codes must not exceed MAX_BITS bits */ + +#define Buf_size 16 +/* size of bit buffer in bi_buf */ + +#define INIT_STATE 42 +#define EXTRA_STATE 69 +#define NAME_STATE 73 +#define COMMENT_STATE 91 +#define HCRC_STATE 103 +#define BUSY_STATE 113 +#define FINISH_STATE 666 +/* Stream status */ + + +/* Data structure describing a single value and its code string. 
*/ +typedef struct ct_data_s { + union { + ush freq; /* frequency count */ + ush code; /* bit string */ + } fc; + union { + ush dad; /* father node in Huffman tree */ + ush len; /* length of bit string */ + } dl; +} FAR ct_data; + +#define Freq fc.freq +#define Code fc.code +#define Dad dl.dad +#define Len dl.len + +typedef struct static_tree_desc_s static_tree_desc; + +typedef struct tree_desc_s { + ct_data *dyn_tree; /* the dynamic tree */ + int max_code; /* largest code with non zero frequency */ + static_tree_desc *stat_desc; /* the corresponding static tree */ +} FAR tree_desc; + +typedef ush Pos; +typedef Pos FAR Posf; +typedef unsigned IPos; + +/* A Pos is an index in the character window. We use short instead of int to + * save space in the various tables. IPos is used only for parameter passing. + */ + +typedef struct internal_state { + z_streamp strm; /* pointer back to this zlib stream */ + int status; /* as the name implies */ + Bytef *pending_buf; /* output still pending */ + ulg pending_buf_size; /* size of pending_buf */ + Bytef *pending_out; /* next pending byte to output to the stream */ + uInt pending; /* nb of bytes in the pending buffer */ + int wrap; /* bit 0 true for zlib, bit 1 true for gzip */ + gz_headerp gzhead; /* gzip header information to write */ + uInt gzindex; /* where in extra, name, or comment */ + Byte method; /* can only be DEFLATED */ + int last_flush; /* value of flush param for previous deflate call */ + + /* used by deflate.c: */ + + uInt w_size; /* LZ77 window size (32K by default) */ + uInt w_bits; /* log2(w_size) (8..16) */ + uInt w_mask; /* w_size - 1 */ + + Bytef *window; + /* Sliding window. Input bytes are read into the second half of the window, + * and move to the first half later to keep a dictionary of at least wSize + * bytes. With this organization, matches are limited to a distance of + * wSize-MAX_MATCH bytes, but this ensures that IO is always + * performed with a length multiple of the block size. 
Also, it limits + * the window size to 64K, which is quite useful on MSDOS. + * To do: use the user input buffer as sliding window. + */ + + ulg window_size; + /* Actual size of window: 2*wSize, except when the user input buffer + * is directly used as sliding window. + */ + + Posf *prev; + /* Link to older string with same hash index. To limit the size of this + * array to 64K, this link is maintained only for the last 32K strings. + * An index in this array is thus a window index modulo 32K. + */ + + Posf *head; /* Heads of the hash chains or NIL. */ + + uInt ins_h; /* hash index of string to be inserted */ + uInt hash_size; /* number of elements in hash table */ + uInt hash_bits; /* log2(hash_size) */ + uInt hash_mask; /* hash_size-1 */ + + uInt hash_shift; + /* Number of bits by which ins_h must be shifted at each input + * step. It must be such that after MIN_MATCH steps, the oldest + * byte no longer takes part in the hash key, that is: + * hash_shift * MIN_MATCH >= hash_bits + */ + + long block_start; + /* Window position at the beginning of the current output block. Gets + * negative when the window is moved backwards. + */ + + uInt match_length; /* length of best match */ + IPos prev_match; /* previous match */ + int match_available; /* set if previous match exists */ + uInt strstart; /* start of string to insert */ + uInt match_start; /* start of matching string */ + uInt lookahead; /* number of valid bytes ahead in window */ + + uInt prev_length; + /* Length of the best match at previous step. Matches not greater than this + * are discarded. This is used in the lazy match evaluation. + */ + + uInt max_chain_length; + /* To speed up deflation, hash chains are never searched beyond this + * length. A higher limit improves compression ratio but degrades the + * speed. + */ + + uInt max_lazy_match; + /* Attempt to find a better match only when the current match is strictly + * smaller than this value. 
This mechanism is used only for compression + * levels >= 4. + */ +# define max_insert_length max_lazy_match + /* Insert new strings in the hash table only if the match length is not + * greater than this length. This saves time but degrades compression. + * max_insert_length is used only for compression levels <= 3. + */ + + int level; /* compression level (1..9) */ + int strategy; /* favor or force Huffman coding*/ + + uInt good_match; + /* Use a faster search when the previous match is longer than this */ + + int nice_match; /* Stop searching when current match exceeds this */ + + /* used by trees.c: */ + /* Didn't use ct_data typedef below to suppress compiler warning */ + struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ + struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ + struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ + + struct tree_desc_s l_desc; /* desc. for literal tree */ + struct tree_desc_s d_desc; /* desc. for distance tree */ + struct tree_desc_s bl_desc; /* desc. for bit length tree */ + + ush bl_count[MAX_BITS+1]; + /* number of codes at each bit length for an optimal tree */ + + int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ + int heap_len; /* number of elements in the heap */ + int heap_max; /* element of largest frequency */ + /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. + * The same heap array is used to build all trees. + */ + + uch depth[2*L_CODES+1]; + /* Depth of each subtree used as tie breaker for trees of equal frequency + */ + + uchf *l_buf; /* buffer for literals or lengths */ + + uInt lit_bufsize; + /* Size of match buffer for literals/lengths. 
There are 4 reasons for + * limiting lit_bufsize to 64K: + * - frequencies can be kept in 16 bit counters + * - if compression is not successful for the first block, all input + * data is still in the window so we can still emit a stored block even + * when input comes from standard input. (This can also be done for + * all blocks if lit_bufsize is not greater than 32K.) + * - if compression is not successful for a file smaller than 64K, we can + * even emit a stored file instead of a stored block (saving 5 bytes). + * This is applicable only for zip (not gzip or zlib). + * - creating new Huffman trees less frequently may not provide fast + * adaptation to changes in the input data statistics. (Take for + * example a binary file with poorly compressible code followed by + * a highly compressible string table.) Smaller buffer sizes give + * fast adaptation but have of course the overhead of transmitting + * trees more frequently. + * - I can't count above 4 + */ + + uInt last_lit; /* running index in l_buf */ + + ushf *d_buf; + /* Buffer for distances. To simplify the code, d_buf and l_buf have + * the same number of elements. To use different lengths, an extra flag + * array would be necessary. + */ + + ulg opt_len; /* bit length of current block with optimal trees */ + ulg static_len; /* bit length of current block with static trees */ + uInt matches; /* number of string matches in current block */ + uInt insert; /* bytes at end of window left to insert */ + +#ifdef DEBUG + ulg compressed_len; /* total bit length of compressed file mod 2^32 */ + ulg bits_sent; /* bit length of compressed data sent mod 2^32 */ +#endif + + ush bi_buf; + /* Output buffer. bits are inserted starting at the bottom (least + * significant bits). + */ + int bi_valid; + /* Number of valid bits in bi_buf. All bits above the last valid bit + * are always zero. 
+ */ + + ulg high_water; + /* High water mark offset in window for initialized bytes -- bytes above + * this are set to zero in order to avoid memory check warnings when + * longest match routines access bytes past the input. This is then + * updated to the new high water mark. + */ + +} FAR deflate_state; + +/* Output a byte on the stream. + * IN assertion: there is enough room in pending_buf. + */ +#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);} + + +#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) +/* Minimum amount of lookahead, except at the end of the input file. + * See deflate.c for comments about the MIN_MATCH+1. + */ + +#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD) +/* In order to simplify the code, particularly on 16 bit machines, match + * distances are limited to MAX_DIST instead of WSIZE. + */ + +#define WIN_INIT MAX_MATCH +/* Number of bytes after end of data in window to initialize in order to avoid + memory checker errors from longest match routines */ + + /* in trees.c */ +void ZLIB_INTERNAL _tr_init OF((deflate_state *s)); +int ZLIB_INTERNAL _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); +void ZLIB_INTERNAL _tr_flush_block OF((deflate_state *s, charf *buf, + ulg stored_len, int last)); +void ZLIB_INTERNAL _tr_flush_bits OF((deflate_state *s)); +void ZLIB_INTERNAL _tr_align OF((deflate_state *s)); +void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, + ulg stored_len, int last)); + +#define d_code(dist) \ + ((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)]) +/* Mapping from a distance to a distance code. dist is the distance - 1 and + * must not have side effects. _dist_code[256] and _dist_code[257] are never + * used. 
+ */ + +#ifndef DEBUG +/* Inline versions of _tr_tally for speed: */ + +#if defined(GEN_TREES_H) || !defined(STDC) + extern uch ZLIB_INTERNAL _length_code[]; + extern uch ZLIB_INTERNAL _dist_code[]; +#else + extern const uch ZLIB_INTERNAL _length_code[]; + extern const uch ZLIB_INTERNAL _dist_code[]; +#endif + +# define _tr_tally_lit(s, c, flush) \ + { uch cc = (c); \ + s->d_buf[s->last_lit] = 0; \ + s->l_buf[s->last_lit++] = cc; \ + s->dyn_ltree[cc].Freq++; \ + flush = (s->last_lit == s->lit_bufsize-1); \ + } +# define _tr_tally_dist(s, distance, length, flush) \ + { uch len = (length); \ + ush dist = (distance); \ + s->d_buf[s->last_lit] = dist; \ + s->l_buf[s->last_lit++] = len; \ + dist--; \ + s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ + s->dyn_dtree[d_code(dist)].Freq++; \ + flush = (s->last_lit == s->lit_bufsize-1); \ + } +#else +# define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) +# define _tr_tally_dist(s, distance, length, flush) \ + flush = _tr_tally(s, distance, length) +#endif + +#endif /* DEFLATE_H */ diff --git a/tests/scancode/data/resource/samples/zlib/dotzlib/AssemblyInfo.cs b/tests/scancode/data/resource/samples/zlib/dotzlib/AssemblyInfo.cs new file mode 100644 index 00000000000..0491bfc2b03 --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/dotzlib/AssemblyInfo.cs @@ -0,0 +1,58 @@ +using System.Reflection; +using System.Runtime.CompilerServices; + +// +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. 
+// +[assembly: AssemblyTitle("DotZLib")] +[assembly: AssemblyDescription(".Net bindings for ZLib compression dll 1.2.x")] +[assembly: AssemblyConfiguration("")] +[assembly: AssemblyCompany("Henrik Ravn")] +[assembly: AssemblyProduct("")] +[assembly: AssemblyCopyright("(c) 2004 by Henrik Ravn")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] + +// +// Version information for an assembly consists of the following four values: +// +// Major Version +// Minor Version +// Build Number +// Revision +// +// You can specify all the values or you can default the Revision and Build Numbers +// by using the '*' as shown below: + +[assembly: AssemblyVersion("1.0.*")] + +// +// In order to sign your assembly you must specify a key to use. Refer to the +// Microsoft .NET Framework documentation for more information on assembly signing. +// +// Use the attributes below to control which key is used for signing. +// +// Notes: +// (*) If no key is specified, the assembly is not signed. +// (*) KeyName refers to a key that has been installed in the Crypto Service +// Provider (CSP) on your machine. KeyFile refers to a file which contains +// a key. +// (*) If the KeyFile and the KeyName values are both specified, the +// following processing occurs: +// (1) If the KeyName can be found in the CSP, that key is used. +// (2) If the KeyName does not exist and the KeyFile does exist, the key +// in the KeyFile is installed into the CSP and used. +// (*) In order to create a KeyFile, you can use the sn.exe (Strong Name) utility. +// When specifying the KeyFile, the location of the KeyFile should be +// relative to the project output directory which is +// %Project Directory%\obj\. 
For example, if your KeyFile is +// located in the project directory, you would specify the AssemblyKeyFile +// attribute as [assembly: AssemblyKeyFile("..\\..\\mykey.snk")] +// (*) Delay Signing is an advanced option - see the Microsoft .NET Framework +// documentation for more information on this. +// +[assembly: AssemblyDelaySign(false)] +[assembly: AssemblyKeyFile("")] +[assembly: AssemblyKeyName("")] diff --git a/tests/scancode/data/resource/samples/zlib/dotzlib/ChecksumImpl.cs b/tests/scancode/data/resource/samples/zlib/dotzlib/ChecksumImpl.cs new file mode 100644 index 00000000000..788b2fceced --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/dotzlib/ChecksumImpl.cs @@ -0,0 +1,202 @@ +// +// Copyright Henrik Ravn 2004 +// +// Use, modification and distribution are subject to the Boost Software License, Version 1.0. +// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +using System; +using System.Runtime.InteropServices; +using System.Text; + + +namespace DotZLib +{ + #region ChecksumGeneratorBase + ///

+ /// Implements the common functionality needed for all s + /// + /// + public abstract class ChecksumGeneratorBase : ChecksumGenerator + { + /// + /// The value of the current checksum + /// + protected uint _current; + + /// + /// Initializes a new instance of the checksum generator base - the current checksum is + /// set to zero + /// + public ChecksumGeneratorBase() + { + _current = 0; + } + + /// + /// Initializes a new instance of the checksum generator basewith a specified value + /// + /// The value to set the current checksum to + public ChecksumGeneratorBase(uint initialValue) + { + _current = initialValue; + } + + /// + /// Resets the current checksum to zero + /// + public void Reset() { _current = 0; } + + /// + /// Gets the current checksum value + /// + public uint Value { get { return _current; } } + + /// + /// Updates the current checksum with part of an array of bytes + /// + /// The data to update the checksum with + /// Where in data to start updating + /// The number of bytes from data to use + /// The sum of offset and count is larger than the length of data + /// data is a null reference + /// Offset or count is negative. + /// All the other Update methods are implmeneted in terms of this one. + /// This is therefore the only method a derived class has to implement + public abstract void Update(byte[] data, int offset, int count); + + /// + /// Updates the current checksum with an array of bytes. 
+ /// + /// The data to update the checksum with + public void Update(byte[] data) + { + Update(data, 0, data.Length); + } + + /// + /// Updates the current checksum with the data from a string + /// + /// The string to update the checksum with + /// The characters in the string are converted by the UTF-8 encoding + public void Update(string data) + { + Update(Encoding.UTF8.GetBytes(data)); + } + + /// + /// Updates the current checksum with the data from a string, using a specific encoding + /// + /// The string to update the checksum with + /// The encoding to use + public void Update(string data, Encoding encoding) + { + Update(encoding.GetBytes(data)); + } + + } + #endregion + + #region CRC32 + /// + /// Implements a CRC32 checksum generator + /// + public sealed class CRC32Checksum : ChecksumGeneratorBase + { + #region DLL imports + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern uint crc32(uint crc, int data, uint length); + + #endregion + + /// + /// Initializes a new instance of the CRC32 checksum generator + /// + public CRC32Checksum() : base() {} + + /// + /// Initializes a new instance of the CRC32 checksum generator with a specified value + /// + /// The value to set the current checksum to + public CRC32Checksum(uint initialValue) : base(initialValue) {} + + /// + /// Updates the current checksum with part of an array of bytes + /// + /// The data to update the checksum with + /// Where in data to start updating + /// The number of bytes from data to use + /// The sum of offset and count is larger than the length of data + /// data is a null reference + /// Offset or count is negative. 
+ public override void Update(byte[] data, int offset, int count) + { + if (offset < 0 || count < 0) throw new ArgumentOutOfRangeException(); + if ((offset+count) > data.Length) throw new ArgumentException(); + GCHandle hData = GCHandle.Alloc(data, GCHandleType.Pinned); + try + { + _current = crc32(_current, hData.AddrOfPinnedObject().ToInt32()+offset, (uint)count); + } + finally + { + hData.Free(); + } + } + + } + #endregion + + #region Adler + /// + /// Implements a checksum generator that computes the Adler checksum on data + /// + public sealed class AdlerChecksum : ChecksumGeneratorBase + { + #region DLL imports + + [DllImport("ZLIB1.dll", CallingConvention=CallingConvention.Cdecl)] + private static extern uint adler32(uint adler, int data, uint length); + + #endregion + + /// + /// Initializes a new instance of the Adler checksum generator + /// + public AdlerChecksum() : base() {} + + /// + /// Initializes a new instance of the Adler checksum generator with a specified value + /// + /// The value to set the current checksum to + public AdlerChecksum(uint initialValue) : base(initialValue) {} + + /// + /// Updates the current checksum with part of an array of bytes + /// + /// The data to update the checksum with + /// Where in data to start updating + /// The number of bytes from data to use + /// The sum of offset and count is larger than the length of data + /// data is a null reference + /// Offset or count is negative. 
+ public override void Update(byte[] data, int offset, int count) + { + if (offset < 0 || count < 0) throw new ArgumentOutOfRangeException(); + if ((offset+count) > data.Length) throw new ArgumentException(); + GCHandle hData = GCHandle.Alloc(data, GCHandleType.Pinned); + try + { + _current = adler32(_current, hData.AddrOfPinnedObject().ToInt32()+offset, (uint)count); + } + finally + { + hData.Free(); + } + } + + } + #endregion + +} \ No newline at end of file diff --git a/tests/scancode/data/resource/samples/zlib/dotzlib/LICENSE_1_0.txt b/tests/scancode/data/resource/samples/zlib/dotzlib/LICENSE_1_0.txt new file mode 100644 index 00000000000..30aac2cf479 --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/dotzlib/LICENSE_1_0.txt @@ -0,0 +1,23 @@ +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/tests/scancode/data/resource/samples/zlib/dotzlib/readme.txt b/tests/scancode/data/resource/samples/zlib/dotzlib/readme.txt new file mode 100644 index 00000000000..b2395720d4c --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/dotzlib/readme.txt @@ -0,0 +1,58 @@ +This directory contains a .Net wrapper class library for the ZLib1.dll + +The wrapper includes support for inflating/deflating memory buffers, +.Net streaming wrappers for the gz streams part of zlib, and wrappers +for the checksum parts of zlib. See DotZLib/UnitTests.cs for examples. + +Directory structure: +-------------------- + +LICENSE_1_0.txt - License file. +readme.txt - This file. +DotZLib.chm - Class library documentation +DotZLib.build - NAnt build file +DotZLib.sln - Microsoft Visual Studio 2003 solution file + +DotZLib\*.cs - Source files for the class library + +Unit tests: +----------- +The file DotZLib/UnitTests.cs contains unit tests for use with NUnit 2.1 or higher. +To include unit tests in the build, define nunit before building. + + +Build instructions: +------------------- + +1. Using Visual Studio.Net 2003: + Open DotZLib.sln in VS.Net and build from there. Output file (DotZLib.dll) + will be found ./DotZLib/bin/release or ./DotZLib/bin/debug, depending on + you are building the release or debug version of the library. Check + DotZLib/UnitTests.cs for instructions on how to include unit tests in the + build. + +2. Using NAnt: + Open a command prompt with access to the build environment and run nant + in the same directory as the DotZLib.build file. 
+ You can define 2 properties on the nant command-line to control the build: + debug={true|false} to toggle between release/debug builds (default=true). + nunit={true|false} to include or esclude unit tests (default=true). + Also the target clean will remove binaries. + Output file (DotZLib.dll) will be found in either ./DotZLib/bin/release + or ./DotZLib/bin/debug, depending on whether you are building the release + or debug version of the library. + + Examples: + nant -D:debug=false -D:nunit=false + will build a release mode version of the library without unit tests. + nant + will build a debug version of the library with unit tests + nant clean + will remove all previously built files. + + +--------------------------------- +Copyright (c) Henrik Ravn 2004 + +Use, modification and distribution are subject to the Boost Software License, Version 1.0. +(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) diff --git a/tests/scancode/data/resource/samples/zlib/gcc_gvmat64/gvmat64.S b/tests/scancode/data/resource/samples/zlib/gcc_gvmat64/gvmat64.S new file mode 100644 index 00000000000..dd858ddbd16 --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/gcc_gvmat64/gvmat64.S @@ -0,0 +1,574 @@ +/* +;uInt longest_match_x64( +; deflate_state *s, +; IPos cur_match); // current match + +; gvmat64.S -- Asm portion of the optimized longest_match for 32 bits x86_64 +; (AMD64 on Athlon 64, Opteron, Phenom +; and Intel EM64T on Pentium 4 with EM64T, Pentium D, Core 2 Duo, Core I5/I7) +; this file is translation from gvmat64.asm to GCC 4.x (for Linux, Mac XCode) +; Copyright (C) 1995-2010 Jean-loup Gailly, Brian Raiter and Gilles Vollant. +; +; File written by Gilles Vollant, by converting to assembly the longest_match +; from Jean-loup Gailly in deflate.c of zLib and infoZip zip. 
+; and by taking inspiration on asm686 with masm, optimised assembly code +; from Brian Raiter, written 1998 +; +; This software is provided 'as-is', without any express or implied +; warranty. In no event will the authors be held liable for any damages +; arising from the use of this software. +; +; Permission is granted to anyone to use this software for any purpose, +; including commercial applications, and to alter it and redistribute it +; freely, subject to the following restrictions: +; +; 1. The origin of this software must not be misrepresented; you must not +; claim that you wrote the original software. If you use this software +; in a product, an acknowledgment in the product documentation would be +; appreciated but is not required. +; 2. Altered source versions must be plainly marked as such, and must not be +; misrepresented as being the original software +; 3. This notice may not be removed or altered from any source distribution. +; +; http://www.zlib.net +; http://www.winimage.com/zLibDll +; http://www.muppetlabs.com/~breadbox/software/assembly.html +; +; to compile this file for zLib, I use option: +; gcc -c -arch x86_64 gvmat64.S + + +;uInt longest_match(s, cur_match) +; deflate_state *s; +; IPos cur_match; // current match / +; +; with XCode for Mac, I had strange error with some jump on intel syntax +; this is why BEFORE_JMP and AFTER_JMP are used + */ + + +#define BEFORE_JMP .att_syntax +#define AFTER_JMP .intel_syntax noprefix + +#ifndef NO_UNDERLINE +# define match_init _match_init +# define longest_match _longest_match +#endif + +.intel_syntax noprefix + +.globl match_init, longest_match +.text +longest_match: + + + +#define LocalVarsSize 96 +/* +; register used : rax,rbx,rcx,rdx,rsi,rdi,r8,r9,r10,r11,r12 +; free register : r14,r15 +; register can be saved : rsp +*/ + +#define chainlenwmask (rsp + 8 - LocalVarsSize) +#define nicematch (rsp + 16 - LocalVarsSize) + +#define save_rdi (rsp + 24 - LocalVarsSize) +#define save_rsi (rsp + 32 - 
LocalVarsSize) +#define save_rbx (rsp + 40 - LocalVarsSize) +#define save_rbp (rsp + 48 - LocalVarsSize) +#define save_r12 (rsp + 56 - LocalVarsSize) +#define save_r13 (rsp + 64 - LocalVarsSize) +#define save_r14 (rsp + 72 - LocalVarsSize) +#define save_r15 (rsp + 80 - LocalVarsSize) + + +/* +; all the +4 offsets are due to the addition of pending_buf_size (in zlib +; in the deflate_state structure since the asm code was first written +; (if you compile with zlib 1.0.4 or older, remove the +4). +; Note : these value are good with a 8 bytes boundary pack structure +*/ + +#define MAX_MATCH 258 +#define MIN_MATCH 3 +#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) + +/* +;;; Offsets for fields in the deflate_state structure. These numbers +;;; are calculated from the definition of deflate_state, with the +;;; assumption that the compiler will dword-align the fields. (Thus, +;;; changing the definition of deflate_state could easily cause this +;;; program to crash horribly, without so much as a warning at +;;; compile time. Sigh.) + +; all the +zlib1222add offsets are due to the addition of fields +; in zlib in the deflate_state structure since the asm code was first written +; (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)"). +; (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0"). +; if you compile with zlib 1.2.2.2 or later , use "zlib1222add equ 8"). 
+*/ + + + +/* you can check the structure offset by running + +#include +#include +#include "deflate.h" + +void print_depl() +{ +deflate_state ds; +deflate_state *s=&ds; +printf("size pointer=%u\n",(int)sizeof(void*)); + +printf("#define dsWSize %u\n",(int)(((char*)&(s->w_size))-((char*)s))); +printf("#define dsWMask %u\n",(int)(((char*)&(s->w_mask))-((char*)s))); +printf("#define dsWindow %u\n",(int)(((char*)&(s->window))-((char*)s))); +printf("#define dsPrev %u\n",(int)(((char*)&(s->prev))-((char*)s))); +printf("#define dsMatchLen %u\n",(int)(((char*)&(s->match_length))-((char*)s))); +printf("#define dsPrevMatch %u\n",(int)(((char*)&(s->prev_match))-((char*)s))); +printf("#define dsStrStart %u\n",(int)(((char*)&(s->strstart))-((char*)s))); +printf("#define dsMatchStart %u\n",(int)(((char*)&(s->match_start))-((char*)s))); +printf("#define dsLookahead %u\n",(int)(((char*)&(s->lookahead))-((char*)s))); +printf("#define dsPrevLen %u\n",(int)(((char*)&(s->prev_length))-((char*)s))); +printf("#define dsMaxChainLen %u\n",(int)(((char*)&(s->max_chain_length))-((char*)s))); +printf("#define dsGoodMatch %u\n",(int)(((char*)&(s->good_match))-((char*)s))); +printf("#define dsNiceMatch %u\n",(int)(((char*)&(s->nice_match))-((char*)s))); +} +*/ + +#define dsWSize 68 +#define dsWMask 76 +#define dsWindow 80 +#define dsPrev 96 +#define dsMatchLen 144 +#define dsPrevMatch 148 +#define dsStrStart 156 +#define dsMatchStart 160 +#define dsLookahead 164 +#define dsPrevLen 168 +#define dsMaxChainLen 172 +#define dsGoodMatch 188 +#define dsNiceMatch 192 + +#define window_size [ rcx + dsWSize] +#define WMask [ rcx + dsWMask] +#define window_ad [ rcx + dsWindow] +#define prev_ad [ rcx + dsPrev] +#define strstart [ rcx + dsStrStart] +#define match_start [ rcx + dsMatchStart] +#define Lookahead [ rcx + dsLookahead] //; 0ffffffffh on infozip +#define prev_length [ rcx + dsPrevLen] +#define max_chain_length [ rcx + dsMaxChainLen] +#define good_match [ rcx + dsGoodMatch] +#define nice_match [ 
rcx + dsNiceMatch] + +/* +; windows: +; parameter 1 in rcx(deflate state s), param 2 in rdx (cur match) + +; see http://weblogs.asp.net/oldnewthing/archive/2004/01/14/58579.aspx and +; http://msdn.microsoft.com/library/en-us/kmarch/hh/kmarch/64bitAMD_8e951dd2-ee77-4728-8702-55ce4b5dd24a.xml.asp +; +; All registers must be preserved across the call, except for +; rax, rcx, rdx, r8, r9, r10, and r11, which are scratch. + +; +; gcc on macosx-linux: +; see http://www.x86-64.org/documentation/abi-0.99.pdf +; param 1 in rdi, param 2 in rsi +; rbx, rsp, rbp, r12 to r15 must be preserved + +;;; Save registers that the compiler may be using, and adjust esp to +;;; make room for our stack frame. + + +;;; Retrieve the function arguments. r8d will hold cur_match +;;; throughout the entire function. edx will hold the pointer to the +;;; deflate_state structure during the function's setup (before +;;; entering the main loop. + +; ms: parameter 1 in rcx (deflate_state* s), param 2 in edx -> r8 (cur match) +; mac: param 1 in rdi, param 2 rsi +; this clear high 32 bits of r8, which can be garbage in both r8 and rdx +*/ + mov [save_rbx],rbx + mov [save_rbp],rbp + + + mov rcx,rdi + + mov r8d,esi + + + mov [save_r12],r12 + mov [save_r13],r13 + mov [save_r14],r14 + mov [save_r15],r15 + + +//;;; uInt wmask = s->w_mask; +//;;; unsigned chain_length = s->max_chain_length; +//;;; if (s->prev_length >= s->good_match) { +//;;; chain_length >>= 2; +//;;; } + + + mov edi, prev_length + mov esi, good_match + mov eax, WMask + mov ebx, max_chain_length + cmp edi, esi + jl LastMatchGood + shr ebx, 2 +LastMatchGood: + +//;;; chainlen is decremented once beforehand so that the function can +//;;; use the sign flag instead of the zero flag for the exit test. +//;;; It is then shifted into the high word, to make room for the wmask +//;;; value, which it will always accompany. 
+ + dec ebx + shl ebx, 16 + or ebx, eax + +//;;; on zlib only +//;;; if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; + + + + mov eax, nice_match + mov [chainlenwmask], ebx + mov r10d, Lookahead + cmp r10d, eax + cmovnl r10d, eax + mov [nicematch],r10d + + + +//;;; register Bytef *scan = s->window + s->strstart; + mov r10, window_ad + mov ebp, strstart + lea r13, [r10 + rbp] + +//;;; Determine how many bytes the scan ptr is off from being +//;;; dword-aligned. + + mov r9,r13 + neg r13 + and r13,3 + +//;;; IPos limit = s->strstart > (IPos)MAX_DIST(s) ? +//;;; s->strstart - (IPos)MAX_DIST(s) : NIL; + + + mov eax, window_size + sub eax, MIN_LOOKAHEAD + + + xor edi,edi + sub ebp, eax + + mov r11d, prev_length + + cmovng ebp,edi + +//;;; int best_len = s->prev_length; + + +//;;; Store the sum of s->window + best_len in esi locally, and in esi. + + lea rsi,[r10+r11] + +//;;; register ush scan_start = *(ushf*)scan; +//;;; register ush scan_end = *(ushf*)(scan+best_len-1); +//;;; Posf *prev = s->prev; + + movzx r12d,word ptr [r9] + movzx ebx, word ptr [r9 + r11 - 1] + + mov rdi, prev_ad + +//;;; Jump into the main loop. 
+ + mov edx, [chainlenwmask] + + cmp bx,word ptr [rsi + r8 - 1] + jz LookupLoopIsZero + + + +LookupLoop1: + and r8d, edx + + movzx r8d, word ptr [rdi + r8*2] + cmp r8d, ebp + jbe LeaveNow + + + + sub edx, 0x00010000 + BEFORE_JMP + js LeaveNow + AFTER_JMP + +LoopEntry1: + cmp bx,word ptr [rsi + r8 - 1] + BEFORE_JMP + jz LookupLoopIsZero + AFTER_JMP + +LookupLoop2: + and r8d, edx + + movzx r8d, word ptr [rdi + r8*2] + cmp r8d, ebp + BEFORE_JMP + jbe LeaveNow + AFTER_JMP + sub edx, 0x00010000 + BEFORE_JMP + js LeaveNow + AFTER_JMP + +LoopEntry2: + cmp bx,word ptr [rsi + r8 - 1] + BEFORE_JMP + jz LookupLoopIsZero + AFTER_JMP + +LookupLoop4: + and r8d, edx + + movzx r8d, word ptr [rdi + r8*2] + cmp r8d, ebp + BEFORE_JMP + jbe LeaveNow + AFTER_JMP + sub edx, 0x00010000 + BEFORE_JMP + js LeaveNow + AFTER_JMP + +LoopEntry4: + + cmp bx,word ptr [rsi + r8 - 1] + BEFORE_JMP + jnz LookupLoop1 + jmp LookupLoopIsZero + AFTER_JMP +/* +;;; do { +;;; match = s->window + cur_match; +;;; if (*(ushf*)(match+best_len-1) != scan_end || +;;; *(ushf*)match != scan_start) continue; +;;; [...] +;;; } while ((cur_match = prev[cur_match & wmask]) > limit +;;; && --chain_length != 0); +;;; +;;; Here is the inner loop of the function. The function will spend the +;;; majority of its time in this loop, and majority of that time will +;;; be spent in the first ten instructions. 
+;;; +;;; Within this loop: +;;; ebx = scanend +;;; r8d = curmatch +;;; edx = chainlenwmask - i.e., ((chainlen << 16) | wmask) +;;; esi = windowbestlen - i.e., (window + bestlen) +;;; edi = prev +;;; ebp = limit +*/ +.balign 16 +LookupLoop: + and r8d, edx + + movzx r8d, word ptr [rdi + r8*2] + cmp r8d, ebp + BEFORE_JMP + jbe LeaveNow + AFTER_JMP + sub edx, 0x00010000 + BEFORE_JMP + js LeaveNow + AFTER_JMP + +LoopEntry: + + cmp bx,word ptr [rsi + r8 - 1] + BEFORE_JMP + jnz LookupLoop1 + AFTER_JMP +LookupLoopIsZero: + cmp r12w, word ptr [r10 + r8] + BEFORE_JMP + jnz LookupLoop1 + AFTER_JMP + + +//;;; Store the current value of chainlen. + mov [chainlenwmask], edx +/* +;;; Point edi to the string under scrutiny, and esi to the string we +;;; are hoping to match it up with. In actuality, esi and edi are +;;; both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and edx is +;;; initialized to -(MAX_MATCH_8 - scanalign). +*/ + lea rsi,[r8+r10] + mov rdx, 0xfffffffffffffef8 //; -(MAX_MATCH_8) + lea rsi, [rsi + r13 + 0x0108] //;MAX_MATCH_8] + lea rdi, [r9 + r13 + 0x0108] //;MAX_MATCH_8] + + prefetcht1 [rsi+rdx] + prefetcht1 [rdi+rdx] + +/* +;;; Test the strings for equality, 8 bytes at a time. At the end, +;;; adjust rdx so that it is offset to the exact byte that mismatched. +;;; +;;; We already know at this point that the first three bytes of the +;;; strings match each other, and they can be safely passed over before +;;; starting the compare loop. So what this code does is skip over 0-3 +;;; bytes, as much as necessary in order to dword-align the edi +;;; pointer. (rsi will still be misaligned three times out of four.) +;;; +;;; It should be confessed that this loop usually does not represent +;;; much of the total running time. Replacing it with a more +;;; straightforward "rep cmpsb" would not drastically degrade +;;; performance. 
+*/ + +LoopCmps: + mov rax, [rsi + rdx] + xor rax, [rdi + rdx] + jnz LeaveLoopCmps + + mov rax, [rsi + rdx + 8] + xor rax, [rdi + rdx + 8] + jnz LeaveLoopCmps8 + + + mov rax, [rsi + rdx + 8+8] + xor rax, [rdi + rdx + 8+8] + jnz LeaveLoopCmps16 + + add rdx,8+8+8 + + BEFORE_JMP + jnz LoopCmps + jmp LenMaximum + AFTER_JMP + +LeaveLoopCmps16: add rdx,8 +LeaveLoopCmps8: add rdx,8 +LeaveLoopCmps: + + test eax, 0x0000FFFF + jnz LenLower + + test eax,0xffffffff + + jnz LenLower32 + + add rdx,4 + shr rax,32 + or ax,ax + BEFORE_JMP + jnz LenLower + AFTER_JMP + +LenLower32: + shr eax,16 + add rdx,2 + +LenLower: + sub al, 1 + adc rdx, 0 +//;;; Calculate the length of the match. If it is longer than MAX_MATCH, +//;;; then automatically accept it as the best possible match and leave. + + lea rax, [rdi + rdx] + sub rax, r9 + cmp eax, MAX_MATCH + BEFORE_JMP + jge LenMaximum + AFTER_JMP +/* +;;; If the length of the match is not longer than the best match we +;;; have so far, then forget it and return to the lookup loop. +;/////////////////////////////////// +*/ + cmp eax, r11d + jg LongerMatch + + lea rsi,[r10+r11] + + mov rdi, prev_ad + mov edx, [chainlenwmask] + BEFORE_JMP + jmp LookupLoop + AFTER_JMP +/* +;;; s->match_start = cur_match; +;;; best_len = len; +;;; if (len >= nice_match) break; +;;; scan_end = *(ushf*)(scan+best_len-1); +*/ +LongerMatch: + mov r11d, eax + mov match_start, r8d + cmp eax, [nicematch] + BEFORE_JMP + jge LeaveNow + AFTER_JMP + + lea rsi,[r10+rax] + + movzx ebx, word ptr [r9 + rax - 1] + mov rdi, prev_ad + mov edx, [chainlenwmask] + BEFORE_JMP + jmp LookupLoop + AFTER_JMP + +//;;; Accept the current string, with the maximum possible length. + +LenMaximum: + mov r11d,MAX_MATCH + mov match_start, r8d + +//;;; if ((uInt)best_len <= s->lookahead) return (uInt)best_len; +//;;; return s->lookahead; + +LeaveNow: + mov eax, Lookahead + cmp r11d, eax + cmovng eax, r11d + + + +//;;; Restore the stack and return from whence we came. 
+ + +// mov rsi,[save_rsi] +// mov rdi,[save_rdi] + mov rbx,[save_rbx] + mov rbp,[save_rbp] + mov r12,[save_r12] + mov r13,[save_r13] + mov r14,[save_r14] + mov r15,[save_r15] + + + ret 0 +//; please don't remove this string ! +//; Your can freely use gvmat64 in any free or commercial app +//; but it is far better don't remove the string in the binary! + // db 0dh,0ah,"asm686 with masm, optimised assembly code from Brian Raiter, written 1998, converted to amd 64 by Gilles Vollant 2005",0dh,0ah,0 + + +match_init: + ret 0 + + diff --git a/tests/scancode/data/resource/samples/zlib/infback9/infback9.c b/tests/scancode/data/resource/samples/zlib/infback9/infback9.c new file mode 100644 index 00000000000..05fb3e33807 --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/infback9/infback9.c @@ -0,0 +1,615 @@ +/* infback9.c -- inflate deflate64 data using a call-back interface + * Copyright (C) 1995-2008 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "zutil.h" +#include "infback9.h" +#include "inftree9.h" +#include "inflate9.h" + +#define WSIZE 65536UL + +/* + strm provides memory allocation functions in zalloc and zfree, or + Z_NULL to use the library memory allocation functions. + + window is a user-supplied window and output buffer that is 64K bytes. 
+ */ +int ZEXPORT inflateBack9Init_(strm, window, version, stream_size) +z_stream FAR *strm; +unsigned char FAR *window; +const char *version; +int stream_size; +{ + struct inflate_state FAR *state; + + if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || + stream_size != (int)(sizeof(z_stream))) + return Z_VERSION_ERROR; + if (strm == Z_NULL || window == Z_NULL) + return Z_STREAM_ERROR; + strm->msg = Z_NULL; /* in case we return an error */ + if (strm->zalloc == (alloc_func)0) { + strm->zalloc = zcalloc; + strm->opaque = (voidpf)0; + } + if (strm->zfree == (free_func)0) strm->zfree = zcfree; + state = (struct inflate_state FAR *)ZALLOC(strm, 1, + sizeof(struct inflate_state)); + if (state == Z_NULL) return Z_MEM_ERROR; + Tracev((stderr, "inflate: allocated\n")); + strm->state = (voidpf)state; + state->window = window; + return Z_OK; +} + +/* + Build and output length and distance decoding tables for fixed code + decoding. + */ +#ifdef MAKEFIXED +#include + +void makefixed9(void) +{ + unsigned sym, bits, low, size; + code *next, *lenfix, *distfix; + struct inflate_state state; + code fixed[544]; + + /* literal/length table */ + sym = 0; + while (sym < 144) state.lens[sym++] = 8; + while (sym < 256) state.lens[sym++] = 9; + while (sym < 280) state.lens[sym++] = 7; + while (sym < 288) state.lens[sym++] = 8; + next = fixed; + lenfix = next; + bits = 9; + inflate_table9(LENS, state.lens, 288, &(next), &(bits), state.work); + + /* distance table */ + sym = 0; + while (sym < 32) state.lens[sym++] = 5; + distfix = next; + bits = 5; + inflate_table9(DISTS, state.lens, 32, &(next), &(bits), state.work); + + /* write tables */ + puts(" /* inffix9.h -- table for decoding deflate64 fixed codes"); + puts(" * Generated automatically by makefixed9()."); + puts(" */"); + puts(""); + puts(" /* WARNING: this file should *not* be used by applications."); + puts(" It is part of the implementation of this library and is"); + puts(" subject to change. 
Applications should only use zlib.h."); + puts(" */"); + puts(""); + size = 1U << 9; + printf(" static const code lenfix[%u] = {", size); + low = 0; + for (;;) { + if ((low % 6) == 0) printf("\n "); + printf("{%u,%u,%d}", lenfix[low].op, lenfix[low].bits, + lenfix[low].val); + if (++low == size) break; + putchar(','); + } + puts("\n };"); + size = 1U << 5; + printf("\n static const code distfix[%u] = {", size); + low = 0; + for (;;) { + if ((low % 5) == 0) printf("\n "); + printf("{%u,%u,%d}", distfix[low].op, distfix[low].bits, + distfix[low].val); + if (++low == size) break; + putchar(','); + } + puts("\n };"); +} +#endif /* MAKEFIXED */ + +/* Macros for inflateBack(): */ + +/* Clear the input bit accumulator */ +#define INITBITS() \ + do { \ + hold = 0; \ + bits = 0; \ + } while (0) + +/* Assure that some input is available. If input is requested, but denied, + then return a Z_BUF_ERROR from inflateBack(). */ +#define PULL() \ + do { \ + if (have == 0) { \ + have = in(in_desc, &next); \ + if (have == 0) { \ + next = Z_NULL; \ + ret = Z_BUF_ERROR; \ + goto inf_leave; \ + } \ + } \ + } while (0) + +/* Get a byte of input into the bit accumulator, or return from inflateBack() + with an error if there is no input available. */ +#define PULLBYTE() \ + do { \ + PULL(); \ + have--; \ + hold += (unsigned long)(*next++) << bits; \ + bits += 8; \ + } while (0) + +/* Assure that there are at least n bits in the bit accumulator. If there is + not enough available input to do that, then return from inflateBack() with + an error. 
*/ +#define NEEDBITS(n) \ + do { \ + while (bits < (unsigned)(n)) \ + PULLBYTE(); \ + } while (0) + +/* Return the low n bits of the bit accumulator (n <= 16) */ +#define BITS(n) \ + ((unsigned)hold & ((1U << (n)) - 1)) + +/* Remove n bits from the bit accumulator */ +#define DROPBITS(n) \ + do { \ + hold >>= (n); \ + bits -= (unsigned)(n); \ + } while (0) + +/* Remove zero to seven bits as needed to go to a byte boundary */ +#define BYTEBITS() \ + do { \ + hold >>= bits & 7; \ + bits -= bits & 7; \ + } while (0) + +/* Assure that some output space is available, by writing out the window + if it's full. If the write fails, return from inflateBack() with a + Z_BUF_ERROR. */ +#define ROOM() \ + do { \ + if (left == 0) { \ + put = window; \ + left = WSIZE; \ + wrap = 1; \ + if (out(out_desc, put, (unsigned)left)) { \ + ret = Z_BUF_ERROR; \ + goto inf_leave; \ + } \ + } \ + } while (0) + +/* + strm provides the memory allocation functions and window buffer on input, + and provides information on the unused input on return. For Z_DATA_ERROR + returns, strm will also provide an error message. + + in() and out() are the call-back input and output functions. When + inflateBack() needs more input, it calls in(). When inflateBack() has + filled the window with output, or when it completes with data in the + window, it calls out() to write out the data. The application must not + change the provided input until in() is called again or inflateBack() + returns. The application must not change the window/output buffer until + inflateBack() returns. + + in() and out() are called with a descriptor parameter provided in the + inflateBack() call. This parameter can be a structure that provides the + information required to do the read or write, as well as accumulated + information on the input and output such as totals and check values. + + in() should return zero on failure. out() should return non-zero on + failure. 
If either in() or out() fails, than inflateBack() returns a + Z_BUF_ERROR. strm->next_in can be checked for Z_NULL to see whether it + was in() or out() that caused in the error. Otherwise, inflateBack() + returns Z_STREAM_END on success, Z_DATA_ERROR for an deflate format + error, or Z_MEM_ERROR if it could not allocate memory for the state. + inflateBack() can also return Z_STREAM_ERROR if the input parameters + are not correct, i.e. strm is Z_NULL or the state was not initialized. + */ +int ZEXPORT inflateBack9(strm, in, in_desc, out, out_desc) +z_stream FAR *strm; +in_func in; +void FAR *in_desc; +out_func out; +void FAR *out_desc; +{ + struct inflate_state FAR *state; + z_const unsigned char FAR *next; /* next input */ + unsigned char FAR *put; /* next output */ + unsigned have; /* available input */ + unsigned long left; /* available output */ + inflate_mode mode; /* current inflate mode */ + int lastblock; /* true if processing last block */ + int wrap; /* true if the window has wrapped */ + unsigned char FAR *window; /* allocated sliding window, if needed */ + unsigned long hold; /* bit buffer */ + unsigned bits; /* bits in bit buffer */ + unsigned extra; /* extra bits needed */ + unsigned long length; /* literal or length of data to copy */ + unsigned long offset; /* distance back to copy string from */ + unsigned long copy; /* number of stored or match bytes to copy */ + unsigned char FAR *from; /* where to copy match bytes from */ + code const FAR *lencode; /* starting table for length/literal codes */ + code const FAR *distcode; /* starting table for distance codes */ + unsigned lenbits; /* index bits for lencode */ + unsigned distbits; /* index bits for distcode */ + code here; /* current decoding table entry */ + code last; /* parent table entry */ + unsigned len; /* length to copy for repeats, bits to drop */ + int ret; /* return code */ + static const unsigned short order[19] = /* permutation of code lengths */ + {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 
11, 4, 12, 3, 13, 2, 14, 1, 15}; +#include "inffix9.h" + + /* Check that the strm exists and that the state was initialized */ + if (strm == Z_NULL || strm->state == Z_NULL) + return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + + /* Reset the state */ + strm->msg = Z_NULL; + mode = TYPE; + lastblock = 0; + wrap = 0; + window = state->window; + next = strm->next_in; + have = next != Z_NULL ? strm->avail_in : 0; + hold = 0; + bits = 0; + put = window; + left = WSIZE; + lencode = Z_NULL; + distcode = Z_NULL; + + /* Inflate until end of block marked as last */ + for (;;) + switch (mode) { + case TYPE: + /* determine and dispatch block type */ + if (lastblock) { + BYTEBITS(); + mode = DONE; + break; + } + NEEDBITS(3); + lastblock = BITS(1); + DROPBITS(1); + switch (BITS(2)) { + case 0: /* stored block */ + Tracev((stderr, "inflate: stored block%s\n", + lastblock ? " (last)" : "")); + mode = STORED; + break; + case 1: /* fixed block */ + lencode = lenfix; + lenbits = 9; + distcode = distfix; + distbits = 5; + Tracev((stderr, "inflate: fixed codes block%s\n", + lastblock ? " (last)" : "")); + mode = LEN; /* decode codes */ + break; + case 2: /* dynamic block */ + Tracev((stderr, "inflate: dynamic codes block%s\n", + lastblock ? 
" (last)" : "")); + mode = TABLE; + break; + case 3: + strm->msg = (char *)"invalid block type"; + mode = BAD; + } + DROPBITS(2); + break; + + case STORED: + /* get and verify stored block length */ + BYTEBITS(); /* go to byte boundary */ + NEEDBITS(32); + if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { + strm->msg = (char *)"invalid stored block lengths"; + mode = BAD; + break; + } + length = (unsigned)hold & 0xffff; + Tracev((stderr, "inflate: stored length %lu\n", + length)); + INITBITS(); + + /* copy stored block from input to output */ + while (length != 0) { + copy = length; + PULL(); + ROOM(); + if (copy > have) copy = have; + if (copy > left) copy = left; + zmemcpy(put, next, copy); + have -= copy; + next += copy; + left -= copy; + put += copy; + length -= copy; + } + Tracev((stderr, "inflate: stored end\n")); + mode = TYPE; + break; + + case TABLE: + /* get dynamic table entries descriptor */ + NEEDBITS(14); + state->nlen = BITS(5) + 257; + DROPBITS(5); + state->ndist = BITS(5) + 1; + DROPBITS(5); + state->ncode = BITS(4) + 4; + DROPBITS(4); + if (state->nlen > 286) { + strm->msg = (char *)"too many length symbols"; + mode = BAD; + break; + } + Tracev((stderr, "inflate: table sizes ok\n")); + + /* get code length code lengths (not a typo) */ + state->have = 0; + while (state->have < state->ncode) { + NEEDBITS(3); + state->lens[order[state->have++]] = (unsigned short)BITS(3); + DROPBITS(3); + } + while (state->have < 19) + state->lens[order[state->have++]] = 0; + state->next = state->codes; + lencode = (code const FAR *)(state->next); + lenbits = 7; + ret = inflate_table9(CODES, state->lens, 19, &(state->next), + &(lenbits), state->work); + if (ret) { + strm->msg = (char *)"invalid code lengths set"; + mode = BAD; + break; + } + Tracev((stderr, "inflate: code lengths ok\n")); + + /* get length and distance code code lengths */ + state->have = 0; + while (state->have < state->nlen + state->ndist) { + for (;;) { + here = lencode[BITS(lenbits)]; + if 
((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if (here.val < 16) { + NEEDBITS(here.bits); + DROPBITS(here.bits); + state->lens[state->have++] = here.val; + } + else { + if (here.val == 16) { + NEEDBITS(here.bits + 2); + DROPBITS(here.bits); + if (state->have == 0) { + strm->msg = (char *)"invalid bit length repeat"; + mode = BAD; + break; + } + len = (unsigned)(state->lens[state->have - 1]); + copy = 3 + BITS(2); + DROPBITS(2); + } + else if (here.val == 17) { + NEEDBITS(here.bits + 3); + DROPBITS(here.bits); + len = 0; + copy = 3 + BITS(3); + DROPBITS(3); + } + else { + NEEDBITS(here.bits + 7); + DROPBITS(here.bits); + len = 0; + copy = 11 + BITS(7); + DROPBITS(7); + } + if (state->have + copy > state->nlen + state->ndist) { + strm->msg = (char *)"invalid bit length repeat"; + mode = BAD; + break; + } + while (copy--) + state->lens[state->have++] = (unsigned short)len; + } + } + + /* handle error breaks in while */ + if (mode == BAD) break; + + /* check for end-of-block code (better have one) */ + if (state->lens[256] == 0) { + strm->msg = (char *)"invalid code -- missing end-of-block"; + mode = BAD; + break; + } + + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftree9.h + concerning the ENOUGH constants, which depend on those values */ + state->next = state->codes; + lencode = (code const FAR *)(state->next); + lenbits = 9; + ret = inflate_table9(LENS, state->lens, state->nlen, + &(state->next), &(lenbits), state->work); + if (ret) { + strm->msg = (char *)"invalid literal/lengths set"; + mode = BAD; + break; + } + distcode = (code const FAR *)(state->next); + distbits = 6; + ret = inflate_table9(DISTS, state->lens + state->nlen, + state->ndist, &(state->next), &(distbits), + state->work); + if (ret) { + strm->msg = (char *)"invalid distances set"; + mode = BAD; + break; + } + Tracev((stderr, "inflate: codes ok\n")); + mode = LEN; + + case LEN: + /* get a literal, length, 
or end-of-block code */ + for (;;) { + here = lencode[BITS(lenbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if (here.op && (here.op & 0xf0) == 0) { + last = here; + for (;;) { + here = lencode[last.val + + (BITS(last.bits + last.op) >> last.bits)]; + if ((unsigned)(last.bits + here.bits) <= bits) break; + PULLBYTE(); + } + DROPBITS(last.bits); + } + DROPBITS(here.bits); + length = (unsigned)here.val; + + /* process literal */ + if (here.op == 0) { + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? + "inflate: literal '%c'\n" : + "inflate: literal 0x%02x\n", here.val)); + ROOM(); + *put++ = (unsigned char)(length); + left--; + mode = LEN; + break; + } + + /* process end of block */ + if (here.op & 32) { + Tracevv((stderr, "inflate: end of block\n")); + mode = TYPE; + break; + } + + /* invalid code */ + if (here.op & 64) { + strm->msg = (char *)"invalid literal/length code"; + mode = BAD; + break; + } + + /* length code -- get extra bits, if any */ + extra = (unsigned)(here.op) & 31; + if (extra != 0) { + NEEDBITS(extra); + length += BITS(extra); + DROPBITS(extra); + } + Tracevv((stderr, "inflate: length %lu\n", length)); + + /* get distance code */ + for (;;) { + here = distcode[BITS(distbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if ((here.op & 0xf0) == 0) { + last = here; + for (;;) { + here = distcode[last.val + + (BITS(last.bits + last.op) >> last.bits)]; + if ((unsigned)(last.bits + here.bits) <= bits) break; + PULLBYTE(); + } + DROPBITS(last.bits); + } + DROPBITS(here.bits); + if (here.op & 64) { + strm->msg = (char *)"invalid distance code"; + mode = BAD; + break; + } + offset = (unsigned)here.val; + + /* get distance extra bits, if any */ + extra = (unsigned)(here.op) & 15; + if (extra != 0) { + NEEDBITS(extra); + offset += BITS(extra); + DROPBITS(extra); + } + if (offset > WSIZE - (wrap ? 
0: left)) { + strm->msg = (char *)"invalid distance too far back"; + mode = BAD; + break; + } + Tracevv((stderr, "inflate: distance %lu\n", offset)); + + /* copy match from window to output */ + do { + ROOM(); + copy = WSIZE - offset; + if (copy < left) { + from = put + copy; + copy = left - copy; + } + else { + from = put - offset; + copy = left; + } + if (copy > length) copy = length; + length -= copy; + left -= copy; + do { + *put++ = *from++; + } while (--copy); + } while (length != 0); + break; + + case DONE: + /* inflate stream terminated properly -- write leftover output */ + ret = Z_STREAM_END; + if (left < WSIZE) { + if (out(out_desc, window, (unsigned)(WSIZE - left))) + ret = Z_BUF_ERROR; + } + goto inf_leave; + + case BAD: + ret = Z_DATA_ERROR; + goto inf_leave; + + default: /* can't happen, but makes compilers happy */ + ret = Z_STREAM_ERROR; + goto inf_leave; + } + + /* Return unused input */ + inf_leave: + strm->next_in = next; + strm->avail_in = have; + return ret; +} + +int ZEXPORT inflateBack9End(strm) +z_stream FAR *strm; +{ + if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0) + return Z_STREAM_ERROR; + ZFREE(strm, strm->state); + strm->state = Z_NULL; + Tracev((stderr, "inflate: end\n")); + return Z_OK; +} diff --git a/tests/scancode/data/resource/samples/zlib/infback9/infback9.h b/tests/scancode/data/resource/samples/zlib/infback9/infback9.h new file mode 100644 index 00000000000..1073c0a38e6 --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/infback9/infback9.h @@ -0,0 +1,37 @@ +/* infback9.h -- header for using inflateBack9 functions + * Copyright (C) 2003 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + * This header file and associated patches provide a decoder for PKWare's + * undocumented deflate64 compression method (method 9). Use with infback9.c, + * inftree9.h, inftree9.c, and inffix9.h. These patches are not supported. 
+ * This should be compiled with zlib, since it uses zutil.h and zutil.o. + * This code has not yet been tested on 16-bit architectures. See the + * comments in zlib.h for inflateBack() usage. These functions are used + * identically, except that there is no windowBits parameter, and a 64K + * window must be provided. Also if int's are 16 bits, then a zero for + * the third parameter of the "out" function actually means 65536UL. + * zlib.h must be included before this header file. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +ZEXTERN int ZEXPORT inflateBack9 OF((z_stream FAR *strm, + in_func in, void FAR *in_desc, + out_func out, void FAR *out_desc)); +ZEXTERN int ZEXPORT inflateBack9End OF((z_stream FAR *strm)); +ZEXTERN int ZEXPORT inflateBack9Init_ OF((z_stream FAR *strm, + unsigned char FAR *window, + const char *version, + int stream_size)); +#define inflateBack9Init(strm, window) \ + inflateBack9Init_((strm), (window), \ + ZLIB_VERSION, sizeof(z_stream)) + +#ifdef __cplusplus +} +#endif diff --git a/tests/scancode/data/resource/samples/zlib/iostream2/zstream.h b/tests/scancode/data/resource/samples/zlib/iostream2/zstream.h new file mode 100644 index 00000000000..43d2332b79b --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/iostream2/zstream.h @@ -0,0 +1,307 @@ +/* + * + * Copyright (c) 1997 + * Christian Michelsen Research AS + * Advanced Computing + * Fantoftvegen 38, 5036 BERGEN, Norway + * http://www.cmr.no + * + * Permission to use, copy, modify, distribute and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appear in all copies and + * that both that copyright notice and this permission notice appear + * in supporting documentation. Christian Michelsen Research AS makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. 
+ * + */ + +#ifndef ZSTREAM__H +#define ZSTREAM__H + +/* + * zstream.h - C++ interface to the 'zlib' general purpose compression library + * $Id: zstream.h 1.1 1997-06-25 12:00:56+02 tyge Exp tyge $ + */ + +#include +#include +#include +#include "zlib.h" + +#if defined(_WIN32) +# include +# include +# define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY) +#else +# define SET_BINARY_MODE(file) +#endif + +class zstringlen { +public: + zstringlen(class izstream&); + zstringlen(class ozstream&, const char*); + size_t value() const { return val.word; } +private: + struct Val { unsigned char byte; size_t word; } val; +}; + +// ----------------------------- izstream ----------------------------- + +class izstream +{ + public: + izstream() : m_fp(0) {} + izstream(FILE* fp) : m_fp(0) { open(fp); } + izstream(const char* name) : m_fp(0) { open(name); } + ~izstream() { close(); } + + /* Opens a gzip (.gz) file for reading. + * open() can be used to read a file which is not in gzip format; + * in this case read() will directly read from the file without + * decompression. errno can be checked to distinguish two error + * cases (if errno is zero, the zlib error is Z_MEM_ERROR). + */ + void open(const char* name) { + if (m_fp) close(); + m_fp = ::gzopen(name, "rb"); + } + + void open(FILE* fp) { + SET_BINARY_MODE(fp); + if (m_fp) close(); + m_fp = ::gzdopen(fileno(fp), "rb"); + } + + /* Flushes all pending input if necessary, closes the compressed file + * and deallocates all the (de)compression state. The return value is + * the zlib error number (see function error() below). + */ + int close() { + int r = ::gzclose(m_fp); + m_fp = 0; return r; + } + + /* Binary read the given number of bytes from the compressed file. + */ + int read(void* buf, size_t len) { + return ::gzread(m_fp, buf, len); + } + + /* Returns the error message for the last error which occurred on the + * given compressed file. errnum is set to zlib error number. 
If an + * error occurred in the file system and not in the compression library, + * errnum is set to Z_ERRNO and the application may consult errno + * to get the exact error code. + */ + const char* error(int* errnum) { + return ::gzerror(m_fp, errnum); + } + + gzFile fp() { return m_fp; } + + private: + gzFile m_fp; +}; + +/* + * Binary read the given (array of) object(s) from the compressed file. + * If the input file was not in gzip format, read() copies the objects number + * of bytes into the buffer. + * returns the number of uncompressed bytes actually read + * (0 for end of file, -1 for error). + */ +template +inline int read(izstream& zs, T* x, Items items) { + return ::gzread(zs.fp(), x, items*sizeof(T)); +} + +/* + * Binary input with the '>' operator. + */ +template +inline izstream& operator>(izstream& zs, T& x) { + ::gzread(zs.fp(), &x, sizeof(T)); + return zs; +} + + +inline zstringlen::zstringlen(izstream& zs) { + zs > val.byte; + if (val.byte == 255) zs > val.word; + else val.word = val.byte; +} + +/* + * Read length of string + the string with the '>' operator. + */ +inline izstream& operator>(izstream& zs, char* x) { + zstringlen len(zs); + ::gzread(zs.fp(), x, len.value()); + x[len.value()] = '\0'; + return zs; +} + +inline char* read_string(izstream& zs) { + zstringlen len(zs); + char* x = new char[len.value()+1]; + ::gzread(zs.fp(), x, len.value()); + x[len.value()] = '\0'; + return x; +} + +// ----------------------------- ozstream ----------------------------- + +class ozstream +{ + public: + ozstream() : m_fp(0), m_os(0) { + } + ozstream(FILE* fp, int level = Z_DEFAULT_COMPRESSION) + : m_fp(0), m_os(0) { + open(fp, level); + } + ozstream(const char* name, int level = Z_DEFAULT_COMPRESSION) + : m_fp(0), m_os(0) { + open(name, level); + } + ~ozstream() { + close(); + } + + /* Opens a gzip (.gz) file for writing. 
+ * The compression level parameter should be in 0..9 + * errno can be checked to distinguish two error cases + * (if errno is zero, the zlib error is Z_MEM_ERROR). + */ + void open(const char* name, int level = Z_DEFAULT_COMPRESSION) { + char mode[4] = "wb\0"; + if (level != Z_DEFAULT_COMPRESSION) mode[2] = '0'+level; + if (m_fp) close(); + m_fp = ::gzopen(name, mode); + } + + /* open from a FILE pointer. + */ + void open(FILE* fp, int level = Z_DEFAULT_COMPRESSION) { + SET_BINARY_MODE(fp); + char mode[4] = "wb\0"; + if (level != Z_DEFAULT_COMPRESSION) mode[2] = '0'+level; + if (m_fp) close(); + m_fp = ::gzdopen(fileno(fp), mode); + } + + /* Flushes all pending output if necessary, closes the compressed file + * and deallocates all the (de)compression state. The return value is + * the zlib error number (see function error() below). + */ + int close() { + if (m_os) { + ::gzwrite(m_fp, m_os->str(), m_os->pcount()); + delete[] m_os->str(); delete m_os; m_os = 0; + } + int r = ::gzclose(m_fp); m_fp = 0; return r; + } + + /* Binary write the given number of bytes into the compressed file. + */ + int write(const void* buf, size_t len) { + return ::gzwrite(m_fp, (voidp) buf, len); + } + + /* Flushes all pending output into the compressed file. The parameter + * _flush is as in the deflate() function. The return value is the zlib + * error number (see function gzerror below). flush() returns Z_OK if + * the flush_ parameter is Z_FINISH and all output could be flushed. + * flush() should be called only when strictly necessary because it can + * degrade compression. + */ + int flush(int _flush) { + os_flush(); + return ::gzflush(m_fp, _flush); + } + + /* Returns the error message for the last error which occurred on the + * given compressed file. errnum is set to zlib error number. If an + * error occurred in the file system and not in the compression library, + * errnum is set to Z_ERRNO and the application may consult errno + * to get the exact error code. 
+ */ + const char* error(int* errnum) { + return ::gzerror(m_fp, errnum); + } + + gzFile fp() { return m_fp; } + + ostream& os() { + if (m_os == 0) m_os = new ostrstream; + return *m_os; + } + + void os_flush() { + if (m_os && m_os->pcount()>0) { + ostrstream* oss = new ostrstream; + oss->fill(m_os->fill()); + oss->flags(m_os->flags()); + oss->precision(m_os->precision()); + oss->width(m_os->width()); + ::gzwrite(m_fp, m_os->str(), m_os->pcount()); + delete[] m_os->str(); delete m_os; m_os = oss; + } + } + + private: + gzFile m_fp; + ostrstream* m_os; +}; + +/* + * Binary write the given (array of) object(s) into the compressed file. + * returns the number of uncompressed bytes actually written + * (0 in case of error). + */ +template +inline int write(ozstream& zs, const T* x, Items items) { + return ::gzwrite(zs.fp(), (voidp) x, items*sizeof(T)); +} + +/* + * Binary output with the '<' operator. + */ +template +inline ozstream& operator<(ozstream& zs, const T& x) { + ::gzwrite(zs.fp(), (voidp) &x, sizeof(T)); + return zs; +} + +inline zstringlen::zstringlen(ozstream& zs, const char* x) { + val.byte = 255; val.word = ::strlen(x); + if (val.word < 255) zs < (val.byte = val.word); + else zs < val; +} + +/* + * Write length of string + the string with the '<' operator. 
+ */ +inline ozstream& operator<(ozstream& zs, const char* x) { + zstringlen len(zs, x); + ::gzwrite(zs.fp(), (voidp) x, len.value()); + return zs; +} + +#ifdef _MSC_VER +inline ozstream& operator<(ozstream& zs, char* const& x) { + return zs < (const char*) x; +} +#endif + +/* + * Ascii write with the << operator; + */ +template +inline ostream& operator<<(ozstream& zs, const T& x) { + zs.os_flush(); + return zs.os() << x; +} + +#endif diff --git a/tests/scancode/data/resource/samples/zlib/iostream2/zstream_test.cpp b/tests/scancode/data/resource/samples/zlib/iostream2/zstream_test.cpp new file mode 100644 index 00000000000..6273f62d62a --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/iostream2/zstream_test.cpp @@ -0,0 +1,25 @@ +#include "zstream.h" +#include +#include +#include + +void main() { + char h[256] = "Hello"; + char* g = "Goodbye"; + ozstream out("temp.gz"); + out < "This works well" < h < g; + out.close(); + + izstream in("temp.gz"); // read it back + char *x = read_string(in), *y = new char[256], z[256]; + in > y > z; + in.close(); + cout << x << endl << y << endl << z << endl; + + out.open("temp.gz"); // try ascii output; zcat temp.gz to see the results + out << setw(50) << setfill('#') << setprecision(20) << x << endl << y << endl << z << endl; + out << z << endl << y << endl << x << endl; + out << 1.1234567890123456789 << endl; + + delete[] x; delete[] y; +} diff --git a/tests/scancode/data/resource/samples/zlib/zlib.h b/tests/scancode/data/resource/samples/zlib/zlib.h new file mode 100644 index 00000000000..3e0c7672ac5 --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/zlib.h @@ -0,0 +1,1768 @@ +/* zlib.h -- interface of the 'zlib' general purpose compression library + version 1.2.8, April 28th, 2013 + + Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. 
In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + + + The data format used by the zlib library is described by RFCs (Request for + Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950 + (zlib format), rfc1951 (deflate format) and rfc1952 (gzip format). +*/ + +#ifndef ZLIB_H +#define ZLIB_H + +#include "zconf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZLIB_VERSION "1.2.8" +#define ZLIB_VERNUM 0x1280 +#define ZLIB_VER_MAJOR 1 +#define ZLIB_VER_MINOR 2 +#define ZLIB_VER_REVISION 8 +#define ZLIB_VER_SUBREVISION 0 + +/* + The 'zlib' compression library provides in-memory compression and + decompression functions, including integrity checks of the uncompressed data. + This version of the library supports only one compression method (deflation) + but other algorithms will be added later and will have the same stream + interface. + + Compression can be done in a single step if the buffers are large enough, + or can be done by repeated calls of the compression function. In the latter + case, the application must provide more input and/or consume the output + (providing more output space) before each call. 
+ + The compressed data format used by default by the in-memory functions is + the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped + around a deflate stream, which is itself documented in RFC 1951. + + The library also supports reading and writing files in gzip (.gz) format + with an interface similar to that of stdio using the functions that start + with "gz". The gzip format is different from the zlib format. gzip is a + gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. + + This library can optionally read and write gzip streams in memory as well. + + The zlib format was designed to be compact and fast for use in memory + and on communications channels. The gzip format was designed for single- + file compression on file systems, has a larger header than zlib to maintain + directory information, and uses a different, slower check method than zlib. + + The library does not install any signal handler. The decoder checks + the consistency of the compressed data, so the library should never crash + even in case of corrupted input. 
+*/ + +typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size)); +typedef void (*free_func) OF((voidpf opaque, voidpf address)); + +struct internal_state; + +typedef struct z_stream_s { + z_const Bytef *next_in; /* next input byte */ + uInt avail_in; /* number of bytes available at next_in */ + uLong total_in; /* total number of input bytes read so far */ + + Bytef *next_out; /* next output byte should be put there */ + uInt avail_out; /* remaining free space at next_out */ + uLong total_out; /* total number of bytes output so far */ + + z_const char *msg; /* last error message, NULL if no error */ + struct internal_state FAR *state; /* not visible by applications */ + + alloc_func zalloc; /* used to allocate the internal state */ + free_func zfree; /* used to free the internal state */ + voidpf opaque; /* private data object passed to zalloc and zfree */ + + int data_type; /* best guess about the data type: binary or text */ + uLong adler; /* adler32 value of the uncompressed data */ + uLong reserved; /* reserved for future use */ +} z_stream; + +typedef z_stream FAR *z_streamp; + +/* + gzip header information passed to and from zlib routines. See RFC 1952 + for more details on the meanings of these fields. 
+*/ +typedef struct gz_header_s { + int text; /* true if compressed data believed to be text */ + uLong time; /* modification time */ + int xflags; /* extra flags (not used when writing a gzip file) */ + int os; /* operating system */ + Bytef *extra; /* pointer to extra field or Z_NULL if none */ + uInt extra_len; /* extra field length (valid if extra != Z_NULL) */ + uInt extra_max; /* space at extra (only when reading header) */ + Bytef *name; /* pointer to zero-terminated file name or Z_NULL */ + uInt name_max; /* space at name (only when reading header) */ + Bytef *comment; /* pointer to zero-terminated comment or Z_NULL */ + uInt comm_max; /* space at comment (only when reading header) */ + int hcrc; /* true if there was or will be a header crc */ + int done; /* true when done reading gzip header (not used + when writing a gzip file) */ +} gz_header; + +typedef gz_header FAR *gz_headerp; + +/* + The application must update next_in and avail_in when avail_in has dropped + to zero. It must update next_out and avail_out when avail_out has dropped + to zero. The application must initialize zalloc, zfree and opaque before + calling the init function. All other fields are set by the compression + library and must not be updated by the application. + + The opaque value provided by the application will be passed as the first + parameter for calls of zalloc and zfree. This can be useful for custom + memory management. The compression library attaches no meaning to the + opaque value. + + zalloc must return Z_NULL if there is not enough memory for the object. + If zlib is used in a multi-threaded application, zalloc and zfree must be + thread safe. + + On 16-bit systems, the functions zalloc and zfree must be able to allocate + exactly 65536 bytes, but will not be required to allocate more than this if + the symbol MAXSEG_64K is defined (see zconf.h). 
WARNING: On MSDOS, pointers + returned by zalloc for objects of exactly 65536 bytes *must* have their + offset normalized to zero. The default allocation function provided by this + library ensures this (see zutil.c). To reduce memory requirements and avoid + any allocation of 64K objects, at the expense of compression ratio, compile + the library with -DMAX_WBITS=14 (see zconf.h). + + The fields total_in and total_out can be used for statistics or progress + reports. After compression, total_in holds the total size of the + uncompressed data and may be saved for use in the decompressor (particularly + if the decompressor wants to decompress everything in a single step). +*/ + + /* constants */ + +#define Z_NO_FLUSH 0 +#define Z_PARTIAL_FLUSH 1 +#define Z_SYNC_FLUSH 2 +#define Z_FULL_FLUSH 3 +#define Z_FINISH 4 +#define Z_BLOCK 5 +#define Z_TREES 6 +/* Allowed flush values; see deflate() and inflate() below for details */ + +#define Z_OK 0 +#define Z_STREAM_END 1 +#define Z_NEED_DICT 2 +#define Z_ERRNO (-1) +#define Z_STREAM_ERROR (-2) +#define Z_DATA_ERROR (-3) +#define Z_MEM_ERROR (-4) +#define Z_BUF_ERROR (-5) +#define Z_VERSION_ERROR (-6) +/* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. 
+ */ + +#define Z_NO_COMPRESSION 0 +#define Z_BEST_SPEED 1 +#define Z_BEST_COMPRESSION 9 +#define Z_DEFAULT_COMPRESSION (-1) +/* compression levels */ + +#define Z_FILTERED 1 +#define Z_HUFFMAN_ONLY 2 +#define Z_RLE 3 +#define Z_FIXED 4 +#define Z_DEFAULT_STRATEGY 0 +/* compression strategy; see deflateInit2() below for details */ + +#define Z_BINARY 0 +#define Z_TEXT 1 +#define Z_ASCII Z_TEXT /* for compatibility with 1.2.2 and earlier */ +#define Z_UNKNOWN 2 +/* Possible values of the data_type field (though see inflate()) */ + +#define Z_DEFLATED 8 +/* The deflate compression method (the only one supported in this version) */ + +#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ + +#define zlib_version zlibVersion() +/* for compatibility with versions < 1.0.2 */ + + + /* basic functions */ + +ZEXTERN const char * ZEXPORT zlibVersion OF((void)); +/* The application can compare zlibVersion and ZLIB_VERSION for consistency. + If the first character differs, the library code actually used is not + compatible with the zlib.h header file used by the application. This check + is automatically made by deflateInit and inflateInit. + */ + +/* +ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level)); + + Initializes the internal stream state for compression. The fields + zalloc, zfree and opaque must be initialized before by the caller. If + zalloc and zfree are set to Z_NULL, deflateInit updates them to use default + allocation functions. + + The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: + 1 gives best speed, 9 gives best compression, 0 gives no compression at all + (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION + requests a default compromise between speed and compression (currently + equivalent to level 6). 
+ + deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if level is not a valid compression level, or + Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible + with the version assumed by the caller (ZLIB_VERSION). msg is set to null + if there is no error message. deflateInit does not perform any compression: + this will be done by deflate(). +*/ + + +ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush)); +/* + deflate compresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. deflate performs one or both of the + following actions: + + - Compress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), next_in and avail_in are updated and + processing will resume at this point for the next call of deflate(). + + - Provide more output starting at next_out and update next_out and avail_out + accordingly. This action is forced if the parameter flush is non zero. + Forcing flush frequently degrades the compression ratio, so this parameter + should be set only when necessary (in interactive applications). Some + output may be provided even if flush is not set. + + Before the call of deflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating avail_in or avail_out accordingly; avail_out should + never be zero before the call. The application can consume the compressed + output when it wants, for example when the output buffer is full (avail_out + == 0), or after each call of deflate(). 
If deflate returns Z_OK and with + zero avail_out, it must be called again after making room in the output + buffer because there might be more output pending. + + Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to + decide how much data to accumulate before producing output, in order to + maximize compression. + + If the parameter flush is set to Z_SYNC_FLUSH, all pending output is + flushed to the output buffer and the output is aligned on a byte boundary, so + that the decompressor can get all input data available so far. (In + particular avail_in is zero after the call if enough output space has been + provided before the call.) Flushing may degrade compression for some + compression algorithms and so it should be used only when necessary. This + completes the current deflate block and follows it with an empty stored block + that is three bits plus filler bits to the next byte, followed by four bytes + (00 00 ff ff). + + If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the + output buffer, but the output is not aligned to a byte boundary. All of the + input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. + This completes the current deflate block and follows it with an empty fixed + codes block that is 10 bits long. This assures that enough bytes are output + in order for the decompressor to finish the block before the empty fixed code + block. + + If flush is set to Z_BLOCK, a deflate block is completed and emitted, as + for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to + seven bits of the current block are held to be written as the next byte after + the next deflate block is completed. In this case, the decompressor may not + be provided enough bits at this point in order to complete decompression of + the data provided so far to the compressor. It may need to wait for the next + block to be emitted. 
This is for advanced applications that need to control + the emission of deflate blocks. + + If flush is set to Z_FULL_FLUSH, all output is flushed as with + Z_SYNC_FLUSH, and the compression state is reset so that decompression can + restart from this point if previous compressed data has been damaged or if + random access is desired. Using Z_FULL_FLUSH too often can seriously degrade + compression. + + If deflate returns with avail_out == 0, this function must be called again + with the same value of the flush parameter and more output space (updated + avail_out), until the flush is complete (deflate returns with non-zero + avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that + avail_out is greater than six to avoid repeated flush markers due to + avail_out == 0 on return. + + If the parameter flush is set to Z_FINISH, pending input is processed, + pending output is flushed and deflate returns with Z_STREAM_END if there was + enough output space; if deflate returns with Z_OK, this function must be + called again with Z_FINISH and more output space (updated avail_out) but no + more input data, until it returns with Z_STREAM_END or an error. After + deflate has returned Z_STREAM_END, the only possible operations on the stream + are deflateReset or deflateEnd. + + Z_FINISH can be used immediately after deflateInit if all the compression + is to be done in a single step. In this case, avail_out must be at least the + value returned by deflateBound (see below). Then deflate is guaranteed to + return Z_STREAM_END. If not enough output space is provided, deflate will + not return Z_STREAM_END, and it must be called again as described above. + + deflate() sets strm->adler to the adler32 checksum of all input read + so far (that is, total_in bytes). + + deflate() may update strm->data_type if it can make a good guess about + the input data type (Z_BINARY or Z_TEXT). In doubt, the data is considered + binary. 
This field is only for information purposes and does not affect the + compression algorithm in any manner. + + deflate() returns Z_OK if some progress has been made (more input + processed or more output produced), Z_STREAM_END if all input has been + consumed and all output has been produced (only when flush is set to + Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example + if next_in or next_out was Z_NULL), Z_BUF_ERROR if no progress is possible + (for example avail_in or avail_out was zero). Note that Z_BUF_ERROR is not + fatal, and deflate() can be called again with more input and more output + space to continue compressing. +*/ + + +ZEXTERN int ZEXPORT deflateEnd OF((z_streamp strm)); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any pending + output. + + deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the + stream state was inconsistent, Z_DATA_ERROR if the stream was freed + prematurely (some input or output was discarded). In the error case, msg + may be set but then points to a static string (which must not be + deallocated). +*/ + + +/* +ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm)); + + Initializes the internal stream state for decompression. The fields + next_in, avail_in, zalloc, zfree and opaque must be initialized before by + the caller. If next_in is not Z_NULL and avail_in is large enough (the + exact value depends on the compression method), inflateInit determines the + compression method from the zlib header and allocates all data structures + accordingly; otherwise the allocation will be deferred to the first call of + inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to + use default allocation functions. 
+ + inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit does not perform any decompression + apart from possibly reading the zlib header if present: actual decompression + will be done by inflate(). (So next_in and avail_in may be modified, but + next_out and avail_out are unused and unchanged.) The current implementation + of inflateInit() does not process any header information -- that is deferred + until inflate() is called. +*/ + + +ZEXTERN int ZEXPORT inflate OF((z_streamp strm, int flush)); +/* + inflate decompresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. inflate performs one or both of the + following actions: + + - Decompress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), next_in is updated and processing will + resume at this point for the next call of inflate(). + + - Provide more output starting at next_out and update next_out and avail_out + accordingly. inflate() provides as much output as possible, until there is + no more input data or no more space in the output buffer (see below about + the flush parameter). + + Before the call of inflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating the next_* and avail_* values accordingly. 
The + application can consume the uncompressed output when it wants, for example + when the output buffer is full (avail_out == 0), or after each call of + inflate(). If inflate returns Z_OK and with zero avail_out, it must be + called again after making room in the output buffer because there might be + more output pending. + + The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, + Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much + output as possible to the output buffer. Z_BLOCK requests that inflate() + stop if and when it gets to the next deflate block boundary. When decoding + the zlib or gzip format, this will cause inflate() to return immediately + after the header and before the first block. When doing a raw inflate, + inflate() will go ahead and process the first block, and will return when it + gets to the end of that block, or when it runs out of data. + + The Z_BLOCK option assists in appending to or combining deflate streams. + Also to assist in this, on return inflate() will set strm->data_type to the + number of unused bits in the last byte taken from strm->next_in, plus 64 if + inflate() is currently decoding the last block in the deflate stream, plus + 128 if inflate() returned immediately after decoding an end-of-block code or + decoding the complete header up to just before the first byte of the deflate + stream. The end-of-block will not be indicated until all of the uncompressed + data from that block has been written to strm->next_out. The number of + unused bits may in general be greater than seven, except when bit 7 of + data_type is set, in which case the number of unused bits will be less than + eight. data_type is set as noted here every time inflate() returns for all + flush options, and so can be used to determine the amount of currently + consumed input in bits. 
+ + The Z_TREES option behaves as Z_BLOCK does, but it also returns when the + end of each deflate block header is reached, before any actual data in that + block is decoded. This allows the caller to determine the length of the + deflate block header for later use in random access within a deflate block. + 256 is added to the value of strm->data_type when inflate() returns + immediately after reaching the end of the deflate block header. + + inflate() should normally be called until it returns Z_STREAM_END or an + error. However if all decompression is to be performed in a single step (a + single call of inflate), the parameter flush should be set to Z_FINISH. In + this case all pending input is processed and all pending output is flushed; + avail_out must be large enough to hold all of the uncompressed data for the + operation to complete. (The size of the uncompressed data may have been + saved by the compressor for this purpose.) The use of Z_FINISH is not + required to perform an inflation in one step. However it may be used to + inform inflate that a faster approach can be used for the single inflate() + call. Z_FINISH also informs inflate to not maintain a sliding window if the + stream completes, which reduces inflate's memory footprint. If the stream + does not complete, either because not all of the stream is provided or not + enough output space is provided, then a sliding window will be allocated and + inflate() can be called again to continue the operation as if Z_NO_FLUSH had + been used. + + In this implementation, inflate() always flushes as much output as + possible to the output buffer, and always uses the faster approach on the + first call. So the effects of the flush parameter in this implementation are + on the return value of inflate() as noted below, when inflate() returns early + when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of + memory for a sliding window when Z_FINISH is used. 
+ +   If a preset dictionary is needed after this call (see inflateSetDictionary + below), inflate sets strm->adler to the Adler-32 checksum of the dictionary + chosen by the compressor and returns Z_NEED_DICT; otherwise it sets + strm->adler to the Adler-32 checksum of all output produced so far (that is, + total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described + below. At the end of the stream, inflate() checks that its computed adler32 + checksum is equal to that saved by the compressor and returns Z_STREAM_END + only if the checksum is correct. + + inflate() can decompress and check either zlib-wrapped or gzip-wrapped + deflate data. The header type is detected automatically, if requested when + initializing with inflateInit2(). Any information contained in the gzip + header is not retained, so applications that need that information should + instead use raw inflate, see inflateInit2() below, or inflateBack() and + perform their own processing of the gzip header and trailer. When processing + gzip-wrapped deflate data, strm->adler is set to the CRC-32 of the output + produced so far. The CRC-32 is checked against the gzip trailer. + + inflate() returns Z_OK if some progress has been made (more input processed + or more output produced), Z_STREAM_END if the end of the compressed data has + been reached and all uncompressed output has been produced, Z_NEED_DICT if a + preset dictionary is needed at this point, Z_DATA_ERROR if the input data was + corrupted (input stream not conforming to the zlib format or incorrect check + value), Z_STREAM_ERROR if the stream structure was inconsistent (for example + next_in or next_out was Z_NULL), Z_MEM_ERROR if there was not enough memory, + Z_BUF_ERROR if no progress is possible or if there was not enough room in the + output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and + inflate() can be called again with more input and more output space to + continue decompressing. 
If Z_DATA_ERROR is returned, the application may + then call inflateSync() to look for a good compression block if a partial + recovery of the data is desired. +*/ + + +ZEXTERN int ZEXPORT inflateEnd OF((z_streamp strm)); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any pending + output. + + inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state + was inconsistent. In the error case, msg may be set but then points to a + static string (which must not be deallocated). +*/ + + + /* Advanced functions */ + +/* + The following functions are needed only in some special applications. +*/ + +/* +ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm, + int level, + int method, + int windowBits, + int memLevel, + int strategy)); + + This is another version of deflateInit with more compression options. The + fields next_in, zalloc, zfree and opaque must be initialized before by the + caller. + + The method parameter is the compression method. It must be Z_DEFLATED in + this version of the library. + + The windowBits parameter is the base two logarithm of the window size + (the size of the history buffer). It should be in the range 8..15 for this + version of the library. Larger values of this parameter result in better + compression at the expense of memory usage. The default value is 15 if + deflateInit is used instead. + + windowBits can also be -8..-15 for raw deflate. In this case, -windowBits + determines the window size. deflate() will then generate raw deflate data + with no zlib header or trailer, and will not compute an adler32 check value. + + windowBits can also be greater than 15 for optional gzip encoding. Add + 16 to windowBits to write a simple gzip header and trailer around the + compressed data instead of a zlib wrapper. 
The gzip header will have no + file name, no extra data, no comment, no modification time (set to zero), no + header crc, and the operating system will be set to 255 (unknown). If a + gzip stream is being written, strm->adler is a crc32 instead of an adler32. + + The memLevel parameter specifies how much memory should be allocated + for the internal compression state. memLevel=1 uses minimum memory but is + slow and reduces compression ratio; memLevel=9 uses maximum memory for + optimal speed. The default value is 8. See zconf.h for total memory usage + as a function of windowBits and memLevel. + + The strategy parameter is used to tune the compression algorithm. Use the + value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a + filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no + string match), or Z_RLE to limit match distances to one (run-length + encoding). Filtered data consists mostly of small values with a somewhat + random distribution. In this case, the compression algorithm is tuned to + compress them better. The effect of Z_FILTERED is to force more Huffman + coding and less string matching; it is somewhat intermediate between + Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as + fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The + strategy parameter only affects the compression ratio but not the + correctness of the compressed output even if it is not set appropriately. + Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler + decoder for special applications. + + deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid + method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is + incompatible with the version assumed by the caller (ZLIB_VERSION). msg is + set to null if there is no error message. 
deflateInit2 does not perform any + compression: this will be done by deflate(). +*/ + +ZEXTERN int ZEXPORT deflateSetDictionary OF((z_streamp strm, + const Bytef *dictionary, + uInt dictLength)); +/* + Initializes the compression dictionary from the given byte sequence + without producing any compressed output. When using the zlib format, this + function must be called immediately after deflateInit, deflateInit2 or + deflateReset, and before any call of deflate. When doing raw deflate, this + function must be called either before any call of deflate, or immediately + after the completion of a deflate block, i.e. after all input has been + consumed and all output has been delivered when using any of the flush + options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH. The + compressor and decompressor must use exactly the same dictionary (see + inflateSetDictionary). + + The dictionary should consist of strings (byte sequences) that are likely + to be encountered later in the data to be compressed, with the most commonly + used strings preferably put towards the end of the dictionary. Using a + dictionary is most useful when the data to be compressed is short and can be + predicted with good accuracy; the data can then be compressed better than + with the default empty dictionary. + + Depending on the size of the compression data structures selected by + deflateInit or deflateInit2, a part of the dictionary may in effect be + discarded, for example if the dictionary is larger than the window size + provided in deflateInit or deflateInit2. Thus the strings most likely to be + useful should be put at the end of the dictionary, not at the front. In + addition, the current implementation of deflate will use at most the window + size minus 262 bytes of the provided dictionary. 
+ + Upon return of this function, strm->adler is set to the adler32 value + of the dictionary; the decompressor may later use this value to determine + which dictionary has been used by the compressor. (The adler32 value + applies to the whole dictionary even if only a subset of the dictionary is + actually used by the compressor.) If a raw deflate was requested, then the + adler32 value is not computed and strm->adler is not set. + + deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is + inconsistent (for example if deflate has already been called for this stream + or if not at a block boundary for raw deflate). deflateSetDictionary does + not perform any compression: this will be done by deflate(). +*/ + +ZEXTERN int ZEXPORT deflateCopy OF((z_streamp dest, + z_streamp source)); +/* + Sets the destination stream as a complete copy of the source stream. + + This function can be useful when several compression strategies will be + tried, for example when there are several ways of pre-processing the input + data with a filter. The streams that will be discarded should then be freed + by calling deflateEnd. Note that deflateCopy duplicates the internal + compression state which can be quite large, so this strategy is slow and can + consume lots of memory. + + deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being Z_NULL). msg is left unchanged in both source and + destination. +*/ + +ZEXTERN int ZEXPORT deflateReset OF((z_streamp strm)); +/* + This function is equivalent to deflateEnd followed by deflateInit, + but does not free and reallocate all the internal compression state. The + stream will keep the same compression level and any other attributes that + may have been set by deflateInit2. 
+ + deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). +*/ + +ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm, + int level, + int strategy)); +/* + Dynamically update the compression level and compression strategy. The + interpretation of level and strategy is as in deflateInit2. This can be + used to switch between compression and straight copy of the input data, or + to switch to a different kind of input data requiring a different strategy. + If the compression level is changed, the input available so far is + compressed with the old level (and may be flushed); the new level will take + effect only at the next call of deflate(). + + Before the call of deflateParams, the stream state must be set as for + a call of deflate(), since the currently available input may have to be + compressed and flushed. In particular, strm->avail_out must be non-zero. + + deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source + stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR if + strm->avail_out was zero. +*/ + +ZEXTERN int ZEXPORT deflateTune OF((z_streamp strm, + int good_length, + int max_lazy, + int nice_length, + int max_chain)); +/* + Fine tune deflate's internal compression parameters. This should only be + used by someone who understands the algorithm used by zlib's deflate for + searching for the best matching string, and even then only by the most + fanatic optimizer trying to squeeze out the last compressed bit for their + specific input data. Read the deflate.c source code for the meaning of the + max_lazy, good_length, nice_length, and max_chain parameters. + + deflateTune() can be called after deflateInit() or deflateInit2(), and + returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream. 
+ */ + +ZEXTERN uLong ZEXPORT deflateBound OF((z_streamp strm, + uLong sourceLen)); +/* + deflateBound() returns an upper bound on the compressed size after + deflation of sourceLen bytes. It must be called after deflateInit() or + deflateInit2(), and after deflateSetHeader(), if used. This would be used + to allocate an output buffer for deflation in a single pass, and so would be + called before deflate(). If that first deflate() call is provided the + sourceLen input bytes, an output buffer allocated to the size returned by + deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed + to return Z_STREAM_END. Note that it is possible for the compressed size to + be larger than the value returned by deflateBound() if flush options other + than Z_FINISH or Z_NO_FLUSH are used. +*/ + +ZEXTERN int ZEXPORT deflatePending OF((z_streamp strm, + unsigned *pending, + int *bits)); +/* + deflatePending() returns the number of bytes and bits of output that have + been generated, but not yet provided in the available output. The bytes not + provided would be due to the available output space having being consumed. + The number of bits of output not provided are between 0 and 7, where they + await more bits to join them in order to fill out a full byte. If pending + or bits are Z_NULL, then those values are not set. + + deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. + */ + +ZEXTERN int ZEXPORT deflatePrime OF((z_streamp strm, + int bits, + int value)); +/* + deflatePrime() inserts bits in the deflate output stream. The intent + is that this function is used to start off the deflate output with the bits + leftover from a previous deflate stream when appending to it. As such, this + function can only be used for raw deflate, and must be used before the first + deflate() call after a deflateInit2() or deflateReset(). 
bits must be less + than or equal to 16, and that many of the least significant bits of value + will be inserted in the output. + + deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough + room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the + source stream state was inconsistent. +*/ + +ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm, + gz_headerp head)); +/* + deflateSetHeader() provides gzip header information for when a gzip + stream is requested by deflateInit2(). deflateSetHeader() may be called + after deflateInit2() or deflateReset() and before the first call of + deflate(). The text, time, os, extra field, name, and comment information + in the provided gz_header structure are written to the gzip header (xflag is + ignored -- the extra flags are set according to the compression level). The + caller must assure that, if not Z_NULL, name and comment are terminated with + a zero byte, and that if extra is not Z_NULL, that extra_len bytes are + available there. If hcrc is true, a gzip header crc is included. Note that + the current versions of the command-line version of gzip (up through version + 1.3.x) do not support header crc's, and will report that it is a "multi-part + gzip file" and give up. + + If deflateSetHeader is not used, the default gzip header has text false, + the time set to zero, and os set to 255, with no extra, name, or comment + fields. The gzip header is returned to the default state by deflateReset(). + + deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +/* +ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm, + int windowBits)); + + This is another version of inflateInit with an extra parameter. The + fields next_in, avail_in, zalloc, zfree and opaque must be initialized + before by the caller. + + The windowBits parameter is the base two logarithm of the maximum window + size (the size of the history buffer). 
It should be in the range 8..15 for + this version of the library. The default value is 15 if inflateInit is used + instead. windowBits must be greater than or equal to the windowBits value + provided to deflateInit2() while compressing, or it must be equal to 15 if + deflateInit2() was not used. If a compressed stream with a larger window + size is given as input, inflate() will return with the error code + Z_DATA_ERROR instead of trying to allocate a larger window. + + windowBits can also be zero to request that inflate use the window size in + the zlib header of the compressed stream. + + windowBits can also be -8..-15 for raw inflate. In this case, -windowBits + determines the window size. inflate() will then process raw deflate data, + not looking for a zlib or gzip header, not generating a check value, and not + looking for any check values for comparison at the end of the stream. This + is for use with other formats that use the deflate compressed data format + such as zip. Those formats provide their own check values. If a custom + format is developed using the raw deflate format for compressed data, it is + recommended that a check value such as an adler32 or a crc32 be applied to + the uncompressed data as is done in the zlib, gzip, and zip formats. For + most applications, the zlib format should be used as is. Note that comments + above on the use in deflateInit2() apply to the magnitude of windowBits. + + windowBits can also be greater than 15 for optional gzip decoding. Add + 32 to windowBits to enable zlib and gzip decoding with automatic header + detection, or add 16 to decode only the gzip format (the zlib format will + return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a + crc32 instead of an adler32. 
+ + inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit2 does not perform any decompression + apart from possibly reading the zlib header if present: actual decompression + will be done by inflate(). (So next_in and avail_in may be modified, but + next_out and avail_out are unused and unchanged.) The current implementation + of inflateInit2() does not process any header information -- that is + deferred until inflate() is called. +*/ + +ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm, + const Bytef *dictionary, + uInt dictLength)); +/* + Initializes the decompression dictionary from the given uncompressed byte + sequence. This function must be called immediately after a call of inflate, + if that call returned Z_NEED_DICT. The dictionary chosen by the compressor + can be determined from the adler32 value returned by that call of inflate. + The compressor and decompressor must use exactly the same dictionary (see + deflateSetDictionary). For raw inflate, this function can be called at any + time to set the dictionary. If the provided dictionary is smaller than the + window and there is already data in the window, then the provided dictionary + will amend what's there. The application must insure that the dictionary + that was used for compression is provided. + + inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is + inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the + expected one (incorrect adler32 value). inflateSetDictionary does not + perform any decompression: this will be done by subsequent calls of + inflate(). 
+*/ + +ZEXTERN int ZEXPORT inflateGetDictionary OF((z_streamp strm, + Bytef *dictionary, + uInt *dictLength)); +/* + Returns the sliding dictionary being maintained by inflate. dictLength is + set to the number of bytes in the dictionary, and that many bytes are copied + to dictionary. dictionary must have enough space, where 32768 bytes is + always enough. If inflateGetDictionary() is called with dictionary equal to + Z_NULL, then only the dictionary length is returned, and nothing is copied. + Similarly, if dictLength is Z_NULL, then it is not set. + + inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the + stream state is inconsistent. +*/ + +ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm)); +/* + Skips invalid compressed data until a possible full flush point (see above + for the description of deflate with Z_FULL_FLUSH) can be found, or until all + available input is skipped. No output is provided. + + inflateSync searches for a 00 00 FF FF pattern in the compressed data. + All full flush points have this pattern, but not all occurrences of this + pattern are full flush points. + + inflateSync returns Z_OK if a possible full flush point has been found, + Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point + has been found, or Z_STREAM_ERROR if the stream structure was inconsistent. + In the success case, the application may save the current value of + total_in which indicates where valid compressed data was found. In the + error case, the application may repeatedly call inflateSync, providing more + input each time, until success or end of the input data. +*/ + +ZEXTERN int ZEXPORT inflateCopy OF((z_streamp dest, + z_streamp source)); +/* + Sets the destination stream as a complete copy of the source stream. + + This function can be useful when randomly accessing a large stream. 
The + first pass through the stream can periodically record the inflate state, + allowing restarting inflate at those points when randomly accessing the + stream. + + inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being Z_NULL). msg is left unchanged in both source and + destination. +*/ + +ZEXTERN int ZEXPORT inflateReset OF((z_streamp strm)); +/* + This function is equivalent to inflateEnd followed by inflateInit, + but does not free and reallocate all the internal decompression state. The + stream will keep attributes that may have been set by inflateInit2. + + inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). +*/ + +ZEXTERN int ZEXPORT inflateReset2 OF((z_streamp strm, + int windowBits)); +/* + This function is the same as inflateReset, but it also permits changing + the wrap and window size requests. The windowBits parameter is interpreted + the same as it is for inflateInit2. + + inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL), or if + the windowBits parameter is invalid. +*/ + +ZEXTERN int ZEXPORT inflatePrime OF((z_streamp strm, + int bits, + int value)); +/* + This function inserts bits in the inflate input stream. The intent is + that this function is used to start inflating at a bit position in the + middle of a byte. The provided bits will be used before any bytes are used + from next_in. This function should only be used with raw inflate, and + should be used before the first inflate() call after inflateInit2() or + inflateReset(). bits must be less than or equal to 16, and that many of the + least significant bits of value will be inserted in the input. + + If bits is negative, then the input stream bit buffer is emptied. 
Then + inflatePrime() can be called again to put bits in the buffer. This is used + to clear out bits leftover after feeding inflate a block description prior + to feeding inflate codes. + + inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +ZEXTERN long ZEXPORT inflateMark OF((z_streamp strm)); +/* + This function returns two values, one in the lower 16 bits of the return + value, and the other in the remaining upper bits, obtained by shifting the + return value down 16 bits. If the upper value is -1 and the lower value is + zero, then inflate() is currently decoding information outside of a block. + If the upper value is -1 and the lower value is non-zero, then inflate is in + the middle of a stored block, with the lower value equaling the number of + bytes from the input remaining to copy. If the upper value is not -1, then + it is the number of bits back from the current bit position in the input of + the code (literal or length/distance pair) currently being processed. In + that case the lower value is the number of bytes already emitted for that + code. + + A code is being processed if inflate is waiting for more input to complete + decoding of the code, or if it has completed decoding but is waiting for + more output space to write the literal or match data. + + inflateMark() is used to mark locations in the input data for random + access, which may be at bit positions, and to note those cases where the + output of a code may span boundaries of random access blocks. The current + location in the input stream can be determined from avail_in and data_type + as noted in the description for the Z_BLOCK flush parameter for inflate. + + inflateMark returns the value noted above or -1 << 16 if the provided + source stream state was inconsistent. 
+*/ + +ZEXTERN int ZEXPORT inflateGetHeader OF((z_streamp strm, + gz_headerp head)); +/* + inflateGetHeader() requests that gzip header information be stored in the + provided gz_header structure. inflateGetHeader() may be called after + inflateInit2() or inflateReset(), and before the first call of inflate(). + As inflate() processes the gzip stream, head->done is zero until the header + is completed, at which time head->done is set to one. If a zlib stream is + being decoded, then head->done is set to -1 to indicate that there will be + no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be + used to force inflate() to return immediately after header processing is + complete and before any actual data is decompressed. + + The text, time, xflags, and os fields are filled in with the gzip header + contents. hcrc is set to true if there is a header CRC. (The header CRC + was valid if done is set to one.) If extra is not Z_NULL, then extra_max + contains the maximum number of bytes to write to extra. Once done is true, + extra_len contains the actual extra field length, and extra contains the + extra field, or that field truncated if extra_max is less than extra_len. + If name is not Z_NULL, then up to name_max characters are written there, + terminated with a zero unless the length is greater than name_max. If + comment is not Z_NULL, then up to comm_max characters are written there, + terminated with a zero unless the length is greater than comm_max. When any + of extra, name, or comment are not Z_NULL and the respective field is not + present in the header, then that field is set to Z_NULL to signal its + absence. This allows the use of deflateSetHeader() with the returned + structure to duplicate the header. However if those fields are set to + allocated memory, then the application will need to save those pointers + elsewhere so that they can be eventually freed. 
+ + If inflateGetHeader is not used, then the header information is simply + discarded. The header is always checked for validity, including the header + CRC if present. inflateReset() will reset the process to discard the header + information. The application would need to call inflateGetHeader() again to + retrieve the header from the next gzip stream. + + inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +/* +ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits, + unsigned char FAR *window)); + + Initialize the internal stream state for decompression using inflateBack() + calls. The fields zalloc, zfree and opaque in strm must be initialized + before the call. If zalloc and zfree are Z_NULL, then the default library- + derived memory allocation routines are used. windowBits is the base two + logarithm of the window size, in the range 8..15. window is a caller + supplied buffer of that size. Except for special applications where it is + assured that deflate was used with small window sizes, windowBits must be 15 + and a 32K byte window must be supplied to be able to decompress general + deflate streams. + + See inflateBack() for the usage of these routines. + + inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of + the parameters are invalid, Z_MEM_ERROR if the internal state could not be + allocated, or Z_VERSION_ERROR if the version of the library does not match + the version of the header file. +*/ + +typedef unsigned (*in_func) OF((void FAR *, + z_const unsigned char FAR * FAR *)); +typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned)); + +ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm, + in_func in, void FAR *in_desc, + out_func out, void FAR *out_desc)); +/* + inflateBack() does a raw inflate with a single call using a call-back + interface for input and output. 
This is potentially more efficient than + inflate() for file i/o applications, in that it avoids copying between the + output and the sliding window by simply making the window itself the output + buffer. inflate() can be faster on modern CPUs when used with large + buffers. inflateBack() trusts the application to not change the output + buffer passed by the output function, at least until inflateBack() returns. + + inflateBackInit() must be called first to allocate the internal state + and to initialize the state with the user-provided window buffer. + inflateBack() may then be used multiple times to inflate a complete, raw + deflate stream with each call. inflateBackEnd() is then called to free the + allocated state. + + A raw deflate stream is one with no zlib or gzip header or trailer. + This routine would normally be used in a utility that reads zip or gzip + files and writes out uncompressed files. The utility would decode the + header and process the trailer on its own, hence this routine expects only + the raw deflate stream to decompress. This is different from the normal + behavior of inflate(), which expects either a zlib or gzip header and + trailer around the deflate stream. + + inflateBack() uses two subroutines supplied by the caller that are then + called by inflateBack() for input and output. inflateBack() calls those + routines until it reads a complete deflate stream and writes out all of the + uncompressed data, or until it encounters an error. The function's + parameters and return types are defined above in the in_func and out_func + typedefs. inflateBack() will call in(in_desc, &buf) which should return the + number of bytes of provided input, and a pointer to that input in buf. If + there is no input available, in() must return zero--buf is ignored in that + case--and inflateBack() will return a buffer error. inflateBack() will call + out(out_desc, buf, len) to write the uncompressed data buf[0..len-1]. 
out() + should return zero on success, or non-zero on failure. If out() returns + non-zero, inflateBack() will return with an error. Neither in() nor out() + are permitted to change the contents of the window provided to + inflateBackInit(), which is also the buffer that out() uses to write from. + The length written by out() will be at most the window size. Any non-zero + amount of input may be provided by in(). + + For convenience, inflateBack() can be provided input on the first call by + setting strm->next_in and strm->avail_in. If that input is exhausted, then + in() will be called. Therefore strm->next_in must be initialized before + calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called + immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in + must also be initialized, and then if strm->avail_in is not zero, input will + initially be taken from strm->next_in[0 .. strm->avail_in - 1]. + + The in_desc and out_desc parameters of inflateBack() is passed as the + first parameter of in() and out() respectively when they are called. These + descriptors can be optionally used to pass any information that the caller- + supplied in() and out() functions need to do their job. + + On return, inflateBack() will set strm->next_in and strm->avail_in to + pass back any unused input that was provided by the last in() call. The + return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR + if in() or out() returned an error, Z_DATA_ERROR if there was a format error + in the deflate stream (in which case strm->msg is set to indicate the nature + of the error), or Z_STREAM_ERROR if the stream was not properly initialized. + In the case of Z_BUF_ERROR, an input or output error can be distinguished + using strm->next_in which will be Z_NULL only if in() returned an error. If + strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning + non-zero. 
(in() will always be called before out(), so strm->next_in is + assured to be defined if out() returns non-zero.) Note that inflateBack() + cannot return Z_OK. +*/ + +ZEXTERN int ZEXPORT inflateBackEnd OF((z_streamp strm)); +/* + All memory allocated by inflateBackInit() is freed. + + inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream + state was inconsistent. +*/ + +ZEXTERN uLong ZEXPORT zlibCompileFlags OF((void)); +/* Return flags indicating compile-time options. + + Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other: + 1.0: size of uInt + 3.2: size of uLong + 5.4: size of voidpf (pointer) + 7.6: size of z_off_t + + Compiler, assembler, and debug options: + 8: DEBUG + 9: ASMV or ASMINF -- use ASM code + 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention + 11: 0 (reserved) + + One-time table building (smaller code, but not thread-safe if true): + 12: BUILDFIXED -- build static block decoding tables when needed + 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed + 14,15: 0 (reserved) + + Library content (indicates missing functionality): + 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking + deflate code when not needed) + 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect + and decode gzip streams (to avoid linking crc code) + 18-19: 0 (reserved) + + Operation variations (changes in library functionality): + 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate + 21: FASTEST -- deflate algorithm with only one, lowest compression level + 22,23: 0 (reserved) + + The sprintf variant used by gzprintf (zero is best): + 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format + 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure! 
+ 26: 0 = returns value, 1 = void -- 1 means inferred string length returned + + Remainder: + 27-31: 0 (reserved) + */ + +#ifndef Z_SOLO + + /* utility functions */ + +/* + The following utility functions are implemented on top of the basic + stream-oriented functions. To simplify the interface, some default options + are assumed (compression level and memory usage, standard memory allocation + functions). The source code of these utility functions can be modified if + you need special options. +*/ + +ZEXTERN int ZEXPORT compress OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen)); +/* + Compresses the source buffer into the destination buffer. sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be at least the value returned by + compressBound(sourceLen). Upon exit, destLen is the actual size of the + compressed buffer. + + compress returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_BUF_ERROR if there was not enough room in the output + buffer. +*/ + +ZEXTERN int ZEXPORT compress2 OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen, + int level)); +/* + Compresses the source buffer into the destination buffer. The level + parameter has the same meaning as in deflateInit. sourceLen is the byte + length of the source buffer. Upon entry, destLen is the total size of the + destination buffer, which must be at least the value returned by + compressBound(sourceLen). Upon exit, destLen is the actual size of the + compressed buffer. + + compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_BUF_ERROR if there was not enough room in the output buffer, + Z_STREAM_ERROR if the level parameter is invalid. +*/ + +ZEXTERN uLong ZEXPORT compressBound OF((uLong sourceLen)); +/* + compressBound() returns an upper bound on the compressed size after + compress() or compress2() on sourceLen bytes. 
It would be used before a + compress() or compress2() call to allocate the destination buffer. +*/ + +ZEXTERN int ZEXPORT uncompress OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen)); +/* + Decompresses the source buffer into the destination buffer. sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be large enough to hold the entire + uncompressed data. (The size of the uncompressed data must have been saved + previously by the compressor and transmitted to the decompressor by some + mechanism outside the scope of this compression library.) Upon exit, destLen + is the actual size of the uncompressed buffer. + + uncompress returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_BUF_ERROR if there was not enough room in the output + buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. In + the case where there is not enough room, uncompress() will fill the output + buffer with the uncompressed data up to that point. +*/ + + /* gzip file access functions */ + +/* + This library supports reading and writing files in gzip (.gz) format with + an interface similar to that of stdio, using the functions that start with + "gz". The gzip format is different from the zlib format. gzip is a gzip + wrapper, documented in RFC 1952, wrapped around a deflate stream. +*/ + +typedef struct gzFile_s *gzFile; /* semi-opaque gzip file descriptor */ + +/* +ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode)); + + Opens a gzip (.gz) file for reading or writing. The mode parameter is as + in fopen ("rb" or "wb") but can also include a compression level ("wb9") or + a strategy: 'f' for filtered data as in "wb6f", 'h' for Huffman-only + compression as in "wb1h", 'R' for run-length encoding as in "wb1R", or 'F' + for fixed code compression as in "wb9F". 
(See the description of + deflateInit2 for more information about the strategy parameter.) 'T' will + request transparent writing or appending with no compression and not using + the gzip format. + + "a" can be used instead of "w" to request that the gzip stream that will + be written be appended to the file. "+" will result in an error, since + reading and writing to the same gzip file is not supported. The addition of + "x" when writing will create the file exclusively, which fails if the file + already exists. On systems that support it, the addition of "e" when + reading or writing will set the flag to close the file on an execve() call. + + These functions, as well as gzip, will read and decode a sequence of gzip + streams in a file. The append function of gzopen() can be used to create + such a file. (Also see gzflush() for another way to do this.) When + appending, gzopen does not test whether the file begins with a gzip stream, + nor does it look for the end of the gzip streams to begin appending. gzopen + will simply append a gzip stream to the existing file. + + gzopen can be used to read a file which is not in gzip format; in this + case gzread will directly read from the file without decompression. When + reading, this will be detected automatically by looking for the magic two- + byte gzip header. + + gzopen returns NULL if the file could not be opened, if there was + insufficient memory to allocate the gzFile state, or if an invalid mode was + specified (an 'r', 'w', or 'a' was not provided, or '+' was provided). + errno can be checked to determine if the reason gzopen failed was that the + file could not be opened. +*/ + +ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode)); +/* + gzdopen associates a gzFile with the file descriptor fd. File descriptors + are obtained from calls like open, dup, creat, pipe or fileno (if the file + has been previously opened with fopen). The mode parameter is as in gzopen. 
+ + The next call of gzclose on the returned gzFile will also close the file + descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor + fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd, + mode);. The duplicated descriptor should be saved to avoid a leak, since + gzdopen does not close fd if it fails. If you are using fileno() to get the + file descriptor from a FILE *, then you will have to use dup() to avoid + double-close()ing the file descriptor. Both gzclose() and fclose() will + close the associated file descriptor, so they need to have different file + descriptors. + + gzdopen returns NULL if there was insufficient memory to allocate the + gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not + provided, or '+' was provided), or if fd is -1. The file descriptor is not + used until the next gz* read, write, seek, or close operation, so gzdopen + will not detect if fd is invalid (unless fd is -1). +*/ + +ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size)); +/* + Set the internal buffer size used by this library's functions. The + default buffer size is 8192 bytes. This function must be called after + gzopen() or gzdopen(), and before any other calls that read or write the + file. The buffer memory allocation is always deferred to the first read or + write. Two buffers are allocated, either both of the specified size when + writing, or one of the specified size and the other twice that size when + reading. A larger buffer size of, for example, 64K or 128K bytes will + noticeably increase the speed of decompression (reading). + + The new buffer size also affects the maximum length for gzprintf(). + + gzbuffer() returns 0 on success, or -1 on failure, such as being called + too late. +*/ + +ZEXTERN int ZEXPORT gzsetparams OF((gzFile file, int level, int strategy)); +/* + Dynamically update the compression level or strategy. 
See the description + of deflateInit2 for the meaning of these parameters. + + gzsetparams returns Z_OK if success, or Z_STREAM_ERROR if the file was not + opened for writing. +*/ + +ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len)); +/* + Reads the given number of uncompressed bytes from the compressed file. If + the input file is not in gzip format, gzread copies the given number of + bytes into the buffer directly from the file. + + After reaching the end of a gzip stream in the input, gzread will continue + to read, looking for another gzip stream. Any number of gzip streams may be + concatenated in the input file, and will all be decompressed by gzread(). + If something other than a gzip stream is encountered after a gzip stream, + that remaining trailing garbage is ignored (and no error is returned). + + gzread can be used to read a gzip file that is being concurrently written. + Upon reaching the end of the input, gzread will return with the available + data. If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then + gzclearerr can be used to clear the end of file indicator in order to permit + gzread to be tried again. Z_OK indicates that a gzip stream was completed + on the last gzread. Z_BUF_ERROR indicates that the input file ended in the + middle of a gzip stream. Note that gzread does not return -1 in the event + of an incomplete gzip stream. This error is deferred until gzclose(), which + will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip + stream. Alternatively, gzerror can be used before gzclose to detect this + case. + + gzread returns the number of uncompressed bytes actually read, less than + len for end of file, or -1 for error. +*/ + +ZEXTERN int ZEXPORT gzwrite OF((gzFile file, + voidpc buf, unsigned len)); +/* + Writes the given number of uncompressed bytes into the compressed file. + gzwrite returns the number of uncompressed bytes written or 0 in case of + error. 
+*/ + +ZEXTERN int ZEXPORTVA gzprintf Z_ARG((gzFile file, const char *format, ...)); +/* + Converts, formats, and writes the arguments to the compressed file under + control of the format string, as in fprintf. gzprintf returns the number of + uncompressed bytes actually written, or 0 in case of error. The number of + uncompressed bytes written is limited to 8191, or one less than the buffer + size given to gzbuffer(). The caller should assure that this limit is not + exceeded. If it is exceeded, then gzprintf() will return an error (0) with + nothing written. In this case, there may also be a buffer overflow with + unpredictable consequences, which is possible only if zlib was compiled with + the insecure functions sprintf() or vsprintf() because the secure snprintf() + or vsnprintf() functions were not available. This can be determined using + zlibCompileFlags(). +*/ + +ZEXTERN int ZEXPORT gzputs OF((gzFile file, const char *s)); +/* + Writes the given null-terminated string to the compressed file, excluding + the terminating null character. + + gzputs returns the number of characters written, or -1 in case of error. +*/ + +ZEXTERN char * ZEXPORT gzgets OF((gzFile file, char *buf, int len)); +/* + Reads bytes from the compressed file until len-1 characters are read, or a + newline character is read and transferred to buf, or an end-of-file + condition is encountered. If any characters are read or if len == 1, the + string is terminated with a null character. If no characters are read due + to an end-of-file or len < 1, then the buffer is left untouched. + + gzgets returns buf which is a null-terminated string, or it returns NULL + for end-of-file or in case of error. If there was an error, the contents at + buf are indeterminate. +*/ + +ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c)); +/* + Writes c, converted to an unsigned char, into the compressed file. gzputc + returns the value that was written, or -1 in case of error. 
+*/ + +ZEXTERN int ZEXPORT gzgetc OF((gzFile file)); +/* + Reads one byte from the compressed file. gzgetc returns this byte or -1 + in case of end of file or error. This is implemented as a macro for speed. + As such, it does not do all of the checking the other functions do. I.e. + it does not check to see if file is NULL, nor whether the structure file + points to has been clobbered or not. +*/ + +ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file)); +/* + Push one character back onto the stream to be read as the first character + on the next read. At least one character of push-back is allowed. + gzungetc() returns the character pushed, or -1 on failure. gzungetc() will + fail if c is -1, and may fail if a character has been pushed but not read + yet. If gzungetc is used immediately after gzopen or gzdopen, at least the + output buffer size of pushed characters is allowed. (See gzbuffer above.) + The pushed character will be discarded if the stream is repositioned with + gzseek() or gzrewind(). +*/ + +ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush)); +/* + Flushes all pending output into the compressed file. The parameter flush + is as in the deflate() function. The return value is the zlib error number + (see function gzerror below). gzflush is only permitted when writing. + + If the flush parameter is Z_FINISH, the remaining data is written and the + gzip stream is completed in the output. If gzwrite() is called again, a new + gzip stream will be started in the output. gzread() is able to read such + concatented gzip streams. + + gzflush should be called only when strictly necessary because it will + degrade compression if called too often. +*/ + +/* +ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file, + z_off_t offset, int whence)); + + Sets the starting position for the next gzread or gzwrite on the given + compressed file. The offset represents a number of bytes in the + uncompressed data stream. 
The whence parameter is defined as in lseek(2); + the value SEEK_END is not supported. + + If the file is opened for reading, this function is emulated but can be + extremely slow. If the file is opened for writing, only forward seeks are + supported; gzseek then compresses a sequence of zeroes up to the new + starting position. + + gzseek returns the resulting offset location as measured in bytes from + the beginning of the uncompressed stream, or -1 in case of error, in + particular if the file is opened for writing and the new starting position + would be before the current position. +*/ + +ZEXTERN int ZEXPORT gzrewind OF((gzFile file)); +/* + Rewinds the given file. This function is supported only for reading. + + gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET) +*/ + +/* +ZEXTERN z_off_t ZEXPORT gztell OF((gzFile file)); + + Returns the starting position for the next gzread or gzwrite on the given + compressed file. This position represents a number of bytes in the + uncompressed data stream, and is zero when starting, even if appending or + reading a gzip stream from the middle of a file using gzdopen(). + + gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) +*/ + +/* +ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file)); + + Returns the current offset in the file being read or written. This offset + includes the count of bytes that precede the gzip stream, for example when + appending or when using gzdopen() for reading. When reading, the offset + does not include as yet unused buffered input. This information can be used + for a progress indicator. On error, gzoffset() returns -1. +*/ + +ZEXTERN int ZEXPORT gzeof OF((gzFile file)); +/* + Returns true (1) if the end-of-file indicator has been set while reading, + false (0) otherwise. Note that the end-of-file indicator is set only if the + read tried to go past the end of the input, but came up short. 
Therefore, + just like feof(), gzeof() may return false even if there is no more data to + read, in the event that the last read request was for the exact number of + bytes remaining in the input file. This will happen if the input file size + is an exact multiple of the buffer size. + + If gzeof() returns true, then the read functions will return no more data, + unless the end-of-file indicator is reset by gzclearerr() and the input file + has grown since the previous end of file was detected. +*/ + +ZEXTERN int ZEXPORT gzdirect OF((gzFile file)); +/* + Returns true (1) if file is being copied directly while reading, or false + (0) if file is a gzip stream being decompressed. + + If the input file is empty, gzdirect() will return true, since the input + does not contain a gzip stream. + + If gzdirect() is used immediately after gzopen() or gzdopen() it will + cause buffers to be allocated to allow reading the file to determine if it + is a gzip file. Therefore if gzbuffer() is used, it should be called before + gzdirect(). + + When writing, gzdirect() returns true (1) if transparent writing was + requested ("wT" for the gzopen() mode), or false (0) otherwise. (Note: + gzdirect() is not needed when writing. Transparent writing must be + explicitly requested, so the application already knows the answer. When + linking statically, using gzdirect() will include all of the zlib code for + gzip file reading and decompression, which may not be desired.) +*/ + +ZEXTERN int ZEXPORT gzclose OF((gzFile file)); +/* + Flushes all pending output if necessary, closes the compressed file and + deallocates the (de)compression state. Note that once file is closed, you + cannot call gzerror with file, since its structures have been deallocated. + gzclose must not be called more than once on the same file, just as free + must not be called more than once on the same allocation. 
+ + gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a + file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the + last read ended in the middle of a gzip stream, or Z_OK on success. +*/ + +ZEXTERN int ZEXPORT gzclose_r OF((gzFile file)); +ZEXTERN int ZEXPORT gzclose_w OF((gzFile file)); +/* + Same as gzclose(), but gzclose_r() is only for use when reading, and + gzclose_w() is only for use when writing or appending. The advantage to + using these instead of gzclose() is that they avoid linking in zlib + compression or decompression code that is not used when only reading or only + writing respectively. If gzclose() is used, then both compression and + decompression code will be included the application when linking to a static + zlib library. +*/ + +ZEXTERN const char * ZEXPORT gzerror OF((gzFile file, int *errnum)); +/* + Returns the error message for the last error which occurred on the given + compressed file. errnum is set to zlib error number. If an error occurred + in the file system and not in the compression library, errnum is set to + Z_ERRNO and the application may consult errno to get the exact error code. + + The application must not modify the returned string. Future calls to + this function may invalidate the previously returned string. If file is + closed, then the string previously returned by gzerror will no longer be + available. + + gzerror() should be used to distinguish errors from end-of-file for those + functions above that do not distinguish those cases in their return values. +*/ + +ZEXTERN void ZEXPORT gzclearerr OF((gzFile file)); +/* + Clears the error and end-of-file flags for file. This is analogous to the + clearerr() function in stdio. This is useful for continuing to read a gzip + file that is being written concurrently. 
+*/ + +#endif /* !Z_SOLO */ + + /* checksum functions */ + +/* + These functions are not related to compression but are exported + anyway because they might be useful in applications using the compression + library. +*/ + +ZEXTERN uLong ZEXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len)); +/* + Update a running Adler-32 checksum with the bytes buf[0..len-1] and + return the updated checksum. If buf is Z_NULL, this function returns the + required initial value for the checksum. + + An Adler-32 checksum is almost as reliable as a CRC32 but can be computed + much faster. + + Usage example: + + uLong adler = adler32(0L, Z_NULL, 0); + + while (read_buffer(buffer, length) != EOF) { + adler = adler32(adler, buffer, length); + } + if (adler != original_adler) error(); +*/ + +/* +ZEXTERN uLong ZEXPORT adler32_combine OF((uLong adler1, uLong adler2, + z_off_t len2)); + + Combine two Adler-32 checksums into one. For two sequences of bytes, seq1 + and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for + each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of + seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. Note + that the z_off_t type (like off_t) is a signed integer. If len2 is + negative, the result has no meaning or utility. +*/ + +ZEXTERN uLong ZEXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len)); +/* + Update a running CRC-32 with the bytes buf[0..len-1] and return the + updated CRC-32. If buf is Z_NULL, this function returns the required + initial value for the crc. Pre- and post-conditioning (one's complement) is + performed within this function so it shouldn't be done by the application. 
+ + Usage example: + + uLong crc = crc32(0L, Z_NULL, 0); + + while (read_buffer(buffer, length) != EOF) { + crc = crc32(crc, buffer, length); + } + if (crc != original_crc) error(); +*/ + +/* +ZEXTERN uLong ZEXPORT crc32_combine OF((uLong crc1, uLong crc2, z_off_t len2)); + + Combine two CRC-32 check values into one. For two sequences of bytes, + seq1 and seq2 with lengths len1 and len2, CRC-32 check values were + calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32 + check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and + len2. +*/ + + + /* various hacks, don't look :) */ + +/* deflateInit and inflateInit are macros to allow checking the zlib version + * and the compiler's view of z_stream: + */ +ZEXTERN int ZEXPORT deflateInit_ OF((z_streamp strm, int level, + const char *version, int stream_size)); +ZEXTERN int ZEXPORT inflateInit_ OF((z_streamp strm, + const char *version, int stream_size)); +ZEXTERN int ZEXPORT deflateInit2_ OF((z_streamp strm, int level, int method, + int windowBits, int memLevel, + int strategy, const char *version, + int stream_size)); +ZEXTERN int ZEXPORT inflateInit2_ OF((z_streamp strm, int windowBits, + const char *version, int stream_size)); +ZEXTERN int ZEXPORT inflateBackInit_ OF((z_streamp strm, int windowBits, + unsigned char FAR *window, + const char *version, + int stream_size)); +#define deflateInit(strm, level) \ + deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) +#define inflateInit(strm) \ + inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) +#define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ + deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ + (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) +#define inflateInit2(strm, windowBits) \ + inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ + (int)sizeof(z_stream)) +#define inflateBackInit(strm, windowBits, window) \ + inflateBackInit_((strm), (windowBits), (window), \ + 
ZLIB_VERSION, (int)sizeof(z_stream)) + +#ifndef Z_SOLO + +/* gzgetc() macro and its supporting function and exposed data structure. Note + * that the real internal state is much larger than the exposed structure. + * This abbreviated structure exposes just enough for the gzgetc() macro. The + * user should not mess with these exposed elements, since their names or + * behavior could change in the future, perhaps even capriciously. They can + * only be used by the gzgetc() macro. You have been warned. + */ +struct gzFile_s { + unsigned have; + unsigned char *next; + z_off64_t pos; +}; +ZEXTERN int ZEXPORT gzgetc_ OF((gzFile file)); /* backward compatibility */ +#ifdef Z_PREFIX_SET +# undef z_gzgetc +# define z_gzgetc(g) \ + ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : gzgetc(g)) +#else +# define gzgetc(g) \ + ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : gzgetc(g)) +#endif + +/* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or + * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if + * both are true, the application gets the *64 functions, and the regular + * functions are changed to 64 bits) -- in case these are set on systems + * without large file support, _LFS64_LARGEFILE must also be true + */ +#ifdef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); + ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off64_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off64_t)); +#endif + +#if !defined(ZLIB_INTERNAL) && defined(Z_WANT64) +# ifdef Z_PREFIX_SET +# define z_gzopen z_gzopen64 +# define z_gzseek z_gzseek64 +# define z_gztell z_gztell64 +# define z_gzoffset z_gzoffset64 +# define z_adler32_combine z_adler32_combine64 +# define z_crc32_combine z_crc32_combine64 +# else +# define 
gzopen gzopen64 +# define gzseek gzseek64 +# define gztell gztell64 +# define gzoffset gzoffset64 +# define adler32_combine adler32_combine64 +# define crc32_combine crc32_combine64 +# endif +# ifndef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off_t ZEXPORT gzseek64 OF((gzFile, z_off_t, int)); + ZEXTERN z_off_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); +# endif +#else + ZEXTERN gzFile ZEXPORT gzopen OF((const char *, const char *)); + ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile, z_off_t, int)); + ZEXTERN z_off_t ZEXPORT gztell OF((gzFile)); + ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); +#endif + +#else /* Z_SOLO */ + + ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); + +#endif /* !Z_SOLO */ + +/* hack for buggy compilers */ +#if !defined(ZUTIL_H) && !defined(NO_DUMMY_DECL) + struct internal_state {int dummy;}; +#endif + +/* undocumented functions */ +ZEXTERN const char * ZEXPORT zError OF((int)); +ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp)); +ZEXTERN const z_crc_t FAR * ZEXPORT get_crc_table OF((void)); +ZEXTERN int ZEXPORT inflateUndermine OF((z_streamp, int)); +ZEXTERN int ZEXPORT inflateResetKeep OF((z_streamp)); +ZEXTERN int ZEXPORT deflateResetKeep OF((z_streamp)); +#if defined(_WIN32) && !defined(Z_SOLO) +ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path, + const char *mode)); +#endif +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +ZEXTERN int ZEXPORTVA gzvprintf Z_ARG((gzFile file, + const char *format, + va_list va)); +# endif +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* 
ZLIB_H */ diff --git a/tests/scancode/data/resource/samples/zlib/zutil.c b/tests/scancode/data/resource/samples/zlib/zutil.c new file mode 100644 index 00000000000..23d2ebef008 --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/zutil.c @@ -0,0 +1,324 @@ +/* zutil.c -- target dependent utility functions for the compression library + * Copyright (C) 1995-2005, 2010, 2011, 2012 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#include "zutil.h" +#ifndef Z_SOLO +# include "gzguts.h" +#endif + +#ifndef NO_DUMMY_DECL +struct internal_state {int dummy;}; /* for buggy compilers */ +#endif + +z_const char * const z_errmsg[10] = { +"need dictionary", /* Z_NEED_DICT 2 */ +"stream end", /* Z_STREAM_END 1 */ +"", /* Z_OK 0 */ +"file error", /* Z_ERRNO (-1) */ +"stream error", /* Z_STREAM_ERROR (-2) */ +"data error", /* Z_DATA_ERROR (-3) */ +"insufficient memory", /* Z_MEM_ERROR (-4) */ +"buffer error", /* Z_BUF_ERROR (-5) */ +"incompatible version",/* Z_VERSION_ERROR (-6) */ +""}; + + +const char * ZEXPORT zlibVersion() +{ + return ZLIB_VERSION; +} + +uLong ZEXPORT zlibCompileFlags() +{ + uLong flags; + + flags = 0; + switch ((int)(sizeof(uInt))) { + case 2: break; + case 4: flags += 1; break; + case 8: flags += 2; break; + default: flags += 3; + } + switch ((int)(sizeof(uLong))) { + case 2: break; + case 4: flags += 1 << 2; break; + case 8: flags += 2 << 2; break; + default: flags += 3 << 2; + } + switch ((int)(sizeof(voidpf))) { + case 2: break; + case 4: flags += 1 << 4; break; + case 8: flags += 2 << 4; break; + default: flags += 3 << 4; + } + switch ((int)(sizeof(z_off_t))) { + case 2: break; + case 4: flags += 1 << 6; break; + case 8: flags += 2 << 6; break; + default: flags += 3 << 6; + } +#ifdef DEBUG + flags += 1 << 8; +#endif +#if defined(ASMV) || defined(ASMINF) + flags += 1 << 9; +#endif +#ifdef ZLIB_WINAPI + flags += 1 << 10; +#endif +#ifdef BUILDFIXED + flags += 1 << 12; +#endif 
+#ifdef DYNAMIC_CRC_TABLE + flags += 1 << 13; +#endif +#ifdef NO_GZCOMPRESS + flags += 1L << 16; +#endif +#ifdef NO_GZIP + flags += 1L << 17; +#endif +#ifdef PKZIP_BUG_WORKAROUND + flags += 1L << 20; +#endif +#ifdef FASTEST + flags += 1L << 21; +#endif +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifdef NO_vsnprintf + flags += 1L << 25; +# ifdef HAS_vsprintf_void + flags += 1L << 26; +# endif +# else +# ifdef HAS_vsnprintf_void + flags += 1L << 26; +# endif +# endif +#else + flags += 1L << 24; +# ifdef NO_snprintf + flags += 1L << 25; +# ifdef HAS_sprintf_void + flags += 1L << 26; +# endif +# else +# ifdef HAS_snprintf_void + flags += 1L << 26; +# endif +# endif +#endif + return flags; +} + +#ifdef DEBUG + +# ifndef verbose +# define verbose 0 +# endif +int ZLIB_INTERNAL z_verbose = verbose; + +void ZLIB_INTERNAL z_error (m) + char *m; +{ + fprintf(stderr, "%s\n", m); + exit(1); +} +#endif + +/* exported to allow conversion of error code to string for compress() and + * uncompress() + */ +const char * ZEXPORT zError(err) + int err; +{ + return ERR_MSG(err); +} + +#if defined(_WIN32_WCE) + /* The Microsoft C Run-Time Library for Windows CE doesn't have + * errno. We define it as a global variable to simplify porting. + * Its value is always 0 and should not be used. + */ + int errno = 0; +#endif + +#ifndef HAVE_MEMCPY + +void ZLIB_INTERNAL zmemcpy(dest, source, len) + Bytef* dest; + const Bytef* source; + uInt len; +{ + if (len == 0) return; + do { + *dest++ = *source++; /* ??? to be unrolled */ + } while (--len != 0); +} + +int ZLIB_INTERNAL zmemcmp(s1, s2, len) + const Bytef* s1; + const Bytef* s2; + uInt len; +{ + uInt j; + + for (j = 0; j < len; j++) { + if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1; + } + return 0; +} + +void ZLIB_INTERNAL zmemzero(dest, len) + Bytef* dest; + uInt len; +{ + if (len == 0) return; + do { + *dest++ = 0; /* ??? 
to be unrolled */ + } while (--len != 0); +} +#endif + +#ifndef Z_SOLO + +#ifdef SYS16BIT + +#ifdef __TURBOC__ +/* Turbo C in 16-bit mode */ + +# define MY_ZCALLOC + +/* Turbo C malloc() does not allow dynamic allocation of 64K bytes + * and farmalloc(64K) returns a pointer with an offset of 8, so we + * must fix the pointer. Warning: the pointer must be put back to its + * original form in order to free it, use zcfree(). + */ + +#define MAX_PTR 10 +/* 10*64K = 640K */ + +local int next_ptr = 0; + +typedef struct ptr_table_s { + voidpf org_ptr; + voidpf new_ptr; +} ptr_table; + +local ptr_table table[MAX_PTR]; +/* This table is used to remember the original form of pointers + * to large buffers (64K). Such pointers are normalized with a zero offset. + * Since MSDOS is not a preemptive multitasking OS, this table is not + * protected from concurrent access. This hack doesn't work anyway on + * a protected system like OS/2. Use Microsoft C instead. + */ + +voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, unsigned items, unsigned size) +{ + voidpf buf = opaque; /* just to make some compilers happy */ + ulg bsize = (ulg)items*size; + + /* If we allocate less than 65520 bytes, we assume that farmalloc + * will return a usable pointer which doesn't have to be normalized. 
+ */ + if (bsize < 65520L) { + buf = farmalloc(bsize); + if (*(ush*)&buf != 0) return buf; + } else { + buf = farmalloc(bsize + 16L); + } + if (buf == NULL || next_ptr >= MAX_PTR) return NULL; + table[next_ptr].org_ptr = buf; + + /* Normalize the pointer to seg:0 */ + *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4; + *(ush*)&buf = 0; + table[next_ptr++].new_ptr = buf; + return buf; +} + +void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) +{ + int n; + if (*(ush*)&ptr != 0) { /* object < 64K */ + farfree(ptr); + return; + } + /* Find the original pointer */ + for (n = 0; n < next_ptr; n++) { + if (ptr != table[n].new_ptr) continue; + + farfree(table[n].org_ptr); + while (++n < next_ptr) { + table[n-1] = table[n]; + } + next_ptr--; + return; + } + ptr = opaque; /* just to make some compilers happy */ + Assert(0, "zcfree: ptr not found"); +} + +#endif /* __TURBOC__ */ + + +#ifdef M_I86 +/* Microsoft C in 16-bit mode */ + +# define MY_ZCALLOC + +#if (!defined(_MSC_VER) || (_MSC_VER <= 600)) +# define _halloc halloc +# define _hfree hfree +#endif + +voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, uInt items, uInt size) +{ + if (opaque) opaque = 0; /* to make compiler happy */ + return _halloc((long)items, size); +} + +void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) +{ + if (opaque) opaque = 0; /* to make compiler happy */ + _hfree(ptr); +} + +#endif /* M_I86 */ + +#endif /* SYS16BIT */ + + +#ifndef MY_ZCALLOC /* Any system without a special alloc function */ + +#ifndef STDC +extern voidp malloc OF((uInt size)); +extern voidp calloc OF((uInt items, uInt size)); +extern void free OF((voidpf ptr)); +#endif + +voidpf ZLIB_INTERNAL zcalloc (opaque, items, size) + voidpf opaque; + unsigned items; + unsigned size; +{ + if (opaque) items += size - size; /* make compiler happy */ + return sizeof(uInt) > 2 ? 
(voidpf)malloc(items * size) : + (voidpf)calloc(items, size); +} + +void ZLIB_INTERNAL zcfree (opaque, ptr) + voidpf opaque; + voidpf ptr; +{ + free(ptr); + if (opaque) return; /* make compiler happy */ +} + +#endif /* MY_ZCALLOC */ + +#endif /* !Z_SOLO */ diff --git a/tests/scancode/data/resource/samples/zlib/zutil.h b/tests/scancode/data/resource/samples/zlib/zutil.h new file mode 100644 index 00000000000..24ab06b1cf6 --- /dev/null +++ b/tests/scancode/data/resource/samples/zlib/zutil.h @@ -0,0 +1,253 @@ +/* zutil.h -- internal interface and configuration of the compression library + * Copyright (C) 1995-2013 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +/* @(#) $Id$ */ + +#ifndef ZUTIL_H +#define ZUTIL_H + +#ifdef HAVE_HIDDEN +# define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) +#else +# define ZLIB_INTERNAL +#endif + +#include "zlib.h" + +#if defined(STDC) && !defined(Z_SOLO) +# if !(defined(_WIN32_WCE) && defined(_MSC_VER)) +# include +# endif +# include +# include +#endif + +#ifdef Z_SOLO + typedef long ptrdiff_t; /* guess -- will be caught if guess is wrong */ +#endif + +#ifndef local +# define local static +#endif +/* compile with -Dlocal if your debugger can't find static symbols */ + +typedef unsigned char uch; +typedef uch FAR uchf; +typedef unsigned short ush; +typedef ush FAR ushf; +typedef unsigned long ulg; + +extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */ +/* (size given to avoid silly warnings with Visual C++) */ + +#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)] + +#define ERR_RETURN(strm,err) \ + return (strm->msg = ERR_MSG(err), (err)) +/* To be used only when the state is known to be valid */ + + /* common constants */ + +#ifndef DEF_WBITS +# define DEF_WBITS 
MAX_WBITS +#endif +/* default windowBits for decompression. MAX_WBITS is for compression only */ + +#if MAX_MEM_LEVEL >= 8 +# define DEF_MEM_LEVEL 8 +#else +# define DEF_MEM_LEVEL MAX_MEM_LEVEL +#endif +/* default memLevel */ + +#define STORED_BLOCK 0 +#define STATIC_TREES 1 +#define DYN_TREES 2 +/* The three kinds of block type */ + +#define MIN_MATCH 3 +#define MAX_MATCH 258 +/* The minimum and maximum match lengths */ + +#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */ + + /* target dependencies */ + +#if defined(MSDOS) || (defined(WINDOWS) && !defined(WIN32)) +# define OS_CODE 0x00 +# ifndef Z_SOLO +# if defined(__TURBOC__) || defined(__BORLANDC__) +# if (__STDC__ == 1) && (defined(__LARGE__) || defined(__COMPACT__)) + /* Allow compilation with ANSI keywords only enabled */ + void _Cdecl farfree( void *block ); + void *_Cdecl farmalloc( unsigned long nbytes ); +# else +# include +# endif +# else /* MSC or DJGPP */ +# include +# endif +# endif +#endif + +#ifdef AMIGA +# define OS_CODE 0x01 +#endif + +#if defined(VAXC) || defined(VMS) +# define OS_CODE 0x02 +# define F_OPEN(name, mode) \ + fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512") +#endif + +#if defined(ATARI) || defined(atarist) +# define OS_CODE 0x05 +#endif + +#ifdef OS2 +# define OS_CODE 0x06 +# if defined(M_I86) && !defined(Z_SOLO) +# include +# endif +#endif + +#if defined(MACOS) || defined(TARGET_OS_MAC) +# define OS_CODE 0x07 +# ifndef Z_SOLO +# if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os +# include /* for fdopen */ +# else +# ifndef fdopen +# define fdopen(fd,mode) NULL /* No fdopen() */ +# endif +# endif +# endif +#endif + +#ifdef TOPS20 +# define OS_CODE 0x0a +#endif + +#ifdef WIN32 +# ifndef __CYGWIN__ /* Cygwin is Unix, not Win32 */ +# define OS_CODE 0x0b +# endif +#endif + +#ifdef __50SERIES /* Prime/PRIMOS */ +# define OS_CODE 0x0f +#endif + +#if defined(_BEOS_) || defined(RISCOS) +# define fdopen(fd,mode) NULL /* No fdopen() 
*/ +#endif + +#if (defined(_MSC_VER) && (_MSC_VER > 600)) && !defined __INTERIX +# if defined(_WIN32_WCE) +# define fdopen(fd,mode) NULL /* No fdopen() */ +# ifndef _PTRDIFF_T_DEFINED + typedef int ptrdiff_t; +# define _PTRDIFF_T_DEFINED +# endif +# else +# define fdopen(fd,type) _fdopen(fd,type) +# endif +#endif + +#if defined(__BORLANDC__) && !defined(MSDOS) + #pragma warn -8004 + #pragma warn -8008 + #pragma warn -8066 +#endif + +/* provide prototypes for these when building zlib without LFS */ +#if !defined(_WIN32) && \ + (!defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0) + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); +#endif + + /* common defaults */ + +#ifndef OS_CODE +# define OS_CODE 0x03 /* assume Unix */ +#endif + +#ifndef F_OPEN +# define F_OPEN(name, mode) fopen((name), (mode)) +#endif + + /* functions */ + +#if defined(pyr) || defined(Z_SOLO) +# define NO_MEMCPY +#endif +#if defined(SMALL_MEDIUM) && !defined(_MSC_VER) && !defined(__SC__) + /* Use our own functions for small and medium model with MSC <= 5.0. + * You may have to use the same strategy for Borland C (untested). + * The __SC__ check is for Symantec. 
+ */ +# define NO_MEMCPY +#endif +#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY) +# define HAVE_MEMCPY +#endif +#ifdef HAVE_MEMCPY +# ifdef SMALL_MEDIUM /* MSDOS small or medium model */ +# define zmemcpy _fmemcpy +# define zmemcmp _fmemcmp +# define zmemzero(dest, len) _fmemset(dest, 0, len) +# else +# define zmemcpy memcpy +# define zmemcmp memcmp +# define zmemzero(dest, len) memset(dest, 0, len) +# endif +#else + void ZLIB_INTERNAL zmemcpy OF((Bytef* dest, const Bytef* source, uInt len)); + int ZLIB_INTERNAL zmemcmp OF((const Bytef* s1, const Bytef* s2, uInt len)); + void ZLIB_INTERNAL zmemzero OF((Bytef* dest, uInt len)); +#endif + +/* Diagnostic functions */ +#ifdef DEBUG +# include + extern int ZLIB_INTERNAL z_verbose; + extern void ZLIB_INTERNAL z_error OF((char *m)); +# define Assert(cond,msg) {if(!(cond)) z_error(msg);} +# define Trace(x) {if (z_verbose>=0) fprintf x ;} +# define Tracev(x) {if (z_verbose>0) fprintf x ;} +# define Tracevv(x) {if (z_verbose>1) fprintf x ;} +# define Tracec(c,x) {if (z_verbose>0 && (c)) fprintf x ;} +# define Tracecv(c,x) {if (z_verbose>1 && (c)) fprintf x ;} +#else +# define Assert(cond,msg) +# define Trace(x) +# define Tracev(x) +# define Tracevv(x) +# define Tracec(c,x) +# define Tracecv(c,x) +#endif + +#ifndef Z_SOLO + voidpf ZLIB_INTERNAL zcalloc OF((voidpf opaque, unsigned items, + unsigned size)); + void ZLIB_INTERNAL zcfree OF((voidpf opaque, voidpf ptr)); +#endif + +#define ZALLOC(strm, items, size) \ + (*((strm)->zalloc))((strm)->opaque, (items), (size)) +#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr)) +#define TRY_FREE(s, p) {if (p) ZFREE(s, p);} + +/* Reverse the bytes in a 32-bit value */ +#define ZSWAP32(q) ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \ + (((q) & 0xff00) << 8) + (((q) & 0xff) << 24)) + +#endif /* ZUTIL_H */ diff --git a/tests/scancode/data/rpm/fping-2.4-0.b2.rhfc1.dag.i386.rpm.expected.json 
b/tests/scancode/data/rpm/fping-2.4-0.b2.rhfc1.dag.i386.rpm.expected.json index 1af4296df8d..30c2f68cb21 100644 --- a/tests/scancode/data/rpm/fping-2.4-0.b2.rhfc1.dag.i386.rpm.expected.json +++ b/tests/scancode/data/rpm/fping-2.4-0.b2.rhfc1.dag.i386.rpm.expected.json @@ -1,15 +1,14 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { - "--package": true, - "--license-score": 0, - "--format": "json" + "input": "", + "--json": "", + "--package": true }, "files_count": 1, "files": [ { - "path": "rpm/fping-2.4-0.b2.rhfc1.dag.i386.rpm", - "scan_errors": [], + "path": "fping-2.4-0.b2.rhfc1.dag.i386.rpm", "packages": [ { "type": "RPM", @@ -83,7 +82,8 @@ } ] } - ] + ], + "scan_errors": [] } ] } \ No newline at end of file diff --git a/tests/scancode/data/single/iproute.c b/tests/scancode/data/single/iproute.c new file mode 100644 index 00000000000..5936d16e935 --- /dev/null +++ b/tests/scancode/data/single/iproute.c @@ -0,0 +1,12 @@ +/* +# Copyright (c) 2010 Patrick McHardy All rights reserved. + + * iplink_vlan.c VLAN device support + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + * Authors: Patrick McHardy + */ \ No newline at end of file diff --git a/tests/scancode/data/single/iproute.expected.json b/tests/scancode/data/single/iproute.expected.json new file mode 100644 index 00000000000..2f9a4e33c56 --- /dev/null +++ b/tests/scancode/data/single/iproute.expected.json @@ -0,0 +1,35 @@ +{ + "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", + "scancode_options": { + "input": "", + "--info": true, + "--json": "", + "--strip-root": true + }, + "files_count": 1, + "files": [ + { + "path": "iproute.c", + "type": "file", + "name": "iproute.c", + "base_name": "iproute", + "extension": ".c", + "size": 469, + "sha1": "f0f352c14a8d0b0510cbbeae056542ae7f252151", + "md5": "b8e7112a6e82921687fd1e008e72058f", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": "C", + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": true, + "is_script": false, + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + } + ] +} \ No newline at end of file diff --git a/tests/scancode/data/timing/basic.tgz b/tests/scancode/data/timing/basic.tgz new file mode 100644 index 00000000000..e9a24f937b4 Binary files /dev/null and b/tests/scancode/data/timing/basic.tgz differ diff --git a/tests/scancode/data/unicodepath/unicodepath.expected-linux.json b/tests/scancode/data/unicodepath/unicodepath.expected-linux.json index c61d80baee1..b1b13ee36c3 100644 --- a/tests/scancode/data/unicodepath/unicodepath.expected-linux.json +++ b/tests/scancode/data/unicodepath/unicodepath.expected-linux.json @@ -1,17 +1,17 
@@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--copyright": true, - "--license": true, - "--package": true, "--email": true, - "--url": true, "--info": true, - "--license-score": 0, + "--json": "", + "--license": true, + "--package": true, "--strip-root": true, - "--format": "json" + "--url": true }, - "files_count": 4, + "files_count": 3, "files": [ { "path": "unicodepath", @@ -19,10 +19,9 @@ "name": "unicodepath", "base_name": "unicodepath", "extension": "", - "size": 20, + "size": 0, "sha1": null, "md5": null, - "files_count": 3, "mime_type": null, "file_type": null, "programming_language": null, @@ -32,12 +31,15 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [], "packages": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 3, + "dirs_count": 0, + "size_count": 20, + "scan_errors": [] }, { "path": "unicodepath/Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma.pdf", @@ -48,7 +50,6 @@ "size": 2, "sha1": "71853c6197a6a7f222db0f1978c7cb232b87c5ee", "md5": "e1c06d85ae7b8b032bef47e42e4c08f9", - "files_count": null, "mime_type": "text/plain", "file_type": "ASCII text", "programming_language": null, @@ -58,12 +59,15 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [], "packages": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "unicodepath/\u03e8\u1f40\u2328", @@ -74,7 +78,6 @@ 
"size": 9, "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", "md5": "552e21cd4cd9918678e3c1a0df491bc3", - "files_count": null, "mime_type": "text/plain", "file_type": "ASCII text, with no line terminators", "programming_language": null, @@ -84,12 +87,15 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [], "packages": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "unicodepath/\u03e8\u1f40\u2328a", @@ -100,7 +106,6 @@ "size": 9, "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", "md5": "552e21cd4cd9918678e3c1a0df491bc3", - "files_count": null, "mime_type": "text/plain", "file_type": "ASCII text, with no line terminators", "programming_language": null, @@ -110,12 +115,15 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [], "packages": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] } ] } \ No newline at end of file diff --git a/tests/scancode/data/unicodepath/unicodepath.expected-linux.json--quiet b/tests/scancode/data/unicodepath/unicodepath.expected-linux.json--quiet new file mode 100644 index 00000000000..6bd0f547d37 --- /dev/null +++ b/tests/scancode/data/unicodepath/unicodepath.expected-linux.json--quiet @@ -0,0 +1,130 @@ +{ + "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", + "scancode_options": { + "input": "", + "--copyright": true, + "--email": true, + "--info": true, + "--json": "", + "--license": true, + "--package": true, + "--quiet": true, + "--strip-root": true, + "--url": true + }, + "files_count": 3, + "files": [ + { + "path": "unicodepath", + "type": "directory", + "name": "unicodepath", + "base_name": "unicodepath", + "extension": "", + "size": 0, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 3, + "dirs_count": 0, + "size_count": 20, + "scan_errors": [] + }, + { + "path": "unicodepath/Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma.pdf", + "type": "file", + "name": "Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma.pdf", + "base_name": "Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma", + "extension": ".pdf", + "size": 2, + "sha1": "71853c6197a6a7f222db0f1978c7cb232b87c5ee", + "md5": "e1c06d85ae7b8b032bef47e42e4c08f9", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "unicodepath/\u03e8\u1f40\u2328", + "type": "file", + "name": "\u03e8\u1f40\u2328", + "base_name": "\u03e8\u1f40\u2328", + "extension": "", + "size": 9, + "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", + 
"md5": "552e21cd4cd9918678e3c1a0df491bc3", + "mime_type": "text/plain", + "file_type": "ASCII text, with no line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "unicodepath/\u03e8\u1f40\u2328a", + "type": "file", + "name": "\u03e8\u1f40\u2328a", + "base_name": "\u03e8\u1f40\u2328a", + "extension": "", + "size": 9, + "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", + "md5": "552e21cd4cd9918678e3c1a0df491bc3", + "mime_type": "text/plain", + "file_type": "ASCII text, with no line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + } + ] +} \ No newline at end of file diff --git a/tests/scancode/data/unicodepath/unicodepath.expected-linux.json--verbose b/tests/scancode/data/unicodepath/unicodepath.expected-linux.json--verbose new file mode 100644 index 00000000000..02f1c4763ce --- /dev/null +++ b/tests/scancode/data/unicodepath/unicodepath.expected-linux.json--verbose @@ -0,0 +1,130 @@ +{ + "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", + "scancode_options": { + "input": "", + "--copyright": true, + "--email": true, + "--info": true, + "--json": "", + "--license": true, + "--package": true, + "--strip-root": true, + "--url": true, + "--verbose": true + }, + "files_count": 3, + "files": [ + { + "path": "unicodepath", + "type": "directory", + "name": "unicodepath", + "base_name": "unicodepath", + "extension": "", + "size": 0, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 3, + "dirs_count": 0, + "size_count": 20, + "scan_errors": [] + }, + { + "path": "unicodepath/Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma.pdf", + "type": "file", + "name": "Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma.pdf", + "base_name": "Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma", + "extension": ".pdf", + "size": 2, + "sha1": "71853c6197a6a7f222db0f1978c7cb232b87c5ee", + "md5": "e1c06d85ae7b8b032bef47e42e4c08f9", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "unicodepath/\u03e8\u1f40\u2328", + "type": "file", + "name": "\u03e8\u1f40\u2328", + "base_name": "\u03e8\u1f40\u2328", + "extension": "", + "size": 9, + "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", + 
"md5": "552e21cd4cd9918678e3c1a0df491bc3", + "mime_type": "text/plain", + "file_type": "ASCII text, with no line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "unicodepath/\u03e8\u1f40\u2328a", + "type": "file", + "name": "\u03e8\u1f40\u2328a", + "base_name": "\u03e8\u1f40\u2328a", + "extension": "", + "size": 9, + "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", + "md5": "552e21cd4cd9918678e3c1a0df491bc3", + "mime_type": "text/plain", + "file_type": "ASCII text, with no line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + } + ] +} \ No newline at end of file diff --git a/tests/scancode/data/unicodepath/unicodepath.expected-mac.json b/tests/scancode/data/unicodepath/unicodepath.expected-mac.json index 2148ad88e03..56e70c6d551 100644 --- a/tests/scancode/data/unicodepath/unicodepath.expected-mac.json +++ b/tests/scancode/data/unicodepath/unicodepath.expected-mac.json @@ -1,17 +1,17 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--copyright": true, - "--license": true, - "--package": true, "--email": true, - "--url": true, "--info": true, - "--license-score": 0, + "--json": "", + "--license": true, + "--package": true, "--strip-root": true, - "--format": "json" + "--url": true }, - "files_count": 4, + "files_count": 3, "files": [ { "path": "unicodepath", @@ -19,11 +19,9 @@ "name": "unicodepath", "base_name": "unicodepath", "extension": "", - "date": null, - "size": 20, + "size": 0, "sha1": null, "md5": null, - "files_count": 3, "mime_type": null, "file_type": null, "programming_language": null, @@ -33,12 +31,15 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [], "packages": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 3, + "dirs_count": 0, + "size_count": 20, + "scan_errors": [] }, { "path": "unicodepath/\u03e8\u03bf\u0313\u2328", @@ -50,7 +51,6 @@ "size": 9, "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", "md5": "552e21cd4cd9918678e3c1a0df491bc3", - "files_count": null, "mime_type": "text/plain", "file_type": "ASCII text, with no line terminators", "programming_language": null, @@ -60,12 +60,15 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [], "packages": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "unicodepath/\u03e8\u03bf\u0313\u2328a", @@ -77,7 +80,6 @@ "size": 9, "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", "md5": "552e21cd4cd9918678e3c1a0df491bc3", - "files_count": null, "mime_type": "text/plain", "file_type": "ASCII text, with no line terminators", "programming_language": null, @@ -87,12 +89,15 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [], 
"packages": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "unicodepath/Izgradnja sufiksnog polja koris\u030ctenjem Ka\u0308rkka\u0308inen \u2013 Sandersovog algoritma.pdf", @@ -104,7 +109,6 @@ "size": 2, "sha1": "71853c6197a6a7f222db0f1978c7cb232b87c5ee", "md5": "e1c06d85ae7b8b032bef47e42e4c08f9", - "files_count": null, "mime_type": "text/plain", "file_type": "ASCII text", "programming_language": null, @@ -114,12 +118,15 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [], "packages": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] } ] } \ No newline at end of file diff --git a/tests/scancode/data/unicodepath/unicodepath.expected-mac.json--quiet b/tests/scancode/data/unicodepath/unicodepath.expected-mac.json--quiet new file mode 100644 index 00000000000..2675c516b33 --- /dev/null +++ b/tests/scancode/data/unicodepath/unicodepath.expected-mac.json--quiet @@ -0,0 +1,133 @@ +{ + "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", + "scancode_options": { + "input": "", + "--copyright": true, + "--email": true, + "--info": true, + "--json": "", + "--license": true, + "--package": true, + "--quiet": true, + "--strip-root": true, + "--url": true + }, + "files_count": 3, + "files": [ + { + "path": "unicodepath", + "type": "directory", + "name": "unicodepath", + "base_name": "unicodepath", + "extension": "", + "size": 0, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 3, + "dirs_count": 0, + "size_count": 20, + "scan_errors": [] + }, + { + "path": "unicodepath/\u03e8\u03bf\u0313\u2328", + "type": "file", + "name": "\u03e8\u03bf\u0313\u2328", + "base_name": "\u03e8\u03bf\u0313\u2328", + "extension": "", + "date": "2016-12-05", + "size": 9, + "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", + "md5": "552e21cd4cd9918678e3c1a0df491bc3", + "mime_type": "text/plain", + "file_type": "ASCII text, with no line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "unicodepath/\u03e8\u03bf\u0313\u2328a", + "type": "file", + "name": "\u03e8\u03bf\u0313\u2328a", + "base_name": "\u03e8\u03bf\u0313\u2328a", + "extension": "", + "date": "2016-12-05", + "size": 9, + "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", + "md5": "552e21cd4cd9918678e3c1a0df491bc3", + "mime_type": "text/plain", + "file_type": "ASCII text, with no line 
terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "unicodepath/Izgradnja sufiksnog polja koris\u030ctenjem Ka\u0308rkka\u0308inen \u2013 Sandersovog algoritma.pdf", + "type": "file", + "name": "Izgradnja sufiksnog polja koris\u030ctenjem Ka\u0308rkka\u0308inen \u2013 Sandersovog algoritma.pdf", + "base_name": "Izgradnja sufiksnog polja koris\u030ctenjem Ka\u0308rkka\u0308inen \u2013 Sandersovog algoritma", + "extension": ".pdf", + "date": "2016-12-05", + "size": 2, + "sha1": "71853c6197a6a7f222db0f1978c7cb232b87c5ee", + "md5": "e1c06d85ae7b8b032bef47e42e4c08f9", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + } + ] +} \ No newline at end of file diff --git a/tests/scancode/data/unicodepath/unicodepath.expected-mac.json--verbose b/tests/scancode/data/unicodepath/unicodepath.expected-mac.json--verbose new file mode 100644 index 00000000000..872d3f42f8a --- /dev/null +++ b/tests/scancode/data/unicodepath/unicodepath.expected-mac.json--verbose @@ -0,0 +1,133 @@ +{ + "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", + "scancode_options": { + "input": "", + "--copyright": true, + "--email": true, + "--info": true, + "--json": "", + "--license": true, + "--package": true, + "--strip-root": true, + "--url": true, + "--verbose": true + }, + "files_count": 3, + "files": [ + { + "path": "unicodepath", + "type": "directory", + "name": "unicodepath", + "base_name": "unicodepath", + "extension": "", + "size": 0, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 3, + "dirs_count": 0, + "size_count": 20, + "scan_errors": [] + }, + { + "path": "unicodepath/\u03e8\u03bf\u0313\u2328", + "type": "file", + "name": "\u03e8\u03bf\u0313\u2328", + "base_name": "\u03e8\u03bf\u0313\u2328", + "extension": "", + "date": "2016-12-05", + "size": 9, + "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", + "md5": "552e21cd4cd9918678e3c1a0df491bc3", + "mime_type": "text/plain", + "file_type": "ASCII text, with no line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "unicodepath/\u03e8\u03bf\u0313\u2328a", + "type": "file", + "name": "\u03e8\u03bf\u0313\u2328a", + "base_name": "\u03e8\u03bf\u0313\u2328a", + "extension": "", + "date": "2016-12-05", + "size": 9, + "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", + "md5": "552e21cd4cd9918678e3c1a0df491bc3", + "mime_type": "text/plain", + "file_type": "ASCII text, with no line 
terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "unicodepath/Izgradnja sufiksnog polja koris\u030ctenjem Ka\u0308rkka\u0308inen \u2013 Sandersovog algoritma.pdf", + "type": "file", + "name": "Izgradnja sufiksnog polja koris\u030ctenjem Ka\u0308rkka\u0308inen \u2013 Sandersovog algoritma.pdf", + "base_name": "Izgradnja sufiksnog polja koris\u030ctenjem Ka\u0308rkka\u0308inen \u2013 Sandersovog algoritma", + "extension": ".pdf", + "date": "2016-12-05", + "size": 2, + "sha1": "71853c6197a6a7f222db0f1978c7cb232b87c5ee", + "md5": "e1c06d85ae7b8b032bef47e42e4c08f9", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + } + ] +} \ No newline at end of file diff --git a/tests/scancode/data/unicodepath/unicodepath.expected-win.json b/tests/scancode/data/unicodepath/unicodepath.expected-win.json index 82ba666dcbb..b1b13ee36c3 100644 --- a/tests/scancode/data/unicodepath/unicodepath.expected-win.json +++ b/tests/scancode/data/unicodepath/unicodepath.expected-win.json @@ -1,17 +1,17 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--copyright": true, - "--license": true, - "--package": true, "--email": true, - "--url": true, "--info": true, - "--license-score": 0, + "--json": "", + "--license": true, + "--package": true, "--strip-root": true, - "--format": "json" + "--url": true }, - "files_count": 4, + "files_count": 3, "files": [ { "path": "unicodepath", @@ -19,10 +19,9 @@ "name": "unicodepath", "base_name": "unicodepath", "extension": "", - "size": 20, + "size": 0, "sha1": null, "md5": null, - "files_count": 3, "mime_type": null, "file_type": null, "programming_language": null, @@ -32,12 +31,15 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [], "packages": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 3, + "dirs_count": 0, + "size_count": 20, + "scan_errors": [] }, { "path": "unicodepath/Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma.pdf", @@ -48,7 +50,6 @@ "size": 2, "sha1": "71853c6197a6a7f222db0f1978c7cb232b87c5ee", "md5": "e1c06d85ae7b8b032bef47e42e4c08f9", - "files_count": null, "mime_type": "text/plain", "file_type": "ASCII text", "programming_language": null, @@ -58,12 +59,15 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [], "packages": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "unicodepath/\u03e8\u1f40\u2328", @@ -74,7 +78,6 @@ "size": 9, "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", "md5": "552e21cd4cd9918678e3c1a0df491bc3", - "files_count": null, "mime_type": "text/plain", "file_type": "ASCII text, with no line terminators", "programming_language": null, @@ -84,12 +87,15 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": 
[], "copyrights": [], "packages": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "unicodepath/\u03e8\u1f40\u2328a", @@ -100,7 +106,6 @@ "size": 9, "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", "md5": "552e21cd4cd9918678e3c1a0df491bc3", - "files_count": null, "mime_type": "text/plain", "file_type": "ASCII text, with no line terminators", "programming_language": null, @@ -110,12 +115,15 @@ "is_media": false, "is_source": false, "is_script": false, - "scan_errors": [], "licenses": [], "copyrights": [], "packages": [], "emails": [], - "urls": [] + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] } ] -} +} \ No newline at end of file diff --git a/tests/scancode/data/unicodepath/unicodepath.expected-win.json--quiet b/tests/scancode/data/unicodepath/unicodepath.expected-win.json--quiet new file mode 100644 index 00000000000..6bd0f547d37 --- /dev/null +++ b/tests/scancode/data/unicodepath/unicodepath.expected-win.json--quiet @@ -0,0 +1,130 @@ +{ + "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", + "scancode_options": { + "input": "", + "--copyright": true, + "--email": true, + "--info": true, + "--json": "", + "--license": true, + "--package": true, + "--quiet": true, + "--strip-root": true, + "--url": true + }, + "files_count": 3, + "files": [ + { + "path": "unicodepath", + "type": "directory", + "name": "unicodepath", + "base_name": "unicodepath", + "extension": "", + "size": 0, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 3, + "dirs_count": 0, + "size_count": 20, + "scan_errors": [] + }, + { + "path": "unicodepath/Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma.pdf", + "type": "file", + "name": "Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma.pdf", + "base_name": "Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma", + "extension": ".pdf", + "size": 2, + "sha1": "71853c6197a6a7f222db0f1978c7cb232b87c5ee", + "md5": "e1c06d85ae7b8b032bef47e42e4c08f9", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "unicodepath/\u03e8\u1f40\u2328", + "type": "file", + "name": "\u03e8\u1f40\u2328", + "base_name": "\u03e8\u1f40\u2328", + "extension": "", + "size": 9, + "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", + 
"md5": "552e21cd4cd9918678e3c1a0df491bc3", + "mime_type": "text/plain", + "file_type": "ASCII text, with no line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "unicodepath/\u03e8\u1f40\u2328a", + "type": "file", + "name": "\u03e8\u1f40\u2328a", + "base_name": "\u03e8\u1f40\u2328a", + "extension": "", + "size": 9, + "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", + "md5": "552e21cd4cd9918678e3c1a0df491bc3", + "mime_type": "text/plain", + "file_type": "ASCII text, with no line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + } + ] +} \ No newline at end of file diff --git a/tests/scancode/data/unicodepath/unicodepath.expected-win.json--verbose b/tests/scancode/data/unicodepath/unicodepath.expected-win.json--verbose new file mode 100644 index 00000000000..02f1c4763ce --- /dev/null +++ b/tests/scancode/data/unicodepath/unicodepath.expected-win.json--verbose @@ -0,0 +1,130 @@ +{ + "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", + "scancode_options": { + "input": "", + "--copyright": true, + "--email": true, + "--info": true, + "--json": "", + "--license": true, + "--package": true, + "--strip-root": true, + "--url": true, + "--verbose": true + }, + "files_count": 3, + "files": [ + { + "path": "unicodepath", + "type": "directory", + "name": "unicodepath", + "base_name": "unicodepath", + "extension": "", + "size": 0, + "sha1": null, + "md5": null, + "mime_type": null, + "file_type": null, + "programming_language": null, + "is_binary": false, + "is_text": false, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 3, + "dirs_count": 0, + "size_count": 20, + "scan_errors": [] + }, + { + "path": "unicodepath/Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma.pdf", + "type": "file", + "name": "Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma.pdf", + "base_name": "Izgradnja sufiksnog polja kori\u0161tenjem K\u00e4rkk\u00e4inen \u2013 Sandersovog algoritma", + "extension": ".pdf", + "size": 2, + "sha1": "71853c6197a6a7f222db0f1978c7cb232b87c5ee", + "md5": "e1c06d85ae7b8b032bef47e42e4c08f9", + "mime_type": "text/plain", + "file_type": "ASCII text", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "unicodepath/\u03e8\u1f40\u2328", + "type": "file", + "name": "\u03e8\u1f40\u2328", + "base_name": "\u03e8\u1f40\u2328", + "extension": "", + "size": 9, + "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", + 
"md5": "552e21cd4cd9918678e3c1a0df491bc3", + "mime_type": "text/plain", + "file_type": "ASCII text, with no line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + }, + { + "path": "unicodepath/\u03e8\u1f40\u2328a", + "type": "file", + "name": "\u03e8\u1f40\u2328a", + "base_name": "\u03e8\u1f40\u2328a", + "extension": "", + "size": 9, + "sha1": "37aa63c77398d954473262e1a0057c1e632eda77", + "md5": "552e21cd4cd9918678e3c1a0df491bc3", + "mime_type": "text/plain", + "file_type": "ASCII text, with no line terminators", + "programming_language": null, + "is_binary": false, + "is_text": true, + "is_archive": false, + "is_media": false, + "is_source": false, + "is_script": false, + "licenses": [], + "copyrights": [], + "packages": [], + "emails": [], + "urls": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] + } + ] +} \ No newline at end of file diff --git a/tests/scancode/data/weird_file_name/expected-linux.json b/tests/scancode/data/weird_file_name/expected-linux.json index 25225a75c16..a799c2d6229 100644 --- a/tests/scancode/data/weird_file_name/expected-linux.json +++ b/tests/scancode/data/weird_file_name/expected-linux.json @@ -1,11 +1,11 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. 
and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--copyright": true, "--info": true, - "--license-score": 0, - "--strip-root": true, - "--format": "json" + "--json": "", + "--strip-root": true }, "files_count": 5, "files": [ @@ -15,11 +15,10 @@ "name": "some 'file", "base_name": "some 'file", "extension": "", - "date": "2016-12-21", "size": 20, + "date": "2016-12-21", "sha1": "715037088f2582f3fbb7e9492f819987f713a332", "md5": "62c4cdf80d860c09f215ffff0a9ed020", - "files_count": null, "mime_type": "text/x-shellscript", "file_type": "POSIX shell script, ASCII text executable", "programming_language": "Bash", @@ -29,8 +28,11 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "some /file", @@ -38,11 +40,10 @@ "name": "some \\file", "base_name": "some \\file", "extension": "", - "date": "2016-12-21", "size": 21, + "date": "2016-12-21", "sha1": "73e029b07257966106d79d35271bf400e3543cea", "md5": "e99c06d03836700154f01778ac782d50", - "files_count": null, "mime_type": "text/x-shellscript", "file_type": "POSIX shell script, ASCII text executable", "programming_language": "Bash", @@ -52,8 +53,11 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "some file", @@ -61,11 +65,10 @@ "name": "some file", "base_name": "some file", "extension": "", - "date": "2016-12-21", "size": 38, + "date": "2016-12-21", "sha1": "5fbba80b758b93a311369979d8a68f22c4817d37", "md5": "41ac81497162f2ff48a0442847238ad7", - "files_count": null, "mime_type": "application/javascript", "file_type": "Node.js script, ASCII text executable", "programming_language": null, @@ -75,8 +78,11 @@ "is_media": false, "is_source": 
true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "some\"file", @@ -84,11 +90,10 @@ "name": "some\"file", "base_name": "some\"file", "extension": "", - "date": "2016-12-21", "size": 39, + "date": "2016-12-21", "sha1": "b2016984d073f405f9788fbf6ae270b452ab73b0", "md5": "9153a386e70bd1713fef91121fb9cbbf", - "files_count": null, "mime_type": "application/javascript", "file_type": "Node.js script, ASCII text executable", "programming_language": null, @@ -98,8 +103,11 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "some/\"file", @@ -107,11 +115,10 @@ "name": "some\\\"file", "base_name": "some\\\"file", "extension": "", - "date": "2016-12-21", "size": 21, + "date": "2016-12-21", "sha1": "73e029b07257966106d79d35271bf400e3543cea", "md5": "e99c06d03836700154f01778ac782d50", - "files_count": null, "mime_type": "text/x-shellscript", "file_type": "POSIX shell script, ASCII text executable", "programming_language": "Bash", @@ -121,8 +128,11 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] } ] } \ No newline at end of file diff --git a/tests/scancode/data/weird_file_name/expected-mac.json b/tests/scancode/data/weird_file_name/expected-mac.json index ee28dbb6552..df7d6f029df 100644 --- a/tests/scancode/data/weird_file_name/expected-mac.json +++ b/tests/scancode/data/weird_file_name/expected-mac.json @@ -1,11 +1,11 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. 
Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--copyright": true, "--info": true, - "--license-score": 0, - "--strip-root": true, - "--format": "json" + "--json": "", + "--strip-root": true }, "files_count": 5, "files": [ @@ -15,11 +15,10 @@ "name": "some 'file", "base_name": "some 'file", "extension": "", - "date": "2016-12-21", "size": 20, + "date": "2016-12-21", "sha1": "715037088f2582f3fbb7e9492f819987f713a332", "md5": "62c4cdf80d860c09f215ffff0a9ed020", - "files_count": null, "mime_type": "text/x-shellscript", "file_type": "POSIX shell script, ASCII text executable", "programming_language": "Bash", @@ -29,8 +28,11 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "some /file", @@ -38,11 +40,10 @@ "name": "some \\file", "base_name": "some \\file", "extension": "", - "date": "2016-12-21", "size": 21, + "date": "2016-12-21", "sha1": "73e029b07257966106d79d35271bf400e3543cea", "md5": "e99c06d03836700154f01778ac782d50", - "files_count": null, "mime_type": "text/x-shellscript", "file_type": "POSIX shell script, ASCII text executable", "programming_language": "Bash", @@ -52,8 +53,11 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "some file", @@ -61,11 +65,10 @@ "name": "some file", "base_name": "some file", "extension": "", - "date": "2016-12-21", "size": 38, + "date": "2016-12-21", "sha1": "5fbba80b758b93a311369979d8a68f22c4817d37", "md5": "41ac81497162f2ff48a0442847238ad7", - "files_count": null, "mime_type": "text/plain", "file_type": "a /usr/bin/env node 
script, ASCII text executable", "programming_language": null, @@ -75,8 +78,11 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "some\"file", @@ -84,11 +90,10 @@ "name": "some\"file", "base_name": "some\"file", "extension": "", - "date": "2016-12-21", "size": 39, + "date": "2016-12-21", "sha1": "b2016984d073f405f9788fbf6ae270b452ab73b0", "md5": "9153a386e70bd1713fef91121fb9cbbf", - "files_count": null, "mime_type": "text/plain", "file_type": "a /usr/bin/env node script, ASCII text executable", "programming_language": null, @@ -98,8 +103,11 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "some/\"file", @@ -107,11 +115,10 @@ "name": "some\\\"file", "base_name": "some\\\"file", "extension": "", - "date": "2016-12-21", "size": 21, + "date": "2016-12-21", "sha1": "73e029b07257966106d79d35271bf400e3543cea", "md5": "e99c06d03836700154f01778ac782d50", - "files_count": null, "mime_type": "text/x-shellscript", "file_type": "POSIX shell script, ASCII text executable", "programming_language": "Bash", @@ -121,8 +128,11 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] } ] -} +} \ No newline at end of file diff --git a/tests/scancode/data/weird_file_name/expected-win.json b/tests/scancode/data/weird_file_name/expected-win.json index 4c28553bf3d..0de4ba42fcf 100644 --- a/tests/scancode/data/weird_file_name/expected-win.json +++ b/tests/scancode/data/weird_file_name/expected-win.json @@ -1,11 +1,11 @@ { "scancode_notice": "Generated with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\nOR CONDITIONS OF 
ANY KIND, either express or implied. No content created from\nScanCode should be considered or used as legal advice. Consult an Attorney\nfor any legal advice.\nScanCode is a free software code scanning tool from nexB Inc. and others.\nVisit https://github.com/nexB/scancode-toolkit/ for support and download.", "scancode_options": { + "input": "", "--copyright": true, "--info": true, - "--license-score": 0, - "--strip-root": true, - "--format": "json" + "--json": "", + "--strip-root": true }, "files_count": 5, "files": [ @@ -14,11 +14,10 @@ "type": "file", "name": "some%22file", "extension": "", - "date": "2016-12-21", "size": 39, + "date": "2016-12-21", "sha1": "b2016984d073f405f9788fbf6ae270b452ab73b0", "md5": "9153a386e70bd1713fef91121fb9cbbf", - "files_count": null, "mime_type": "application/javascript", "file_type": "Node.js script, ASCII text executable", "programming_language": null, @@ -28,19 +27,21 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "some+%27file", "type": "file", "name": "some+%27file", "extension": "", - "date": "2016-12-21", "size": 20, + "date": "2016-12-21", "sha1": "715037088f2582f3fbb7e9492f819987f713a332", "md5": "62c4cdf80d860c09f215ffff0a9ed020", - "files_count": null, "mime_type": "text/x-shellscript", "file_type": "POSIX shell script, ASCII text executable", "programming_language": "Bash", @@ -50,19 +51,21 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "some+/file", "type": "file", "name": "some+%5Cfile", "extension": "", - "date": "2016-12-21", "size": 21, + "date": "2016-12-21", "sha1": "73e029b07257966106d79d35271bf400e3543cea", "md5": "e99c06d03836700154f01778ac782d50", - "files_count": null, "mime_type": 
"text/x-shellscript", "file_type": "POSIX shell script, ASCII text executable", "programming_language": "Bash", @@ -72,19 +75,21 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "some+file", "type": "file", "name": "some+file", "extension": "", - "date": "2016-12-21", "size": 38, + "date": "2016-12-21", "sha1": "5fbba80b758b93a311369979d8a68f22c4817d37", "md5": "41ac81497162f2ff48a0442847238ad7", - "files_count": null, "mime_type": "application/javascript", "file_type": "Node.js script, ASCII text executable", "programming_language": null, @@ -94,19 +99,21 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] }, { "path": "some/%22file", "type": "file", "name": "some%5C%22file", "extension": "", - "date": "2016-12-21", "size": 21, + "date": "2016-12-21", "sha1": "73e029b07257966106d79d35271bf400e3543cea", "md5": "e99c06d03836700154f01778ac782d50", - "files_count": null, "mime_type": "text/x-shellscript", "file_type": "POSIX shell script, ASCII text executable", "programming_language": "Bash", @@ -116,8 +123,11 @@ "is_media": false, "is_source": true, "is_script": true, - "scan_errors": [], - "copyrights": [] + "copyrights": [], + "files_count": 0, + "dirs_count": 0, + "size_count": 0, + "scan_errors": [] } ] -} +} \ No newline at end of file diff --git a/tests/scancode/test_api.py b/tests/scancode/test_api.py index 1e743154bbf..ff93cbe7478 100644 --- a/tests/scancode/test_api.py +++ b/tests/scancode/test_api.py @@ -37,9 +37,9 @@ class TestAPI(FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') - def test_get_package_infos_can_pickle(self): + def test_get_package_info_can_pickle(self): test_file = 
self.get_test_loc('api/package/package.json') - package = api.get_package_infos(test_file) + package = api.get_package_info(test_file) import pickle import cPickle @@ -51,36 +51,42 @@ def test_get_package_infos_can_pickle(self): _pickled = pickle.dumps(package) _cpickled = cPickle.dumps(package) - def test_get_file_infos_flag_are_not_null(self): + def test_get_file_info_include_size(self): # note the test file is EMPTY on purpose to generate all False is_* flags - test_dir = self.get_test_loc('api/info') - info = api.get_file_infos(test_dir) - is_key_values = [v for k, v in info.items() if k.startswith('is_')] - assert all(v is not None for v in is_key_values) + test_dir = self.get_test_loc('api/info/test.txt') + info = api.get_file_info(test_dir) + expected = [ + (u'size', 0), + (u'sha1', None), + (u'md5', None), + (u'mime_type', u'inode/x-empty'), + (u'file_type', u'empty'), + (u'programming_language', None), + (u'is_binary', False), + (u'is_text', True), + (u'is_archive', False), + (u'is_media', False), + (u'is_source', False), + (u'is_script', False) + ] + assert expected == [(k, v) for k, v in info.items() if k != 'date'] - def test_get_package_infos_works_for_maven_dot_pom(self): + def test_get_package_info_works_for_maven_dot_pom(self): test_file = self.get_test_loc('api/package/p6spy-1.3.pom') - packages = api.get_package_infos(test_file) + packages = api.get_package_info(test_file) assert len(packages) == 1 - package = packages[0] - assert package['version'] == '1.3' + assert packages['packages'][0]['version'] == '1.3' - def test_get_package_infos_works_for_maven_pom_dot_xml(self): + def test_get_package_info_works_for_maven_pom_dot_xml(self): test_file = self.get_test_loc('api/package/pom.xml') - packages = api.get_package_infos(test_file) + packages = api.get_package_info(test_file) assert len(packages) == 1 - package = packages[0] - assert package['version'] == '1.3' - - def test_get_file_infos_include_base_name(self): - test_dir = 
self.get_test_loc('api/info/test.txt') - info = api.get_file_infos(test_dir) - assert 'test' == info['base_name'] + assert packages['packages'][0]['version'] == '1.3' def test_get_copyrights_include_copyrights_and_authors(self): test_file = self.get_test_loc('api/copyright/iproute.c') - cops = list(api.get_copyrights(test_file)) - expected = [ + cops = api.get_copyrights(test_file) + expected = dict(copyrights=[ OrderedDict([ (u'statements', [u'Copyright (c) 2010 Patrick McHardy']), (u'holders', [u'Patrick McHardy']), @@ -91,5 +97,5 @@ def test_get_copyrights_include_copyrights_and_authors(self): (u'holders', []), (u'authors', [u'Patrick McHardy ']), (u'start_line', 11), (u'end_line', 11)]) - ] + ]) assert expected == cops diff --git a/tests/scancode/test_cli.py b/tests/scancode/test_cli.py index 8a83f60f7e1..2ca3e213161 100644 --- a/tests/scancode/test_cli.py +++ b/tests/scancode/test_cli.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. 
@@ -30,44 +30,39 @@ from collections import OrderedDict import json import os -from unittest import TestCase from unittest.case import skipIf -# from click.testing import CliRunner +import click +click.disable_unicode_literals_warning = True from commoncode import fileutils -from commoncode.fileutils import path_to_bytes +from commoncode.fileutils import fsencode from commoncode.testcase import FileDrivenTesting from commoncode.system import on_linux from commoncode.system import on_mac from commoncode.system import on_windows -from scancode.cli_test_utils import _load_json_result from scancode.cli_test_utils import check_json_scan +from scancode.cli_test_utils import load_json_result from scancode.cli_test_utils import run_scan_click from scancode.cli_test_utils import run_scan_plain -from scancode import cli - - test_env = FileDrivenTesting() test_env.test_data_dir = os.path.join(os.path.dirname(__file__), 'data') - """ -Some of these CLI tests are dependent on py.test monkeypatch to ensure -we are testing the actual command outputs as if using a real command -line call. Some are using a subprocess to the same effect. +Most of these tests spawn new process as if launched from the command line. Some +of these CLI tests are dependent on py.test monkeypatch to ensure we are testing +the actual command outputs as if using a real command line call. Some are using +a plain subprocess to the same effect. 
""" def test_package_option_detects_packages(monkeypatch): test_dir = test_env.get_test_loc('package', copy=True) result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--package', test_dir, result_file], monkeypatch) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + args = ['--package', test_dir, '--json', result_file] + run_scan_click(args, monkeypatch=monkeypatch) assert os.path.exists(result_file) result = open(result_file).read() assert 'package.json' in result @@ -76,10 +71,8 @@ def test_package_option_detects_packages(monkeypatch): def test_verbose_option_with_packages(monkeypatch): test_dir = test_env.get_test_loc('package', copy=True) result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--package', '--verbose', test_dir, result_file], monkeypatch) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + args = ['--package', '--verbose', test_dir, '--json', result_file] + result = run_scan_click(args, monkeypatch=monkeypatch) assert 'package.json' in result.output assert os.path.exists(result_file) result = open(result_file).read() @@ -89,10 +82,7 @@ def test_verbose_option_with_packages(monkeypatch): def test_copyright_option_detects_copyrights(): test_dir = test_env.get_test_loc('copyright', copy=True) result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--copyright', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + run_scan_click(['--copyright', test_dir, '--json', result_file]) assert os.path.exists(result_file) assert len(open(result_file).read()) > 10 @@ -100,10 +90,8 @@ def test_copyright_option_detects_copyrights(): def test_verbose_option_with_copyrights(monkeypatch): test_dir = test_env.get_test_loc('copyright', copy=True) result_file = test_env.get_temp_file('json') - result = run_scan_click(['--copyright', '--verbose', test_dir, result_file], monkeypatch) - - assert result.exit_code == 0 - 
assert 'Scanning done' in result.output + args = ['--copyright', '--verbose', test_dir, '--json', result_file] + result = run_scan_click(args, monkeypatch=monkeypatch) assert os.path.exists(result_file) assert 'copyright_acme_c-c.c' in result.output assert len(open(result_file).read()) > 10 @@ -112,170 +100,51 @@ def test_verbose_option_with_copyrights(monkeypatch): def test_license_option_detects_licenses(): test_dir = test_env.get_test_loc('license', copy=True) result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--license', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + args = ['--license', test_dir, '--json', result_file, '--verbose'] + run_scan_click(args) assert os.path.exists(result_file) assert len(open(result_file).read()) > 10 -def test_scancode_skip_vcs_files_and_dirs_by_default(): - test_dir = test_env.extract_test_tar('ignore/vcs.tgz') - result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--copyright', '--strip-root', test_dir, result_file]) - assert result.exit_code == 0 - scan_result = _load_json_result(result_file) - # a single test.tst file and its directory that is not a VCS file should be listed - assert 2 == scan_result['files_count'] - scan_locs = [x['path'] for x in scan_result['files']] - assert [u'vcs', u'vcs/test.txt'] == scan_locs - - -def test_scancode_skip_single_file(monkeypatch): - test_dir = test_env.extract_test_tar('ignore/user.tgz') - result_file = test_env.get_temp_file('json') - - result = run_scan_click( - ['--copyright', '--strip-root', '--ignore', 'sample.doc', test_dir, result_file], - monkeypatch - ) - assert result.exit_code == 0 - scan_result = _load_json_result(result_file) - assert 6 == scan_result['files_count'] - scan_locs = [x['path'] for x in scan_result['files']] - expected = [ - 'user', - 'user/ignore.doc', - 'user/src', - 'user/src/ignore.doc', - 'user/src/test', - 'user/src/test/sample.txt' - ] - assert expected == 
scan_locs - - -def test_scancode_skip_multiple_files(monkeypatch): - test_dir = test_env.extract_test_tar('ignore/user.tgz') - result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--copyright', '--strip-root', '--ignore', 'ignore.doc', test_dir, result_file], monkeypatch) - assert result.exit_code == 0 - scan_result = _load_json_result(result_file) - assert 5 == scan_result['files_count'] - scan_locs = [x['path'] for x in scan_result['files']] - assert [u'user', u'user/src', u'user/src/test', u'user/src/test/sample.doc', u'user/src/test/sample.txt'] == scan_locs - - -def test_scancode_skip_glob_files(monkeypatch): - test_dir = test_env.extract_test_tar('ignore/user.tgz') - result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--copyright', '--strip-root', '--ignore', '*.doc', test_dir, result_file], monkeypatch) - assert result.exit_code == 0 - scan_result = _load_json_result(result_file) - assert 4 == scan_result['files_count'] - scan_locs = [x['path'] for x in scan_result['files']] - assert [u'user', u'user/src', u'user/src/test', u'user/src/test/sample.txt'] == scan_locs - - -def test_scancode_skip_glob_path(monkeypatch): - test_dir = test_env.extract_test_tar('ignore/user.tgz') - result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--copyright', '--strip-root', '--ignore', '*/src/test/*', test_dir, result_file], monkeypatch) - assert result.exit_code == 0 - scan_result = _load_json_result(result_file) - assert 5 == scan_result['files_count'] - scan_locs = [x['path'] for x in scan_result['files']] - assert [u'user', u'user/ignore.doc', u'user/src', u'user/src/ignore.doc', u'user/src/test'] == scan_locs - -def test_scancode_multiple_ignores(monkeypatch): - test_dir = test_env.extract_test_tar('ignore/user.tgz') - result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--copyright', '--strip-root', '--ignore', '*/src/test', '--ignore', '*.doc', test_dir, result_file], monkeypatch) 
- assert result.exit_code == 0 - scan_result = _load_json_result(result_file) - assert 2 == scan_result['files_count'] - scan_locs = [x['path'] for x in scan_result['files']] - assert [u'user', u'user/src'] == scan_locs - - -def test_scan_mark_source_without_info(monkeypatch): - test_dir = test_env.extract_test_tar('mark_source/JGroups.tgz') - result_file = test_env.get_temp_file('json') - expected_file = test_env.get_test_loc('mark_source/without_info.expected.json') - - _result = run_scan_click(['--mark-source', test_dir, result_file], monkeypatch) - check_json_scan(expected_file, result_file, regen=False) - - -def test_scan_mark_source_with_info(monkeypatch): - test_dir = test_env.extract_test_tar('mark_source/JGroups.tgz') - result_file = test_env.get_temp_file('json') - expected_file = test_env.get_test_loc('mark_source/with_info.expected.json') - - _result = run_scan_click(['--info', '--mark-source', test_dir, result_file], monkeypatch) - check_json_scan(expected_file, result_file) - - -def test_scan_only_findings(monkeypatch): - test_dir = test_env.extract_test_tar('info/basic.tgz') - result_file = test_env.get_temp_file('json') - expected_file = test_env.get_test_loc('only_findings/expected.json') - - _result = run_scan_click(['--only-findings', test_dir, result_file], monkeypatch) - check_json_scan(expected_file, result_file) - - def test_usage_and_help_return_a_correct_script_name_on_all_platforms(): result = run_scan_click(['--help']) assert 'Usage: scancode [OPTIONS]' in result.output # this was showing up on Windows assert 'scancode-script.py' not in result.output - result = run_scan_click([]) + result = run_scan_click([], expected_rc=2) assert 'Usage: scancode [OPTIONS]' in result.output # this was showing up on Windows assert 'scancode-script.py' not in result.output - result = run_scan_click(['-xyz']) + result = run_scan_click(['-xyz'], expected_rc=2) # this was showing up on Windows assert 'scancode-script.py' not in result.output -def 
test_scan_info_does_collect_infos(): +def test_scan_info_does_collect_info(): test_dir = test_env.extract_test_tar('info/basic.tgz') result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--info', '--strip-root', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + args = ['--info', '--strip-root', test_dir, '--json', result_file] + run_scan_click(args) check_json_scan(test_env.get_test_loc('info/basic.expected.json'), result_file) -def test_scan_info_does_collect_infos_with_root(): +def test_scan_info_does_collect_info_with_root(): test_dir = test_env.extract_test_tar('info/basic.tgz') result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--info', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + run_scan_click(['--info', test_dir, '--json', result_file]) check_json_scan(test_env.get_test_loc('info/basic.rooted.expected.json'), result_file) def test_scan_info_returns_full_root(): test_dir = test_env.extract_test_tar('info/basic.tgz') result_file = test_env.get_temp_file('json') - result = run_scan_click(['--info', '--full-root', test_dir, result_file]) - - assert result.exit_code == 0 - assert 'Scanning done' in result.output + args = ['--info', '--full-root', test_dir, '--json', result_file] + run_scan_click(args) result_data = json.loads(open(result_file, 'rb').read()) file_paths = [f['path'] for f in result_data['files']] - assert 11 == len(file_paths) + assert 12 == len(file_paths) root = fileutils.as_posixpath(test_dir) assert all(p.startswith(root) for p in file_paths) @@ -283,10 +152,8 @@ def test_scan_info_returns_full_root(): def test_scan_info_returns_correct_full_root_with_single_file(): test_file = test_env.get_test_loc('info/basic.tgz') result_file = test_env.get_temp_file('json') - result = run_scan_click(['--info', '--full-root', test_file, result_file]) - - assert result.exit_code == 0 - assert 'Scanning done' in 
result.output + args = ['--info', '--full-root', test_file, '--json', result_file] + run_scan_click(args) result_data = json.loads(open(result_file, 'rb').read()) files = result_data['files'] # we have a single file @@ -296,98 +163,89 @@ def test_scan_info_returns_correct_full_root_with_single_file(): assert fileutils.as_posixpath(test_file) == scanned_file['path'] +def test_scan_info_returns_does_not_strip_root_with_single_file(): + test_file = test_env.get_test_loc('single/iproute.c') + result_file = test_env.get_temp_file('json') + args = ['--info', '--strip-root', test_file, '--json', result_file] + run_scan_click(args) + check_json_scan(test_env.get_test_loc('single/iproute.expected.json'), result_file, strip_dates=True) + + def test_scan_info_license_copyrights(): test_dir = test_env.extract_test_tar('info/basic.tgz') result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--info', '--license', '--copyright', '--strip-root', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + args = ['--info', '--license', '--copyright', '--strip-root', test_dir, '--json', result_file] + run_scan_click(args) check_json_scan(test_env.get_test_loc('info/all.expected.json'), result_file) def test_scan_license_with_url_template(): - test_dir = test_env.get_test_loc('license_url', copy=True) - - result = run_scan_click(['--license', '--license-url-template', 'https://example.com/urn:{}', test_dir]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output - assert 'https://example.com/urn:apache-1.0' in result.output - assert 'https://example.com/urn:public-domain' in result.output + test_dir = test_env.get_test_loc('plugin_license/license_url', copy=True) + result_file = test_env.get_temp_file('json') + args = ['--license', '--license-url-template', 'https://example.com/urn:{}', + test_dir, '--json-pp', result_file] + run_scan_click(args) + 
check_json_scan(test_env.get_test_loc('plugin_license/license_url.expected.json'), result_file) def test_scan_noinfo_license_copyrights_with_root(): test_dir = test_env.extract_test_tar('info/basic.tgz') result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--email', '--url', '--license', '--copyright', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + args = ['--email', '--url', '--license', '--copyright', test_dir, '--json', result_file] + run_scan_click(args) check_json_scan(test_env.get_test_loc('info/all.rooted.expected.json'), result_file) def test_scan_email_url_info(): test_dir = test_env.extract_test_tar('info/basic.tgz') result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--email', '--url', '--info', '--strip-root', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + args = ['--email', '--url', '--info', '--strip-root', test_dir, '--json', result_file] + run_scan_click(args) check_json_scan(test_env.get_test_loc('info/email_url_info.expected.json'), result_file) def test_scan_should_not_fail_on_faulty_pdf_or_pdfminer_bug_but_instead_report_errors_and_keep_trucking_with_json(): test_file = test_env.get_test_loc('failing/patchelf.pdf') result_file = test_env.get_temp_file('test.json') - - result = run_scan_click([ '--copyright', '--strip-root', test_file, result_file]) - assert result.exit_code == 1 - assert 'Scanning done' in result.output + args = ['--copyright', '--strip-root', test_file, '--json', result_file] + result = run_scan_click(args, expected_rc=1) check_json_scan(test_env.get_test_loc('failing/patchelf.expected.json'), result_file) assert 'Some files failed to scan' in result.output assert 'patchelf.pdf' in result.output -def test_scan_with_errors_and_diag_option_includes_full_traceback(): +def test_scan_with_errors_always_includes_full_traceback(): test_file = test_env.get_test_loc('failing/patchelf.pdf') 
result_file = test_env.get_temp_file('test.json') - - result = run_scan_click([ '--copyright', '--diag', test_file, result_file]) - assert result.exit_code == 1 - assert 'Scanning done' in result.output + args = ['--copyright', test_file, '--json', result_file] + result = run_scan_click(args, expected_rc=1) assert 'Some files failed to scan' in result.output assert 'patchelf.pdf' in result.output - result_json = json.loads(open(result_file).read()) - expected = 'ERROR: copyrights: unpack requires a string argument of length 8' - assert expected == result_json['files'][0]['scan_errors'][0] - assert result_json['files'][0]['scan_errors'][1].startswith('ERROR: copyrights: Traceback (most recent call') + expected = 'error: unpack requires a string argument of length 8' + assert expected in result_json['files'][0]['scan_errors'][-1] + assert result_json['files'][0]['scan_errors'][0].startswith('ERROR: for scanner: copyrights') def test_failing_scan_return_proper_exit_code(): test_file = test_env.get_test_loc('failing/patchelf.pdf') result_file = test_env.get_temp_file('test.json') - - result = run_scan_click([ '--copyright', test_file, result_file]) - assert result.exit_code == 1 + args = ['--copyright', test_file, '--json', result_file] + run_scan_click(args, expected_rc=1) def test_scan_should_not_fail_on_faulty_pdf_or_pdfminer_bug_but_instead_report_errors_and_keep_trucking_with_html(): test_file = test_env.get_test_loc('failing/patchelf.pdf') result_file = test_env.get_temp_file('test.html') - - result = run_scan_click([ '--copyright', '--format', 'html', test_file, result_file]) - assert result.exit_code == 1 - assert 'Scanning done' in result.output + args = ['--copyright', test_file, '--output-html', result_file] + run_scan_click(args, expected_rc=1) def test_scan_should_not_fail_on_faulty_pdf_or_pdfminer_bug_but_instead_report_errors_and_keep_trucking_with_html_app(): test_file = test_env.get_test_loc('failing/patchelf.pdf') result_file = 
test_env.get_temp_file('test.app.html') - - result = run_scan_click([ '--copyright', '--format', 'html-app', test_file, result_file]) - assert result.exit_code == 1 - assert 'Scanning done' in result.output + args = ['--copyright', test_file, '--output-html-app', result_file] + run_scan_click(args, expected_rc=1) def test_scan_works_with_multiple_processes(): @@ -395,29 +253,46 @@ def test_scan_works_with_multiple_processes(): # run the same scan with one or three processes result_file_1 = test_env.get_temp_file('json') - result1 = run_scan_click([ '--copyright', '--processes', '1', '--format', 'json', test_dir, result_file_1]) - assert result1.exit_code == 0 + args = ['--copyright', '--processes', '1', test_dir, '--json', result_file_1] + run_scan_click(args) result_file_3 = test_env.get_temp_file('json') - result3 = run_scan_click([ '--copyright', '--processes', '3', '--format', 'json', test_dir, result_file_3]) - assert result3.exit_code == 0 + args = ['--copyright', '--processes', '3', test_dir, '--json', result_file_3] + run_scan_click(args) res1 = json.loads(open(result_file_1).read()) res3 = json.loads(open(result_file_3).read()) assert sorted(res1['files']) == sorted(res3['files']) -def test_scan_works_with_no_processes_in_single_threaded_mode(): +def test_scan_works_with_no_processes_in_threaded_mode(): test_dir = test_env.get_test_loc('multiprocessing', copy=True) # run the same scan with zero or one process result_file_0 = test_env.get_temp_file('json') - result0 = run_scan_click([ '--copyright', '--processes', '0', '--format', 'json', test_dir, result_file_0]) - assert result0.exit_code == 0 - assert 'Disabling multi-processing and multi-threading...' 
in result0.output + args = ['--copyright', '--processes', '0', test_dir, '--json', result_file_0] + result0 = run_scan_click(args) + assert 'Disabling multi-processing' in result0.output result_file_1 = test_env.get_temp_file('json') - result1 = run_scan_click([ '--copyright', '--processes', '1', '--format', 'json', test_dir, result_file_1]) - assert result1.exit_code == 0 + args = ['--copyright', '--processes', '1', test_dir, '--json', result_file_1] + run_scan_click(args) + res0 = json.loads(open(result_file_0).read()) + res1 = json.loads(open(result_file_1).read()) + assert sorted(res0['files']) == sorted(res1['files']) + + +def test_scan_works_with_no_processes_non_threaded_mode(): + test_dir = test_env.get_test_loc('multiprocessing', copy=True) + + # run the same scan with zero or one process + result_file_0 = test_env.get_temp_file('json') + args = ['--copyright', '--processes', '-1', test_dir, '--json', result_file_0] + result0 = run_scan_click(args) + assert 'Disabling multi-processing and multi-threading' in result0.output + + result_file_1 = test_env.get_temp_file('json') + args = ['--copyright', '--processes', '1', test_dir, '--json', result_file_1] + run_scan_click(args) res0 = json.loads(open(result_file_0).read()) res1 = json.loads(open(result_file_1).read()) assert sorted(res0['files']) == sorted(res1['files']) @@ -430,46 +305,45 @@ def test_scan_works_with_multiple_processes_and_timeouts(): # add some random bytes to the test files to ensure that the license results will # not be cached import time, random - for tf in fileutils.file_iter(test_dir): + for tf in fileutils.resource_iter(test_dir, with_dirs=False): with open(tf, 'ab') as tfh: - tfh.write('(c)' + str(time.time()) + repr([random.randint(0, 10 ** 6) for _ in range(10000)]) + '(c)') + tfh.write( + '(c)' + str(time.time()) + repr([random.randint(0, 10 ** 6) for _ in range(10000)]) + '(c)') result_file = test_env.get_temp_file('json') - result = run_scan_click( - [ '--copyright', 
'--processes', '2', - '--timeout', '0.000001', - '--strip-root', '--format', 'json', test_dir, result_file], - ) + args = ['--copyright', '--processes', '2', '--timeout', '0.000001', + '--strip-root', test_dir, '--json', result_file] + run_scan_click(args, expected_rc=1) - assert result.exit_code == 1 - assert 'Scanning done' in result.output expected = [ - [(u'path', u'test1.txt'), (u'scan_errors', [u'ERROR: Processing interrupted: timeout after 0 seconds.'])], - [(u'path', u'test2.txt'), (u'scan_errors', [u'ERROR: Processing interrupted: timeout after 0 seconds.'])], - [(u'path', u'test3.txt'), (u'scan_errors', [u'ERROR: Processing interrupted: timeout after 0 seconds.'])], + [(u'path', u'test1.txt'), + (u'copyrights', []), + (u'scan_errors', [u'ERROR: for scanner: copyrights:\nERROR: Processing interrupted: timeout after 0 seconds.'])], + [(u'path', u'test2.txt'), + (u'copyrights', []), + (u'scan_errors', [u'ERROR: for scanner: copyrights:\nERROR: Processing interrupted: timeout after 0 seconds.'])], + [(u'path', u'test3.txt'), + (u'copyrights', []), + (u'scan_errors', [u'ERROR: for scanner: copyrights:\nERROR: Processing interrupted: timeout after 0 seconds.'])] ] result_json = json.loads(open(result_file).read(), object_pairs_hook=OrderedDict) assert sorted(expected) == sorted(x.items() for x in result_json['files']) -def test_scan_does_not_fail_when_scanning_unicode_files_and_paths(): +def check_scan_does_not_fail_when_scanning_unicode_files_and_paths(verbosity): test_dir = test_env.get_test_loc(u'unicodepath/uc') result_file = test_env.get_temp_file('json') if on_linux: - test_dir = path_to_bytes(test_dir) - result_file = path_to_bytes(result_file) + test_dir = fsencode(test_dir) + result_file = fsencode(result_file) - args = ['--info', '--license', '--copyright', - '--package', '--email', '--url', '--strip-root', - test_dir , result_file] - result = run_scan_click(args) - if result.exit_code != 0: - raise Exception(result.output, args) - assert 
result.exit_code == 0 - assert 'Scanning done' in result.output + args = ['--info', '--license', '--copyright', '--package', + '--email', '--url', '--strip-root', test_dir , '--json', + result_file] + ([verbosity] if verbosity else []) + results = run_scan_click(args) # the paths for each OS end up encoded differently. # See for details: @@ -477,13 +351,29 @@ def test_scan_does_not_fail_when_scanning_unicode_files_and_paths(): # https://github.com/nexB/scancode-toolkit/issues/688 if on_linux: - expected = 'unicodepath/unicodepath.expected-linux.json' + expected = 'unicodepath/unicodepath.expected-linux.json' + verbosity elif on_mac: - expected = 'unicodepath/unicodepath.expected-mac.json' + expected = 'unicodepath/unicodepath.expected-mac.json' + verbosity elif on_windows: - expected = 'unicodepath/unicodepath.expected-win.json' + expected = 'unicodepath/unicodepath.expected-win.json' + verbosity + + check_json_scan(test_env.get_test_loc(expected), result_file, strip_dates=True) + return results + - check_json_scan(test_env.get_test_loc(expected), result_file, strip_dates=True, regen=False) +def test_scan_does_not_fail_when_scanning_unicode_files_and_paths_default(): + result = check_scan_does_not_fail_when_scanning_unicode_files_and_paths('') + assert result.output + + +def test_scan_does_not_fail_when_scanning_unicode_files_and_paths_verbose(): + result = check_scan_does_not_fail_when_scanning_unicode_files_and_paths('--verbose') + assert result.output + + +def test_scan_does_not_fail_when_scanning_unicode_files_and_paths_quiet(): + result = check_scan_does_not_fail_when_scanning_unicode_files_and_paths('--quiet') + assert not result.output @skipIf(on_windows, 'Python tar cannot extract these files on Windows') @@ -496,57 +386,47 @@ def test_scan_does_not_fail_when_scanning_unicode_test_files_from_express(): # rename the problematic files. 
test_dir = test_env.extract_test_tar_raw(b'unicode_fixtures.tar.gz') - test_dir = path_to_bytes(test_dir) + test_dir = fsencode(test_dir) - args = ['-n0', '--info', '--license', '--copyright', - '--package', '--email', '--url', '--strip-root', - test_dir] - result = run_scan_click(args, catch_exceptions=False) - if result.exit_code != 0: - raise Exception(result.output, args) - assert 'Scanning done' in result.output + args = ['-n0', '--info', '--license', '--copyright', '--package', '--email', + '--url', '--strip-root', '--json', '-', test_dir] + run_scan_click(args) def test_scan_can_handle_licenses_with_unicode_metadata(): test_dir = test_env.get_test_loc('license_with_unicode_meta') result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--license', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + run_scan_click(['--license', test_dir, '--json', result_file]) def test_scan_quiet_to_file_does_not_echo_anything(): test_dir = test_env.extract_test_tar('info/basic.tgz') - result1_file = test_env.get_temp_file('json') - - result1 = run_scan_click(['--quiet', '--info', test_dir, result1_file]) - assert result1.exit_code == 0 - assert not result1.output + result_file = test_env.get_temp_file('json') + args = ['--quiet', '--info', test_dir, '--json', result_file] + result = run_scan_click(args) + assert not result.output def test_scan_quiet_to_stdout_only_echoes_json_results(): test_dir = test_env.extract_test_tar('info/basic.tgz') - result1_file = test_env.get_temp_file('json') - - result1 = run_scan_click(['--quiet', '--info', test_dir, result1_file]) - assert result1.exit_code == 0 - assert not result1.output + result_file = test_env.get_temp_file('json') + args = ['--quiet', '--info', test_dir, '--json-pp', result_file] + result_to_file = run_scan_click(args) + assert not result_to_file.output # also test with an output of JSON to stdout - result2 = run_scan_click(['--quiet', '--info', test_dir]) 
- assert result2.exit_code == 0 + args = ['--quiet', '--info', test_dir, '--json-pp', '-'] + result_to_stdout = run_scan_click(args) # outputs to file or stdout should be identical - result1_output = open(result1_file).read() - assert result1_output == result2.output + result1_output = open(result_file).read() + assert result1_output == result_to_stdout.output -def test_scan_verbose_does_not_echo_ansi_escapes(): +def test_scan_verbose_to_stdout_does_not_echo_ansi_escapes(): test_dir = test_env.extract_test_tar('info/basic.tgz') - - result = run_scan_click(['--verbose', '--info', test_dir]) - assert result.exit_code == 0 + args = ['--verbose', '--info', test_dir, '--json', '-'] + result = run_scan_click(args) assert '[?' not in result.output @@ -554,9 +434,8 @@ def test_scan_can_return_matched_license_text(): test_file = test_env.get_test_loc('license_text/test.txt') expected_file = test_env.get_test_loc('license_text/test.expected') result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--license', '--license-text', '--strip-root', test_file, result_file]) - assert result.exit_code == 0 + args = ['--license', '--license-text', '--strip-root', test_file, '--json', result_file] + run_scan_click(args) check_json_scan(test_env.get_test_loc(expected_file), result_file) @@ -564,11 +443,9 @@ def test_scan_can_return_matched_license_text(): def test_scan_can_handle_weird_file_names(): test_dir = test_env.extract_test_tar('weird_file_name/weird_file_name.tar.gz') result_file = test_env.get_temp_file('json') - - result = run_scan_click(['-c', '-i', '--strip-root', test_dir, result_file]) - assert result.exit_code == 0 + args = ['-c', '-i', '--strip-root', test_dir, '--json', result_file] + result = run_scan_click(args) assert "KeyError: 'sha1'" not in result.output - assert 'Scanning done' in result.output # Some info vary on each OS # See https://github.com/nexB/scancode-toolkit/issues/438 for details @@ -586,12 +463,11 @@ def 
test_scan_can_handle_non_utf8_file_names_on_posix(): result_file = test_env.get_temp_file('json') if on_linux: - test_dir = path_to_bytes(test_dir) - result_file = path_to_bytes(result_file) + test_dir = fsencode(test_dir) + result_file = fsencode(result_file) - result = run_scan_click(['-i', '--strip-root', test_dir, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + args = ['-i', '--strip-root', test_dir, '--json', result_file] + run_scan_click(args) # the paths for each OS end up encoded differently. # See for details: @@ -613,61 +489,98 @@ def test_scan_can_run_from_other_directory(): expected_file = test_env.get_test_loc('altpath/copyright.expected.json') result_file = test_env.get_temp_file('json') work_dir = os.path.dirname(result_file) - - rc, stdout, stderr = run_scan_plain( - ['-ci', '--strip-root', test_file, result_file], cwd=work_dir) - - if rc != 0: - print() - print('stdout:') - print(stdout) - print() - print('stderr:') - print(stderr) - assert rc == 0 + args = ['-ci', '--strip-root', test_file, '--json', result_file] + run_scan_plain(args, cwd=work_dir) check_json_scan(test_env.get_test_loc(expected_file), result_file, strip_dates=True) -def test_scan_logs_errors_messages(): +def test_scan_logs_errors_messages_not_verbosely_on_stderr(): test_file = test_env.get_test_loc('errors', copy=True) - rc, stdout, stderr = run_scan_plain(['-pi', test_file, ]) - assert rc == 1 - assert 'package.json' in stderr - assert 'delimiter: line 5 column 12' in stdout - assert 'ValueError: Expecting' not in stdout + args = ['-pi', '-n', '0', test_file, '--json', '-'] + _rc, stdout, stderr = run_scan_plain(args, expected_rc=1) + assert 'Path: errors/package.json' in stderr + assert "Expecting ':' delimiter: line 5 column 12 (char 143)" in stdout + assert "Expecting ':' delimiter: line 5 column 12 (char 143)" not in stderr -def test_scan_logs_errors_messages_with_diag(): +def 
test_scan_logs_errors_messages_not_verbosely_on_stderr_with_multiprocessing(): test_file = test_env.get_test_loc('errors', copy=True) + args = ['-pi', '-n', '2', test_file, '--json', '-'] + _rc, stdout, stderr = run_scan_plain(args, expected_rc=1) + assert 'Path: errors/package.json' in stderr + assert "Expecting ':' delimiter: line 5 column 12 (char 143)" in stdout + assert "Expecting ':' delimiter: line 5 column 12 (char 143)" not in stderr - rc, stdout, stderr = run_scan_plain(['-pi', '--diag', test_file, ]) - assert rc == 1 + +def test_scan_logs_errors_messages_verbosely_with_verbose(): + test_file = test_env.get_test_loc('errors', copy=True) + args = ['-pi', '--verbose', '-n', '0', test_file, '--json', '-'] + _rc, stdout, stderr = run_scan_plain(args, expected_rc=1) assert 'package.json' in stderr + assert 'delimiter: line 5 column 12' in stdout assert 'delimiter: line 5 column 12' in stderr assert 'ValueError: Expecting' in stdout + + +def test_scan_logs_errors_messages_verbosely_with_verbose_and_multiprocessing(): + test_file = test_env.get_test_loc('errors', copy=True) + args = ['-pi', '--verbose', '-n', '2', test_file, '--json', '-'] + _rc, stdout, stderr = run_scan_plain(args, expected_rc=1) + assert 'package.json' in stderr assert 'delimiter: line 5 column 12' in stdout + assert 'delimiter: line 5 column 12' in stderr + assert 'ValueError: Expecting' in stdout -def test_scan_progress_display_is_not_damaged_with_long_file_names_orig(monkeypatch): +def test_scan_progress_display_is_not_damaged_with_long_file_names_plain(): test_dir = test_env.get_test_loc('long_file_name') result_file = test_env.get_temp_file('json') - - result = run_scan_click(['--copyright', test_dir, result_file], monkeypatch) - assert result.exit_code == 0 + args = ['--copyright', test_dir, '--json', result_file] + _rc, stdout, stderr = run_scan_plain(args) expected1 = 'Scanned: abcdefghijklmnopqr...234567890123456789.c' expected2 = 'Scanned: 0123456789012345678901234567890123456789.c' 
- assert expected1 in result.output - assert expected2 in result.output + expected3 = 'abcdefghijklmnopqrtu0123456789012345678901234567890123456789abcdefghijklmnopqrtu0123456789012345678901234567890123456789.c' + assert expected1 not in stdout + assert expected2 not in stdout + assert expected3 not in stdout + assert expected1 not in stderr + assert expected2 not in stderr + assert expected3 not in stderr + + +def test_scan_progress_display_is_not_damaged_with_long_file_names(monkeypatch): + test_dir = test_env.get_test_loc('long_file_name') + result_file = test_env.get_temp_file('json') + args = ['--copyright', test_dir, '--json', result_file] + result = run_scan_click(args, monkeypatch=monkeypatch) + if on_windows: + expected1 = 'Scanned: 0123456789012345678901234567890123456789.c' + expected2 = 'Scanned: abcdefghijklmnopqrt...0123456789012345678' + expected3 = 'abcdefghijklmnopqrtu0123456789012345678901234567890123456789abcdefghijklmnopqrtu0123456789012345678901234567890123456789.c' + try: + assert expected1 in result.output + assert expected2 not in result.output + assert expected3 not in result.output + except: + print() + print('output:') + print(result.output) + print() + raise + else: + expected1 = 'Scanned: abcdefghijklmnopqr...234567890123456789.c' + expected2 = 'Scanned: 0123456789012345678901234567890123456789.c' + expected3 = 'abcdefghijklmnopqrtu0123456789012345678901234567890123456789abcdefghijklmnopqrtu0123456789012345678901234567890123456789.c' + assert expected1 in result.output + assert expected2 in result.output + assert expected3 not in result.output def test_scan_does_scan_php_composer(): test_file = test_env.get_test_loc('composer/composer.json') expected_file = test_env.get_test_loc('composer/composer.expected.json') result_file = test_env.get_temp_file('results.json') - - result = run_scan_click(['--package', test_file, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + run_scan_click(['--package', 
test_file, '--json', result_file]) check_json_scan(expected_file, result_file) @@ -675,67 +588,87 @@ def test_scan_does_scan_rpm(): test_file = test_env.get_test_loc('rpm/fping-2.4-0.b2.rhfc1.dag.i386.rpm') expected_file = test_env.get_test_loc('rpm/fping-2.4-0.b2.rhfc1.dag.i386.rpm.expected.json') result_file = test_env.get_temp_file('results.json') - - result = run_scan_click(['--package', test_file, result_file]) - assert result.exit_code == 0 - assert 'Scanning done' in result.output + run_scan_click(['--package', test_file, '--json', result_file]) check_json_scan(expected_file, result_file, regen=False) -class TestFixedWidthFilename(TestCase): - - def test_fixed_width_file_name_with_file_name_larger_than_max_length_is_shortened(self): - test = cli.fixed_width_file_name('0123456789012345678901234.c', 25) - expected = '0123456789...5678901234.c' - assert expected == test - - def test_fixed_width_file_name_with_file_name_smaller_than_max_length_is_not_shortened(self): - file_name = '0123456789012345678901234.c' - test = cli.fixed_width_file_name(file_name, max_length=50) - assert file_name == test - - def test_fixed_width_file_name_with_file_name_at_max_length_is_not_shortened(self): - test = cli.fixed_width_file_name('01234567890123456789012.c', 25) - expected = '01234567890123456789012.c' - assert expected == test - - def test_fixed_width_file_name_with_file_name_smaller_than_max_length_not_shortened(self): - test = cli.fixed_width_file_name('0123456789012345678901.c', 25) - expected = '0123456789012345678901.c' - assert expected == test - - def test_fixed_width_file_name_with_none_filename_return_empty_string(self): - test = cli.fixed_width_file_name(None, 25) - expected = '' - assert expected == test - - def test_fixed_width_file_name_without_extension(self): - test = cli.fixed_width_file_name('012345678901234567890123456', 25) - expected = '01234567890...67890123456' - assert expected == test - - def 
test_fixed_width_file_name_with_posix_path_without_shortening(self): - test = cli.fixed_width_file_name('C/Documents_and_Settings/Boki/Desktop/head/patches/drupal6/drupal.js', 25) - expected = 'drupal.js' - assert expected == test - - def test_fixed_width_file_name_with_posix_path_with_shortening(self): - test = cli.fixed_width_file_name('C/Documents_and_Settings/Boki/Desktop/head/patches/drupal6/012345678901234567890123.c', 25) - expected = '0123456789...4567890123.c' - assert expected == test - - def test_fixed_width_file_name_with_win_path_without_shortening(self): - test = cli.fixed_width_file_name('C\\:Documents_and_Settings\\Boki\\Desktop\\head\\patches\\drupal6\\drupal.js', 25) - expected = 'drupal.js' - assert expected == test - - def test_fixed_width_file_name_with_win_path_with_shortening(self): - test = cli.fixed_width_file_name('C\\:Documents_and_Settings\\Boki\\Desktop\\head\\patches\\drupal6\\012345678901234567890123.c', 25) - expected = '0123456789...4567890123.c' - assert expected == test - - def test_fixed_width_file_name_with_very_small_file_name_and_long_extension(self): - test = cli.fixed_width_file_name('abc.abcdef', 5) - # FIXME: what is expected is TBD - expected = '' - assert expected == test +def test_scan_cli_help(regen=False): + expected_file = test_env.get_test_loc('help/help.txt') + result = run_scan_click(['--help']) + if regen: + with open(expected_file, 'wb') as ef: + ef.write(result.output) + assert open(expected_file).read() == result.output + + +def test_scan_errors_out_with_unknown_option(): + test_file = test_env.get_test_loc('license_text/test.txt') + args = ['--json--info', test_file] + result = run_scan_click(args, expected_rc=2) + assert 'Error: no such option: --json--info' in result.output + + +def test_scan_to_json_without_FILE_does_not_write_to_next_option(): + test_file = test_env.get_test_loc('license_text/test.txt') + args = ['--json', '--info', test_file] + result = run_scan_click(args, expected_rc=2) + assert 
('Error: Invalid value for "--json": Illegal file name ' + 'conflicting with an option name: --info.') in result.output + + +def test_scan_errors_out_with_conflicting_root_options(): + test_file = test_env.get_test_loc('license_text/test.txt') + result_file = test_env.get_temp_file('results.json') + args = ['--strip-root', '--full-root', '--json', result_file, '--info', test_file] + result = run_scan_click(args, expected_rc=2) + assert ('Error: The option --strip-root cannot be used together with the ' + '--full-root option(s) and --full-root is used.') in result.output + + +def test_scan_errors_out_with_conflicting_verbosity_options(): + test_file = test_env.get_test_loc('license_text/test.txt') + result_file = test_env.get_temp_file('results.json') + args = ['--quiet', '--verbose', '--json', result_file, '--info', test_file] + result = run_scan_click(args, expected_rc=2) + assert ('Error: The option --quiet cannot be used together with the ' + '--verbose option(s) and --verbose is used. 
You can set only one of ' + 'these options at a time.') in result.output + + +def test_scan_with_timing_json_return_timings_for_each_scanner(): + test_dir = test_env.extract_test_tar('timing/basic.tgz') + result_file = test_env.get_temp_file('json') + args = ['--email', '--url', '--license', '--copyright', '--info', + '--package', '--timing', '--json', result_file, test_dir] + run_scan_click(args) + file_results = load_json_result(result_file)['files'] + + expected = set(['emails', 'urls', 'licenses', 'copyrights', 'info', 'packages']) + check_timings(expected, file_results) + + +def test_scan_with_timing_jsonpp_return_timings_for_each_scanner(): + test_dir = test_env.extract_test_tar('timing/basic.tgz') + result_file = test_env.get_temp_file('json') + args = ['--email', '--url', '--license', '--copyright', '--info', + '--package', '--timing', '--verbose', '--json-pp', result_file, test_dir] + run_scan_click(args) + file_results = load_json_result(result_file)['files'] + expected = set(['emails', 'urls', 'licenses', 'copyrights', 'info', 'packages']) + check_timings(expected, file_results) + + +def check_timings(expected, file_results): + for res in file_results: + scan_timings = res['scan_timings'] + + if not res['type'] == 'file': + # should be an empty dict for dirs + assert not scan_timings + continue + + assert scan_timings + + for scanner, timing in scan_timings.items(): + assert scanner in expected + assert timing diff --git a/tests/scancode/test_extract_cli.py b/tests/scancode/test_extract_cli.py index 18134e6932e..78c162dd789 100644 --- a/tests/scancode/test_extract_cli.py +++ b/tests/scancode/test_extract_cli.py @@ -33,17 +33,16 @@ from click.testing import CliRunner from commoncode.fileutils import as_posixpath -from commoncode.fileutils import file_iter +from commoncode.fileutils import fsencode +from commoncode.fileutils import resource_iter from commoncode.testcase import FileDrivenTesting +from commoncode.system import on_linux from commoncode.system 
import on_windows from scancode import extract_cli -from commoncode.system import on_linux -from commoncode.fileutils import path_to_bytes test_env = FileDrivenTesting() test_env.test_data_dir = os.path.join(os.path.dirname(__file__), 'data') - """ These CLI tests are dependent on py.test monkeypatch to ensure we are testing the actual command outputs as if using a TTY or not. @@ -51,6 +50,7 @@ EMPTY_STRING = b'' if on_linux else '' + def test_extractcode_command_can_take_an_empty_directory(monkeypatch): test_dir = test_env.get_temp_dir() monkeypatch.setattr(click._termui_impl, 'isatty', lambda _: True) @@ -68,11 +68,6 @@ def test_extractcode_command_does_extract_verbose(monkeypatch): result = runner.invoke(extract_cli.extractcode, ['--verbose', test_dir]) assert result.exit_code == 1 assert os.path.exists(os.path.join(test_dir, 'some.tar.gz-extract')) - print() - print(result.output) - print() - print(repr(result.output)) - print() expected = [ 'Extracting archives...', 'some.tar.gz', @@ -125,7 +120,7 @@ def test_extractcode_command_works_with_relative_paths(monkeypatch): assert not 'WARNING' in result.output assert not 'ERROR' in result.output expected = ['/c/a/a.txt', '/c/b/a.txt', '/c/c/a.txt'] - file_result = [as_posixpath(f.replace(test_tgt_dir, '')) for f in fileutils.file_iter(test_tgt_dir)] + file_result = [as_posixpath(f.replace(test_tgt_dir, '')) for f in fileutils.resource_iter(test_tgt_dir, with_dirs=False)] assert sorted(expected) == sorted(file_result) finally: fileutils.delete(test_src_dir) @@ -189,9 +184,9 @@ def test_extractcode_command_can_extract_archive_with_unicode_names_verbose(monk monkeypatch.setattr(click._termui_impl, 'isatty', lambda _: True) test_dir = test_env.get_test_loc('unicodearch', copy=True) if on_linux: - test_dir = path_to_bytes(test_dir) + test_dir = fsencode(test_dir) runner = CliRunner() - result = runner.invoke(extract_cli.extractcode, ['--verbose', test_dir], catch_exceptions=False) + result = 
runner.invoke(extract_cli.extractcode, ['--verbose', test_dir]) assert result.exit_code == 0 assert 'Sanders' in result.output @@ -199,7 +194,7 @@ def test_extractcode_command_can_extract_archive_with_unicode_names_verbose(monk uni_arch = b'unicodepath.tgz' if on_linux else 'unicodepath.tgz' uni_path = b'/unicodepath/' if on_linux else '/unicodepath/' - file_result = [f for f in map(as_posixpath, file_iter(test_dir)) if not f.endswith(uni_arch)] + file_result = [f for f in map(as_posixpath, resource_iter(test_dir, with_dirs=False)) if not f.endswith(uni_arch)] file_result = [EMPTY_STRING.join(f.partition(uni_path)[1:]) for f in file_result] file_result = [f for f in file_result if f] expected = [ @@ -214,15 +209,15 @@ def test_extractcode_command_can_extract_archive_with_unicode_names(monkeypatch) monkeypatch.setattr(click._termui_impl, 'isatty', lambda _: True) test_dir = test_env.get_test_loc('unicodearch', copy=True) if on_linux: - test_dir = path_to_bytes(test_dir) + test_dir = fsencode(test_dir) runner = CliRunner() - result = runner.invoke(extract_cli.extractcode, [test_dir], catch_exceptions=False) + result = runner.invoke(extract_cli.extractcode, [test_dir]) assert result.exit_code == 0 uni_arch = b'unicodepath.tgz' if on_linux else 'unicodepath.tgz' uni_path = b'/unicodepath/' if on_linux else '/unicodepath/' - file_result = [f for f in map(as_posixpath, file_iter(test_dir)) if not f.endswith(uni_arch)] + file_result = [f for f in map(as_posixpath, resource_iter(test_dir, with_dirs=False)) if not f.endswith(uni_arch)] file_result = [EMPTY_STRING.join(f.partition(uni_path)[1:]) for f in file_result] file_result = [f for f in file_result if f] expected = [ @@ -239,7 +234,7 @@ def test_extractcode_command_can_extract_shallow(monkeypatch): runner = CliRunner() result = runner.invoke(extract_cli.extractcode, ['--shallow', test_dir]) assert result.exit_code == 0 - file_result = [f for f in map(as_posixpath, file_iter(test_dir)) if not 
f.endswith('unicodepath.tgz')] + file_result = [f for f in map(as_posixpath, resource_iter(test_dir, with_dirs=False)) if not f.endswith('unicodepath.tgz')] file_result = [''.join(f.partition('/top.zip-extract/')[1:]) for f in file_result] file_result = [f for f in file_result if f] # this checks that the zip in top.zip are not extracted diff --git a/tests/scancode/test_ignore_files.py b/tests/scancode/test_ignore_files.py deleted file mode 100644 index 0d8ea8e5cf3..00000000000 --- a/tests/scancode/test_ignore_files.py +++ /dev/null @@ -1,141 +0,0 @@ -# -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. -# http://nexb.com and https://github.com/nexB/scancode-toolkit/ -# The ScanCode software is licensed under the Apache License version 2.0. -# Data generated with ScanCode require an acknowledgment. -# ScanCode is a trademark of nexB Inc. -# -# You may not use this software except in compliance with the License. -# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -# -# When you publish or redistribute any data created with ScanCode or any ScanCode -# derivative work, you must accompany this data with the following acknowledgment: -# -# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES -# OR CONDITIONS OF ANY KIND, either express or implied. No content created from -# ScanCode should be considered or used as legal advice. Consult an Attorney -# for any legal advice. -# ScanCode is a free software code scanning tool from nexB Inc. and others. -# Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
- -from __future__ import absolute_import -from __future__ import unicode_literals - -from os import path - -from commoncode.testcase import FileBasedTesting -from commoncode.ignore import is_ignored -from scancode.cache import get_scans_cache_class -from scancode.cli import resource_paths -from scancode.plugin_ignore import ProcessIgnore - - -class TestIgnoreFiles(FileBasedTesting): - - test_data_dir = path.join(path.dirname(__file__), 'data') - - def test_ignore_glob_path(self): - test = ( - 'common/src/test/sample.txt', - {'*/src/test/*': 'test ignore'}, - {} - ) - assert is_ignored(*test) - - def test_ignore_single_path(self): - test = ( - 'common/src/test/sample.txt', - {'src/test/sample.txt': 'test ignore'}, - {} - ) - assert is_ignored(*test) - - def test_ignore_single_file(self): - test = ( - 'common/src/test/sample.txt', - {'sample.txt': 'test ignore'}, - {} - ) - assert is_ignored(*test) - - def test_ignore_glob_file(self): - test = ( - 'common/src/test/sample.txt', - {'*.txt': 'test ignore'}, - {} - ) - assert is_ignored(*test) - - def test_resource_paths_with_single_file(self): - test_dir = self.extract_test_tar('ignore/user.tgz') - test_plugin = ProcessIgnore(('sample.doc',)) - scan_cache_class = get_scans_cache_class(self.get_temp_dir()) - expected = [ - 'user', - 'user/ignore.doc', - 'user/src', - 'user/src/ignore.doc', - 'user/src/test', - 'user/src/test/sample.txt' - ] - test = [resource.rel_path for resource in resource_paths(test_dir, False, scan_cache_class, [test_plugin])] - assert expected == sorted(test) - - def test_resource_paths_with_multiple_files(self): - test_dir = self.extract_test_tar('ignore/user.tgz') - test_plugin = ProcessIgnore(('ignore.doc',)) - scan_cache_class = get_scans_cache_class(self.get_temp_dir()) - expected = [ - 'user', - 'user/src', - 'user/src/test', - 'user/src/test/sample.doc', - 'user/src/test/sample.txt' - ] - test = [resource.rel_path for resource in resource_paths(test_dir, False, scan_cache_class, 
[test_plugin])] - assert expected == sorted(test) - - def test_resource_paths_with_glob_file(self): - test_dir = self.extract_test_tar('ignore/user.tgz') - test_plugin = ProcessIgnore(('*.doc',)) - scan_cache_class = get_scans_cache_class(self.get_temp_dir()) - expected = [ - 'user', - 'user/src', - 'user/src/test', - 'user/src/test/sample.txt' - ] - test = [resource.rel_path for resource in resource_paths(test_dir, False, scan_cache_class, [test_plugin])] - assert expected == sorted(test) - - def test_resource_paths_with_glob_path(self): - test_dir = self.extract_test_tar('ignore/user.tgz') - test_plugin = ProcessIgnore(('*/src/test',)) - scan_cache_class = get_scans_cache_class(self.get_temp_dir()) - expected = [ - 'user', - 'user/ignore.doc', - 'user/src', - 'user/src/ignore.doc' - ] - test = [resource.rel_path for resource in resource_paths(test_dir, False, scan_cache_class, [test_plugin])] - assert expected == sorted(test) - - def test_resource_paths_with_multiple_plugins(self): - test_dir = self.extract_test_tar('ignore/user.tgz') - scan_cache_class = get_scans_cache_class(self.get_temp_dir()) - test_plugins = [ - ProcessIgnore(('*.doc',)), - ProcessIgnore(('*/src/test/*',)) - ] - expected = [ - 'user', - 'user/src', - 'user/src/test' - ] - test = [resource.rel_path for resource in resource_paths(test_dir, False, scan_cache_class, test_plugins)] - assert expected == sorted(test) diff --git a/tests/scancode/test_interrupt.py b/tests/scancode/test_interrupt.py index de76a172495..615318fb22f 100644 --- a/tests/scancode/test_interrupt.py +++ b/tests/scancode/test_interrupt.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. 
@@ -23,13 +23,13 @@ # Visit https://github.com/nexB/scancode-toolkit/ for support and download. from __future__ import absolute_import -from __future__ import print_function from __future__ import division +from __future__ import print_function from __future__ import unicode_literals import os -import threading from time import sleep +import threading from commoncode.testcase import FileBasedTesting @@ -40,10 +40,10 @@ verify there is no thread leak. """ + class TestInterrupt(FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') - def test_interruptible_can_run_function(self): before = threading.active_count() @@ -51,8 +51,9 @@ def some_long_function(exec_time): sleep(exec_time) return 'OK' - result = interrupt.interruptible(some_long_function, args=(0.01,), timeout=10) - assert (True, 'OK') == result + results = interrupt.interruptible(some_long_function, args=(0.01,), timeout=10) + expected = None, 'OK' + assert expected == results after = threading.active_count() assert before == after @@ -65,8 +66,9 @@ def some_long_function(exec_time): sleep(i) return 'OK' - result = interrupt.interruptible(some_long_function, args=(20,), timeout=0.00001) - assert (False, 'ERROR: Processing interrupted: timeout after 0 seconds.') == result + results = interrupt.interruptible(some_long_function, args=(20,), timeout=0.00001) + expected = 'ERROR: Processing interrupted: timeout after 0 seconds.', None + assert expected == results after = threading.active_count() assert before == after diff --git a/tests/scancode/test_plugin_ignore.py b/tests/scancode/test_plugin_ignore.py new file mode 100644 index 00000000000..203edf642fb --- /dev/null +++ b/tests/scancode/test_plugin_ignore.py @@ -0,0 +1,236 @@ +# +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. +# http://nexb.com and https://github.com/nexB/scancode-toolkit/ +# The ScanCode software is licensed under the Apache License version 2.0. 
+# Data generated with ScanCode require an acknowledgment. +# ScanCode is a trademark of nexB Inc. +# +# You may not use this software except in compliance with the License. +# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# +# When you publish or redistribute any data created with ScanCode or any ScanCode +# derivative work, you must accompany this data with the following acknowledgment: +# +# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, either express or implied. No content created from +# ScanCode should be considered or used as legal advice. Consult an Attorney +# for any legal advice. +# ScanCode is a free software code scanning tool from nexB Inc. and others. +# Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
+ +from __future__ import absolute_import +from __future__ import print_function + +from os.path import dirname +from os.path import join + +from commoncode.testcase import FileDrivenTesting +from scancode.cli_test_utils import run_scan_click +from scancode.cli_test_utils import load_json_result +from scancode.plugin_ignore import is_ignored +from scancode.plugin_ignore import ProcessIgnore +from scancode.resource import Codebase + + +class TestPluginIgnoreFiles(FileDrivenTesting): + + test_data_dir = join(dirname(__file__), 'data') + + def test_is_ignored_glob_path(self): + location = 'common/src/test/sample.txt' + ignores = {'*/src/test/*': 'test ignore'} + assert is_ignored(location=location, ignores=ignores) + + def test_is_ignored_single_path(self): + location = 'common/src/test/sample.txt' + ignores = {'common/src/test/sample.txt': 'test ignore'} + assert is_ignored(location=location, ignores=ignores) + + def test_is_ignored_single_path_not_matching(self): + location = 'common/src/test/sample.txt' + ignores = {'src/test/sample.txt': 'test ignore'} + assert not is_ignored(location=location, ignores=ignores) + + def test_is_ignored_single_file(self): + location = 'common/src/test/sample.txt' + ignores = {'sample.txt': 'test ignore'} + assert is_ignored(location=location, ignores=ignores) + + def test_is_ignored_glob_file(self): + location = 'common/src/test/sample.txt' + ignores = {'*.txt': 'test ignore'} + assert is_ignored(location=location, ignores=ignores) + + def check_ProcessIgnore(self, test_dir, expected, ignore): + codebase = Codebase(test_dir, strip_root=True) + test_plugin = ProcessIgnore() + test_plugin.process_codebase(codebase, ignore=ignore) + resources = [res.path for res in codebase.walk(skip_root=True)] + assert expected == sorted(resources) + + def test_ProcessIgnore_with_single_file(self): + test_dir = self.extract_test_tar('plugin_ignore/user.tgz') + ignore = ('sample.doc',) + expected = [ + 'user', + 'user/ignore.doc', + 'user/src', + 
'user/src/ignore.doc', + 'user/src/test', + 'user/src/test/sample.txt' + ] + self.check_ProcessIgnore(test_dir, expected, ignore) + + def test_ProcessIgnore_with_multiple_files(self): + test_dir = self.extract_test_tar('plugin_ignore/user.tgz') + ignore = ('ignore.doc', 'sample.doc',) + expected = [ + 'user', + 'user/src', + 'user/src/test', + 'user/src/test/sample.txt' + ] + self.check_ProcessIgnore(test_dir, expected, ignore) + + def test_ProcessIgnore_with_glob_for_extension(self): + test_dir = self.extract_test_tar('plugin_ignore/user.tgz') + ignore = ('*.doc',) + expected = [ + 'user', + 'user/src', + 'user/src/test', + 'user/src/test/sample.txt' + ] + self.check_ProcessIgnore(test_dir, expected, ignore) + + def test_ProcessIgnore_with_glob_for_path(self): + test_dir = self.extract_test_tar('plugin_ignore/user.tgz') + ignore = ('*/src/test',) + expected = [ + 'user', + 'user/ignore.doc', + 'user/src', + 'user/src/ignore.doc' + ] + self.check_ProcessIgnore(test_dir, expected, ignore) + + def test_ProcessIgnore_with_multiple_ignores(self): + test_dir = self.extract_test_tar('plugin_ignore/user.tgz') + ignore = ('*.doc', '*/src/test/*',) + expected = [ + 'user', + 'user/src', + 'user/src/test' + ] + self.check_ProcessIgnore(test_dir, expected, ignore) + + +class TestScanPluginIgnoreFiles(FileDrivenTesting): + + test_data_dir = join(dirname(__file__), 'data') + + def test_scancode_ignore_vcs_files_and_dirs_by_default(self): + test_dir = self.extract_test_tar('plugin_ignore/vcs.tgz') + result_file = self.get_temp_file('json') + args = ['--copyright', '--strip-root', test_dir, '--json', result_file] + run_scan_click(args) + scan_result = load_json_result(result_file) + # a single test.txt file and its directory that is not a VCS file should + # be listed + assert 1 == scan_result['files_count'] + scan_locs = [x['path'] for x in scan_result['files']] + assert [u'vcs', u'vcs/test.txt'] == scan_locs + + def 
test_scancode_ignore_vcs_files_and_dirs_by_default_no_multiprocess(self): + test_dir = self.extract_test_tar('plugin_ignore/vcs.tgz') + result_file = self.get_temp_file('json') + args = ['--copyright', '--strip-root', '--processes', '0', test_dir, '--json', result_file] + run_scan_click(args) + scan_result = load_json_result(result_file) + # a single test.txt file and its directory that is not a VCS file should + # be listed + assert 1 == scan_result['files_count'] + scan_locs = [x['path'] for x in scan_result['files']] + assert [u'vcs', u'vcs/test.txt'] == scan_locs + + def test_scancode_ignore_single_file(self): + test_dir = self.extract_test_tar('plugin_ignore/user.tgz') + result_file = self.get_temp_file('json') + args = ['--copyright', '--strip-root', '--ignore', 'sample.doc', test_dir, '--json', result_file] + run_scan_click(args) + scan_result = load_json_result(result_file) + assert 3 == scan_result['files_count'] + # FIXME: add assert 3 == scan_result['dirs_count'] + scan_locs = [x['path'] for x in scan_result['files']] + expected = [ + 'user', + 'user/ignore.doc', + 'user/src', + 'user/src/ignore.doc', + 'user/src/test', + 'user/src/test/sample.txt' + ] + assert expected == scan_locs + + def test_scancode_ignore_multiple_files(self): + test_dir = self.extract_test_tar('plugin_ignore/user.tgz') + result_file = self.get_temp_file('json') + args = ['--copyright', '--strip-root', '--ignore', 'ignore.doc', test_dir, '--json', result_file] + run_scan_click(args) + scan_result = load_json_result(result_file) + assert 2 == scan_result['files_count'] + scan_locs = [x['path'] for x in scan_result['files']] + expected = [ + u'user', + u'user/src', + u'user/src/test', + u'user/src/test/sample.doc', + u'user/src/test/sample.txt'] + assert expected == scan_locs + + def test_scancode_ignore_glob_files(self): + test_dir = self.extract_test_tar('plugin_ignore/user.tgz') + result_file = self.get_temp_file('json') + args = ['--copyright', '--strip-root', '--ignore', 
'*.doc', test_dir, '--json', result_file] + run_scan_click(args) + scan_result = load_json_result(result_file) + assert 1 == scan_result['files_count'] + scan_locs = [x['path'] for x in scan_result['files']] + expected = [ + u'user', + u'user/src', + u'user/src/test', + u'user/src/test/sample.txt' + ] + assert expected == scan_locs + + def test_scancode_ignore_glob_path(self): + test_dir = self.extract_test_tar('plugin_ignore/user.tgz') + result_file = self.get_temp_file('json') + args = ['--copyright', '--strip-root', '--ignore', '*/src/test/*', test_dir, '--json', result_file] + run_scan_click(args) + scan_result = load_json_result(result_file) + assert 2 == scan_result['files_count'] + scan_locs = [x['path'] for x in scan_result['files']] + expected = [ + u'user', + u'user/ignore.doc', + u'user/src', + u'user/src/ignore.doc', + u'user/src/test' + ] + assert expected == scan_locs + + def test_scancode_multiple_ignores(self): + test_dir = self.extract_test_tar('plugin_ignore/user.tgz') + result_file = self.get_temp_file('json') + args = ['--copyright', '--strip-root', '--ignore', '*/src/test', '--ignore', '*.doc', test_dir, '--json', result_file] + run_scan_click(args) + scan_result = load_json_result(result_file) + assert 0 == scan_result['files_count'] + scan_locs = [x['path'] for x in scan_result['files']] + assert [u'user', u'user/src'] == scan_locs diff --git a/tests/scancode/test_plugin_mark_source.py b/tests/scancode/test_plugin_mark_source.py new file mode 100644 index 00000000000..ac644c893b1 --- /dev/null +++ b/tests/scancode/test_plugin_mark_source.py @@ -0,0 +1,62 @@ +# +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. +# http://nexb.com and https://github.com/nexB/scancode-toolkit/ +# The ScanCode software is licensed under the Apache License version 2.0. +# Data generated with ScanCode require an acknowledgment. +# ScanCode is a trademark of nexB Inc. +# +# You may not use this software except in compliance with the License. 
+# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# +# When you publish or redistribute any data created with ScanCode or any ScanCode +# derivative work, you must accompany this data with the following acknowledgment: +# +# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, either express or implied. No content created from +# ScanCode should be considered or used as legal advice. Consult an Attorney +# for any legal advice. +# ScanCode is a free software code scanning tool from nexB Inc. and others. +# Visit https://github.com/nexB/scancode-toolkit/ for support and download. + +from __future__ import absolute_import +from __future__ import unicode_literals + +from os.path import dirname +from os.path import join + +from commoncode.testcase import FileDrivenTesting +from scancode.cli_test_utils import check_json_scan +from scancode.cli_test_utils import run_scan_click +from scancode.plugin_mark_source import is_source_directory + + +class TestMarkSource(FileDrivenTesting): + + test_data_dir = join(dirname(__file__), 'data') + + def test_is_source_directory_above_threshold(self): + files_count = 10 + src_count = 9 + assert is_source_directory(src_count, files_count) + + def test_is_source_directory_below_threshold(self): + files_count = 10 + src_count = 5 + assert not is_source_directory(src_count, files_count) + + def test_scan_mark_source_without_info(self): + test_dir = self.extract_test_tar('plugin_mark_source/JGroups.tgz') + result = run_scan_click(['--mark-source', test_dir, '--json', '-'], + expected_rc=2) + assert 'Error: The option --mark-source requires 
the option(s) --info and is missing --info.' in result.output + + def test_scan_mark_source_with_info(self): + test_dir = self.extract_test_tar('plugin_mark_source/JGroups.tgz') + result_file = self.get_temp_file('json') + expected_file = self.get_test_loc('plugin_mark_source/with_info.expected.json') + run_scan_click(['--info', '--mark-source', test_dir, '--json', result_file]) + check_json_scan(expected_file, result_file) diff --git a/tests/scancode/test_plugin_only_findings.py b/tests/scancode/test_plugin_only_findings.py new file mode 100644 index 00000000000..94c7d70c18d --- /dev/null +++ b/tests/scancode/test_plugin_only_findings.py @@ -0,0 +1,60 @@ +# +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. +# http://nexb.com and https://github.com/nexB/scancode-toolkit/ +# The ScanCode software is licensed under the Apache License version 2.0. +# Data generated with ScanCode require an acknowledgment. +# ScanCode is a trademark of nexB Inc. +# +# You may not use this software except in compliance with the License. +# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# +# When you publish or redistribute any data created with ScanCode or any ScanCode +# derivative work, you must accompany this data with the following acknowledgment: +# +# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, either express or implied. No content created from +# ScanCode should be considered or used as legal advice. Consult an Attorney +# for any legal advice. +# ScanCode is a free software code scanning tool from nexB Inc. and others. 
+# Visit https://github.com/nexB/scancode-toolkit/ for support and download. + +from __future__ import absolute_import +from __future__ import unicode_literals + +from os.path import dirname +from os.path import join + +from commoncode.testcase import FileDrivenTesting +from scancode.cli_test_utils import run_scan_click +from scancode.cli_test_utils import check_json_scan + + +class TestHasFindings(FileDrivenTesting): + + test_data_dir = join(dirname(__file__), 'data') + + def test_scan_only_findings(self): + test_dir = self.extract_test_tar('plugin_only_findings/basic.tgz') + result_file = self.get_temp_file('json') + expected_file = self.get_test_loc('plugin_only_findings/expected.json') + run_scan_click(['-clip', '--only-findings', '--json', result_file, test_dir]) + check_json_scan(expected_file, result_file, strip_dates=True) + + def test_scan_only_findings_with_errors(self): + test_dir = self.get_test_loc('plugin_only_findings/errors') + result_file = self.get_temp_file('json') + expected_file = self.get_test_loc('plugin_only_findings/errors.expected.json') + run_scan_click(['-pi', '--only-findings', '--json-pp', + result_file, test_dir], expected_rc=1) + check_json_scan(expected_file, result_file, strip_dates=True) + + def test_scan_only_findings_with_only_info(self): + test_dir = self.extract_test_tar('plugin_only_findings/basic.tgz') + result_file = self.get_temp_file('json') + expected_file = self.get_test_loc('plugin_only_findings/info.expected.json') + run_scan_click(['--info', '--only-findings', '--json', result_file, test_dir]) + check_json_scan(expected_file, result_file, strip_dates=True) diff --git a/tests/scancode/test_resource.py b/tests/scancode/test_resource.py new file mode 100644 index 00000000000..f72a9ccd48f --- /dev/null +++ b/tests/scancode/test_resource.py @@ -0,0 +1,479 @@ +# +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. 
+# http://nexb.com and https://github.com/nexB/scancode-toolkit/ +# The ScanCode software is licensed under the Apache License version 2.0. +# Data generated with ScanCode require an acknowledgment. +# ScanCode is a trademark of nexB Inc. +# +# You may not use this software except in compliance with the License. +# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# +# When you publish or redistribute any data created with ScanCode or any ScanCode +# derivative work, you must accompany this data with the following acknowledgment: +# +# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, either express or implied. No content created from +# ScanCode should be considered or used as legal advice. Consult an Attorney +# for any legal advice. +# ScanCode is a free software code scanning tool from nexB Inc. and others. +# Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from os.path import dirname +from os.path import exists +from os.path import join + +from commoncode.testcase import FileBasedTesting + +from scancode.resource import Codebase +from commoncode.fileutils import parent_directory +from scancode.resource import get_path + + +class TestCodebase(FileBasedTesting): + test_data_dir = join(dirname(__file__), 'data') + + def test_walk_defaults(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + results = list(codebase.walk()) + expected = [ + ('codebase', False), + ('abc', True), + ('et131x.h', True), + ('dir', False), + ('that', True), + ('this', True), + ('other dir', False), + ('file', True), + ] + assert expected == [(r.name, r.is_file) for r in results] + + def test_walk_topdown(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + results = list(codebase.walk(topdown=True)) + expected = [ + ('codebase', False), + ('abc', True), + ('et131x.h', True), + ('dir', False), + ('that', True), + ('this', True), + ('other dir', False), + ('file', True), + ] + assert expected == [(r.name, r.is_file) for r in results] + + def test_walk_bottomup(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + results = list(codebase.walk(topdown=False)) + expected = [ + ('abc', True), + ('et131x.h', True), + ('that', True), + ('this', True), + ('dir', False), + ('file', True), + ('other dir', False), + ('codebase', False), + ] + assert expected == [(r.name, r.is_file) for r in results] + + def test_walk_skip_root_basic(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + results = list(codebase.walk(skip_root=True)) + expected = [ + ('abc', True), + ('et131x.h', True), + ('dir', False), + 
('that', True), + ('this', True), + ('other dir', False), + ('file', True), + ] + assert expected == [(r.name, r.is_file) for r in results] + + def test_walk_filtered_with_filtered_root(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + codebase.root.is_filtered = True + results = list(codebase.walk_filtered()) + expected = [ + ('abc', True), + ('et131x.h', True), + ('dir', False), + ('that', True), + ('this', True), + ('other dir', False), + ('file', True), + ] + assert expected == [(r.name, r.is_file) for r in results] + + def test_walk_filtered_with_all_filtered(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + for res in codebase.walk(): + res.is_filtered = True + results = list(codebase.walk_filtered()) + expected = [] + assert expected == [(r.name, r.is_file) for r in results] + + def test_compute_counts_filtered_None(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + results = codebase.compute_counts(skip_filtered=True) + expected = (5, 3, 0) + assert expected == results + + def test_compute_counts_filtered_None_with_size(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + for res in codebase.walk(): + if res.is_file: + res.size = 10 + + results = codebase.compute_counts(skip_filtered=True) + expected = (5, 3, 50) + assert expected == results + + def test_compute_counts_filtered_None_with_cache(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + results = codebase.compute_counts(skip_filtered=True) + expected = (5, 3, 0) + assert expected == results + + def test_compute_counts_filtered_all(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + for res in codebase.walk(): + res.is_filtered = True + results = codebase.compute_counts(skip_filtered=True) + 
expected = (0, 0, 0) + assert expected == results + + def test_compute_counts_filtered_all_with_cache(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + for res in codebase.walk(): + res.is_filtered = True + results = codebase.compute_counts(skip_filtered=True) + expected = (0, 0, 0) + assert expected == results + + def test_compute_counts_filtered_files(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + for res in codebase.walk(): + if res.is_file: + res.is_filtered = True + results = codebase.compute_counts(skip_filtered=True) + expected = (0, 3, 0) + assert expected == results + + def test_compute_counts_filtered_dirs(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + for res in codebase.walk(): + if not res.is_file: + res.is_filtered = True + results = codebase.compute_counts(skip_filtered=True) + expected = (5, 0, 0) + assert expected == results + + def test_walk_filtered_dirs(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + for res in codebase.walk(): + if not res.is_file: + res.is_filtered = True + + results = list(codebase.walk_filtered(topdown=True)) + expected = [ + ('abc', True), + ('et131x.h', True), + ('that', True), + ('this', True), + ('file', True), + ] + assert expected == [(r.name, r.is_file) for r in results] + + def test_walk_filtered_skip_root(self): + test_codebase = self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + codebase.root.is_filtered = True + results = list(codebase.walk_filtered(skip_root=True)) + expected = [ + ('abc', True), + ('et131x.h', True), + ('dir', False), + ('that', True), + ('this', True), + ('other dir', False), + ('file', True), + ] + assert expected == [(r.name, r.is_file) for r in results] + + def test_walk_filtered_all_skip_root(self): + test_codebase = 
self.get_test_loc('resource/codebase') + codebase = Codebase(test_codebase) + for res in codebase.walk(): + res.is_filtered = True + results = list(codebase.walk_filtered(skip_root=True)) + expected = [] + assert expected == [(r.name, r.is_file) for r in results] + + def test_walk_skip_root_single_file(self): + test_codebase = self.get_test_loc('resource/codebase/et131x.h') + codebase = Codebase(test_codebase) + results = list(codebase.walk(skip_root=True)) + expected = [ + ('et131x.h', True) + ] + assert expected == [(r.name, r.is_file) for r in results] + + def test_walk_filtered_with_skip_root_and_single_file_not_filtered(self): + test_codebase = self.get_test_loc('resource/codebase/et131x.h') + codebase = Codebase(test_codebase) + results = list(codebase.walk_filtered(skip_root=True)) + expected = [ + ('et131x.h', True) + ] + assert expected == [(r.name, r.is_file) for r in results] + + def test_walk_filtered__with_skip_root_and_filtered_single_file(self): + test_codebase = self.get_test_loc('resource/codebase/et131x.h') + codebase = Codebase(test_codebase) + codebase.root.is_filtered = True + results = list(codebase.walk_filtered(skip_root=True)) + expected = [ + ] + assert expected == [(r.name, r.is_file) for r in results] + + def test_walk_skip_root_single_file_with_children(self): + test_codebase = self.get_test_loc('resource/codebase/et131x.h') + codebase = Codebase(test_codebase, strip_root=True) + + c1 = codebase.create_resource('some child', parent=codebase.root, is_file=True) + _c2 = codebase.create_resource('some child2', parent=c1, is_file=False) + results = list(codebase.walk(skip_root=True)) + expected = [ + (u'some child', True), (u'some child2', False) + ] + assert expected == [(r.name, r.is_file) for r in results] + + def test_walk_filtered_with_skip_root_and_single_file_with_children(self): + test_codebase = self.get_test_loc('resource/codebase/et131x.h') + codebase = Codebase(test_codebase, strip_root=True) + + c1 = 
codebase.create_resource('some child', parent=codebase.root, is_file=True) + c2 = codebase.create_resource('some child2', parent=c1, is_file=False) + c2.is_filtered = True + codebase.save_resource(c2) + + results = list(codebase.walk_filtered(skip_root=True)) + expected = [(u'some child', True)] + assert expected == [(r.name, r.is_file) for r in results] + + c1.is_filtered = True + codebase.save_resource(c1) + results = list(codebase.walk_filtered(skip_root=True)) + expected = [] + assert expected == [(r.name, r.is_file) for r in results] + + def test_walk_skip_root_single_dir(self): + test_codebase = self.get_temp_dir('walk') + codebase = Codebase(test_codebase, strip_root=True) + + results = list(codebase.walk(skip_root=True)) + expected = [ + ('walk', False) + ] + assert expected == [(r.name, r.is_file) for r in results] + + def test_create_resource_can_add_child_to_file(self): + test_codebase = self.get_test_loc('resource/codebase/et131x.h') + codebase = Codebase(test_codebase) + codebase.create_resource('some child', codebase.root, is_file=True) + results = list(codebase.walk()) + expected = [('et131x.h', True), (u'some child', True)] + assert expected == [(r.name, r.is_file) for r in results] + + def test_create_resource_can_add_child_to_dir(self): + test_codebase = self.get_temp_dir('resource') + codebase = Codebase(test_codebase) + codebase.create_resource('some child', codebase.root, is_file=False) + results = list(codebase.walk()) + expected = [('resource', False), (u'some child', False)] + assert expected == [(r.name, r.is_file) for r in results] + + def test_get_resource(self): + test_codebase = self.get_temp_dir('resource') + codebase = Codebase(test_codebase) + assert codebase.root is codebase.get_resource(0) + + def test_get_path(self): + import os + from commoncode.fileutils import fsdecode + from commoncode.fileutils import fsencode + from commoncode.system import on_linux + + test_dir = self.get_test_loc('resource/samples') + locations = [] + for 
top, dirs, files in os.walk(test_dir): + for x in dirs: + locations.append(os.path.join(top, x)) + for x in files: + locations.append(os.path.join(top, x)) + transcoder = fsencode if on_linux else fsdecode + locations = [transcoder(p) for p in locations] + root_location = transcoder(test_dir) + + expected_default = [ + u'samples/JGroups', u'samples/zlib', u'samples/arch', + u'samples/README', u'samples/screenshot.png', + u'samples/JGroups/src', u'samples/JGroups/licenses', + u'samples/JGroups/LICENSE', u'samples/JGroups/EULA', + u'samples/JGroups/src/GuardedBy.java', + u'samples/JGroups/src/ImmutableReference.java', + u'samples/JGroups/src/RouterStub.java', + u'samples/JGroups/src/S3_PING.java', + u'samples/JGroups/src/FixedMembershipToken.java', + u'samples/JGroups/src/RouterStubManager.java', + u'samples/JGroups/src/RATE_LIMITER.java', + u'samples/JGroups/licenses/cpl-1.0.txt', + u'samples/JGroups/licenses/bouncycastle.txt', + u'samples/JGroups/licenses/lgpl.txt', + u'samples/JGroups/licenses/apache-2.0.txt', + u'samples/JGroups/licenses/apache-1.1.txt', u'samples/zlib/dotzlib', + u'samples/zlib/iostream2', u'samples/zlib/infback9', + u'samples/zlib/gcc_gvmat64', u'samples/zlib/ada', + u'samples/zlib/deflate.h', u'samples/zlib/zutil.c', + u'samples/zlib/zlib.h', u'samples/zlib/deflate.c', + u'samples/zlib/zutil.h', u'samples/zlib/adler32.c', + u'samples/zlib/dotzlib/AssemblyInfo.cs', + u'samples/zlib/dotzlib/LICENSE_1_0.txt', + u'samples/zlib/dotzlib/readme.txt', + u'samples/zlib/dotzlib/ChecksumImpl.cs', + u'samples/zlib/iostream2/zstream_test.cpp', + u'samples/zlib/iostream2/zstream.h', + u'samples/zlib/infback9/infback9.c', + u'samples/zlib/infback9/infback9.h', + u'samples/zlib/gcc_gvmat64/gvmat64.S', u'samples/zlib/ada/zlib.ads', + u'samples/arch/zlib.tar.gz'] + + default = sorted(get_path(root_location, loc) for loc in locations) + assert sorted(expected_default) == default + + expected_strip_root = [ + u'JGroups', u'zlib', u'arch', u'README', 
u'screenshot.png', + u'JGroups/src', u'JGroups/licenses', u'JGroups/LICENSE', + u'JGroups/EULA', u'JGroups/src/GuardedBy.java', + u'JGroups/src/ImmutableReference.java', + u'JGroups/src/RouterStub.java', u'JGroups/src/S3_PING.java', + u'JGroups/src/FixedMembershipToken.java', + u'JGroups/src/RouterStubManager.java', + u'JGroups/src/RATE_LIMITER.java', u'JGroups/licenses/cpl-1.0.txt', + u'JGroups/licenses/bouncycastle.txt', u'JGroups/licenses/lgpl.txt', + u'JGroups/licenses/apache-2.0.txt', + u'JGroups/licenses/apache-1.1.txt', u'zlib/dotzlib', + u'zlib/iostream2', u'zlib/infback9', u'zlib/gcc_gvmat64', + u'zlib/ada', u'zlib/deflate.h', u'zlib/zutil.c', u'zlib/zlib.h', + u'zlib/deflate.c', u'zlib/zutil.h', u'zlib/adler32.c', + u'zlib/dotzlib/AssemblyInfo.cs', u'zlib/dotzlib/LICENSE_1_0.txt', + u'zlib/dotzlib/readme.txt', u'zlib/dotzlib/ChecksumImpl.cs', + u'zlib/iostream2/zstream_test.cpp', u'zlib/iostream2/zstream.h', + u'zlib/infback9/infback9.c', u'zlib/infback9/infback9.h', + u'zlib/gcc_gvmat64/gvmat64.S', u'zlib/ada/zlib.ads', + u'arch/zlib.tar.gz'] + + skipped = sorted(get_path(root_location, loc, strip_root=True) for loc in locations) + assert sorted(expected_strip_root) == skipped + + expected_full_ends = sorted(expected_default) + full = sorted(get_path(root_location, loc, full_root=True) for loc in locations) + for full_loc, ending in zip(full, expected_full_ends): + assert full_loc.endswith((ending)) + + full_skipped = sorted(get_path(root_location, loc, full_root=True, strip_root=True) for loc in locations) + assert full == full_skipped + + +class TestCodebaseCache(FileBasedTesting): + test_data_dir = join(dirname(__file__), 'data') + + def test_codebase_cache_default(self): + test_codebase = self.get_test_loc('resource/cache2') + codebase = Codebase(test_codebase) + assert codebase.temp_dir + assert codebase.cache_dir + codebase.cache_dir + root = codebase.root + + cp = codebase._get_resource_cache_location(root.rid, create=False) + assert not 
exists(cp) + cp = codebase._get_resource_cache_location(root.rid, create=True) + assert not exists(cp) + assert exists(parent_directory(cp)) + + child = codebase.create_resource('child', root, is_file=True) + child.size = 12 + codebase.save_resource(child) + child_2 = codebase.get_resource(child.rid) + assert child == child_2 + + def test_codebase_cache_all_in_memory(self): + test_codebase = self.get_test_loc('resource/cache2') + codebase = Codebase(test_codebase, max_in_memory=0) + for rid in codebase.resource_ids: + if rid == 0: + assert codebase.root == codebase.get_resource(rid) + assert codebase._exists_in_memory(rid) + assert not codebase._exists_on_disk(rid) + else: + assert codebase._exists_in_memory(rid) + assert not codebase._exists_on_disk(rid) + + assert len(codebase.resource_ids) == len(list(codebase.walk())) + + def test_codebase_cache_all_on_disk(self): + test_codebase = self.get_test_loc('resource/cache2') + codebase = Codebase(test_codebase, max_in_memory=-1) + for rid in codebase.resource_ids: + if rid == 0: + assert codebase.root == codebase.get_resource(rid) + assert codebase._exists_in_memory(rid) + assert not codebase._exists_on_disk(rid) + else: + assert not codebase._exists_in_memory(rid) + assert codebase._exists_on_disk(rid) + + assert len(codebase.resource_ids) == len(list(codebase.walk())) + + def test_codebase_cache_mixed_two_in_memory(self): + test_codebase = self.get_test_loc('resource/cache2') + codebase = Codebase(test_codebase, max_in_memory=2) + for rid in codebase.resource_ids: + if rid == 0: + assert codebase.root == codebase.get_resource(rid) + assert codebase._exists_in_memory(rid) + assert not codebase._exists_on_disk(rid) + elif rid < 2: + assert codebase._exists_in_memory(rid) + assert not codebase._exists_on_disk(rid) + else: + assert not codebase._exists_in_memory(rid) + assert codebase._exists_on_disk(rid) + + assert len(codebase.resource_ids) == len(list(codebase.walk())) \ No newline at end of file diff --git 
a/tests/scancode/test_scan_help_groups.py b/tests/scancode/test_scan_help_groups.py deleted file mode 100644 index 6485b9652b9..00000000000 --- a/tests/scancode/test_scan_help_groups.py +++ /dev/null @@ -1,78 +0,0 @@ -# -# Copyright (c) 2017 nexB Inc. and others. All rights reserved. -# http://nexb.com and https://github.com/nexB/scancode-toolkit/ -# The ScanCode software is licensed under the Apache License version 2.0. -# Data generated with ScanCode require an acknowledgment. -# ScanCode is a trademark of nexB Inc. -# -# You may not use this software except in compliance with the License. -# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -# -# When you publish or redistribute any data created with ScanCode or any ScanCode -# derivative work, you must accompany this data with the following acknowledgment: -# -# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES -# OR CONDITIONS OF ANY KIND, either express or implied. No content created from -# ScanCode should be considered or used as legal advice. Consult an Attorney -# for any legal advice. -# ScanCode is a free software code scanning tool from nexB Inc. and others. -# Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
- -from __future__ import absolute_import -from __future__ import print_function -from __future__ import unicode_literals - -from os import path - -import click -click.disable_unicode_literals_warning = True -from click.testing import CliRunner - -from commoncode.testcase import FileBasedTesting -from scancode.cli import ScanCommand -from scancode.cli import ScanOption -from scancode.cli_test_utils import run_scan_click - - -class TestHelpGroups(FileBasedTesting): - - test_data_dir = path.join(path.dirname(__file__), 'data') - - def test_scan_help_without_custom_class(self): - @click.command(name='scan', cls=ScanCommand) - @click.option('--opt', is_flag=True, help='Help text for option') - def scan(opt): - pass - - runner = CliRunner() - result = runner.invoke(scan, ['--help']) - assert 'misc:\n --opt Help text for option\n' in result.output - - def test_scan_help_with_custom_class(self): - @click.command(name='scan', cls=ScanCommand) - @click.option('--opt', is_flag=True, help='Help text for option', cls=ScanOption) - def scan(opt): - pass - - runner = CliRunner() - result = runner.invoke(scan, ['--help']) - assert 'misc:\n --opt Help text for option\n' in result.output - - def test_scan_help_with_group(self): - @click.command(name='scan', cls=ScanCommand) - @click.option('--opt', is_flag=True, help='Help text for option', group='core', cls=ScanOption) - def scan(opt): - pass - - runner = CliRunner() - result = runner.invoke(scan, ['--help']) - assert 'core:\n --opt Help text for option\n' in result.output - - def test_scan_cli_help(self): - expected_file = self.get_test_loc('help/help.txt') - result = run_scan_click(['--help']) - assert open(expected_file).read() == result.output diff --git a/tests/scancode/test_scan_utils.py b/tests/scancode/test_scan_utils.py index 2ae75ec962f..4fc08206587 100644 --- a/tests/scancode/test_scan_utils.py +++ b/tests/scancode/test_scan_utils.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. 
+# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -23,25 +23,29 @@ # Visit https://github.com/nexB/scancode-toolkit/ for support and download. from __future__ import absolute_import -from __future__ import print_function from __future__ import division +from __future__ import print_function from __future__ import unicode_literals import os import click -from click.testing import CliRunner -from click.termui import progressbar +click.disable_unicode_literals_warning = True -from commoncode.testcase import FileBasedTesting +from click.termui import progressbar +from click.testing import CliRunner -from scancode import utils +from commoncode.testcase import FileDrivenTesting +from scancode import CommandLineOption +from scancode.cli import ScanCommand +from scancode.utils import fixed_width_file_name -class TestUtils(FileBasedTesting): +class TestUtils(FileDrivenTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') def test_click_progressbar_with_labels(self): + # test related to https://github.com/mitsuhiko/click/issues/406 @click.command() def mycli(): @@ -61,13 +65,105 @@ def mycli(): ''' assert expected == result.output - def test_get_relative_path(self): - # plain file without parent - assert 'file' == utils.get_relative_path(path='/file', len_base_path=5, base_is_dir=False) - # plain file in a deep path - assert 'that' == utils.get_relative_path(path='/this/file/that', len_base_path=5, base_is_dir=False) - # plain path with directories - assert 'file/that' == utils.get_relative_path(path='/this/file/that', len_base_path=5, base_is_dir=True) - assert 'that' == utils.get_relative_path(path='/this/file/that', len_base_path=10, base_is_dir=True) - assert 'this/file/that' == utils.get_relative_path(path='/foo//this/file/that', 
len_base_path=4, base_is_dir=True) +class TestFixedWidthFilename(FileDrivenTesting): + + def test_fixed_width_file_name_with_file_name_larger_than_max_length_is_shortened(self): + test = fixed_width_file_name('0123456789012345678901234.c', 25) + expected = '0123456789...5678901234.c' + assert expected == test + + def test_fixed_width_file_name_with_file_name_smaller_than_max_length_is_not_shortened(self): + file_name = '0123456789012345678901234.c' + test = fixed_width_file_name(file_name, max_length=50) + assert file_name == test + + def test_fixed_width_file_name_with_file_name_at_max_length_is_not_shortened(self): + test = fixed_width_file_name('01234567890123456789012.c', 25) + expected = '01234567890123456789012.c' + assert expected == test + + def test_fixed_width_file_name_with_file_name_smaller_than_max_length_not_shortened(self): + test = fixed_width_file_name('0123456789012345678901.c', 25) + expected = '0123456789012345678901.c' + assert expected == test + + def test_fixed_width_file_name_with_none_filename_return_empty_string(self): + test = fixed_width_file_name(None, 25) + expected = '' + assert expected == test + + def test_fixed_width_file_name_without_extension(self): + test = fixed_width_file_name('012345678901234567890123456', 25) + expected = '01234567890...67890123456' + assert expected == test + + def test_fixed_width_file_name_with_posix_path_without_shortening(self): + test = fixed_width_file_name('C/Documents_and_Settings/Boki/Desktop/head/patches/drupal6/drupal.js', 25) + expected = 'drupal.js' + assert expected == test + + def test_fixed_width_file_name_with_posix_path_with_shortening(self): + test = fixed_width_file_name('C/Documents_and_Settings/Boki/Desktop/head/patches/drupal6/012345678901234567890123.c', 25) + expected = '0123456789...4567890123.c' + assert expected == test + + def test_fixed_width_file_name_with_win_path_without_shortening(self): + test = 
fixed_width_file_name('C\\:Documents_and_Settings\\Boki\\Desktop\\head\\patches\\drupal6\\drupal.js', 25) + expected = 'drupal.js' + assert expected == test + + def test_fixed_width_file_name_with_win_path_with_shortening(self): + test = fixed_width_file_name('C\\:Documents_and_Settings\\Boki\\Desktop\\head\\patches\\drupal6\\012345678901234567890123.c', 25) + expected = '0123456789...4567890123.c' + assert expected == test + + def test_fixed_width_file_name_with_very_small_file_name_and_long_extension(self): + test = fixed_width_file_name('abc.abcdef', 5) + # FIXME: what is expected is TBD + expected = '' + assert expected == test + + +class TestHelpGroups(FileDrivenTesting): + + test_data_dir = os.path.join(os.path.dirname(__file__), 'data') + + def test_scan_help_group_and_sort_order_without_custom_class(self): + + @click.command(name='scan', cls=ScanCommand) + @click.option('--opt', is_flag=True, help='Help text for option') + def scan(opt): + pass + + runner = CliRunner() + result = runner.invoke(scan, ['--help']) + from scancode import MISC_GROUP + assert MISC_GROUP in result.output + assert '--opt Help text for option' in result.output + + def test_scan_help_group_and_sort_order_with_custom_class(self): + + @click.command(name='scan', cls=ScanCommand) + @click.option('--opt', is_flag=True, sort_order=10, + help='Help text for option', cls=CommandLineOption) + def scan(opt): + pass + + runner = CliRunner() + result = runner.invoke(scan, ['--help']) + from scancode import MISC_GROUP + assert MISC_GROUP + ':\n --opt Help text for option\n' in result.output + + def test_scan_help_with_group(self): + from scancode import CORE_GROUP + + @click.command(name='scan', cls=ScanCommand) + @click.option('--opt', is_flag=True, help='Help text for option', + help_group=CORE_GROUP, cls=CommandLineOption) + def scan(opt): + pass + + runner = CliRunner() + result = runner.invoke(scan, ['--help']) + assert CORE_GROUP + ':\n --opt Help text for option\n' in result.output diff 
--git a/tests/textcode/test_analysis.py b/tests/textcode/test_analysis.py index 85591c27379..d1f3d036c3d 100644 --- a/tests/textcode/test_analysis.py +++ b/tests/textcode/test_analysis.py @@ -33,7 +33,7 @@ from textcode.analysis import unicode_text_lines from textcode.analysis import text_lines -from commoncode.fileutils import file_iter +from commoncode.fileutils import resource_iter class TestAnalysis(FileBasedTesting): @@ -68,13 +68,13 @@ def test_archives_do_not_yield_text_lines(self): def test_some_media_do_yield_text_lines(self): test_dir = self.get_test_loc('media_with_text') - for test_file in file_iter(test_dir): + for test_file in resource_iter(test_dir, with_dirs=False): result = list(text_lines(test_file)) assert result, 'Should return text lines:' + test_file assert any('nexb' in l for l in result) def test_some_media_do_not_yield_text_lines(self): test_dir = self.get_test_loc('media_without_text') - for test_file in file_iter(test_dir): + for test_file in resource_iter(test_dir, with_dirs=False): result = list(text_lines(test_file)) assert [] == result, 'Should not return text lines:' + test_file diff --git a/tests/textcode/test_pdf.py b/tests/textcode/test_pdf.py index d06895dcf01..2d22392a3af 100644 --- a/tests/textcode/test_pdf.py +++ b/tests/textcode/test_pdf.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015 nexB Inc. and others. All rights reserved. +# Copyright (c) 2018 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. @@ -22,13 +22,15 @@ # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
-from __future__ import absolute_import, print_function +from __future__ import absolute_import +from __future__ import print_function +import os from commoncode.testcase import FileBasedTesting - from textcode import pdf -import os + + class TestPdf(FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') diff --git a/tests/typecode/data/contenttype/code/python/__init__.py b/tests/typecode/data/contenttype/code/python/__init__.py index 2e2033b3c05..cc085fdc029 100644 --- a/tests/typecode/data/contenttype/code/python/__init__.py +++ b/tests/typecode/data/contenttype/code/python/__init__.py @@ -1,4 +1,5 @@ # this is a namespace package +# flake8: noqa try: import pkg_resources pkg_resources.declare_namespace(__name__) diff --git a/tests/typecode/data/contenttype/code/python/contenttype.py b/tests/typecode/data/contenttype/code/python/contenttype.py index ad4e0aed0c7..fe48a8c5a58 100644 --- a/tests/typecode/data/contenttype/code/python/contenttype.py +++ b/tests/typecode/data/contenttype/code/python/contenttype.py @@ -1,4 +1,5 @@ # +# flake8: noqa import unittest from os.path import join diff --git a/tests/typecode/data/contenttype/code/python/extract.py b/tests/typecode/data/contenttype/code/python/extract.py index d2b9e48784c..9a488abb8b1 100644 --- a/tests/typecode/data/contenttype/code/python/extract.py +++ b/tests/typecode/data/contenttype/code/python/extract.py @@ -1,4 +1,5 @@ +# flake8: noqa import os from os.path import join, dirname, basename, exists diff --git a/thirdparty/prod/attrs-16.3.0-py2.py3-none-any.whl b/thirdparty/prod/attrs-16.3.0-py2.py3-none-any.whl deleted file mode 100644 index 6124236be40..00000000000 Binary files a/thirdparty/prod/attrs-16.3.0-py2.py3-none-any.whl and /dev/null differ diff --git a/thirdparty/prod/attrs-17.4.0-py2.py3-none-any.whl b/thirdparty/prod/attrs-17.4.0-py2.py3-none-any.whl new file mode 100644 index 00000000000..54a2652c9db Binary files /dev/null and 
b/thirdparty/prod/attrs-17.4.0-py2.py3-none-any.whl differ diff --git a/thirdparty/prod/attrs.ABOUT b/thirdparty/prod/attrs.ABOUT index dca5af0e243..b2cdd3b73a3 100644 --- a/thirdparty/prod/attrs.ABOUT +++ b/thirdparty/prod/attrs.ABOUT @@ -1,8 +1,8 @@ -about_resource: attrs-16.3.0-py2.py3-none-any.whl -version: 16.3.0 +about_resource: attrs-17.4.0-py2.py3-none-any.whl +version: 17.4.0 name: attrs home_url: http://attrs.org -download_url: https://pypi.python.org/packages/bb/6c/730710c765ab6d4493f460196ab003671d27b38568412a780fc67532b47c/attrs-16.3.0-py2.py3-none-any.whl#md5=0d188abbbde8c83253cb11e8df890d30 +download_url: https://pypi.python.org/packages/b5/60/4e178c1e790fd60f1229a9b3cb2f8bc2f4cc6ff2c8838054c142c70b5adc/attrs-17.4.0-py2.py3-none-any.whl#md5=5835a573b3f0316e1602dac3fd9c1daf license_text_file: attrs.LICENSE dje_license: mit -copyright: Copyright (c) 2015 Hynek Schlawack \ No newline at end of file +copyright: Copyright (c) Hynek Schlawack \ No newline at end of file diff --git a/thirdparty/prod/backports.functools_lru_cache-1.4-py2.py3-none-any.whl b/thirdparty/prod/backports.functools_lru_cache-1.4-py2.py3-none-any.whl new file mode 100644 index 00000000000..12e6f65d056 Binary files /dev/null and b/thirdparty/prod/backports.functools_lru_cache-1.4-py2.py3-none-any.whl differ diff --git a/thirdparty/prod/backports.functools_lru_cache.ABOUT b/thirdparty/prod/backports.functools_lru_cache.ABOUT new file mode 100644 index 00000000000..83a5793c0a3 --- /dev/null +++ b/thirdparty/prod/backports.functools_lru_cache.ABOUT @@ -0,0 +1,11 @@ +about_resource: backports.functools_lru_cache-1.4-py2.py3-none-any.whl +version: 1.4 +name: backports.functools_lru_cache +home_url: https://github.com/jaraco/backports.functools_lru_cache +download_url: https://pypi.python.org/packages/02/0b/91573feec859f794689fa46a62240526f4f1db829271ac2d98cf04a8efa2/backports.functools_lru_cache-1.4-py2.py3-none-any.whl#md5=6496f96517b2b0d22175e9f193664bc3 +license_text: + - 
backports.functools_lru_cache.LICENSE +dje_license: mit +owner: Jason R. Coombs + +copyright: Copyright Jason R. Coombs and Raymond Hettinger diff --git a/thirdparty/prod/backports.functools_lru_cache.LICENSE b/thirdparty/prod/backports.functools_lru_cache.LICENSE new file mode 100644 index 00000000000..625704c829f --- /dev/null +++ b/thirdparty/prod/backports.functools_lru_cache.LICENSE @@ -0,0 +1,18 @@ +Copyright Jason R. Coombs and Raymond Hettinger + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/thirdparty/prod/typing-3.6.2-py2-none-any.whl b/thirdparty/prod/typing-3.6.2-py2-none-any.whl new file mode 100644 index 00000000000..fa13a535f74 Binary files /dev/null and b/thirdparty/prod/typing-3.6.2-py2-none-any.whl differ diff --git a/thirdparty/prod/typing.ABOUT b/thirdparty/prod/typing.ABOUT new file mode 100644 index 00000000000..7d10b8cbe39 --- /dev/null +++ b/thirdparty/prod/typing.ABOUT @@ -0,0 +1,9 @@ +about_resource: typing-3.6.2-py2-none-any.whl +version: 3.6.2 +name: typing +home_url: https://docs.python.org/3/library/typing.html +download_url: https://pypi.python.org/packages/1c/15/aeaae0c01afa895ad774cfd408eca17818fd753817d433f55385d8e36364/typing-3.6.2-py2-none-any.whl#md5=c587eaddadb5294a00ca9616336c85ee +license_text_file: typing.LICENSE +dje_license: python +copyright: Copyright (c) Guido van Rossum, Jukka Lehtosalo, Łukasz Langa, Ivan Levkivskyi +onwer: Guido van Rossum, Jukka Lehtosalo, Łukasz Langa, Ivan Levkivskyi \ No newline at end of file diff --git a/thirdparty/prod/typing.LICENSE b/thirdparty/prod/typing.LICENSE new file mode 100644 index 00000000000..c073a7b1fd7 --- /dev/null +++ b/thirdparty/prod/typing.LICENSE @@ -0,0 +1,254 @@ +A. HISTORY OF THE SOFTWARE +========================== + +Python was created in the early 1990s by Guido van Rossum at Stichting +Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands +as a successor of a language called ABC. Guido remains Python's +principal author, although it includes many contributions from others. + +In 1995, Guido continued his work on Python at the Corporation for +National Research Initiatives (CNRI, see http://www.cnri.reston.va.us) +in Reston, Virginia where he released several versions of the +software. + +In May 2000, Guido and the Python core development team moved to +BeOpen.com to form the BeOpen PythonLabs team. In October of the same +year, the PythonLabs team moved to Digital Creations (now Zope +Corporation, see http://www.zope.com). 
In 2001, the Python Software +Foundation (PSF, see http://www.python.org/psf/) was formed, a +non-profit organization created specifically to own Python-related +Intellectual Property. Zope Corporation is a sponsoring member of +the PSF. + +All Python releases are Open Source (see http://www.opensource.org for +the Open Source Definition). Historically, most, but not all, Python +releases have also been GPL-compatible; the table below summarizes +the various releases. + + Release Derived Year Owner GPL- + from compatible? (1) + + 0.9.0 thru 1.2 1991-1995 CWI yes + 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes + 1.6 1.5.2 2000 CNRI no + 2.0 1.6 2000 BeOpen.com no + 1.6.1 1.6 2001 CNRI yes (2) + 2.1 2.0+1.6.1 2001 PSF no + 2.0.1 2.0+1.6.1 2001 PSF yes + 2.1.1 2.1+2.0.1 2001 PSF yes + 2.1.2 2.1.1 2002 PSF yes + 2.1.3 2.1.2 2002 PSF yes + 2.2 and above 2.1.1 2001-now PSF yes + +Footnotes: + +(1) GPL-compatible doesn't mean that we're distributing Python under + the GPL. All Python licenses, unlike the GPL, let you distribute + a modified version without making your changes open source. The + GPL-compatible licenses make it possible to combine Python with + other software that is released under the GPL; the others don't. + +(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, + because its license has a choice of law clause. According to + CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 + is "not incompatible" with the GPL. + +Thanks to the many outside volunteers who have worked under Guido's +direction to make these releases possible. + + +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON +=============================================================== + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. 
This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are +retained in Python alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. 
Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 +------------------------------------------- + +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an +office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the +Individual or Organization ("Licensee") accessing and otherwise using +this software in source or binary form and its associated +documentation ("the Software"). + +2. Subject to the terms and conditions of this BeOpen Python License +Agreement, BeOpen hereby grants Licensee a non-exclusive, +royalty-free, world-wide license to reproduce, analyze, test, perform +and/or display publicly, prepare derivative works, distribute, and +otherwise use the Software alone or in any derivative version, +provided, however, that the BeOpen Python License is retained in the +Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" +basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +4. 
BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE +SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY +DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +5. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all +respects by the law of the State of California, excluding conflict of +law provisions. Nothing in this License Agreement shall be deemed to +create any relationship of agency, partnership, or joint venture +between BeOpen and Licensee. This License Agreement does not grant +permission to use BeOpen trademarks or trade names in a trademark +sense to endorse or promote products or services of Licensee, or any +third party. As an exception, the "BeOpen Python" logos available at +http://www.pythonlabs.com/logos.html may be used according to the +permissions granted on that web page. + +7. By copying, installing or otherwise using the software, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 +--------------------------------------- + +1. This LICENSE AGREEMENT is between the Corporation for National +Research Initiatives, having an office at 1895 Preston White Drive, +Reston, VA 20191 ("CNRI"), and the Individual or Organization +("Licensee") accessing and otherwise using Python 1.6.1 software in +source or binary form and its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, CNRI +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python 1.6.1 +alone or in any derivative version, provided, however, that CNRI's +License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) +1995-2001 Corporation for National Research Initiatives; All Rights +Reserved" are retained in Python 1.6.1 alone or in any derivative +version prepared by Licensee. Alternately, in lieu of CNRI's License +Agreement, Licensee may substitute the following text (omitting the +quotes): "Python 1.6.1 is made available subject to the terms and +conditions in CNRI's License Agreement. This Agreement together with +Python 1.6.1 may be located on the Internet using the following +unique, persistent identifier (known as a handle): 1895.22/1013. This +Agreement may also be obtained from a proxy server on the Internet +using the following URL: http://hdl.handle.net/1895.22/1013". + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python 1.6.1 or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python 1.6.1. + +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" +basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. 
CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. This License Agreement shall be governed by the federal +intellectual property law of the United States, including without +limitation the federal copyright law, and, to the extent such +U.S. federal law does not apply, by the law of the Commonwealth of +Virginia, excluding Virginia's conflict of law provisions. +Notwithstanding the foregoing, with regard to derivative works based +on Python 1.6.1 that incorporate non-separable material that was +previously distributed under the GNU General Public License (GPL), the +law of the Commonwealth of Virginia shall govern this License +Agreement only as to issues arising under or with respect to +Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this +License Agreement shall be deemed to create any relationship of +agency, partnership, or joint venture between CNRI and Licensee. This +License Agreement does not grant permission to use CNRI trademarks or +trade name in a trademark sense to endorse or promote products or +services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, +installing or otherwise using Python 1.6.1, Licensee agrees to be +bound by the terms and conditions of this License Agreement. + + ACCEPT + + +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +-------------------------------------------------- + +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, +The Netherlands. All rights reserved. 
+ +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of Stichting Mathematisch +Centrum or CWI not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. + +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. \ No newline at end of file