From fb5ec2ff9b9e7cfd9ec80f26e3ad3c688316c731 Mon Sep 17 00:00:00 2001 From: PagoNxt-Trade Date: Thu, 17 Nov 2022 16:16:52 +0100 Subject: [PATCH 01/12] feat(core): scoring data customizable added with config file and CLI parameter scoring threshold not reached returns exit code 1, other returns 0 scoring data tests and readme documentation added --- docs/guides/2-cli.md | 87 ++++++++++ .../cli/src/commands/__tests__/lint.test.ts | 20 ++- packages/cli/src/commands/lint.ts | 47 +++++- packages/cli/src/formatters/json.ts | 33 +++- packages/cli/src/formatters/pretty.ts | 50 +++++- packages/cli/src/formatters/stylish.ts | 51 +++++- packages/cli/src/formatters/types.ts | 20 +++ .../formatters/utils/getCountsBySeverity.ts | 38 +++++ .../cli/src/formatters/utils/getScoring.ts | 68 ++++++++ packages/cli/src/formatters/utils/index.ts | 3 + .../cli/src/formatters/utils/uniqueErrors.ts | 16 ++ .../__fixtures__/scoring-config.json | 32 ++++ .../cli/src/services/__tests__/linter.test.ts | 17 ++ .../cli/src/services/__tests__/output.test.ts | 46 ++++++ packages/cli/src/services/config.ts | 1 + packages/core/src/ruleset/index.ts | 2 +- packages/core/src/ruleset/utils/severity.ts | 2 +- ...sults-default-format-scoring-json.scenario | 148 ++++++++++++++++++ .../formats/results-default-scoring.scenario | 95 +++++++++++ .../results-format-stylish-scoring.scenario | 96 ++++++++++++ .../formats/too-few-outputs.scenario | 1 + .../formats/too-many-outputs.scenario | 1 + .../formats/unmatched-outputs.scenario | 1 + .../scenarios/help-no-document.scenario | 1 + .../overrides/aliases-scoring.scenario | 133 ++++++++++++++++ .../fail-on-error-no-error-scoring.scenario | 64 ++++++++ .../severity/fail-on-error-scoring.scenario | 78 +++++++++ .../scenarios/strict-options.scenario | 1 + .../valid-no-errors.oas2-scoring.scenario | 57 +++++++ .../scenarios/valid-no-errors.oas2.scenario | 2 +- 30 files changed, 1191 insertions(+), 20 deletions(-) create mode 100644 packages/cli/src/formatters/utils/getCountsBySeverity.ts create mode 100644 packages/cli/src/formatters/utils/getScoring.ts create mode 100644 packages/cli/src/formatters/utils/uniqueErrors.ts create mode 100644 packages/cli/src/services/__tests__/__fixtures__/scoring-config.json create mode 100644 test-harness/scenarios/formats/results-default-format-scoring-json.scenario create mode 100644 test-harness/scenarios/formats/results-default-scoring.scenario create mode 100644 test-harness/scenarios/formats/results-format-stylish-scoring.scenario create mode 100644 test-harness/scenarios/overrides/aliases-scoring.scenario create mode 100644 test-harness/scenarios/severity/fail-on-error-no-error-scoring.scenario create mode 100644 test-harness/scenarios/severity/fail-on-error-scoring.scenario create mode 100644 test-harness/scenarios/valid-no-errors.oas2-scoring.scenario diff --git a/docs/guides/2-cli.md b/docs/guides/2-cli.md index e4707dd3e..a02c0be96 100644 --- a/docs/guides/2-cli.md +++ b/docs/guides/2-cli.md @@ -39,6 +39,7 @@ Other options include: --stdin-filepath path to a file to pretend that stdin comes from [string] --resolver path to custom json-ref-resolver instance [string] -r, --ruleset path/URL to a ruleset file [string] + -s, --scoring-config path/URL to a scoring config file [string] -F, --fail-severity results of this level or above will trigger a failure exit code [string] [choices: "error", "warn", "info", "hint"] [default: "error"] -D, --display-only-failures only output results equal to or greater than --fail-severity [boolean] [default: false] @@ 
-60,6 +61,92 @@ Here you can build a [custom ruleset](../getting-started/3-rulesets.md), or exte
 - [OpenAPI ruleset](../reference/openapi-rules.md)
 - [AsyncAPI ruleset](../reference/asyncapi-rules.md)
 
+## Scoring the API
+
+Scoring an API definition is a way to understand, at a high level, how compliant the definition is with the rulesets provided. This helps teams understand the quality of their API definitions.
+
+The scoring is reported as two metrics:
+
+- A numeric score, obtained by subtracting from 100% for every error or warning found
+- A letter grade, which groups numeric scores into letters from A (best) downwards
+
+It also introduces a quality gate: an API that scores below the configured threshold fails the pipeline.
+
+Scoring is enabled with a new parameter, --scoring-config or -s, which points to a scoring configuration file where you define how errors and warnings affect the score.
+
+Usage:
+
+```bash
+spectral lint ./reference/**/*.oas*.{json,yml,yaml} --ruleset mycustomruleset.js --scoring-config ./scoringFile.json
+```
+
+or
+
+```bash
+spectral lint ./reference/**/*.oas*.{json,yml,yaml} -r mycustomruleset.js -s ./scoringFile.json
+```
+
+Here's an example of a scoring config file:
+
+```json
+  {
+    "scoringSubtract":
+    {
+      "error":
+      {
+        "1": 55,
+        "2": 65,
+        "3": 75,
+        "6": 85,
+        "10": 95
+      },
+      "warn":
+      {
+        "1": 3,
+        "2": 7,
+        "3": 10,
+        "6": 15,
+        "10": 18
+      }
+    },
+    "scoringLetter":
+    {
+      "A": 75,
+      "B": 65,
+      "C": 55,
+      "D": 45,
+      "E": 0
+    },
+    "threshold": 50,
+    "warningsSubtract": true,
+    "uniqueErrors": false
+  }
+```
+
+Where:
+
+- scoringSubtract: For each result level, an object mapping a number of results to the percentage subtracted from the score once at least that many results of that severity are found
+- scoringLetter: An object mapping each scoring letter to the percentage the score must exceed to be awarded that letter
+- threshold: The minimum percentage the checked file must reach to be considered valid
+- warningsSubtract: A boolean controlling whether every result level accumulates into the subtraction, or counting stops at the most critical result level
+- uniqueErrors: A boolean controlling whether only unique errors are counted, or all of them
+
+Example:
+
+  With the previous scoring config file, if we have:
+
+  1 error, the score is 45% and D
+  2 errors, the score is 35% and E
+  3 errors, the score is 25% and E
+  4 errors, the score is 25% and E
+  and so on
+
+Output:
+
+  Below the output log you can see the scoring, for example:
+
+  ✖ SCORING: A (93%)
+
 ## Error Results
 
 Spectral has a few different error severities: `error`, `warn`, `info`, and `hint`, and they're in order from highest to lowest. By default, all results are shown regardless of severity, but since v5.0, only the presence of errors causes a failure status code of 1. Seeing results and getting a failure code for it are now two different things.
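To make the subtraction and letter rules documented above concrete, the following minimal TypeScript sketch derives a score, letter, and pass/fail flag from result counts and a config shaped like the example. It is an illustration of the documented behaviour, not the helpers added by this patch; the type and function names are made up for the example, and boundary cases (a score landing exactly on a letter boundary) may differ from the real implementation.

```ts
// Illustrative sketch of the documented scoring rules; not the shipped implementation.
type SeverityName = 'error' | 'warn' | 'info' | 'hint';

interface ScoringConfigSketch {
  scoringSubtract: Partial<Record<SeverityName, Record<string, number>>>;
  scoringLetter: Record<string, number>; // e.g. { A: 75, B: 65, C: 55, D: 45, E: 0 }
  threshold: number;
  warningsSubtract: boolean;
}

function computeScore(
  counts: Partial<Record<SeverityName, number>>,
  config: ScoringConfigSketch,
): { score: number; letter: string; passed: boolean } {
  let score = 100;
  for (const severity of ['error', 'warn', 'info', 'hint'] as const) {
    const table = config.scoringSubtract[severity];
    const count = counts[severity] ?? 0;
    if (table === undefined || count === 0) continue;
    // When warningsSubtract is false, stop once a more critical severity has already lowered the score.
    if (score < 100 && !config.warningsSubtract) break;
    // Use the subtraction of the largest bracket the count reaches.
    let subtract = 0;
    for (const bracket of Object.keys(table).map(Number).sort((a, b) => a - b)) {
      if (count >= bracket) subtract = table[String(bracket)];
    }
    score -= subtract;
  }
  // Award the letter with the highest boundary the score still exceeds.
  const letters = Object.entries(config.scoringLetter).sort((a, b) => a[1] - b[1]);
  let letter = letters.length > 0 ? letters[0][0] : '';
  for (const [name, boundary] of letters) {
    if (score > boundary) letter = name;
  }
  return { score, letter, passed: score >= config.threshold };
}

// With the example config above loaded into a hypothetical `exampleConfig`,
// 2 warnings give 100 - 7 = 93 and the letter A:
// computeScore({ warn: 2 }, exampleConfig) -> { score: 93, letter: 'A', passed: true }
```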
diff --git a/packages/cli/src/commands/__tests__/lint.test.ts b/packages/cli/src/commands/__tests__/lint.test.ts index eb98e7050..890e8ea93 100644 --- a/packages/cli/src/commands/__tests__/lint.test.ts +++ b/packages/cli/src/commands/__tests__/lint.test.ts @@ -146,6 +146,22 @@ describe('lint', () => { ); }); + it('calls lint with document, ruleset and scoring config file', async () => { + const doc = './__fixtures__/empty-oas2-document.json'; + const ruleset = 'custom-ruleset.json'; + const configFile = 'scoring-config.json'; + await run(`lint -r ${ruleset} -s ${configFile} ${doc}`); + expect(lint).toBeCalledWith([doc], { + encoding: 'utf8', + format: ['stylish'], + output: { stylish: '' }, + ruleset: 'custom-ruleset.json', + stdinFilepath: undefined, + ignoreUnknownFormat: false, + failOnUnmatchedGlobs: false, + }); + }); + it.each(['json', 'stylish'])('calls formatOutput with %s format', async format => { await run(`lint -f ${format} ./__fixtures__/empty-oas2-document.json`); expect(formatOutput).toBeCalledWith(results, format, { failSeverity: DiagnosticSeverity.Error }); @@ -244,13 +260,13 @@ describe('lint', () => { expect(process.stderr.write).nthCalledWith(2, `Error #1: ${chalk.red('some unhandled exception')}\n`); expect(process.stderr.write).nthCalledWith( 3, - expect.stringContaining(`packages/cli/src/commands/__tests__/lint.test.ts:236`), + expect.stringContaining(`packages/cli/src/commands/__tests__/lint.test.ts:252`), ); expect(process.stderr.write).nthCalledWith(4, `Error #2: ${chalk.red('another one')}\n`); expect(process.stderr.write).nthCalledWith( 5, - expect.stringContaining(`packages/cli/src/commands/__tests__/lint.test.ts:237`), + expect.stringContaining(`packages/cli/src/commands/__tests__/lint.test.ts:253`), ); expect(process.stderr.write).nthCalledWith(6, `Error #3: ${chalk.red('original exception')}\n`); diff --git a/packages/cli/src/commands/lint.ts b/packages/cli/src/commands/lint.ts index 7f4521960..d98f107da 100644 --- a/packages/cli/src/commands/lint.ts +++ b/packages/cli/src/commands/lint.ts @@ -14,6 +14,14 @@ import { formatOutput, writeOutput } from '../services/output'; import { FailSeverity, ILintConfig, OutputFormat } from '../services/config'; import { CLIError } from '../errors'; +import { ScoringConfig } from './../formatters/types'; +import { + getScoringConfig, + getScoringLevel, + groupBySource, + getCountsBySeverity, + uniqueErrors, +} from '../formatters//utils'; const formatOptions = Object.values(OutputFormat); @@ -127,6 +135,11 @@ const lintCommand: CommandModule = { description: 'path/URL to a ruleset file', type: 'string', }, + 'scoring-config': { + alias: 's', + description: 'path/URL to a scoring config file', + type: 'string', + }, 'fail-severity': { alias: 'F', description: 'results of this level or above will trigger a failure exit code', @@ -168,6 +181,7 @@ const lintCommand: CommandModule = { failSeverity, displayOnlyFailures, ruleset, + scoringConfig, stdinFilepath, format, output, @@ -197,20 +211,30 @@ const lintCommand: CommandModule = { results = filterResultsBySeverity(results, failSeverity); } + const scoringConfigData = getScoringConfig(scoringConfig); + await Promise.all( format.map(f => { - const formattedOutput = formatOutput(results, f, { failSeverity: getDiagnosticSeverity(failSeverity) }); + const formattedOutput = formatOutput(results, f, { + failSeverity: getDiagnosticSeverity(failSeverity), + scoringConfig: scoringConfigData, + }); return writeOutput(formattedOutput, output?.[f] ?? 
''); }), ); if (results.length > 0) { - process.exit(severeEnoughToFail(results, failSeverity) ? 1 : 0); + process.exit( + scoringThresholdNotEnough(results, scoringConfigData) ? 1 : severeEnoughToFail(results, failSeverity) ? 1 : 0, + ); } else if (config.quiet !== true) { const isErrorSeverity = getDiagnosticSeverity(failSeverity) === DiagnosticSeverity.Error; process.stdout.write( `No results with a severity of '${failSeverity}' ${isErrorSeverity ? '' : 'or higher '}found!\n`, ); + if (scoringConfig !== void 0) { + process.stdout.write(`SCORING: (100%)\nPASSED!`); + } } } catch (ex) { fail(isError(ex) ? ex : new Error(String(ex)), config.verbose === true); @@ -273,6 +297,25 @@ const filterResultsBySeverity = (results: IRuleResult[], failSeverity: FailSever return results.filter(r => r.severity <= diagnosticSeverity); }; +const scoringThresholdNotEnough = (results: IRuleResult[], scoringConfig: ScoringConfig | undefined): boolean => { + if (scoringConfig !== void 0) { + const groupedResults = groupBySource(results); + let groupedUniqueResults = { ...groupedResults }; + if (scoringConfig.uniqueErrors) { + groupedUniqueResults = { ...groupBySource(uniqueErrors(results)) }; + } + return ( + scoringConfig.threshold > + getScoringLevel( + getCountsBySeverity(groupedUniqueResults), + scoringConfig.scoringSubtract, + scoringConfig.warningsSubtract, + ) + ); + } + return false; +}; + export const severeEnoughToFail = (results: IRuleResult[], failSeverity: FailSeverity): boolean => { const diagnosticSeverity = getDiagnosticSeverity(failSeverity); return results.some(r => r.severity <= diagnosticSeverity); diff --git a/packages/cli/src/formatters/json.ts b/packages/cli/src/formatters/json.ts index 4ff9fbce9..9eedbd64e 100644 --- a/packages/cli/src/formatters/json.ts +++ b/packages/cli/src/formatters/json.ts @@ -1,6 +1,21 @@ -import { Formatter } from './types'; +import { ISpectralDiagnostic } from '@stoplight/spectral-core'; +import { Formatter, FormatterOptions } from './types'; -export const json: Formatter = results => { +import { groupBySource, uniqueErrors, getCountsBySeverity, getScoringText } from './utils'; + +const version = process.env.npm_package_version; + +export const json: Formatter = (results: ISpectralDiagnostic[], options: FormatterOptions) => { + let spectralVersion = ''; + let groupedResults; + let scoringText = ''; + if (options.scoringConfig !== void 0) { + if (options.scoringConfig.customScoring !== undefined) { + spectralVersion = `${options.scoringConfig.customScoring} ${version as string}`; + } + groupedResults = groupBySource(uniqueErrors(results)); + scoringText = getScoringText(getCountsBySeverity(groupedResults), options.scoringConfig); + } const outputJson = results.map(result => { return { code: result.code, @@ -11,5 +26,17 @@ export const json: Formatter = results => { source: result.source, }; }); - return JSON.stringify(outputJson, null, '\t'); + let objectOutput; + if (options.scoringConfig !== void 0) { + const scoring = +(scoringText !== null ? 
scoringText.replace('%', '').split(/[()]+/)[1] : 0); + objectOutput = { + version: spectralVersion, + scoring: scoringText.replace('SCORING:', '').trim(), + passed: scoring >= options.scoringConfig.threshold, + results: outputJson, + }; + } else { + objectOutput = outputJson; + } + return JSON.stringify(objectOutput, null, '\t'); }; diff --git a/packages/cli/src/formatters/pretty.ts b/packages/cli/src/formatters/pretty.ts index 3d1a40403..3d99a858d 100644 --- a/packages/cli/src/formatters/pretty.ts +++ b/packages/cli/src/formatters/pretty.ts @@ -24,12 +24,24 @@ * @author Ava Thorn */ +import { ISpectralDiagnostic } from '@stoplight/spectral-core'; import { printPath, PrintStyle } from '@stoplight/spectral-runtime'; -import { IDiagnostic, IRange } from '@stoplight/types'; +import { IDiagnostic, IRange, DiagnosticSeverity } from '@stoplight/types'; import chalk from 'chalk'; -import { Formatter } from './types'; -import { getColorForSeverity, getHighestSeverity, getSummary, getSeverityName, groupBySource } from './utils'; +import { Formatter, FormatterOptions } from './types'; +import { + getColorForSeverity, + getHighestSeverity, + getSummary, + getSeverityName, + groupBySource, + getScoringText, + getCountsBySeverity, + uniqueErrors, +} from './utils'; + +const { version } = require('../../package.json'); function formatRange(range?: IRange): string { if (range === void 0) return ''; @@ -37,9 +49,15 @@ function formatRange(range?: IRange): string { return ` ${range.start.line + 1}:${range.start.character + 1}`; } -export const pretty: Formatter = results => { +export const pretty: Formatter = (results: ISpectralDiagnostic[], options: FormatterOptions) => { const cliui = require('cliui'); let output = '\n'; + if (options.scoringConfig !== void 0) { + if (options.scoringConfig.customScoring !== void 0) { + output += `${options.scoringConfig.customScoring}${version as string}\n`; + } + } + output += '\n'; const DEFAULT_TOTAL_WIDTH = process.stdout.columns; const COLUMNS = [10, 13, 25, 20, 20]; const variableColumns = DEFAULT_TOTAL_WIDTH - COLUMNS.reduce((a, b) => a + b); @@ -50,10 +68,23 @@ export const pretty: Formatter = results => { const PAD_TOP1_LEFT0 = [1, 0, 0, 0]; const ui = cliui({ width: DEFAULT_TOTAL_WIDTH, wrap: true }); + const uniqueResults = uniqueErrors(results); const groupedResults = groupBySource(results); - const summaryColor = getColorForSeverity(getHighestSeverity(results)); + const summaryColor = getColorForSeverity(getHighestSeverity(uniqueResults)); const summaryText = getSummary(groupedResults); + let groupedUniqueResults = { ...groupedResults }; + let scoringColor = ''; + let scoringText = null; + + if (options.scoringConfig !== void 0) { + if (options.scoringConfig.uniqueErrors) { + groupedUniqueResults = { ...groupBySource(uniqueResults) }; + } + scoringColor = getColorForSeverity(DiagnosticSeverity.Information); + scoringText = getScoringText(getCountsBySeverity(groupedUniqueResults), options.scoringConfig); + } + const uniqueIssues: IDiagnostic['code'][] = []; Object.keys(groupedResults).forEach(i => { const pathResults = groupedResults[i]; @@ -83,6 +114,15 @@ export const pretty: Formatter = results => { output += ui.toString(); output += chalk[summaryColor].bold(`${uniqueIssues.length} Unique Issue(s)\n`); output += chalk[summaryColor].bold(`\u2716${summaryText !== null ? ` ${summaryText}` : ''}\n`); + if (options.scoringConfig !== void 0) { + output += chalk[scoringColor].bold(`\u2716${scoringText !== null ? 
` ${scoringText}` : ''}\n`); + const scoring = +(scoringText !== null ? scoringText.replace('%', '').split(/[()]+/)[1] : 0); + if (scoring >= options.scoringConfig.threshold) { + output += chalk['green'].bold(`\u2716 PASSED!\n`); + } else { + output += chalk['red'].bold(`\u2716 NOT PASSED!\n`); + } + } return output; }; diff --git a/packages/cli/src/formatters/stylish.ts b/packages/cli/src/formatters/stylish.ts index 7f0aecf34..96ac6acc4 100644 --- a/packages/cli/src/formatters/stylish.ts +++ b/packages/cli/src/formatters/stylish.ts @@ -24,15 +24,28 @@ * @author Sindre Sorhus */ -import type { DiagnosticSeverity, IRange } from '@stoplight/types'; +import { ISpectralDiagnostic } from '@stoplight/spectral-core'; +import type { IRange } from '@stoplight/types'; +import { DiagnosticSeverity } from '@stoplight/types'; import chalk from 'chalk'; import stripAnsi = require('strip-ansi'); import table from 'text-table'; import { printPath, PrintStyle } from '@stoplight/spectral-runtime'; import type { IRuleResult } from '@stoplight/spectral-core'; -import type { Formatter } from './types'; -import { getColorForSeverity, getHighestSeverity, getSeverityName, getSummary, groupBySource } from './utils'; +import type { Formatter, FormatterOptions } from './types'; +import { + getColorForSeverity, + getHighestSeverity, + getSummary, + getSeverityName, + groupBySource, + getScoringText, + getCountsBySeverity, + uniqueErrors, +} from './utils'; + +const version = process.env.npm_package_version; // ----------------------------------------------------------------------------- // Helpers @@ -55,12 +68,31 @@ function getMessageType(severity: DiagnosticSeverity): string { // Public Interface // ----------------------------------------------------------------------------- -export const stylish: Formatter = results => { +export const stylish: Formatter = (results: ISpectralDiagnostic[], options: FormatterOptions) => { let output = '\n'; + if (options.scoringConfig !== void 0) { + if (options.scoringConfig.customScoring !== void 0) { + output += `${options.scoringConfig.customScoring}${version as string}\n`; + } + } + output += '\n'; + const uniqueResults = uniqueErrors(results); const groupedResults = groupBySource(results); - const summaryColor = getColorForSeverity(getHighestSeverity(results)); + const summaryColor = getColorForSeverity(getHighestSeverity(uniqueResults)); const summaryText = getSummary(groupedResults); + let groupedUniqueResults = { ...groupedResults }; + let scoringColor = ''; + let scoringText = null; + + if (options.scoringConfig !== void 0) { + if (options.scoringConfig.uniqueErrors) { + groupedUniqueResults = { ...groupBySource(uniqueResults) }; + } + scoringColor = getColorForSeverity(DiagnosticSeverity.Information); + scoringText = getScoringText(getCountsBySeverity(groupedUniqueResults), options.scoringConfig); + } + Object.keys(groupedResults).map(path => { const pathResults = groupedResults[path]; @@ -92,6 +124,15 @@ export const stylish: Formatter = results => { } output += chalk[summaryColor].bold(`\u2716 ${summaryText}\n`); + if (options.scoringConfig !== void 0) { + output += chalk[scoringColor].bold(`\u2716${scoringText !== null ? ` ${scoringText}` : ''}\n`); + const scoring = +(scoringText !== null ? 
scoringText.replace('%', '').split(/[()]+/)[1] : 0); + if (scoring >= options.scoringConfig.threshold) { + output += chalk['green'].bold(`\u2716 PASSED!\n`); + } else { + output += chalk['red'].bold(`\u2716 NOT PASSED!\n`); + } + } return output; }; diff --git a/packages/cli/src/formatters/types.ts b/packages/cli/src/formatters/types.ts index 80607838e..bed4d1000 100644 --- a/packages/cli/src/formatters/types.ts +++ b/packages/cli/src/formatters/types.ts @@ -1,8 +1,28 @@ import { ISpectralDiagnostic } from '@stoplight/spectral-core'; +import type { HumanReadableDiagnosticSeverity } from '@stoplight/spectral-core'; import type { DiagnosticSeverity } from '@stoplight/types'; +export type ScoringTable = { + [key in HumanReadableDiagnosticSeverity]: ScoringSubtract[]; +}; +export interface ScoringSubtract { + [key: number]: number; +} +export interface ScoringLevel { + [key: string]: number; +} +export type ScoringConfig = { + customScoring?: string; + scoringSubtract: ScoringTable[]; + scoringLetter: ScoringLevel[]; + threshold: number; + warningsSubtract: boolean; + uniqueErrors: boolean; +}; + export type FormatterOptions = { failSeverity: DiagnosticSeverity; + scoringConfig?: ScoringConfig; }; export type Formatter = (results: ISpectralDiagnostic[], options: FormatterOptions) => string; diff --git a/packages/cli/src/formatters/utils/getCountsBySeverity.ts b/packages/cli/src/formatters/utils/getCountsBySeverity.ts new file mode 100644 index 000000000..ff1dc144b --- /dev/null +++ b/packages/cli/src/formatters/utils/getCountsBySeverity.ts @@ -0,0 +1,38 @@ +import { IRuleResult } from '@stoplight/spectral-core'; +import { DiagnosticSeverity, Dictionary } from '@stoplight/types'; +import { groupBySeverity } from './groupBySeverity'; + +export const getCountsBySeverity = ( + groupedResults: Dictionary, +): { + [DiagnosticSeverity.Error]: number; + [DiagnosticSeverity.Warning]: number; + [DiagnosticSeverity.Information]: number; + [DiagnosticSeverity.Hint]: number; +} => { + let errorCount = 0; + let warningCount = 0; + let infoCount = 0; + let hintCount = 0; + + for (const results of Object.values(groupedResults)) { + const { + [DiagnosticSeverity.Error]: errors, + [DiagnosticSeverity.Warning]: warnings, + [DiagnosticSeverity.Information]: infos, + [DiagnosticSeverity.Hint]: hints, + } = groupBySeverity(results); + + errorCount += errors.length; + warningCount += warnings.length; + infoCount += infos.length; + hintCount += hints.length; + } + + return { + [DiagnosticSeverity.Error]: errorCount, + [DiagnosticSeverity.Warning]: warningCount, + [DiagnosticSeverity.Information]: infoCount, + [DiagnosticSeverity.Hint]: hintCount, + }; +}; diff --git a/packages/cli/src/formatters/utils/getScoring.ts b/packages/cli/src/formatters/utils/getScoring.ts new file mode 100644 index 000000000..34e5bfb21 --- /dev/null +++ b/packages/cli/src/formatters/utils/getScoring.ts @@ -0,0 +1,68 @@ +import { SEVERITY_MAP } from '@stoplight/spectral-core'; +import { DiagnosticSeverity } from '@stoplight/types'; +import { ScoringConfig, ScoringTable, ScoringSubtract } from '../types'; +import * as path from '@stoplight/path'; +import fs from 'fs'; + +export const getScoringConfig = (scoringFile?: string): ScoringConfig | undefined => { + if (scoringFile === void 0) { + return undefined; + } else if (!path.isAbsolute(scoringFile)) { + scoringFile = path.join(process.cwd(), scoringFile); + } + + const scoringConfig: ScoringConfig = JSON.parse(fs.readFileSync(scoringFile, 'utf-8')) as ScoringConfig; + + return scoringConfig; 
+}; + +export const getScoringLevel = ( + issuesCount: { + [DiagnosticSeverity.Error]: number; + [DiagnosticSeverity.Warning]: number; + [DiagnosticSeverity.Information]: number; + [DiagnosticSeverity.Hint]: number; + }, + scoringSubtract: ScoringTable[], + warningsSubtract: boolean, +): number => { + let scoring = 100; + Object.keys(issuesCount).forEach(key => { + const scoringKey = Object.keys(SEVERITY_MAP).filter(mappedKey => SEVERITY_MAP[mappedKey] == key)[0]; + if (scoringSubtract[scoringKey] !== void 0) { + if (scoring < 100 && !warningsSubtract) return; + let subtractValue = 0; + Object.keys(scoringSubtract[scoringKey] as ScoringSubtract[]).forEach((subtractKey: string): void => { + subtractValue = ( + issuesCount[key] >= subtractKey + ? (scoringSubtract[scoringKey] as ScoringSubtract[])[subtractKey] + : subtractValue + ) as number; + }); + scoring -= subtractValue; + } + }); + return scoring; +}; + +export const getScoringText = ( + issuesCount: { + [DiagnosticSeverity.Error]: number; + [DiagnosticSeverity.Warning]: number; + [DiagnosticSeverity.Information]: number; + [DiagnosticSeverity.Hint]: number; + }, + scoringConfig: ScoringConfig, +): string => { + const { scoringSubtract, scoringLetter, warningsSubtract } = scoringConfig; + const scoring = getScoringLevel(issuesCount, scoringSubtract, warningsSubtract); + let scoringLevel: string = Object.keys(scoringLetter)[Object.keys(scoringLetter).length - 1]; + Object.keys(scoringLetter) + .reverse() + .forEach(key => { + if (scoring > (scoringLetter[key] as number)) { + scoringLevel = key; + } + }); + return `SCORING: ${scoringLevel} (${scoring}%)`; +}; diff --git a/packages/cli/src/formatters/utils/index.ts b/packages/cli/src/formatters/utils/index.ts index 7733c6615..1f076d762 100644 --- a/packages/cli/src/formatters/utils/index.ts +++ b/packages/cli/src/formatters/utils/index.ts @@ -1,8 +1,11 @@ export * from './getColorForSeverity'; +export * from './getCountsBySeverity'; export * from './getHighestSeverity'; +export * from './getScoring'; export * from './getSeverityName'; export * from './getSummary'; export * from './groupBySeverity'; export * from './groupBySource'; export * from './pluralize'; +export * from './uniqueErrors'; export * from './xmlEscape'; diff --git a/packages/cli/src/formatters/utils/uniqueErrors.ts b/packages/cli/src/formatters/utils/uniqueErrors.ts new file mode 100644 index 000000000..3e3d10cf2 --- /dev/null +++ b/packages/cli/src/formatters/utils/uniqueErrors.ts @@ -0,0 +1,16 @@ +import { IRuleResult } from '@stoplight/spectral-core'; + +export const uniqueErrors = (results: IRuleResult[]): IRuleResult[] => { + const filteredResults: IRuleResult[] = []; + results.forEach((result: IRuleResult) => { + if ( + !filteredResults.some( + (element: IRuleResult) => element.code === result.code && element.message === result.message, + ) + ) { + filteredResults.push(result); + } + }); + + return filteredResults; +}; diff --git a/packages/cli/src/services/__tests__/__fixtures__/scoring-config.json b/packages/cli/src/services/__tests__/__fixtures__/scoring-config.json new file mode 100644 index 000000000..9ce0e27a4 --- /dev/null +++ b/packages/cli/src/services/__tests__/__fixtures__/scoring-config.json @@ -0,0 +1,32 @@ +{ + "scoringSubtract": + { + "error": + { + "1":55, + "2":65, + "3":75, + "6":85, + "10":95 + }, + "warn": + { + "1":3, + "2":7, + "3":10, + "6":15, + "10":18 + } + }, + "scoringLetter": + { + "A": 75, + "B": 65, + "C": 55, + "D": 45, + "E": 0 + }, + "threshold": 50, + "warningsSubtract": true, + 
"uniqueErrors": false +} \ No newline at end of file diff --git a/packages/cli/src/services/__tests__/linter.test.ts b/packages/cli/src/services/__tests__/linter.test.ts index e17a1b77e..e2c7121ba 100644 --- a/packages/cli/src/services/__tests__/linter.test.ts +++ b/packages/cli/src/services/__tests__/linter.test.ts @@ -18,6 +18,7 @@ jest.mock('../output'); const validCustomOas3SpecPath = resolve(__dirname, '__fixtures__/openapi-3.0-valid-custom.yaml'); const invalidRulesetPath = resolve(__dirname, '__fixtures__/ruleset-invalid.js'); const validRulesetPath = resolve(__dirname, '__fixtures__/ruleset-valid.js'); +const validScoringConfigRulesetPath = resolve(__dirname, '__fixtures__/scorint-config.json'); const validOas3SpecPath = resolve(__dirname, './__fixtures__/openapi-3.0-valid.yaml'); async function run(command: string) { @@ -368,6 +369,22 @@ describe('Linter service', () => { }); }); + describe('--scoring-config ', () => { + describe('when single scoring-config option provided', () => { + it('outputs normal output if it does not exist', () => { + return expect( + run(`lint ${validCustomOas3SpecPath} -r ${validRulesetPath} -s non-existent-path`), + ).resolves.toEqual([]); + }); + + it('outputs no issues', () => { + return expect( + run(`lint ${validCustomOas3SpecPath} -r ${validRulesetPath} -s ${validScoringConfigRulesetPath}`), + ).resolves.toEqual([]); + }); + }); + }); + describe('when loading specification files from web', () => { it('outputs no issues', () => { const document = join(__dirname, `./__fixtures__/stoplight-info-document.json`); diff --git a/packages/cli/src/services/__tests__/output.test.ts b/packages/cli/src/services/__tests__/output.test.ts index c9f08e305..50ebba381 100644 --- a/packages/cli/src/services/__tests__/output.test.ts +++ b/packages/cli/src/services/__tests__/output.test.ts @@ -2,6 +2,7 @@ import { DiagnosticSeverity } from '@stoplight/types'; import * as fs from 'fs'; import * as process from 'process'; import * as formatters from '../../formatters'; +import { ScoringLevel, ScoringTable } from '../../formatters/types'; import { OutputFormat } from '../config'; import { formatOutput, writeOutput } from '../output'; @@ -14,6 +15,23 @@ jest.mock('fs', () => ({ })); jest.mock('process'); +const scoringConfig = { + scoringSubtract: { + error: [0, 55, 65, 75, 75, 75, 85, 85, 85, 85, 95], + warn: [0, 3, 7, 10, 10, 10, 15, 15, 15, 15, 18], + } as unknown as ScoringTable[], + scoringLetter: { + A: 75, + B: 65, + C: 55, + D: 45, + E: 0, + } as unknown as ScoringLevel[], + threshold: 50, + warningsSubtract: true, + uniqueErrors: false, +}; + describe('Output service', () => { describe('formatOutput', () => { it.each(['stylish', 'json', 'junit'])('calls %s formatter with given result', format => { @@ -41,6 +59,34 @@ describe('Output service', () => { (formatters[format] as jest.Mock).mockReturnValueOnce(output); expect(formatOutput(results, format as OutputFormat, { failSeverity: DiagnosticSeverity.Error })).toEqual(output); }); + + it.each(['stylish', 'json', 'pretty'])('calls %s formatter with given result and scoring-config', format => { + const results = [ + { + code: 'info-contact', + path: ['info'], + message: 'Info object should contain `contact` object.', + severity: DiagnosticSeverity.Information, + range: { + start: { + line: 2, + character: 9, + }, + end: { + line: 6, + character: 19, + }, + }, + source: '/home/Stoplight/spectral/src/__tests__/__fixtures__/petstore.oas3.json', + }, + ]; + + const output = `value for ${format}`; + (formatters[format] as 
jest.Mock).mockReturnValueOnce(output); + expect( + formatOutput(results, format as OutputFormat, { failSeverity: DiagnosticSeverity.Error, scoringConfig }), + ).toEqual(output); + }); }); describe('writeOutput', () => { diff --git a/packages/cli/src/services/config.ts b/packages/cli/src/services/config.ts index 50024e510..cba83ef8b 100644 --- a/packages/cli/src/services/config.ts +++ b/packages/cli/src/services/config.ts @@ -19,6 +19,7 @@ export interface ILintConfig { output?: Dictionary; resolver?: string; ruleset?: string; + scoringConfig?: string; stdinFilepath?: string; ignoreUnknownFormat: boolean; failOnUnmatchedGlobs: boolean; diff --git a/packages/core/src/ruleset/index.ts b/packages/core/src/ruleset/index.ts index 50addc0e8..84f5769f9 100644 --- a/packages/core/src/ruleset/index.ts +++ b/packages/core/src/ruleset/index.ts @@ -1,5 +1,5 @@ export { assertValidRuleset, RulesetValidationError } from './validation/index'; -export { getDiagnosticSeverity } from './utils/severity'; +export { getDiagnosticSeverity, SEVERITY_MAP } from './utils/severity'; export { createRulesetFunction, SchemaDefinition as RulesetFunctionSchemaDefinition } from './function'; export { Format } from './format'; export { RulesetDefinition, RuleDefinition, ParserOptions, HumanReadableDiagnosticSeverity } from './types'; diff --git a/packages/core/src/ruleset/utils/severity.ts b/packages/core/src/ruleset/utils/severity.ts index aadc8c77f..b950e71cd 100644 --- a/packages/core/src/ruleset/utils/severity.ts +++ b/packages/core/src/ruleset/utils/severity.ts @@ -3,7 +3,7 @@ import { HumanReadableDiagnosticSeverity } from '../types'; export const DEFAULT_SEVERITY_LEVEL = DiagnosticSeverity.Warning; -const SEVERITY_MAP: Record = { +export const SEVERITY_MAP: Record = { error: DiagnosticSeverity.Error, warn: DiagnosticSeverity.Warning, info: DiagnosticSeverity.Information, diff --git a/test-harness/scenarios/formats/results-default-format-scoring-json.scenario b/test-harness/scenarios/formats/results-default-format-scoring-json.scenario new file mode 100644 index 000000000..4d27884ea --- /dev/null +++ b/test-harness/scenarios/formats/results-default-format-scoring-json.scenario @@ -0,0 +1,148 @@ +====test==== +Invalid document outputs results with scoring data --format=json +====document==== +--- +info: + version: 1.0.0 + title: Stoplight +====asset:ruleset.json==== + { + "rules": { + "api-servers": { + "description": "\"servers\" must be present and non-empty array.", + "recommended": true, + "given": "$", + "then": { + "field": "servers", + "function": "schema", + "functionOptions": { + "dialect": "draft7", + "schema": { + "items": { + "type": "object", + }, + "minItems": 1, + "type": "array" + } + } + } + }, + "info-contact": { + "description": "Info object must have a \"contact\" object.", + "recommended": true, + "type": "style", + "given": "$", + "then": { + "field": "info.contact", + "function": "truthy", + } + }, + "info-description": { + "description": "Info \"description\" must be present and non-empty string.", + "recommended": true, + "type": "style", + "given": "$", + "then": { + "field": "info.description", + "function": "truthy" + } + } + } + } +====asset:scoring-config.json==== +{ + "scoringSubtract": + { + "error": + { + "1":55, + "2":65, + "3":75, + "6":85, + "10":95 + }, + "warn": + { + "1":3, + "2":7, + "3":10, + "6":15, + "10":18 + } + }, + "scoringLetter": + { + "A": 75, + "B": 65, + "C": 55, + "D": 45, + "E": 0 + }, + "threshold": 50, + "warningsSubtract": true, + "uniqueErrors": false +} 
+====command==== +{bin} lint {document} --format=json --ruleset "{asset:ruleset.json}" --scoring-config "{asset:scoring-config.json}" +====stdout==== +{ + "version": "", + "scoring": "A (90%)", + "passed": true, + "results": [ + { + "code": "api-servers", + "path": [], + "message": "\"servers\" must be present and non-empty array.", + "severity": 1, + "range": { + "start": { + "line": 0, + "character": 0 + }, + "end": { + "line": 3, + "character": 18 + } + }, + "source": "{document}" + }, + { + "code": "info-contact", + "path": [ + "info" + ], + "message": "Info object must have a \"contact\" object.", + "severity": 1, + "range": { + "start": { + "line": 1, + "character": 5 + }, + "end": { + "line": 3, + "character": 18 + } + }, + "source": "{document}" + }, + { + "code": "info-description", + "path": [ + "info" + ], + "message": "Info \"description\" must be present and non-empty string.", + "severity": 1, + "range": { + "start": { + "line": 1, + "character": 5 + }, + "end": { + "line": 3, + "character": 18 + } + }, + "source": "{document}" + } + ]} diff --git a/test-harness/scenarios/formats/results-default-scoring.scenario b/test-harness/scenarios/formats/results-default-scoring.scenario new file mode 100644 index 000000000..c5c19519d --- /dev/null +++ b/test-harness/scenarios/formats/results-default-scoring.scenario @@ -0,0 +1,95 @@ +====test==== +Invalid document returns results with scoring data in default (stylish) format +====document==== +--- +info: + version: 1.0.0 + title: Stoplight +====asset:ruleset.json==== +{ + "rules": { + "api-servers": { + "description": "\"servers\" must be present and non-empty array.", + "recommended": true, + "given": "$", + "then": { + "field": "servers", + "function": "schema", + "functionOptions": { + "dialect": "draft7", + "schema": { + "items": { + "type": "object", + }, + "minItems": 1, + "type": "array" + } + } + } + }, + "info-contact": { + "description": "Info object must have a \"contact\" object.", + "recommended": true, + "type": "style", + "given": "$", + "then": { + "field": "info.contact", + "function": "truthy", + } + }, + "info-description": { + "description": "Info \"description\" must be present and non-empty string.", + "recommended": true, + "type": "style", + "given": "$", + "then": { + "field": "info.description", + "function": "truthy" + } + } + } +} +====asset:scoring-config.json==== +{ + "scoringSubtract": + { + "error": + { + "1":55, + "2":65, + "3":75, + "6":85, + "10":95 + }, + "warn": + { + "1":3, + "2":7, + "3":10, + "6":15, + "10":18 + } + }, + "scoringLetter": + { + "A": 75, + "B": 65, + "C": 55, + "D": 45, + "E": 0 + }, + "threshold": 50, + "warningsSubtract": true, + "uniqueErrors": false +} +====command==== +{bin} lint {document} --ruleset "{asset:ruleset.json}" --scoring-config "{asset:scoring-config.json}" +====stdout==== +{document} + 1:1 warning api-servers "servers" must be present and non-empty array. + 2:6 warning info-contact Info object must have a "contact" object. info + 2:6 warning info-description Info "description" must be present and non-empty string. info + +✖ 3 problems (0 errors, 3 warnings, 0 infos, 0 hints) +✖ SCORING: A (90%) +✖ PASSED! 
diff --git a/test-harness/scenarios/formats/results-format-stylish-scoring.scenario b/test-harness/scenarios/formats/results-format-stylish-scoring.scenario new file mode 100644 index 000000000..a48946469 --- /dev/null +++ b/test-harness/scenarios/formats/results-format-stylish-scoring.scenario @@ -0,0 +1,96 @@ +====test==== +Invalid document outputs results with scoring data when --format=stylish +====document==== +--- +info: + version: 1.0.0 + title: Stoplight +paths: {} +====asset:ruleset.json==== +{ + "rules": { + "api-servers": { + "description": "\"servers\" must be present and non-empty array.", + "recommended": true, + "given": "$", + "then": { + "field": "servers", + "function": "schema", + "functionOptions": { + "dialect": "draft7", + "schema": { + "items": { + "type": "object", + }, + "minItems": 1, + "type": "array" + } + } + } + }, + "info-contact": { + "description": "Info object must have a \"contact\" object.", + "recommended": true, + "type": "style", + "given": "$", + "then": { + "field": "info.contact", + "function": "truthy", + } + }, + "info-description": { + "description": "Info \"description\" must be present and non-empty string.", + "recommended": true, + "type": "style", + "given": "$", + "then": { + "field": "info.description", + "function": "truthy" + } + } + } +} +====asset:scoring-config.json==== +{ + "scoringSubtract": + { + "error": + { + "1":55, + "2":65, + "3":75, + "6":85, + "10":95 + }, + "warn": + { + "1":3, + "2":7, + "3":10, + "6":15, + "10":18 + } + }, + "scoringLetter": + { + "A": 75, + "B": 65, + "C": 55, + "D": 45, + "E": 0 + }, + "threshold": 50, + "warningsSubtract": true, + "uniqueErrors": false +} +====command==== +{bin} lint {document} --format=stylish --ruleset "{asset:ruleset.json}" --scoring-config "{asset:scoring-config.json}" +====stdout==== +{document} + 1:1 warning api-servers "servers" must be present and non-empty array. + 2:6 warning info-contact Info object must have a "contact" object. info + 2:6 warning info-description Info "description" must be present and non-empty string. info + +✖ 3 problems (0 errors, 3 warnings, 0 infos, 0 hints) +✖ SCORING: A (90%) +✖ PASSED! 
diff --git a/test-harness/scenarios/formats/too-few-outputs.scenario b/test-harness/scenarios/formats/too-few-outputs.scenario index 733e54185..b62b02bef 100644 --- a/test-harness/scenarios/formats/too-few-outputs.scenario +++ b/test-harness/scenarios/formats/too-few-outputs.scenario @@ -24,6 +24,7 @@ Options: --stdin-filepath path to a file to pretend that stdin comes from [string] --resolver path to custom json-ref-resolver instance [string] -r, --ruleset path/URL to a ruleset file [string] + -s, --scoring-config path/URL to a scoring config file [string] -F, --fail-severity results of this level or above will trigger a failure exit code [string] [choices: "error", "warn", "info", "hint"] [default: "error"] -D, --display-only-failures only output results equal to or greater than --fail-severity [boolean] [default: false] --ignore-unknown-format do not warn about unmatched formats [boolean] [default: false] diff --git a/test-harness/scenarios/formats/too-many-outputs.scenario b/test-harness/scenarios/formats/too-many-outputs.scenario index c127e994a..1afe939b9 100644 --- a/test-harness/scenarios/formats/too-many-outputs.scenario +++ b/test-harness/scenarios/formats/too-many-outputs.scenario @@ -24,6 +24,7 @@ Options: --stdin-filepath path to a file to pretend that stdin comes from [string] --resolver path to custom json-ref-resolver instance [string] -r, --ruleset path/URL to a ruleset file [string] + -s, --scoring-config path/URL to a scoring config file [string] -F, --fail-severity results of this level or above will trigger a failure exit code [string] [choices: "error", "warn", "info", "hint"] [default: "error"] -D, --display-only-failures only output results equal to or greater than --fail-severity [boolean] [default: false] --ignore-unknown-format do not warn about unmatched formats [boolean] [default: false] diff --git a/test-harness/scenarios/formats/unmatched-outputs.scenario b/test-harness/scenarios/formats/unmatched-outputs.scenario index 69f7f1fc5..fbb017b6e 100644 --- a/test-harness/scenarios/formats/unmatched-outputs.scenario +++ b/test-harness/scenarios/formats/unmatched-outputs.scenario @@ -24,6 +24,7 @@ Options: --stdin-filepath path to a file to pretend that stdin comes from [string] --resolver path to custom json-ref-resolver instance [string] -r, --ruleset path/URL to a ruleset file [string] + -s, --scoring-config path/URL to a scoring config file [string] -F, --fail-severity results of this level or above will trigger a failure exit code [string] [choices: "error", "warn", "info", "hint"] [default: "error"] -D, --display-only-failures only output results equal to or greater than --fail-severity [boolean] [default: false] --ignore-unknown-format do not warn about unmatched formats [boolean] [default: false] diff --git a/test-harness/scenarios/help-no-document.scenario b/test-harness/scenarios/help-no-document.scenario index 8e686198b..4243f84c3 100644 --- a/test-harness/scenarios/help-no-document.scenario +++ b/test-harness/scenarios/help-no-document.scenario @@ -25,6 +25,7 @@ Options: --stdin-filepath path to a file to pretend that stdin comes from [string] --resolver path to custom json-ref-resolver instance [string] -r, --ruleset path/URL to a ruleset file [string] + -s, --scoring-config path/URL to a scoring config file [string] -F, --fail-severity results of this level or above will trigger a failure exit code [string] [choices: "error", "warn", "info", "hint"] [default: "error"] -D, --display-only-failures only output results equal to or greater than 
--fail-severity [boolean] [default: false] --ignore-unknown-format do not warn about unmatched formats [boolean] [default: false] diff --git a/test-harness/scenarios/overrides/aliases-scoring.scenario b/test-harness/scenarios/overrides/aliases-scoring.scenario new file mode 100644 index 000000000..0b5a29c7b --- /dev/null +++ b/test-harness/scenarios/overrides/aliases-scoring.scenario @@ -0,0 +1,133 @@ +====test==== +Respect overrides with aliases and scoring +====asset:spectral.js==== +const { DiagnosticSeverity } = require('@stoplight/types'); +const { pattern } = require('@stoplight/spectral-functions'); + +module.exports = { + aliases: { + Info: ['$.info'], + }, + rules: { + 'description-matches-stoplight': { + message: 'Description must contain Stoplight', + given: '#Info', + recommended: true, + severity: DiagnosticSeverity.Error, + then: { + field: 'description', + function: pattern, + functionOptions: { + match: 'Stoplight', + }, + }, + }, + 'title-matches-stoplight': { + message: 'Title must contain Stoplight', + given: '#Info', + then: { + field: 'title', + function: pattern, + functionOptions: { + match: 'Stoplight', + }, + }, + }, + 'contact-name-matches-stoplight': { + message: 'Contact name must contain Stoplight', + given: '#Info.contact', + recommended: false, + then: { + field: 'name', + function: pattern, + functionOptions: { + match: 'Stoplight', + }, + }, + }, + }, + overrides: [ + { + files: [`**/*.json`], + rules: { + 'description-matches-stoplight': 'error', + 'title-matches-stoplight': 'warn', + }, + }, + { + files: [`v2/**/*.json`], + rules: { + 'description-matches-stoplight': 'info', + 'title-matches-stoplight': 'hint', + }, + }, + ], +}; +====asset:scoring-config.json==== +{ + "scoringSubtract": + { + "error": + { + "1":55, + "2":65, + "3":75, + "6":85, + "10":95 + }, + "warn": + { + "1":3, + "2":7, + "3":10, + "6":15, + "10":18 + } + }, + "scoringLetter": + { + "A": 75, + "B": 65, + "C": 55, + "D": 45, + "E": 0 + }, + "threshold": 50, + "warningsSubtract": true, + "uniqueErrors": false +} +====asset:v2/document.json==== +{ + "info": { + "description": "", + "title": "", + "contact": { + "name": "" + } + } +} +====asset:legacy/document.json==== +{ + "info": { + "description": "", + "title": "", + "contact": { + "name": "" + } + } +} +====command==== +{bin} lint **/*.json --ruleset {asset:spectral.js} --fail-on-unmatched-globs --scoring-config "{asset:scoring-config.json}" +====stdout==== + +{asset:legacy/document.json} + 3:20 error description-matches-stoplight Description must contain Stoplight info.description + 4:14 warning title-matches-stoplight Title must contain Stoplight info.title + +{asset:v2/document.json} + 3:20 information description-matches-stoplight Description must contain Stoplight info.description + 4:14 hint title-matches-stoplight Title must contain Stoplight info.title + +✖ 4 problems (1 error, 1 warning, 1 info, 1 hint) +✖ SCORING: E (42%) +✖ NOT PASSED! diff --git a/test-harness/scenarios/severity/fail-on-error-no-error-scoring.scenario b/test-harness/scenarios/severity/fail-on-error-no-error-scoring.scenario new file mode 100644 index 000000000..41fbd4a9e --- /dev/null +++ b/test-harness/scenarios/severity/fail-on-error-no-error-scoring.scenario @@ -0,0 +1,64 @@ +====test==== +Will only fail if there is an error, and there is not. Can still see all warnings with scoring data. 
+====document==== +- type: string +- type: number +====asset:ruleset.json==== +{ + "rules": { + "valid-type": { + "given": "$..type", + "then": { + "function": "enumeration", + "functionOptions": { + "values": ["object"] + } + } + } + } +} +====asset:scoring-config.json==== +{ + "scoringSubtract": + { + "error": + { + "1":55, + "2":65, + "3":75, + "6":85, + "10":95 + }, + "warn": + { + "1":3, + "2":7, + "3":10, + "6":15, + "10":18 + } + }, + "scoringLetter": + { + "A": 75, + "B": 65, + "C": 55, + "D": 45, + "E": 0 + }, + "threshold": 50, + "warningsSubtract": true, + "uniqueErrors": false +} +====command==== +{bin} lint {document} --ruleset "{asset:ruleset.json}" --fail-severity=error --scoring-config "{asset:scoring-config.json}" +====status==== +0 +====stdout==== +{document} + 1:9 warning valid-type "string" must be equal to one of the allowed values: "object" [0].type + 2:9 warning valid-type "number" must be equal to one of the allowed values: "object" [1].type + +✖ 2 problems (0 errors, 2 warnings, 0 infos, 0 hints) +✖ SCORING: A (93%) +✖ PASSED! diff --git a/test-harness/scenarios/severity/fail-on-error-scoring.scenario b/test-harness/scenarios/severity/fail-on-error-scoring.scenario new file mode 100644 index 000000000..f5db7676c --- /dev/null +++ b/test-harness/scenarios/severity/fail-on-error-scoring.scenario @@ -0,0 +1,78 @@ +====test==== +Will fail and return 1 as exit code because errors exist with scoring data +====document==== +- type: string +- type: array +====asset:ruleset.json==== +{ + "rules": { + "valid-type": { + "given": "$..type", + "severity": "error", + "then": { + "function": "enumeration", + "functionOptions": { + "values": ["object"] + } + } + }, + "no-primitive-type": { + "given": "$..type", + "severity": "warn", + "then": { + "function": "enumeration", + "functionOptions": { + "values": ["string", "number", "boolean", "null"] + } + } + } + } +} +====asset:scoring-config.json==== +{ + "scoringSubtract": + { + "error": + { + "1":55, + "2":65, + "3":75, + "6":85, + "10":95 + }, + "warn": + { + "1":3, + "2":7, + "3":10, + "6":15, + "10":18 + } + }, + "scoringLetter": + { + "A": 75, + "B": 65, + "C": 55, + "D": 45, + "E": 0 + }, + "threshold": 50, + "warningsSubtract": true, + "uniqueErrors": false +} +====command-nix==== +{bin} lint {document} --ruleset "{asset:ruleset.json}" --fail-severity=error --scoring-config "{asset:scoring-config.json}" +====command-win==== +{bin} lint {document} --ruleset "{asset:ruleset.json}" --fail-severity error --scoring-config "{asset:scoring-config.json}" +====status==== +1 +====stdout==== +{document} + 1:9 error valid-type "string" must be equal to one of the allowed values: "object" [0].type + 2:9 warning no-primitive-type "array" must be equal to one of the allowed values: "string", "number", "boolean", "null" [1].type + 2:9 error valid-type "array" must be equal to one of the allowed values: "object" [1].type + +✖ 3 problems (2 errors, 1 warning, 0 infos, 0 hints) +✖ SCORING: E (32%) +✖ NOT PASSED! 
diff --git a/test-harness/scenarios/strict-options.scenario b/test-harness/scenarios/strict-options.scenario index 8b1cb3708..67dceac7f 100644 --- a/test-harness/scenarios/strict-options.scenario +++ b/test-harness/scenarios/strict-options.scenario @@ -25,6 +25,7 @@ Options: --stdin-filepath path to a file to pretend that stdin comes from [string] --resolver path to custom json-ref-resolver instance [string] -r, --ruleset path/URL to a ruleset file [string] + -s, --scoring-config path/URL to a scoring config file [string] -F, --fail-severity results of this level or above will trigger a failure exit code [string] [choices: "error", "warn", "info", "hint"] [default: "error"] -D, --display-only-failures only output results equal to or greater than --fail-severity [boolean] [default: false] --ignore-unknown-format do not warn about unmatched formats [boolean] [default: false] diff --git a/test-harness/scenarios/valid-no-errors.oas2-scoring.scenario b/test-harness/scenarios/valid-no-errors.oas2-scoring.scenario new file mode 100644 index 000000000..2d13af494 --- /dev/null +++ b/test-harness/scenarios/valid-no-errors.oas2-scoring.scenario @@ -0,0 +1,57 @@ +====test==== +Valid OAS2 document returns no results with scoring data +====document==== +swagger: "2.0" +info: + version: 1.0.0 + title: Stoplight + description: lots of text + contact: + name: fred +host: localhost +schemes: + - http +paths: {} +tags: + - name: my-tag +====asset:ruleset==== +const { oas } = require('@stoplight/spectral-rulesets'); +module.exports = oas; +====asset:scoring-config.json==== +{ + "scoringSubtract": + { + "error": + { + "1":55, + "2":65, + "3":75, + "6":85, + "10":95 + }, + "warn": + { + "1":3, + "2":7, + "3":10, + "6":15, + "10":18 + } + }, + "scoringLetter": + { + "A": 75, + "B": 65, + "C": 55, + "D": 45, + "E": 0 + }, + "threshold": 50, + "warningsSubtract": true, + "uniqueErrors": false +} +====command==== +{bin} lint {document} --ruleset "{asset:ruleset}" --scoring-config "{asset:scoring-config.json}" +====stdout==== +No results with a severity of 'error' found! +SCORING: (100%)PASSED! 
diff --git a/test-harness/scenarios/valid-no-errors.oas2.scenario b/test-harness/scenarios/valid-no-errors.oas2.scenario index b671062f2..f2db9703c 100644 --- a/test-harness/scenarios/valid-no-errors.oas2.scenario +++ b/test-harness/scenarios/valid-no-errors.oas2.scenario @@ -9,7 +9,7 @@ info: contact: name: fred host: localhost -schemes: +schemes: - http paths: {} tags: From b74588c9ad21b087dd96adf21bf7f87e44613491 Mon Sep 17 00:00:00 2001 From: PagoNxt-Trade Date: Mon, 19 Dec 2022 10:49:28 +0100 Subject: [PATCH 02/12] feat(core): cli new parameter alias removed --- packages/cli/src/commands/lint.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/cli/src/commands/lint.ts b/packages/cli/src/commands/lint.ts index d98f107da..d85f1db59 100644 --- a/packages/cli/src/commands/lint.ts +++ b/packages/cli/src/commands/lint.ts @@ -136,7 +136,6 @@ const lintCommand: CommandModule = { type: 'string', }, 'scoring-config': { - alias: 's', description: 'path/URL to a scoring config file', type: 'string', }, From 294ec3a0d9138639ada6660d24b835fc69808b53 Mon Sep 17 00:00:00 2001 From: PagoNxt-Trade Date: Mon, 19 Dec 2022 15:50:22 +0100 Subject: [PATCH 03/12] feat(core): cli results version output removed --- docs/guides/2-cli.md | 2 +- packages/cli/src/commands/__tests__/lint.test.ts | 2 +- packages/cli/src/formatters/json.ts | 6 ------ packages/cli/src/formatters/pretty.ts | 9 +-------- packages/cli/src/formatters/stylish.ts | 9 +-------- packages/cli/src/formatters/types.ts | 1 - packages/cli/src/services/__tests__/linter.test.ts | 4 ++-- .../formats/results-default-format-scoring-json.scenario | 1 - test-harness/scenarios/formats/too-few-outputs.scenario | 2 +- test-harness/scenarios/formats/too-many-outputs.scenario | 2 +- .../scenarios/formats/unmatched-outputs.scenario | 2 +- test-harness/scenarios/help-no-document.scenario | 2 +- test-harness/scenarios/strict-options.scenario | 2 +- 13 files changed, 11 insertions(+), 33 deletions(-) diff --git a/docs/guides/2-cli.md b/docs/guides/2-cli.md index a02c0be96..aa56318c3 100644 --- a/docs/guides/2-cli.md +++ b/docs/guides/2-cli.md @@ -39,7 +39,7 @@ Other options include: --stdin-filepath path to a file to pretend that stdin comes from [string] --resolver path to custom json-ref-resolver instance [string] -r, --ruleset path/URL to a ruleset file [string] - -s, --scoring-config path/URL to a scoring config file [string] + --scoring-config path/URL to a scoring config file [string] -F, --fail-severity results of this level or above will trigger a failure exit code [string] [choices: "error", "warn", "info", "hint"] [default: "error"] -D, --display-only-failures only output results equal to or greater than --fail-severity [boolean] [default: false] diff --git a/packages/cli/src/commands/__tests__/lint.test.ts b/packages/cli/src/commands/__tests__/lint.test.ts index 890e8ea93..fc3fe1928 100644 --- a/packages/cli/src/commands/__tests__/lint.test.ts +++ b/packages/cli/src/commands/__tests__/lint.test.ts @@ -150,7 +150,7 @@ describe('lint', () => { const doc = './__fixtures__/empty-oas2-document.json'; const ruleset = 'custom-ruleset.json'; const configFile = 'scoring-config.json'; - await run(`lint -r ${ruleset} -s ${configFile} ${doc}`); + await run(`lint -r ${ruleset} --scoring-config ${configFile} ${doc}`); expect(lint).toBeCalledWith([doc], { encoding: 'utf8', format: ['stylish'], diff --git a/packages/cli/src/formatters/json.ts b/packages/cli/src/formatters/json.ts index 9eedbd64e..36c53d626 100644 --- a/packages/cli/src/formatters/json.ts +++ 
b/packages/cli/src/formatters/json.ts @@ -3,16 +3,11 @@ import { Formatter, FormatterOptions } from './types'; import { groupBySource, uniqueErrors, getCountsBySeverity, getScoringText } from './utils'; -const version = process.env.npm_package_version; export const json: Formatter = (results: ISpectralDiagnostic[], options: FormatterOptions) => { - let spectralVersion = ''; let groupedResults; let scoringText = ''; if (options.scoringConfig !== void 0) { - if (options.scoringConfig.customScoring !== undefined) { - spectralVersion = `${options.scoringConfig.customScoring} ${version as string}`; - } groupedResults = groupBySource(uniqueErrors(results)); scoringText = getScoringText(getCountsBySeverity(groupedResults), options.scoringConfig); } @@ -30,7 +25,6 @@ export const json: Formatter = (results: ISpectralDiagnostic[], options: Formatt if (options.scoringConfig !== void 0) { const scoring = +(scoringText !== null ? scoringText.replace('%', '').split(/[()]+/)[1] : 0); objectOutput = { - version: spectralVersion, scoring: scoringText.replace('SCORING:', '').trim(), passed: scoring >= options.scoringConfig.threshold, results: outputJson, diff --git a/packages/cli/src/formatters/pretty.ts b/packages/cli/src/formatters/pretty.ts index 3d99a858d..de51b2b6f 100644 --- a/packages/cli/src/formatters/pretty.ts +++ b/packages/cli/src/formatters/pretty.ts @@ -41,8 +41,6 @@ import { uniqueErrors, } from './utils'; -const { version } = require('../../package.json'); - function formatRange(range?: IRange): string { if (range === void 0) return ''; @@ -52,12 +50,7 @@ function formatRange(range?: IRange): string { export const pretty: Formatter = (results: ISpectralDiagnostic[], options: FormatterOptions) => { const cliui = require('cliui'); let output = '\n'; - if (options.scoringConfig !== void 0) { - if (options.scoringConfig.customScoring !== void 0) { - output += `${options.scoringConfig.customScoring}${version as string}\n`; - } - } - output += '\n'; + const DEFAULT_TOTAL_WIDTH = process.stdout.columns; const COLUMNS = [10, 13, 25, 20, 20]; const variableColumns = DEFAULT_TOTAL_WIDTH - COLUMNS.reduce((a, b) => a + b); diff --git a/packages/cli/src/formatters/stylish.ts b/packages/cli/src/formatters/stylish.ts index 96ac6acc4..e0945401b 100644 --- a/packages/cli/src/formatters/stylish.ts +++ b/packages/cli/src/formatters/stylish.ts @@ -45,8 +45,6 @@ import { uniqueErrors, } from './utils'; -const version = process.env.npm_package_version; - // ----------------------------------------------------------------------------- // Helpers // ----------------------------------------------------------------------------- @@ -70,12 +68,7 @@ function getMessageType(severity: DiagnosticSeverity): string { export const stylish: Formatter = (results: ISpectralDiagnostic[], options: FormatterOptions) => { let output = '\n'; - if (options.scoringConfig !== void 0) { - if (options.scoringConfig.customScoring !== void 0) { - output += `${options.scoringConfig.customScoring}${version as string}\n`; - } - } - output += '\n'; + const uniqueResults = uniqueErrors(results); const groupedResults = groupBySource(results); const summaryColor = getColorForSeverity(getHighestSeverity(uniqueResults)); diff --git a/packages/cli/src/formatters/types.ts b/packages/cli/src/formatters/types.ts index bed4d1000..795cc61ea 100644 --- a/packages/cli/src/formatters/types.ts +++ b/packages/cli/src/formatters/types.ts @@ -12,7 +12,6 @@ export interface ScoringLevel { [key: string]: number; } export type ScoringConfig = { - customScoring?: 
string; scoringSubtract: ScoringTable[]; scoringLetter: ScoringLevel[]; threshold: number; diff --git a/packages/cli/src/services/__tests__/linter.test.ts b/packages/cli/src/services/__tests__/linter.test.ts index e2c7121ba..834313c1a 100644 --- a/packages/cli/src/services/__tests__/linter.test.ts +++ b/packages/cli/src/services/__tests__/linter.test.ts @@ -373,13 +373,13 @@ describe('Linter service', () => { describe('when single scoring-config option provided', () => { it('outputs normal output if it does not exist', () => { return expect( - run(`lint ${validCustomOas3SpecPath} -r ${validRulesetPath} -s non-existent-path`), + run(`lint ${validCustomOas3SpecPath} -r ${validRulesetPath} --scoring-config non-existent-path`), ).resolves.toEqual([]); }); it('outputs no issues', () => { return expect( - run(`lint ${validCustomOas3SpecPath} -r ${validRulesetPath} -s ${validScoringConfigRulesetPath}`), + run(`lint ${validCustomOas3SpecPath} -r ${validRulesetPath} --scoring-config ${validScoringConfigRulesetPath}`), ).resolves.toEqual([]); }); }); diff --git a/test-harness/scenarios/formats/results-default-format-scoring-json.scenario b/test-harness/scenarios/formats/results-default-format-scoring-json.scenario index 4d27884ea..e6eb4aa70 100644 --- a/test-harness/scenarios/formats/results-default-format-scoring-json.scenario +++ b/test-harness/scenarios/formats/results-default-format-scoring-json.scenario @@ -86,7 +86,6 @@ info: {bin} lint {document} --format=json --ruleset "{asset:ruleset.json}" --scoring-config "{asset:scoring-config.json}" ====stdout==== { - "version": "", "scoring": "A (90%)", "passed": true, "results": [ diff --git a/test-harness/scenarios/formats/too-few-outputs.scenario b/test-harness/scenarios/formats/too-few-outputs.scenario index b62b02bef..f4b630195 100644 --- a/test-harness/scenarios/formats/too-few-outputs.scenario +++ b/test-harness/scenarios/formats/too-few-outputs.scenario @@ -24,7 +24,7 @@ Options: --stdin-filepath path to a file to pretend that stdin comes from [string] --resolver path to custom json-ref-resolver instance [string] -r, --ruleset path/URL to a ruleset file [string] - -s, --scoring-config path/URL to a scoring config file [string] + --scoring-config path/URL to a scoring config file [string] -F, --fail-severity results of this level or above will trigger a failure exit code [string] [choices: "error", "warn", "info", "hint"] [default: "error"] -D, --display-only-failures only output results equal to or greater than --fail-severity [boolean] [default: false] --ignore-unknown-format do not warn about unmatched formats [boolean] [default: false] diff --git a/test-harness/scenarios/formats/too-many-outputs.scenario b/test-harness/scenarios/formats/too-many-outputs.scenario index 1afe939b9..f31ac9898 100644 --- a/test-harness/scenarios/formats/too-many-outputs.scenario +++ b/test-harness/scenarios/formats/too-many-outputs.scenario @@ -24,7 +24,7 @@ Options: --stdin-filepath path to a file to pretend that stdin comes from [string] --resolver path to custom json-ref-resolver instance [string] -r, --ruleset path/URL to a ruleset file [string] - -s, --scoring-config path/URL to a scoring config file [string] + --scoring-config path/URL to a scoring config file [string] -F, --fail-severity results of this level or above will trigger a failure exit code [string] [choices: "error", "warn", "info", "hint"] [default: "error"] -D, --display-only-failures only output results equal to or greater than --fail-severity [boolean] [default: false] 
--ignore-unknown-format do not warn about unmatched formats [boolean] [default: false] diff --git a/test-harness/scenarios/formats/unmatched-outputs.scenario b/test-harness/scenarios/formats/unmatched-outputs.scenario index fbb017b6e..8abf03ea1 100644 --- a/test-harness/scenarios/formats/unmatched-outputs.scenario +++ b/test-harness/scenarios/formats/unmatched-outputs.scenario @@ -24,7 +24,7 @@ Options: --stdin-filepath path to a file to pretend that stdin comes from [string] --resolver path to custom json-ref-resolver instance [string] -r, --ruleset path/URL to a ruleset file [string] - -s, --scoring-config path/URL to a scoring config file [string] + --scoring-config path/URL to a scoring config file [string] -F, --fail-severity results of this level or above will trigger a failure exit code [string] [choices: "error", "warn", "info", "hint"] [default: "error"] -D, --display-only-failures only output results equal to or greater than --fail-severity [boolean] [default: false] --ignore-unknown-format do not warn about unmatched formats [boolean] [default: false] diff --git a/test-harness/scenarios/help-no-document.scenario b/test-harness/scenarios/help-no-document.scenario index 4243f84c3..d9274754f 100644 --- a/test-harness/scenarios/help-no-document.scenario +++ b/test-harness/scenarios/help-no-document.scenario @@ -25,7 +25,7 @@ Options: --stdin-filepath path to a file to pretend that stdin comes from [string] --resolver path to custom json-ref-resolver instance [string] -r, --ruleset path/URL to a ruleset file [string] - -s, --scoring-config path/URL to a scoring config file [string] + --scoring-config path/URL to a scoring config file [string] -F, --fail-severity results of this level or above will trigger a failure exit code [string] [choices: "error", "warn", "info", "hint"] [default: "error"] -D, --display-only-failures only output results equal to or greater than --fail-severity [boolean] [default: false] --ignore-unknown-format do not warn about unmatched formats [boolean] [default: false] diff --git a/test-harness/scenarios/strict-options.scenario b/test-harness/scenarios/strict-options.scenario index 67dceac7f..cdab123cf 100644 --- a/test-harness/scenarios/strict-options.scenario +++ b/test-harness/scenarios/strict-options.scenario @@ -25,7 +25,7 @@ Options: --stdin-filepath path to a file to pretend that stdin comes from [string] --resolver path to custom json-ref-resolver instance [string] -r, --ruleset path/URL to a ruleset file [string] - -s, --scoring-config path/URL to a scoring config file [string] + --scoring-config path/URL to a scoring config file [string] -F, --fail-severity results of this level or above will trigger a failure exit code [string] [choices: "error", "warn", "info", "hint"] [default: "error"] -D, --display-only-failures only output results equal to or greater than --fail-severity [boolean] [default: false] --ignore-unknown-format do not warn about unmatched formats [boolean] [default: false] From ded50ec942deb5ca33ee2b076eb582b2efe414d5 Mon Sep 17 00:00:00 2001 From: PagoNxt-Trade Date: Mon, 19 Dec 2022 15:57:07 +0100 Subject: [PATCH 04/12] feat(core): scoring config file readed with promises --- packages/cli/src/formatters/utils/getScoring.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/cli/src/formatters/utils/getScoring.ts b/packages/cli/src/formatters/utils/getScoring.ts index 34e5bfb21..3e68cd505 100644 --- a/packages/cli/src/formatters/utils/getScoring.ts +++ b/packages/cli/src/formatters/utils/getScoring.ts @@ -11,7 
+11,7 @@ export const getScoringConfig = (scoringFile?: string): ScoringConfig | undefine scoringFile = path.join(process.cwd(), scoringFile); } - const scoringConfig: ScoringConfig = JSON.parse(fs.readFileSync(scoringFile, 'utf-8')) as ScoringConfig; + const scoringConfig: ScoringConfig = JSON.parse(fs.promises.readFile(scoringFile, 'utf8')) as ScoringConfig; return scoringConfig; }; From c0860415f9c02a640d34fb2699010bdb59d07799 Mon Sep 17 00:00:00 2001 From: PagoNxt-Trade Date: Mon, 19 Dec 2022 16:02:28 +0100 Subject: [PATCH 05/12] feat(core): added unique error specified --- docs/guides/2-cli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/2-cli.md b/docs/guides/2-cli.md index aa56318c3..772b61483 100644 --- a/docs/guides/2-cli.md +++ b/docs/guides/2-cli.md @@ -129,7 +129,7 @@ Where: - scoringLetter : An object with key/value pairs with scoring letter and scoring percentage, that the result must be greater , for this letter - threshold : A number with minimum percentage value to provide valid the file we are checking - warningsSubtract : A boolean to setup if accumulate the result types to less the scoring percentage or stop counting on most critical result types -- uniqueErrors : A boolean to setup a count with unique errors or with all of them +- uniqueErrors : A boolean to setup a count with unique errors or with all of them. An error is considered unique if its code and message have not been seen yet Example: From 7909d08c782ddb27968f5da829755629e1f16352 Mon Sep 17 00:00:00 2001 From: PagoNxt-Trade Date: Mon, 19 Dec 2022 16:20:42 +0100 Subject: [PATCH 06/12] feat(core): changed NOT PASSED message to FAILED on results --- packages/cli/src/commands/lint.ts | 6 +++--- packages/cli/src/formatters/json.ts | 4 ++-- packages/cli/src/formatters/pretty.ts | 4 ++-- packages/cli/src/formatters/stylish.ts | 4 ++-- packages/cli/src/formatters/utils/getScoring.ts | 4 ++-- packages/cli/src/formatters/utils/uniqueErrors.ts | 2 +- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/cli/src/commands/lint.ts b/packages/cli/src/commands/lint.ts index d85f1db59..50fcb89ac 100644 --- a/packages/cli/src/commands/lint.ts +++ b/packages/cli/src/commands/lint.ts @@ -20,7 +20,7 @@ import { getScoringLevel, groupBySource, getCountsBySeverity, - uniqueErrors, + getUniqueErrors, } from '../formatters//utils'; const formatOptions = Object.values(OutputFormat); @@ -210,7 +210,7 @@ const lintCommand: CommandModule = { results = filterResultsBySeverity(results, failSeverity); } - const scoringConfigData = getScoringConfig(scoringConfig); + const scoringConfigData = await getScoringConfig(scoringConfig); await Promise.all( format.map(f => { @@ -301,7 +301,7 @@ const scoringThresholdNotEnough = (results: IRuleResult[], scoringConfig: Scorin const groupedResults = groupBySource(results); let groupedUniqueResults = { ...groupedResults }; if (scoringConfig.uniqueErrors) { - groupedUniqueResults = { ...groupBySource(uniqueErrors(results)) }; + groupedUniqueResults = { ...groupBySource(getUniqueErrors(results)) }; } return ( scoringConfig.threshold > diff --git a/packages/cli/src/formatters/json.ts b/packages/cli/src/formatters/json.ts index 36c53d626..7b2f92507 100644 --- a/packages/cli/src/formatters/json.ts +++ b/packages/cli/src/formatters/json.ts @@ -1,14 +1,14 @@ import { ISpectralDiagnostic } from '@stoplight/spectral-core'; import { Formatter, FormatterOptions } from './types'; -import { groupBySource, uniqueErrors, getCountsBySeverity, getScoringText } from 
'./utils'; +import { groupBySource, getUniqueErrors, getCountsBySeverity, getScoringText } from './utils'; export const json: Formatter = (results: ISpectralDiagnostic[], options: FormatterOptions) => { let groupedResults; let scoringText = ''; if (options.scoringConfig !== void 0) { - groupedResults = groupBySource(uniqueErrors(results)); + groupedResults = groupBySource(getUniqueErrors(results)); scoringText = getScoringText(getCountsBySeverity(groupedResults), options.scoringConfig); } const outputJson = results.map(result => { diff --git a/packages/cli/src/formatters/pretty.ts b/packages/cli/src/formatters/pretty.ts index de51b2b6f..1b901ff25 100644 --- a/packages/cli/src/formatters/pretty.ts +++ b/packages/cli/src/formatters/pretty.ts @@ -38,7 +38,7 @@ import { groupBySource, getScoringText, getCountsBySeverity, - uniqueErrors, + getUniqueErrors, } from './utils'; function formatRange(range?: IRange): string { @@ -61,7 +61,7 @@ export const pretty: Formatter = (results: ISpectralDiagnostic[], options: Forma const PAD_TOP1_LEFT0 = [1, 0, 0, 0]; const ui = cliui({ width: DEFAULT_TOTAL_WIDTH, wrap: true }); - const uniqueResults = uniqueErrors(results); + const uniqueResults = getUniqueErrors(results); const groupedResults = groupBySource(results); const summaryColor = getColorForSeverity(getHighestSeverity(uniqueResults)); const summaryText = getSummary(groupedResults); diff --git a/packages/cli/src/formatters/stylish.ts b/packages/cli/src/formatters/stylish.ts index e0945401b..7bf472815 100644 --- a/packages/cli/src/formatters/stylish.ts +++ b/packages/cli/src/formatters/stylish.ts @@ -42,7 +42,7 @@ import { groupBySource, getScoringText, getCountsBySeverity, - uniqueErrors, + getUniqueErrors, } from './utils'; // ----------------------------------------------------------------------------- @@ -69,7 +69,7 @@ function getMessageType(severity: DiagnosticSeverity): string { export const stylish: Formatter = (results: ISpectralDiagnostic[], options: FormatterOptions) => { let output = '\n'; - const uniqueResults = uniqueErrors(results); + const uniqueResults = getUniqueErrors(results); const groupedResults = groupBySource(results); const summaryColor = getColorForSeverity(getHighestSeverity(uniqueResults)); const summaryText = getSummary(groupedResults); diff --git a/packages/cli/src/formatters/utils/getScoring.ts b/packages/cli/src/formatters/utils/getScoring.ts index 3e68cd505..b6099df05 100644 --- a/packages/cli/src/formatters/utils/getScoring.ts +++ b/packages/cli/src/formatters/utils/getScoring.ts @@ -4,14 +4,14 @@ import { ScoringConfig, ScoringTable, ScoringSubtract } from '../types'; import * as path from '@stoplight/path'; import fs from 'fs'; -export const getScoringConfig = (scoringFile?: string): ScoringConfig | undefined => { +export const getScoringConfig = async (scoringFile?: string): Promise => { if (scoringFile === void 0) { return undefined; } else if (!path.isAbsolute(scoringFile)) { scoringFile = path.join(process.cwd(), scoringFile); } - const scoringConfig: ScoringConfig = JSON.parse(fs.promises.readFile(scoringFile, 'utf8')) as ScoringConfig; + const scoringConfig: ScoringConfig = JSON.parse(await fs.promises.readFile(scoringFile, 'utf8')) as ScoringConfig; return scoringConfig; }; diff --git a/packages/cli/src/formatters/utils/uniqueErrors.ts b/packages/cli/src/formatters/utils/uniqueErrors.ts index 3e3d10cf2..efa469353 100644 --- a/packages/cli/src/formatters/utils/uniqueErrors.ts +++ b/packages/cli/src/formatters/utils/uniqueErrors.ts @@ -1,6 +1,6 @@ import { 
IRuleResult } from '@stoplight/spectral-core'; -export const uniqueErrors = (results: IRuleResult[]): IRuleResult[] => { +export const getUniqueErrors = (results: IRuleResult[]): IRuleResult[] => { const filteredResults: IRuleResult[] = []; results.forEach((result: IRuleResult) => { if ( From d8cd9b642cf5e8abf37932c458e79d6d264f5c72 Mon Sep 17 00:00:00 2001 From: PagoNxt-Trade Date: Mon, 19 Dec 2022 16:21:40 +0100 Subject: [PATCH 07/12] feat(core): changed NOT PASSED message to FAILED on results --- packages/cli/src/formatters/pretty.ts | 2 +- packages/cli/src/formatters/stylish.ts | 2 +- test-harness/scenarios/overrides/aliases-scoring.scenario | 2 +- test-harness/scenarios/severity/fail-on-error-scoring.scenario | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/cli/src/formatters/pretty.ts b/packages/cli/src/formatters/pretty.ts index 1b901ff25..38c9a62b1 100644 --- a/packages/cli/src/formatters/pretty.ts +++ b/packages/cli/src/formatters/pretty.ts @@ -113,7 +113,7 @@ export const pretty: Formatter = (results: ISpectralDiagnostic[], options: Forma if (scoring >= options.scoringConfig.threshold) { output += chalk['green'].bold(`\u2716 PASSED!\n`); } else { - output += chalk['red'].bold(`\u2716 NOT PASSED!\n`); + output += chalk['red'].bold(`\u2716 FAILED!\n`); } } diff --git a/packages/cli/src/formatters/stylish.ts b/packages/cli/src/formatters/stylish.ts index 7bf472815..5ee66bb9f 100644 --- a/packages/cli/src/formatters/stylish.ts +++ b/packages/cli/src/formatters/stylish.ts @@ -123,7 +123,7 @@ export const stylish: Formatter = (results: ISpectralDiagnostic[], options: Form if (scoring >= options.scoringConfig.threshold) { output += chalk['green'].bold(`\u2716 PASSED!\n`); } else { - output += chalk['red'].bold(`\u2716 NOT PASSED!\n`); + output += chalk['red'].bold(`\u2716 FAILED!\n`); } } diff --git a/test-harness/scenarios/overrides/aliases-scoring.scenario b/test-harness/scenarios/overrides/aliases-scoring.scenario index 0b5a29c7b..8cc493ef2 100644 --- a/test-harness/scenarios/overrides/aliases-scoring.scenario +++ b/test-harness/scenarios/overrides/aliases-scoring.scenario @@ -130,4 +130,4 @@ module.exports = { ✖ 4 problems (1 error, 1 warning, 1 info, 1 hint) ✖ SCORING: E (42%) -✖ NOT PASSED! +✖ FAILED! diff --git a/test-harness/scenarios/severity/fail-on-error-scoring.scenario b/test-harness/scenarios/severity/fail-on-error-scoring.scenario index f5db7676c..3996bbe77 100644 --- a/test-harness/scenarios/severity/fail-on-error-scoring.scenario +++ b/test-harness/scenarios/severity/fail-on-error-scoring.scenario @@ -75,4 +75,4 @@ Will fail and return 1 as exit code because errors exist with scoring data ✖ 3 problems (2 errors, 1 warning, 0 infos, 0 hints) ✖ SCORING: E (32%) -✖ NOT PASSED! +✖ FAILED! 
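The `uniqueErrors` helper renamed to `getUniqueErrors` above is only partially visible in the hunk; the following is a minimal sketch of the deduplication it is described as performing (an error is treated as unique when its code and message have not been seen yet), assuming a simple Set-based filter rather than the exact shipped implementation:

```ts
import { IRuleResult } from '@stoplight/spectral-core';

// Sketch only: keep the first diagnostic seen for each (code, message) pair.
// The real helper in packages/cli/src/formatters/utils/uniqueErrors.ts may
// compare additional fields (for example the result's source or path).
export const getUniqueErrors = (results: IRuleResult[]): IRuleResult[] => {
  const seen = new Set<string>();
  return results.filter(result => {
    const key = `${String(result.code)}:${result.message}`;
    if (seen.has(key)) {
      return false;
    }
    seen.add(key);
    return true;
  });
};
```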
From 0ed625a4e26b3ae0195fc63851693d7743d4db4d Mon Sep 17 00:00:00 2001 From: PagoNxt-Trade Date: Mon, 19 Dec 2022 16:38:13 +0100 Subject: [PATCH 08/12] feat(core): lintern changes --- packages/cli/src/formatters/json.ts | 1 - packages/cli/src/services/__tests__/linter.test.ts | 4 +++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/cli/src/formatters/json.ts b/packages/cli/src/formatters/json.ts index 7b2f92507..a411dc59e 100644 --- a/packages/cli/src/formatters/json.ts +++ b/packages/cli/src/formatters/json.ts @@ -3,7 +3,6 @@ import { Formatter, FormatterOptions } from './types'; import { groupBySource, getUniqueErrors, getCountsBySeverity, getScoringText } from './utils'; - export const json: Formatter = (results: ISpectralDiagnostic[], options: FormatterOptions) => { let groupedResults; let scoringText = ''; diff --git a/packages/cli/src/services/__tests__/linter.test.ts b/packages/cli/src/services/__tests__/linter.test.ts index 834313c1a..69d299cee 100644 --- a/packages/cli/src/services/__tests__/linter.test.ts +++ b/packages/cli/src/services/__tests__/linter.test.ts @@ -379,7 +379,9 @@ describe('Linter service', () => { it('outputs no issues', () => { return expect( - run(`lint ${validCustomOas3SpecPath} -r ${validRulesetPath} --scoring-config ${validScoringConfigRulesetPath}`), + run( + `lint ${validCustomOas3SpecPath} -r ${validRulesetPath} --scoring-config ${validScoringConfigRulesetPath}`, + ), ).resolves.toEqual([]); }); }); From 69cd3414714aabf54d347ab134bdc5fee1c11b01 Mon Sep 17 00:00:00 2001 From: PagoNxt-Trade <118373228+PagoNxt-Trade@users.noreply.github.com> Date: Mon, 19 Dec 2022 17:04:13 +0100 Subject: [PATCH 09/12] docs(cli): remove -s alias from docs --- docs/guides/2-cli.md | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/docs/guides/2-cli.md b/docs/guides/2-cli.md index 772b61483..ee16525b5 100644 --- a/docs/guides/2-cli.md +++ b/docs/guides/2-cli.md @@ -39,7 +39,7 @@ Other options include: --stdin-filepath path to a file to pretend that stdin comes from [string] --resolver path to custom json-ref-resolver instance [string] -r, --ruleset path/URL to a ruleset file [string] - --scoring-config path/URL to a scoring config file [string] + --scoring-config path/URL to a scoring config file [string] -F, --fail-severity results of this level or above will trigger a failure exit code [string] [choices: "error", "warn", "info", "hint"] [default: "error"] -D, --display-only-failures only output results equal to or greater than --fail-severity [boolean] [default: false] @@ -72,7 +72,7 @@ The scoring is produced in two different metrics: Also it introduces a quality gate, were an API scoring below the specific threshold will fail in a pipeline. 
-Enabling scoring is done using a new parameter called --scoring-config or -s and the scoring configuration file, where you can define how an error or a warning affects to the scoring
+Enabling scoring is done using a new parameter called --scoring-config and the scoring configuration file, where you can define how an error or a warning affects the scoring.

 Usage:

@@ -80,12 +80,6 @@ Usage:
 spectral lint ./reference/**/*.oas*.{json,yml,yaml} --ruleset mycustomruleset.js --scoring-config ./scoringFile.json
 ```

-or
-
-```bash
-spectral lint ./reference/**/*.oas*.{json,yml,yaml} -r mycustomruleset.js -s ./scoringFile.json
-```
-
 Heres an example of this scoringFile config file:

 ```

From 5f33c1b1cadec413177bb1d6635e5d24d29e3e6c Mon Sep 17 00:00:00 2001
From: PagoNxt-Trade
Date: Thu, 5 Jan 2023 15:42:04 +0100
Subject: [PATCH 10/12] docs(cli): apply suggested changes

---
 docs/guides/2-cli.md | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/docs/guides/2-cli.md b/docs/guides/2-cli.md
index ee16525b5..b7b75e724 100644
--- a/docs/guides/2-cli.md
+++ b/docs/guides/2-cli.md
@@ -63,12 +63,12 @@ Here you can build a [custom ruleset](../getting-started/3-rulesets.md), or exte

 ## Scoring the API

-Scoring an API definition is a way to understand in a high level, how compliant is the API definition with the rulesets provided. This helps teams to understand the quality of the APIs regarding the definition.
+Scoring an API definition is a way to understand at a high level how compliant the API definition is with the rulesets provided. This helps teams understand the quality of their API definitions.

 The scoring is produced in two different metrics:

-- A number scoring. Who cames as substracting from 100% from any error or warning
-- A letter, who groups numeric scorings in letters from A (better) to any
+- A number scoring: calculated by subtracting a percentage from 100% for every error or warning.
+- A letter scoring, which groups the numeric scoring into letters from A to Z, with A being the best score.

 Also it introduces a quality gate, were an API scoring below the specific threshold will fail in a pipeline.

@@ -119,11 +119,11 @@ Heres an example of this scoringFile config file:

 Where:

-- scoringSubtract : An object with a key/value pair objects for every result level we want to subtract percentage, with the percentage to subtract from number of results on every result type
-- scoringLetter : An object with key/value pairs with scoring letter and scoring percentage, that the result must be greater , for this letter
+- scoringSubtract : An object with key/value pair objects for every result level we want to subtract percentage, with the percentage to subtract from number of results on every result type
+- scoringLetter : An object with key/value pairs with scoring letter and scoring percentage, that the result must be greater, for this letter
 - threshold : A number with minimum percentage value to provide valid the file we are checking
-- warningsSubtract : A boolean to setup if accumulate the result types to less the scoring percentage or stop counting on most critical result types
-- uniqueErrors : A boolean to setup a count with unique errors or with all of them. An error is considered unique if its code and message have not been seen yet
+- warningsSubtract : A boolean to accumulate the result types to less than the scoring percentage or to stop counting on most critical result types
+- uniqueErrors : A boolean to count unique errors or all errors.
An error is considered unique if its code and message have not been seen yet

 Example:


From aeb85a45a8b1a8d6dd2f892909c1d1225fc60b90 Mon Sep 17 00:00:00 2001
From: PagoNxt-Trade
Date: Mon, 23 Jan 2023 15:19:09 +0100
Subject: [PATCH 11/12] docs(cli): clarified info and changes applied

---
 docs/guides/2-cli.md                          | 19 ++++++++++++++++---
 packages/cli/src/commands/lint.ts             |  2 +-
 packages/cli/src/formatters/types.ts          |  2 +-
 .../cli/src/formatters/utils/getScoring.ts    | 10 +++++-----
 .../__fixtures__/scoring-config.json          |  2 +-
 .../cli/src/services/__tests__/output.test.ts |  2 +-
 ...sults-default-format-scoring-json.scenario |  2 +-
 .../formats/results-default-scoring.scenario  |  2 +-
 .../results-format-stylish-scoring.scenario   |  2 +-
 .../overrides/aliases-scoring.scenario        |  2 +-
 .../fail-on-error-no-error-scoring.scenario   |  2 +-
 .../severity/fail-on-error-scoring.scenario   |  2 +-
 .../valid-no-errors.oas2-scoring.scenario     |  2 +-
 13 files changed, 32 insertions(+), 19 deletions(-)

diff --git a/docs/guides/2-cli.md b/docs/guides/2-cli.md
index b7b75e724..897e8ffb9 100644
--- a/docs/guides/2-cli.md
+++ b/docs/guides/2-cli.md
@@ -112,7 +112,7 @@ Heres an example of this scoringFile config file:
     "E":0
   },
   "threshold":50,
-  "warningsSubtract": true,
+  "onlySubtractHigherSeverityLevel": true,
   "uniqueErrors": false
 }
 ```
@@ -121,8 +121,21 @@ Where:

 - scoringSubtract : An object with key/value pair objects for every result level we want to subtract percentage, with the percentage to subtract from number of results on every result type
 - scoringLetter : An object with key/value pairs with scoring letter and scoring percentage, that the result must be greater, for this letter
-- threshold : A number with minimum percentage value to provide valid the file we are checking
-- warningsSubtract : A boolean to accumulate the result types to less than the scoring percentage or to stop counting on most critical result types
+- threshold : A number with the minimum percentage value that the file being checked must score to be considered valid. Any scoring below this threshold will mark the API as a failure in the scoring.
+- onlySubtractHigherSeverityLevel : A boolean to decide whether only the highest severity level that appears in the results for the analyzed API is subtracted from the scoring, or every severity level is subtracted from the scoring.
+
+For example:
+
+  true
+
+    API with Errors and Warnings: only Errors subtract from the scoring
+    API with Warnings: Warnings subtract from the scoring
+
+  false
+
+    API with Errors and Warnings: both Errors and Warnings subtract from the scoring
+    API with Warnings: Warnings subtract from the scoring
+
 - uniqueErrors : A boolean to count unique errors or all errors.
An error is considered unique if its code and message have not been seen yet Example: diff --git a/packages/cli/src/commands/lint.ts b/packages/cli/src/commands/lint.ts index 50fcb89ac..b8ffc7eca 100644 --- a/packages/cli/src/commands/lint.ts +++ b/packages/cli/src/commands/lint.ts @@ -308,7 +308,7 @@ const scoringThresholdNotEnough = (results: IRuleResult[], scoringConfig: Scorin getScoringLevel( getCountsBySeverity(groupedUniqueResults), scoringConfig.scoringSubtract, - scoringConfig.warningsSubtract, + scoringConfig.onlySubtractHigherSeverityLevel ) ); } diff --git a/packages/cli/src/formatters/types.ts b/packages/cli/src/formatters/types.ts index 795cc61ea..5b020a4a3 100644 --- a/packages/cli/src/formatters/types.ts +++ b/packages/cli/src/formatters/types.ts @@ -15,7 +15,7 @@ export type ScoringConfig = { scoringSubtract: ScoringTable[]; scoringLetter: ScoringLevel[]; threshold: number; - warningsSubtract: boolean; + onlySubtractHigherSeverityLevel: boolean; uniqueErrors: boolean; }; diff --git a/packages/cli/src/formatters/utils/getScoring.ts b/packages/cli/src/formatters/utils/getScoring.ts index b6099df05..984622556 100644 --- a/packages/cli/src/formatters/utils/getScoring.ts +++ b/packages/cli/src/formatters/utils/getScoring.ts @@ -24,13 +24,13 @@ export const getScoringLevel = ( [DiagnosticSeverity.Hint]: number; }, scoringSubtract: ScoringTable[], - warningsSubtract: boolean, + onlySubtractHigherSeverityLevel: boolean, ): number => { let scoring = 100; Object.keys(issuesCount).forEach(key => { const scoringKey = Object.keys(SEVERITY_MAP).filter(mappedKey => SEVERITY_MAP[mappedKey] == key)[0]; if (scoringSubtract[scoringKey] !== void 0) { - if (scoring < 100 && !warningsSubtract) return; + if (scoring < 100 && !onlySubtractHigherSeverityLevel) return; let subtractValue = 0; Object.keys(scoringSubtract[scoringKey] as ScoringSubtract[]).forEach((subtractKey: string): void => { subtractValue = ( @@ -42,7 +42,7 @@ export const getScoringLevel = ( scoring -= subtractValue; } }); - return scoring; + return scoring > 0 ? 
scoring : 0; }; export const getScoringText = ( @@ -54,8 +54,8 @@ export const getScoringText = ( }, scoringConfig: ScoringConfig, ): string => { - const { scoringSubtract, scoringLetter, warningsSubtract } = scoringConfig; - const scoring = getScoringLevel(issuesCount, scoringSubtract, warningsSubtract); + const { scoringSubtract, scoringLetter, onlySubtractHigherSeverityLevel } = scoringConfig; + const scoring = getScoringLevel(issuesCount, scoringSubtract, onlySubtractHigherSeverityLevel); let scoringLevel: string = Object.keys(scoringLetter)[Object.keys(scoringLetter).length - 1]; Object.keys(scoringLetter) .reverse() diff --git a/packages/cli/src/services/__tests__/__fixtures__/scoring-config.json b/packages/cli/src/services/__tests__/__fixtures__/scoring-config.json index 9ce0e27a4..4d6abc0b3 100644 --- a/packages/cli/src/services/__tests__/__fixtures__/scoring-config.json +++ b/packages/cli/src/services/__tests__/__fixtures__/scoring-config.json @@ -27,6 +27,6 @@ "E": 0 }, "threshold": 50, - "warningsSubtract": true, + "onlySubtractHigherSeverityLevel": true, "uniqueErrors": false } \ No newline at end of file diff --git a/packages/cli/src/services/__tests__/output.test.ts b/packages/cli/src/services/__tests__/output.test.ts index 50ebba381..88d38eeaf 100644 --- a/packages/cli/src/services/__tests__/output.test.ts +++ b/packages/cli/src/services/__tests__/output.test.ts @@ -28,7 +28,7 @@ const scoringConfig = { E: 0, } as unknown as ScoringLevel[], threshold: 50, - warningsSubtract: true, + onlySubtractHigherSeverityLevel: true, uniqueErrors: false, }; diff --git a/test-harness/scenarios/formats/results-default-format-scoring-json.scenario b/test-harness/scenarios/formats/results-default-format-scoring-json.scenario index e6eb4aa70..56a9b97c2 100644 --- a/test-harness/scenarios/formats/results-default-format-scoring-json.scenario +++ b/test-harness/scenarios/formats/results-default-format-scoring-json.scenario @@ -79,7 +79,7 @@ info: "E": 0 }, "threshold": 50, - "warningsSubtract": true, + "onlySubtractHigherSeverityLevel": true, "uniqueErrors": false } ====command==== diff --git a/test-harness/scenarios/formats/results-default-scoring.scenario b/test-harness/scenarios/formats/results-default-scoring.scenario index c5c19519d..4757b259c 100644 --- a/test-harness/scenarios/formats/results-default-scoring.scenario +++ b/test-harness/scenarios/formats/results-default-scoring.scenario @@ -79,7 +79,7 @@ info: "E": 0 }, "threshold": 50, - "warningsSubtract": true, + "onlySubtractHigherSeverityLevel": true, "uniqueErrors": false } ====command==== diff --git a/test-harness/scenarios/formats/results-format-stylish-scoring.scenario b/test-harness/scenarios/formats/results-format-stylish-scoring.scenario index a48946469..93f99854e 100644 --- a/test-harness/scenarios/formats/results-format-stylish-scoring.scenario +++ b/test-harness/scenarios/formats/results-format-stylish-scoring.scenario @@ -80,7 +80,7 @@ paths: {} "E": 0 }, "threshold": 50, - "warningsSubtract": true, + "onlySubtractHigherSeverityLevel": true, "uniqueErrors": false } ====command==== diff --git a/test-harness/scenarios/overrides/aliases-scoring.scenario b/test-harness/scenarios/overrides/aliases-scoring.scenario index 8cc493ef2..12b9d4061 100644 --- a/test-harness/scenarios/overrides/aliases-scoring.scenario +++ b/test-harness/scenarios/overrides/aliases-scoring.scenario @@ -93,7 +93,7 @@ module.exports = { "E": 0 }, "threshold": 50, - "warningsSubtract": true, + "onlySubtractHigherSeverityLevel": true, "uniqueErrors": false } 
====asset:v2/document.json==== diff --git a/test-harness/scenarios/severity/fail-on-error-no-error-scoring.scenario b/test-harness/scenarios/severity/fail-on-error-no-error-scoring.scenario index 41fbd4a9e..66dfb2b87 100644 --- a/test-harness/scenarios/severity/fail-on-error-no-error-scoring.scenario +++ b/test-harness/scenarios/severity/fail-on-error-no-error-scoring.scenario @@ -47,7 +47,7 @@ Will only fail if there is an error, and there is not. Can still see all warning "E": 0 }, "threshold": 50, - "warningsSubtract": true, + "onlySubtractHigherSeverityLevel": true, "uniqueErrors": false } ====command==== diff --git a/test-harness/scenarios/severity/fail-on-error-scoring.scenario b/test-harness/scenarios/severity/fail-on-error-scoring.scenario index 3996bbe77..b30c9b5e5 100644 --- a/test-harness/scenarios/severity/fail-on-error-scoring.scenario +++ b/test-harness/scenarios/severity/fail-on-error-scoring.scenario @@ -58,7 +58,7 @@ Will fail and return 1 as exit code because errors exist with scoring data "E": 0 }, "threshold": 50, - "warningsSubtract": true, + "onlySubtractHigherSeverityLevel": true, "uniqueErrors": false } ====command-nix==== diff --git a/test-harness/scenarios/valid-no-errors.oas2-scoring.scenario b/test-harness/scenarios/valid-no-errors.oas2-scoring.scenario index 2d13af494..50c16f90b 100644 --- a/test-harness/scenarios/valid-no-errors.oas2-scoring.scenario +++ b/test-harness/scenarios/valid-no-errors.oas2-scoring.scenario @@ -47,7 +47,7 @@ module.exports = oas; "E": 0 }, "threshold": 50, - "warningsSubtract": true, + "onlySubtractHigherSeverityLevel": true, "uniqueErrors": false } ====command==== From 2ab64a96774dff89508ba2ceff0b1438e7aa9826 Mon Sep 17 00:00:00 2001 From: PagoNxt-Trade Date: Wed, 25 Jan 2023 10:08:16 +0100 Subject: [PATCH 12/12] docs(cli): fixed lint errors --- packages/cli/src/commands/lint.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/cli/src/commands/lint.ts b/packages/cli/src/commands/lint.ts index b8ffc7eca..aaab79257 100644 --- a/packages/cli/src/commands/lint.ts +++ b/packages/cli/src/commands/lint.ts @@ -308,7 +308,7 @@ const scoringThresholdNotEnough = (results: IRuleResult[], scoringConfig: Scorin getScoringLevel( getCountsBySeverity(groupedUniqueResults), scoringConfig.scoringSubtract, - scoringConfig.onlySubtractHigherSeverityLevel + scoringConfig.onlySubtractHigherSeverityLevel, ) ); }
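For reference, a rough sketch of the scoring computation that the configuration above drives. The per-severity percentages and letter boundaries below are illustrative assumptions (the shipped getScoringLevel/getScoringText in getScoring.ts read them from the --scoring-config tables); only the clamp to 0 and the onlySubtractHigherSeverityLevel behaviour described in the docs are mirrored here:

```ts
// Illustrative only: a flat "percent per issue" table instead of the real
// scoringSubtract config, which maps each severity to per-count percentages.
type Counts = { error: number; warn: number };

function getScore(counts: Counts, onlySubtractHigherSeverityLevel: boolean): number {
  let scoring = 100;
  if (counts.error > 0) {
    scoring -= counts.error * 10; // assumed 10% per error
  }
  // When the flag is true, warnings only subtract if no higher severity (errors) exists.
  if (counts.warn > 0 && (!onlySubtractHigherSeverityLevel || counts.error === 0)) {
    scoring -= counts.warn * 2; // assumed 2% per warning
  }
  return scoring > 0 ? scoring : 0; // clamped at 0, as in PATCH 11
}

function getLetter(scoring: number): string {
  // Assumed boundaries in the A-E style of the sample scoring-config.json
  if (scoring >= 90) return 'A';
  if (scoring >= 75) return 'B';
  if (scoring >= 65) return 'C';
  if (scoring >= 55) return 'D';
  return 'E';
}

// Example: 2 errors and 3 warnings with onlySubtractHigherSeverityLevel = true
// -> 100 - 2 * 10 = 80 -> 'B', which passes a threshold of 50.
```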