From 232873ae76911f675e1af2468e896fe6ebe41e51 Mon Sep 17 00:00:00 2001
From: Nikolay Vitkov <34244704+Lightning00Blade@users.noreply.github.com>
Date: Tue, 28 Feb 2023 12:55:20 +0100
Subject: [PATCH] chore: fix Mocha test runner suggestion when hooks fail
 (#9750)

---
 test/README.md                 |  2 +-
 test/TestSuites.json           |  2 +-
 test/package.json              |  1 +
 tools/mochaRunner/README.md    |  2 +-
 tools/mochaRunner/src/main.ts  | 58 +++++++++---------------------
 tools/mochaRunner/src/types.ts |  2 +-
 tools/mochaRunner/src/utils.ts | 65 ++++++++++++++++++++++++++--------
 7 files changed, 73 insertions(+), 59 deletions(-)

diff --git a/test/README.md b/test/README.md
index 3c48b5b3..cc4fe3b5 100644
--- a/test/README.md
+++ b/test/README.md
@@ -25,7 +25,7 @@ The best place to look is an existing test to see how they use the helpers.
 
 ## Skipping tests in specific conditions
 
-To skip tests edit the [TestExpecations](https://github.com/puppeteer/puppeteer/blob/main/test/TestExpectations.json) file. See [test runner documentation](https://github.com/puppeteer/puppeteer/tree/main/tools/mochaRunner) for more details.
+To skip tests edit the [TestExpectations](https://github.com/puppeteer/puppeteer/blob/main/test/TestExpectations.json) file. See [test runner documentation](https://github.com/puppeteer/puppeteer/tree/main/tools/mochaRunner) for more details.
 
 ## Running tests
 
diff --git a/test/TestSuites.json b/test/TestSuites.json
index 8920c140..be4b982b 100644
--- a/test/TestSuites.json
+++ b/test/TestSuites.json
@@ -43,7 +43,7 @@
       "expectedLineCoverage": 56
     }
   ],
-  "parameterDefinitons": {
+  "parameterDefinitions": {
     "chrome": {
       "PUPPETEER_PRODUCT": "chrome"
     },
diff --git a/test/package.json b/test/package.json
index aa1e1179..b9962611 100644
--- a/test/package.json
+++ b/test/package.json
@@ -15,6 +15,7 @@
         "../packages/testserver:build"
       ],
       "files": [
+        "../tools/mochaRunner/**",
         "src/**"
       ],
       "output": [
diff --git a/tools/mochaRunner/README.md b/tools/mochaRunner/README.md
index fd87ec19..94be666e 100644
--- a/tools/mochaRunner/README.md
+++ b/tools/mochaRunner/README.md
@@ -24,7 +24,7 @@ npm run build && npm run test -- --test-suite chrome-headless
 ## TestSuites.json
 
 Define test suites via the `testSuites` attribute. `parameters` can be used in the `TestExpectations.json` to disable tests
-based on parameters. The meaning for parameters is defined in `parameterDefinitons` which tell what env object corresponds
+based on parameters. The meaning for parameters is defined in `parameterDefinitions` which tell what env object corresponds
 to the given parameter.
 
 ## TestExpectations.json
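For readers without the runner config in front of them: `parameterDefinitions` maps each parameter name a suite lists to the environment object the runner injects when that suite runs, which is why the key rename above touches both `TestSuites.json` and the zod schema later in this patch. A minimal sketch of that shape, written here as a TypeScript literal; the suite id, platforms, and env values are illustrative, not copied from the real file:

```ts
// Illustrative sketch only: mirrors the corrected `parameterDefinitions` key.
const exampleSuitesFile = {
  testSuites: [
    {
      id: 'chrome-headless',
      platforms: ['linux', 'win32', 'darwin'],
      parameters: ['chrome', 'headless'],
      expectedLineCoverage: 56,
    },
  ],
  parameterDefinitions: {
    // Each parameter resolves to an env object merged into the test process.
    chrome: {PUPPETEER_PRODUCT: 'chrome'},
    headless: {HEADLESS: 'true'},
  },
};
```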
diff --git a/tools/mochaRunner/src/main.ts b/tools/mochaRunner/src/main.ts
index 76b8a4db..192d9613 100644
--- a/tools/mochaRunner/src/main.ts
+++ b/tools/mochaRunner/src/main.ts
@@ -31,10 +31,10 @@ import {
 import {
   extendProcessEnv,
   filterByPlatform,
-  prettyPrintJSON,
   readJSON,
   filterByParameters,
   getExpectationUpdates,
+  printSuggestions,
 } from './utils.js';
 
 function getApplicableTestSuites(
@@ -109,7 +109,7 @@ async function main() {
 
     const env = extendProcessEnv([
       ...parameters.map(param => {
-        return parsedSuitesFile.parameterDefinitons[param];
+        return parsedSuitesFile.parameterDefinitions[param];
       }),
       {
         PUPPETEER_SKIPPED_TEST_CONFIG: JSON.stringify(
@@ -211,45 +211,21 @@ async function main() {
     console.error(err);
   } finally {
     if (!noSuggestions) {
-      const toAdd = recommendations.filter(item => {
-        return item.action === 'add';
-      });
-      if (toAdd.length) {
-        console.log(
-          'Add the following to TestExpectations.json to ignore the error:'
-        );
-        prettyPrintJSON(
-          toAdd.map(item => {
-            return item.expectation;
-          })
-        );
-      }
-      const toRemove = recommendations.filter(item => {
-        return item.action === 'remove';
-      });
-      if (toRemove.length) {
-        console.log(
-          'Remove the following from the TestExpectations.json to ignore the error:'
-        );
-        prettyPrintJSON(
-          toRemove.map(item => {
-            return item.expectation;
-          })
-        );
-      }
-      const toUpdate = recommendations.filter(item => {
-        return item.action === 'update';
-      });
-      if (toUpdate.length) {
-        console.log(
-          'Update the following expectations in the TestExpecations.json to ignore the error:'
-        );
-        prettyPrintJSON(
-          toUpdate.map(item => {
-            return item.expectation;
-          })
-        );
-      }
+      printSuggestions(
+        recommendations,
+        'add',
+        'Add the following to TestExpectations.json to ignore the error:'
+      );
+      printSuggestions(
+        recommendations,
+        'remove',
+        'Remove the following from the TestExpectations.json to ignore the error:'
+      );
+      printSuggestions(
+        recommendations,
+        'update',
+        'Update the following expectations in the TestExpectations.json to ignore the error:'
+      );
     }
     process.exit(fail ? 1 : 0);
   }
 }
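The three `printSuggestions` calls above replace the copy-pasted filter/print blocks; each call prints the suggested expectations for a single action so they can be pasted into `TestExpectations.json`. A hedged sketch of what one suggested entry looks like, matching the object built in `getExpectationUpdates` later in this patch; the pattern and values below are made up for illustration:

```ts
// Hypothetical suggested entry; the shape follows
// RecommendedExpectation['expectation'] as defined in utils.ts.
const suggestedEntry = {
  testIdPattern: '[navigation.spec] navigation Page.goto should work',
  platforms: ['linux'],
  parameters: ['chrome', 'headless'],
  expectations: ['FAIL'],
};
```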
1 : 0); } diff --git a/tools/mochaRunner/src/types.ts b/tools/mochaRunner/src/types.ts index e8fac25a..d59237eb 100644 --- a/tools/mochaRunner/src/types.ts +++ b/tools/mochaRunner/src/types.ts @@ -31,7 +31,7 @@ export type TestSuite = z.infer; export const zTestSuiteFile = z.object({ testSuites: z.array(zTestSuite), - parameterDefinitons: z.record(z.any()), + parameterDefinitions: z.record(z.any()), }); export type TestSuiteFile = z.infer; diff --git a/tools/mochaRunner/src/utils.ts b/tools/mochaRunner/src/utils.ts index 687b6d34..eba34a03 100644 --- a/tools/mochaRunner/src/utils.ts +++ b/tools/mochaRunner/src/utils.ts @@ -57,6 +57,24 @@ export function prettyPrintJSON(json: unknown): void { console.log(JSON.stringify(json, null, 2)); } +export function printSuggestions( + recommendations: RecommendedExpectation[], + action: RecommendedExpectation['action'], + message: string +): void { + const toPrint = recommendations.filter(item => { + return item.action === action; + }); + if (toPrint.length) { + console.log(message); + prettyPrintJSON( + toPrint.map(item => { + return item.expectation; + }) + ); + } +} + export function filterByParameters( expectations: TestExpectation[], parameters: string[] @@ -88,9 +106,8 @@ export function findEffectiveExpectationForTest( .pop(); } -type RecommendedExpecation = { +type RecommendedExpectation = { expectation: TestExpectation; - test: MochaTestResult; action: 'remove' | 'add' | 'update'; }; @@ -104,28 +121,42 @@ export function isWildCardPattern(testIdPattern: string): boolean { export function getExpectationUpdates( results: MochaResults, - expecations: TestExpectation[], + expectations: TestExpectation[], context: { platforms: NodeJS.Platform[]; parameters: string[]; } -): RecommendedExpecation[] { - const output: RecommendedExpecation[] = []; +): RecommendedExpectation[] { + const output: Map = new Map(); for (const pass of results.passes) { - const expectationEntry = findEffectiveExpectationForTest(expecations, pass); + // If an error occurs during a hook + // the error not have a file associated with it + if (!pass.file) { + continue; + } + + const expectationEntry = findEffectiveExpectationForTest( + expectations, + pass + ); if (expectationEntry && !expectationEntry.expectations.includes('PASS')) { - output.push({ + addEntry({ expectation: expectationEntry, - test: pass, action: 'remove', }); } } for (const failure of results.failures) { + // If an error occurs during a hook + // the error not have a file associated with it + if (!failure.file) { + continue; + } + const expectationEntry = findEffectiveExpectationForTest( - expecations, + expectations, failure ); // If the effective explanation is a wildcard, we recommend adding a new @@ -140,7 +171,7 @@ export function getExpectationUpdates( getTestResultForFailure(failure) ) ) { - output.push({ + addEntry({ expectation: { ...expectationEntry, expectations: [ @@ -148,24 +179,30 @@ export function getExpectationUpdates( getTestResultForFailure(failure), ], }, - test: failure, action: 'update', }); } } else { - output.push({ + addEntry({ expectation: { testIdPattern: getTestId(failure.file, failure.fullTitle), platforms: context.platforms, parameters: context.parameters, expectations: [getTestResultForFailure(failure)], }, - test: failure, action: 'add', }); } } - return output; + + function addEntry(value: RecommendedExpectation) { + const key = JSON.stringify(value); + if (!output.has(key)) { + output.set(key, value); + } + } + + return [...output.values()]; } export function 
 export function getTestResultForFailure(
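To make the fix concrete: when a Mocha hook such as `before` or `beforeEach` throws, the reported failure can lack a `file`, so the old code built a meaningless `testIdPattern` for the suggestion, and the same suggestion could show up more than once, which is presumably why the patch also de-duplicates entries. A rough, self-contained sketch of those two behaviours, the file guard and the Map-based de-duplication; the function name and the simplified result type here are illustrative only, the real implementation is `getExpectationUpdates` in `tools/mochaRunner/src/utils.ts`:

```ts
// Simplified sketch of the guard and de-duplication introduced by this patch.
type SimplifiedResult = {file?: string; fullTitle: string};
type Suggestion = {expectation: unknown; action: 'remove' | 'add' | 'update'};

function collectSuggestions(failures: SimplifiedResult[]): Suggestion[] {
  const output = new Map<string, Suggestion>();
  for (const failure of failures) {
    // Hook errors carry no file, so no useful testIdPattern can be built;
    // skip them instead of recommending a bogus expectation.
    if (!failure.file) {
      continue;
    }
    const value: Suggestion = {
      // Placeholder pattern; the real code derives it via getTestId(file, fullTitle).
      expectation: {testIdPattern: failure.fullTitle},
      action: 'add',
    };
    // Keying on the serialized value keeps duplicate suggestions out of the output.
    const key = JSON.stringify(value);
    if (!output.has(key)) {
      output.set(key, value);
    }
  }
  return [...output.values()];
}
```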