chore: fix Mocha test runner suggestion when hooks fail (#9750)

This commit is contained in:
Nikolay Vitkov 2023-02-28 12:55:20 +01:00 committed by GitHub
parent 4a365a42b4
commit 232873ae76
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 73 additions and 59 deletions

View File

@ -25,7 +25,7 @@ The best place to look is an existing test to see how they use the helpers.
## Skipping tests in specific conditions ## Skipping tests in specific conditions
To skip tests edit the [TestExpecations](https://github.com/puppeteer/puppeteer/blob/main/test/TestExpectations.json) file. See [test runner documentation](https://github.com/puppeteer/puppeteer/tree/main/tools/mochaRunner) for more details. To skip tests edit the [TestExpectations](https://github.com/puppeteer/puppeteer/blob/main/test/TestExpectations.json) file. See [test runner documentation](https://github.com/puppeteer/puppeteer/tree/main/tools/mochaRunner) for more details.
## Running tests ## Running tests

View File

@ -43,7 +43,7 @@
"expectedLineCoverage": 56 "expectedLineCoverage": 56
} }
], ],
"parameterDefinitons": { "parameterDefinitions": {
"chrome": { "chrome": {
"PUPPETEER_PRODUCT": "chrome" "PUPPETEER_PRODUCT": "chrome"
}, },

View File

@ -15,6 +15,7 @@
"../packages/testserver:build" "../packages/testserver:build"
], ],
"files": [ "files": [
"../tools/mochaRunner/**",
"src/**" "src/**"
], ],
"output": [ "output": [

View File

@ -24,7 +24,7 @@ npm run build && npm run test -- --test-suite chrome-headless
## TestSuites.json ## TestSuites.json
Define test suites via the `testSuites` attribute. `parameters` can be used in the `TestExpectations.json` to disable tests Define test suites via the `testSuites` attribute. `parameters` can be used in the `TestExpectations.json` to disable tests
based on parameters. The meaning for parameters is defined in `parameterDefinitons` which tell what env object corresponds based on parameters. The meaning for parameters is defined in `parameterDefinitions` which tell what env object corresponds
to the given parameter. to the given parameter.
## TestExpectations.json ## TestExpectations.json

View File

@ -31,10 +31,10 @@ import {
import { import {
extendProcessEnv, extendProcessEnv,
filterByPlatform, filterByPlatform,
prettyPrintJSON,
readJSON, readJSON,
filterByParameters, filterByParameters,
getExpectationUpdates, getExpectationUpdates,
printSuggestions,
} from './utils.js'; } from './utils.js';
function getApplicableTestSuites( function getApplicableTestSuites(
@ -109,7 +109,7 @@ async function main() {
const env = extendProcessEnv([ const env = extendProcessEnv([
...parameters.map(param => { ...parameters.map(param => {
return parsedSuitesFile.parameterDefinitons[param]; return parsedSuitesFile.parameterDefinitions[param];
}), }),
{ {
PUPPETEER_SKIPPED_TEST_CONFIG: JSON.stringify( PUPPETEER_SKIPPED_TEST_CONFIG: JSON.stringify(
@ -211,46 +211,22 @@ async function main() {
console.error(err); console.error(err);
} finally { } finally {
if (!noSuggestions) { if (!noSuggestions) {
const toAdd = recommendations.filter(item => { printSuggestions(
return item.action === 'add'; recommendations,
}); 'add',
if (toAdd.length) {
console.log(
'Add the following to TestExpectations.json to ignore the error:' 'Add the following to TestExpectations.json to ignore the error:'
); );
prettyPrintJSON( printSuggestions(
toAdd.map(item => { recommendations,
return item.expectation; 'remove',
})
);
}
const toRemove = recommendations.filter(item => {
return item.action === 'remove';
});
if (toRemove.length) {
console.log(
'Remove the following from the TestExpectations.json to ignore the error:' 'Remove the following from the TestExpectations.json to ignore the error:'
); );
prettyPrintJSON( printSuggestions(
toRemove.map(item => { recommendations,
return item.expectation; 'update',
}) 'Update the following expectations in the TestExpectations.json to ignore the error:'
); );
} }
const toUpdate = recommendations.filter(item => {
return item.action === 'update';
});
if (toUpdate.length) {
console.log(
'Update the following expectations in the TestExpecations.json to ignore the error:'
);
prettyPrintJSON(
toUpdate.map(item => {
return item.expectation;
})
);
}
}
process.exit(fail ? 1 : 0); process.exit(fail ? 1 : 0);
} }
} }

View File

@ -31,7 +31,7 @@ export type TestSuite = z.infer<typeof zTestSuite>;
export const zTestSuiteFile = z.object({ export const zTestSuiteFile = z.object({
testSuites: z.array(zTestSuite), testSuites: z.array(zTestSuite),
parameterDefinitons: z.record(z.any()), parameterDefinitions: z.record(z.any()),
}); });
export type TestSuiteFile = z.infer<typeof zTestSuiteFile>; export type TestSuiteFile = z.infer<typeof zTestSuiteFile>;

View File

@ -57,6 +57,24 @@ export function prettyPrintJSON(json: unknown): void {
console.log(JSON.stringify(json, null, 2)); console.log(JSON.stringify(json, null, 2));
} }
export function printSuggestions(
recommendations: RecommendedExpectation[],
action: RecommendedExpectation['action'],
message: string
): void {
const toPrint = recommendations.filter(item => {
return item.action === action;
});
if (toPrint.length) {
console.log(message);
prettyPrintJSON(
toPrint.map(item => {
return item.expectation;
})
);
}
}
export function filterByParameters( export function filterByParameters(
expectations: TestExpectation[], expectations: TestExpectation[],
parameters: string[] parameters: string[]
@ -88,9 +106,8 @@ export function findEffectiveExpectationForTest(
.pop(); .pop();
} }
type RecommendedExpecation = { type RecommendedExpectation = {
expectation: TestExpectation; expectation: TestExpectation;
test: MochaTestResult;
action: 'remove' | 'add' | 'update'; action: 'remove' | 'add' | 'update';
}; };
@ -104,28 +121,42 @@ export function isWildCardPattern(testIdPattern: string): boolean {
export function getExpectationUpdates( export function getExpectationUpdates(
results: MochaResults, results: MochaResults,
expecations: TestExpectation[], expectations: TestExpectation[],
context: { context: {
platforms: NodeJS.Platform[]; platforms: NodeJS.Platform[];
parameters: string[]; parameters: string[];
} }
): RecommendedExpecation[] { ): RecommendedExpectation[] {
const output: RecommendedExpecation[] = []; const output: Map<string, RecommendedExpectation> = new Map();
for (const pass of results.passes) { for (const pass of results.passes) {
const expectationEntry = findEffectiveExpectationForTest(expecations, pass); // If an error occurs during a hook
// the error may not have a file associated with it
if (!pass.file) {
continue;
}
const expectationEntry = findEffectiveExpectationForTest(
expectations,
pass
);
if (expectationEntry && !expectationEntry.expectations.includes('PASS')) { if (expectationEntry && !expectationEntry.expectations.includes('PASS')) {
output.push({ addEntry({
expectation: expectationEntry, expectation: expectationEntry,
test: pass,
action: 'remove', action: 'remove',
}); });
} }
} }
for (const failure of results.failures) { for (const failure of results.failures) {
// If an error occurs during a hook
// the error may not have a file associated with it
if (!failure.file) {
continue;
}
const expectationEntry = findEffectiveExpectationForTest( const expectationEntry = findEffectiveExpectationForTest(
expecations, expectations,
failure failure
); );
// If the effective explanation is a wildcard, we recommend adding a new // If the effective explanation is a wildcard, we recommend adding a new
@ -140,7 +171,7 @@ export function getExpectationUpdates(
getTestResultForFailure(failure) getTestResultForFailure(failure)
) )
) { ) {
output.push({ addEntry({
expectation: { expectation: {
...expectationEntry, ...expectationEntry,
expectations: [ expectations: [
@ -148,24 +179,30 @@ export function getExpectationUpdates(
getTestResultForFailure(failure), getTestResultForFailure(failure),
], ],
}, },
test: failure,
action: 'update', action: 'update',
}); });
} }
} else { } else {
output.push({ addEntry({
expectation: { expectation: {
testIdPattern: getTestId(failure.file, failure.fullTitle), testIdPattern: getTestId(failure.file, failure.fullTitle),
platforms: context.platforms, platforms: context.platforms,
parameters: context.parameters, parameters: context.parameters,
expectations: [getTestResultForFailure(failure)], expectations: [getTestResultForFailure(failure)],
}, },
test: failure,
action: 'add', action: 'add',
}); });
} }
} }
return output;
function addEntry(value: RecommendedExpectation) {
const key = JSON.stringify(value);
if (!output.has(key)) {
output.set(key, value);
}
}
return [...output.values()];
} }
export function getTestResultForFailure( export function getTestResultForFailure(