Report TableBench output as another performance benchmark to Kusto (#22225)

#### Description


[AB#8399](https://dev.azure.com/fluidframework/internal/_workitems/edit/7831/)

- Updates the TableBench tests to run benchmarks with custom outputs instead of execution-time benchmarks (replacing `it()` with `benchmarkCustom()`; see the sketch below).
- Adds a new mocha configuration to the `examples/benchmarks/tablebench` package and updates `.gitignore`.
- Removes the stray `--perfMode` entry from the `spec` list in `packages/dds/tree`.

Running `npm run test:customBenchmarks` displays the output of the custom benchmark tests (see the screenshots below).
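
A minimal sketch of the `benchmarkCustom()` pattern now used in `table.bench.spec.ts` (the byte counts here are placeholders; the real tests compute them from encoded table summaries):

```ts
import { BenchmarkType, benchmarkCustom } from "@fluid-tools/benchmark";

// Placeholder sizes; the real tests measure encoded JSON and DDS attachment summaries.
const summaryBytes = 1024;
const rowMajorJsonBytes = 2048;
const colMajorJsonBytes = 1536;

benchmarkCustom({
	only: false,
	type: BenchmarkType.Measurement,
	title: "Row-major JSON (Typical Database Baseline)",
	run: async (reporter) => {
		// Instead of timing execution, report named measurements; the custom
		// reporter writes them to .customBenchmarksOutput/.
		reporter.addMeasurement("summaryBytes", summaryBytes);
		reporter.addMeasurement("vs row-major", summaryBytes / rowMajorJsonBytes);
		reporter.addMeasurement("vs col-major", summaryBytes / colMajorJsonBytes);
	},
});
```

The SharedMatrix and SharedTree benchmarks in the diff below follow the same shape, populating the DDS inside `run` before reporting its summary size.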

<img width="1096" alt="Screenshot 2024-08-15 at 17 28 18"
src="https://github.com/user-attachments/assets/297ae71e-2157-48e5-aeaf-a521677ba885">

<img width="785" alt="Screenshot 2024-08-15 at 17 28 33"
src="https://github.com/user-attachments/assets/901e649e-8249-4e99-b836-5082c0c68ffb">


#### Follow-up Items
- Make sure the custom benchmark tests run in the pipeline (#22204).
- Fix a bug in the `benchmarkCustom()` function that was discovered while working on this task.
- https://dev.azure.com/fluidframework/internal/_workitems/edit/12925
jikim-msft committed Aug 16, 2024
1 parent 83c8b37 commit 1850c61
Showing 5 changed files with 104 additions and 57 deletions.
5 changes: 4 additions & 1 deletion examples/benchmarks/tablebench/.gitignore
@@ -52,4 +52,7 @@ intel_modules/
temp_modules/

# Fuzz test operation files
**/fuzz/failures/**
**/fuzz/failures/**

# Output folder for custom benchmark tests
.customBenchmarksOutput
33 changes: 33 additions & 0 deletions examples/benchmarks/tablebench/.mocharc.customBenchmarks.cjs
@@ -0,0 +1,33 @@
/*!
* Copyright (c) Microsoft Corporation and contributors. All rights reserved.
* Licensed under the MIT License.
*/

/**
 * Mocha configuration file to run custom benchmark tests
*/
"use strict";

const getFluidTestMochaConfig = require("@fluid-internal/mocha-test-setup/mocharc-common");

const packageDir = __dirname;
const baseConfig = getFluidTestMochaConfig(packageDir);

const nodeOptions =
	baseConfig["node-option"] !== undefined
		? Array.isArray(baseConfig["node-option"])
			? baseConfig["node-option"]
			: [baseConfig["node-option"]] // If string, wrap as array to use spread operator
		: []; // If undefined, use an empty array

nodeOptions.push("expose-gc", "gc-global", "unhandled-rejections=strict");

module.exports = {
	...baseConfig,
	"fgrep": ["@CustomBenchmark"],
	"node-option": nodeOptions, // without leading "--"
	"recursive": true,
	"reporter": "@fluid-tools/benchmark/dist/MochaReporter.js",
	"reporterOptions": ["reportDir=.customBenchmarksOutput/"],
	"spec": ["lib/test/**/*.*js"],
};
1 change: 1 addition & 0 deletions examples/benchmarks/tablebench/package.json
@@ -42,6 +42,7 @@
"start:webpack": "webpack serve --config webpack.config.cjs --env mode=tinylicious",
"test": "npm run test:mocha",
"test:benchmark:report": "mocha --exit --perfMode --parentProcess --fgrep @Benchmark --reporter @fluid-tools/benchmark/dist/MochaReporter.js --timeout 60000",
"test:customBenchmarks": "mocha --config ./.mocharc.customBenchmarks.cjs",
"test:mocha": "npm run test:mocha:esm",
"test:mocha:esm": "mocha --exit",
"test:mocha:verbose": "cross-env FLUID_TEST_VERBOSE=1 npm run test:mocha",
120 changes: 65 additions & 55 deletions examples/benchmarks/tablebench/src/test/table.bench.spec.ts
@@ -3,7 +3,12 @@
* Licensed under the MIT License.
*/

import { BenchmarkType, benchmark, isInPerformanceTestingMode } from "@fluid-tools/benchmark";
import {
	BenchmarkType,
	benchmark,
	benchmarkCustom,
	isInPerformanceTestingMode,
} from "@fluid-tools/benchmark";
import { IChannel } from "@fluidframework/datastore-definitions/internal";
import { SharedMatrix } from "@fluidframework/matrix/internal";
import { type ITree, NodeFromSchema, TreeViewConfiguration } from "@fluidframework/tree";
@@ -130,70 +135,75 @@ describe("Table", () => {
const colMajorJsonBytes = measureEncodedLength(JSON.stringify(transposeTable(data)));
let summaryBytes: number;

// After each test, print the summary size information to the console.
afterEach(() => {
// When using a logger, Mocha suppresses 'console.log()' by default.
// Writing directly to 'process.stdout' bypasses this suppression.
process.stdout.write(` Summary: ${summaryBytes} bytes\n`);
process.stdout.write(
` vs row-major: ${(summaryBytes / rowMajorJsonBytes).toLocaleString(
undefined,
{
maximumFractionDigits: 2,
minimumFractionDigits: 2,
},
)}x\n`,
);
process.stdout.write(
` vs col-major: ${(summaryBytes / colMajorJsonBytes).toLocaleString(
undefined,
{
maximumFractionDigits: 2,
minimumFractionDigits: 2,
},
)}x\n`,
);
benchmarkCustom({
only: false,
type: BenchmarkType.Measurement,
title: `Row-major JSON (Typical Database Baseline)`,
run: async (reporter) => {
summaryBytes = rowMajorJsonBytes;
reporter.addMeasurement(`summaryBytes`, summaryBytes);
reporter.addMeasurement(`vs row-major:`, summaryBytes / rowMajorJsonBytes);
reporter.addMeasurement(`vs col-major:`, summaryBytes / colMajorJsonBytes);
},
});

it("Row-major JSON (Typical Database Baseline)", () => {
// Row/col major sizes are precalculated before the test run.
// Copy the value to 'summaryBytes' for reporting by 'afterEach' above.
summaryBytes = rowMajorJsonBytes;
benchmarkCustom({
only: false,
type: BenchmarkType.Measurement,
title: `Column-major JSON (Compact REST Baseline)`,
run: async (reporter) => {
summaryBytes = colMajorJsonBytes;
reporter.addMeasurement(`summaryBytes`, summaryBytes);
reporter.addMeasurement(`vs row-major:`, summaryBytes / rowMajorJsonBytes);
reporter.addMeasurement(`vs col-major:`, summaryBytes / colMajorJsonBytes);
},
});

it("Column-major JSON (Compact REST Baseline)", () => {
// Row/col major sizes are precalculated before the test run.
// Copy the value to 'summaryBytes' for reporting by 'afterEach' above.
summaryBytes = colMajorJsonBytes;
});

it("SharedMatrix", () => {
const columnNames = Object.keys(data[0]);

const { channel, processAllMessages } = create(SharedMatrix.getFactory());
matrix = channel as SharedMatrix;
matrix.insertCols(0, columnNames.length);
matrix.insertRows(0, data.length);

for (let r = 0; r < data.length; r++) {
for (const [c, key] of columnNames.entries()) {
matrix.setCell(r, c, (data as any)[r][key]);
benchmarkCustom({
only: false,
type: BenchmarkType.Measurement,
title: `SharedMatrix`,
run: async (reporter) => {
const columnNames = Object.keys(data[0]);

const { channel, processAllMessages } = create(SharedMatrix.getFactory());
matrix = channel as SharedMatrix;
matrix.insertCols(0, columnNames.length);
matrix.insertRows(0, data.length);

for (let r = 0; r < data.length; r++) {
for (const [c, key] of columnNames.entries()) {
matrix.setCell(r, c, (data as any)[r][key]);
}
}
}

processAllMessages();
summaryBytes = measureAttachmentSummary(channel);
processAllMessages();
summaryBytes = measureAttachmentSummary(channel);

reporter.addMeasurement(`summaryBytes`, summaryBytes);
reporter.addMeasurement(`vs row-major:`, summaryBytes / rowMajorJsonBytes);
reporter.addMeasurement(`vs col-major:`, summaryBytes / colMajorJsonBytes);
},
});

it("SharedTree", () => {
const { channel, processAllMessages } = create(SharedTree.getFactory());
tree = channel;
benchmarkCustom({
only: false,
type: BenchmarkType.Measurement,
title: `SharedTree`,
run: async (reporter) => {
const { channel, processAllMessages } = create(SharedTree.getFactory());
tree = channel;

const view = tree.viewWith(new TreeViewConfiguration({ schema: Table }));
view.initialize(data);
const view = tree.viewWith(new TreeViewConfiguration({ schema: Table }));
view.initialize(data);

processAllMessages();
summaryBytes = measureAttachmentSummary(channel);
processAllMessages();
summaryBytes = measureAttachmentSummary(channel);

reporter.addMeasurement(`summaryBytes`, summaryBytes);
reporter.addMeasurement(`vs row-major:`, summaryBytes / rowMajorJsonBytes);
reporter.addMeasurement(`vs col-major:`, summaryBytes / colMajorJsonBytes);
},
});
});
});
2 changes: 1 addition & 1 deletion packages/dds/tree/.mocharc.customBenchmarks.cjs
@@ -29,5 +29,5 @@ module.exports = {
"recursive": true,
"reporter": "@fluid-tools/benchmark/dist/MochaReporter.js",
"reporterOptions": ["reportDir=.customBenchmarksOutput/"],
"spec": ["lib/test/**/*.*js", "--perfMode"],
"spec": ["lib/test/**/*.*js"],
};
