2014-06-01 20:38:55 +00:00
|
|
|
window.benchmarkClient = {
|
2017-08-22 21:00:49 +00:00
|
|
|
displayUnit: 'runs/min', // unit used to present results; compared against 'ms' in didFinishLastIteration to pick detailed vs. summary view (presumably overridable elsewhere — TODO confirm)
|
|
|
|
iterationCount: 10, // NOTE(review): presumably the number of times the whole suite list is run — confirm against startBenchmark (not visible here)
|
Compute the final score using geometric mean in Speedometer 2.0
https://bugs.webkit.org/show_bug.cgi?id=172968
Reviewed by Saam Barati.
Make Speedometer 2.0 use the geometric mean of the subtotal of each test suite instead of the total.
In Speedometer 1.0, we used the total time to compute the final score because we wanted to make
the slowest framework and library faster. The fastest suite (FlightJS) still accounted for ~6% and
the slowest case (React) accounted for ~25% so we felt the total time, or the arithmetic mean with
a constant factor, was a good metric to track.
In the latest version of Speedometer 2.0, however, the fastest suite (Preact) runs in ~55ms whereas
the slowest suite (Inferno) takes 1.5s on Safari. Since the total time is 6.5s, Preact's suite only
accounts for ~0.8% of the total score while Inferno's suite accounts for ~23% of the total score.
Since the goal of Speedometer is to approximate different kinds of DOM API use patterns on the Web,
we want each framework & library to have some measurement impact on the overall benchmark score.
Furthermore, after r221205, we're testing both debug build of Ember.js as well as release build.
Since debug build is 4x slower, using the total time or the arithmetic mean thereof will effectively
give 4x as much weight to debug build of Ember.js relative to release build of Ember.js. Given only
~5% of websites that deploy Ember.js use debug build, this weighting is clearly not right.
This patch, therefore, replaces the arithmetic mean by the geometric mean to compute the final score.
It also moves the code to compute the final score to BenchmarkRunner to be shared between main.js
and InteractiveRunner.html.
* Speedometer/InteractiveRunner.html:
(.didRunSuites): Show geometric mean, arithmetic mean, total, as well as the score for completeness
since this is a debugging page for developers.
* Speedometer/resources/benchmark-runner.js:
(BenchmarkRunner.prototype.step): Added mean, geomean, and score as measuredValues' properties.
(BenchmarkRunner.prototype._runTestAndRecordResults): Removed the dead code.
(BenchmarkRunner.prototype._finalize): Compute and add total, arithmetic mean (just mean in the code),
and geometric mean (geomean) to measuredValues.
* Speedometer/resources/main.js:
(window.benchmarkClient): Replaced testsCount by stepsCount and _timeValues by _measuredValuesList.
(window.benchmarkClient.willRunTest):
(window.benchmarkClient.didRunTest):
(window.benchmarkClient.didRunSuites): Store measuredValues object instead of just the total time.
(window.benchmarkClient.didFinishLastIteration):
(window.benchmarkClient._computeResults):
(window.benchmarkClient._computeResults.valueForUnit): Renamed from totalTimeInDisplayUnit. Now simply
retrieves the values computed by BenchmarkRunner's _finalize.
(startBenchmark):
(computeScore): Deleted.
Canonical link: https://commits.webkit.org/193020@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@221659 268f45cc-cd09-0410-ab3c-d52691b4dbfc
2017-09-06 02:37:41 +00:00
|
|
|
stepCount: null, // total number of test steps; denominator for the progress bar and the "( x / y )" info text — populated before the run starts (TODO confirm where)
|
2014-06-02 19:57:39 +00:00
|
|
|
suitesCount: null, // number of suites; read by _computeResults — populated elsewhere (TODO confirm)
|
Compute the final score using geometric mean in Speedometer 2.0
https://bugs.webkit.org/show_bug.cgi?id=172968
Reviewed by Saam Barati.
Make Speedometer 2.0 use the geometric mean of the subtotal of each test suite instead of the total..
In Speedometer 1.0, we used the total time to compute the final score because we wanted to make
the slowest framework and library faster. The fastest suite (FlightJS) still accounted for ~6% and
the slowest case (React) accounted for ~25% so we felt the total time, or the arithmetic mean with
a constant factor, was a good metric to track.
In the latest version of Speedometer 2.0, however, the fastest suite (Preact) runs in ~55ms whereas
the slowest suite (Inferno) takes 1.5s on Safari. Since the total time is 6.5s, Preact's suite only
accounts for ~0.8% of the total score while Inferno's suite accounts for ~23% of the total score.
Since the goal of Speedometer is to approximate different kinds of DOM API use patterns on the Web,
we want each framework & library to have some measurement impact on the overall benchmark score.
Furthermore, after r221205, we're testing both debug build of Ember.js as well as release build.
Since debug build is 4x slower, using the total time or the arithmetic mean thereof will effectively
give 4x as much weight to debug build of Ember.js relative to release build of Ember.js. Given only
~5% of websites that deploy Ember.js use debug build, this weighting is clearly not right.
This patch, therefore, replaces the arithmetic mean by the geometric mean to compute the final score.
It also moves the code to compute the final score to BenchmarkRunner to be shared between main.js
and InteractiveRunner.html.
* Speedometer/InteractiveRunner.html:
(.didRunSuites): Show geometric mean, arithmetic mean, total, as well as the score for completeness
since this is a debugging page for developers.
* Speedometer/resources/benchmark-runner.js:
(BenchmarkRunner.prototype.step): Added mean, geomean, and score as measuredValues' properties.
(BenchmarkRunner.prototype._runTestAndRecordResults): Removed the dead code.
(BenchmarkRunner.prototype._finalize): Compute and add total, arithmetic mean (just mean in the code),
and geometric mean (geomean) to measuredValues.
* Speedometer/resources/main.js:
(window.benchmarkClient): Replaced testsCount by stepsCount and _timeValues by _measuredValuesList.
(window.benchmarkClient.willRunTest):
(window.benchmarkClient.didRunTest):
(window.benchmarkClient.didRunSuites): Store measuredValues object instead of just the total time.
(window.benchmarkClient.didFinishLastIteration):
(window.benchmarkClient._computeResults):
(window.benchmarkClient._computeResults.valueForUnit): Renamed from totalTimeInDisplayUnit. Now simply
retrieves the values computed by BenchmarkRunner's_finalize.
(startBenchmark):
(computeScore): Deleted.
Canonical link: https://commits.webkit.org/193020@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@221659 268f45cc-cd09-0410-ab3c-d52691b4dbfc
2017-09-06 02:37:41 +00:00
|
|
|
_measuredValuesList: [], // one measuredValues object per completed iteration; pushed in didRunSuites, reset in willStartFirstIteration
|
2014-06-01 21:12:00 +00:00
|
|
|
_finishedTestCount: 0, // steps completed so far; incremented in didRunTest, reset in willStartFirstIteration
|
|
|
|
_progressCompleted: null, // cached #progress-completed element; its width is updated as steps finish
|
2014-06-02 19:57:39 +00:00
|
|
|
willAddTestFrame: function (frame) {
|
|
|
|
var main = document.querySelector('main');
|
|
|
|
var style = getComputedStyle(main);
|
|
|
|
frame.style.left = main.offsetLeft + parseInt(style.borderLeftWidth) + parseInt(style.paddingLeft) + 'px';
|
|
|
|
frame.style.top = main.offsetTop + parseInt(style.borderTopWidth) + parseInt(style.paddingTop) + 'px';
|
|
|
|
},
|
|
|
|
willRunTest: function (suite, test) {
|
Compute the final score using geometric mean in Speedometer 2.0
https://bugs.webkit.org/show_bug.cgi?id=172968
Reviewed by Saam Barati.
Make Speedometer 2.0 use the geometric mean of the subtotal of each test suite instead of the total..
In Speedometer 1.0, we used the total time to compute the final score because we wanted to make
the slowest framework and library faster. The fastest suite (FlightJS) still accounted for ~6% and
the slowest case (React) accounted for ~25% so we felt the total time, or the arithmetic mean with
a constant factor, was a good metric to track.
In the latest version of Speedometer 2.0, however, the fastest suite (Preact) runs in ~55ms whereas
the slowest suite (Inferno) takes 1.5s on Safari. Since the total time is 6.5s, Preact's suite only
accounts for ~0.8% of the total score while Inferno's suite accounts for ~23% of the total score.
Since the goal of Speedometer is to approximate different kinds of DOM API use patterns on the Web,
we want each framework & library to have some measurement impact on the overall benchmark score.
Furthermore, after r221205, we're testing both debug build of Ember.js as well as release build.
Since debug build is 4x slower, using the total time or the arithmetic mean thereof will effectively
give 4x as much weight to debug build of Ember.js relative to release build of Ember.js. Given only
~5% of websites that deploy Ember.js use debug build, this weighting is clearly not right.
This patch, therefore, replaces the arithmetic mean by the geometric mean to compute the final score.
It also moves the code to compute the final score to BenchmarkRunner to be shared between main.js
and InteractiveRunner.html.
* Speedometer/InteractiveRunner.html:
(.didRunSuites): Show geometric mean, arithmetic mean, total, as well as the score for completeness
since this is a debugging page for developers.
* Speedometer/resources/benchmark-runner.js:
(BenchmarkRunner.prototype.step): Added mean, geomean, and score as measuredValues' properties.
(BenchmarkRunner.prototype._runTestAndRecordResults): Removed the dead code.
(BenchmarkRunner.prototype._finalize): Compute and add total, arithmetic mean (just mean in the code),
and geometric mean (geomean) to measuredValues.
* Speedometer/resources/main.js:
(window.benchmarkClient): Replaced testsCount by stepsCount and _timeValues by _measuredValuesList.
(window.benchmarkClient.willRunTest):
(window.benchmarkClient.didRunTest):
(window.benchmarkClient.didRunSuites): Store measuredValues object instead of just the total time.
(window.benchmarkClient.didFinishLastIteration):
(window.benchmarkClient._computeResults):
(window.benchmarkClient._computeResults.valueForUnit): Renamed from totalTimeInDisplayUnit. Now simply
retrieves the values computed by BenchmarkRunner's_finalize.
(startBenchmark):
(computeScore): Deleted.
Canonical link: https://commits.webkit.org/193020@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@221659 268f45cc-cd09-0410-ab3c-d52691b4dbfc
2017-09-06 02:37:41 +00:00
|
|
|
document.getElementById('info').textContent = suite.name + ' ( ' + this._finishedTestCount + ' / ' + this.stepCount + ' )';
|
2014-06-02 19:57:39 +00:00
|
|
|
},
|
2014-06-01 20:38:55 +00:00
|
|
|
didRunTest: function () {
|
2014-06-01 21:12:00 +00:00
|
|
|
this._finishedTestCount++;
|
Compute the final score using geometric mean in Speedometer 2.0
https://bugs.webkit.org/show_bug.cgi?id=172968
Reviewed by Saam Barati.
Make Speedometer 2.0 use the geometric mean of the subtotal of each test suite instead of the total..
In Speedometer 1.0, we used the total time to compute the final score because we wanted to make
the slowest framework and library faster. The fastest suite (FlightJS) still accounted for ~6% and
the slowest case (React) accounted for ~25% so we felt the total time, or the arithmetic mean with
a constant factor, was a good metric to track.
In the latest version of Speedometer 2.0, however, the fastest suite (Preact) runs in ~55ms whereas
the slowest suite (Inferno) takes 1.5s on Safari. Since the total time is 6.5s, Preact's suite only
accounts for ~0.8% of the total score while Inferno's suite accounts for ~23% of the total score.
Since the goal of Speedometer is to approximate different kinds of DOM API use patterns on the Web,
we want each framework & library to have some measurement impact on the overall benchmark score.
Furthermore, after r221205, we're testing both debug build of Ember.js as well as release build.
Since debug build is 4x slower, using the total time or the arithmetic mean thereof will effectively
give 4x as much weight to debug build of Ember.js relative to release build of Ember.js. Given only
~5% of websites that deploy Ember.js use debug build, this weighting is clearly not right.
This patch, therefore, replaces the arithmetic mean by the geometric mean to compute the final score.
It also moves the code to compute the final score to BenchmarkRunner to be shared between main.js
and InteractiveRunner.html.
* Speedometer/InteractiveRunner.html:
(.didRunSuites): Show geometric mean, arithmetic mean, total, as well as the score for completeness
since this is a debugging page for developers.
* Speedometer/resources/benchmark-runner.js:
(BenchmarkRunner.prototype.step): Added mean, geomean, and score as measuredValues' properties.
(BenchmarkRunner.prototype._runTestAndRecordResults): Removed the dead code.
(BenchmarkRunner.prototype._finalize): Compute and add total, arithmetic mean (just mean in the code),
and geometric mean (geomean) to measuredValues.
* Speedometer/resources/main.js:
(window.benchmarkClient): Replaced testsCount by stepsCount and _timeValues by _measuredValuesList.
(window.benchmarkClient.willRunTest):
(window.benchmarkClient.didRunTest):
(window.benchmarkClient.didRunSuites): Store measuredValues object instead of just the total time.
(window.benchmarkClient.didFinishLastIteration):
(window.benchmarkClient._computeResults):
(window.benchmarkClient._computeResults.valueForUnit): Renamed from totalTimeInDisplayUnit. Now simply
retrieves the values computed by BenchmarkRunner's_finalize.
(startBenchmark):
(computeScore): Deleted.
Canonical link: https://commits.webkit.org/193020@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@221659 268f45cc-cd09-0410-ab3c-d52691b4dbfc
2017-09-06 02:37:41 +00:00
|
|
|
this._progressCompleted.style.width = (this._finishedTestCount * 100 / this.stepCount) + '%';
|
2014-06-01 20:38:55 +00:00
|
|
|
},
|
|
|
|
didRunSuites: function (measuredValues) {
|
Compute the final score using geometric mean in Speedometer 2.0
https://bugs.webkit.org/show_bug.cgi?id=172968
Reviewed by Saam Barati.
Make Speedometer 2.0 use the geometric mean of the subtotal of each test suite instead of the total..
In Speedometer 1.0, we used the total time to compute the final score because we wanted to make
the slowest framework and library faster. The fastest suite (FlightJS) still accounted for ~6% and
the slowest case (React) accounted for ~25% so we felt the total time, or the arithmetic mean with
a constant factor, was a good metric to track.
In the latest version of Speedometer 2.0, however, the fastest suite (Preact) runs in ~55ms whereas
the slowest suite (Inferno) takes 1.5s on Safari. Since the total time is 6.5s, Preact's suite only
accounts for ~0.8% of the total score while Inferno's suite accounts for ~23% of the total score.
Since the goal of Speedometer is to approximate different kinds of DOM API use patterns on the Web,
we want each framework & library to have some measurement impact on the overall benchmark score.
Furthermore, after r221205, we're testing both debug build of Ember.js as well as release build.
Since debug build is 4x slower, using the total time or the arithmetic mean thereof will effectively
give 4x as much weight to debug build of Ember.js relative to release build of Ember.js. Given only
~5% of websites that deploy Ember.js use debug build, this weighting is clearly not right.
This patch, therefore, replaces the arithmetic mean by the geometric mean to compute the final score.
It also moves the code to compute the final score to BenchmarkRunner to be shared between main.js
and InteractiveRunner.html.
* Speedometer/InteractiveRunner.html:
(.didRunSuites): Show geometric mean, arithmetic mean, total, as well as the score for completeness
since this is a debugging page for developers.
* Speedometer/resources/benchmark-runner.js:
(BenchmarkRunner.prototype.step): Added mean, geomean, and score as measuredValues' properties.
(BenchmarkRunner.prototype._runTestAndRecordResults): Removed the dead code.
(BenchmarkRunner.prototype._finalize): Compute and add total, arithmetic mean (just mean in the code),
and geometric mean (geomean) to measuredValues.
* Speedometer/resources/main.js:
(window.benchmarkClient): Replaced testsCount by stepsCount and _timeValues by _measuredValuesList.
(window.benchmarkClient.willRunTest):
(window.benchmarkClient.didRunTest):
(window.benchmarkClient.didRunSuites): Store measuredValues object instead of just the total time.
(window.benchmarkClient.didFinishLastIteration):
(window.benchmarkClient._computeResults):
(window.benchmarkClient._computeResults.valueForUnit): Renamed from totalTimeInDisplayUnit. Now simply
retrieves the values computed by BenchmarkRunner's_finalize.
(startBenchmark):
(computeScore): Deleted.
Canonical link: https://commits.webkit.org/193020@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@221659 268f45cc-cd09-0410-ab3c-d52691b4dbfc
2017-09-06 02:37:41 +00:00
|
|
|
this._measuredValuesList.push(measuredValues);
|
2014-06-01 21:12:00 +00:00
|
|
|
},
|
|
|
|
willStartFirstIteration: function () {
|
Compute the final score using geometric mean in Speedometer 2.0
https://bugs.webkit.org/show_bug.cgi?id=172968
Reviewed by Saam Barati.
Make Speedometer 2.0 use the geometric mean of the subtotal of each test suite instead of the total..
In Speedometer 1.0, we used the total time to compute the final score because we wanted to make
the slowest framework and library faster. The fastest suite (FlightJS) still accounted for ~6% and
the slowest case (React) accounted for ~25% so we felt the total time, or the arithmetic mean with
a constant factor, was a good metric to track.
In the latest version of Speedometer 2.0, however, the fastest suite (Preact) runs in ~55ms whereas
the slowest suite (Inferno) takes 1.5s on Safari. Since the total time is 6.5s, Preact's suite only
accounts for ~0.8% of the total score while Inferno's suite accounts for ~23% of the total score.
Since the goal of Speedometer is to approximate different kinds of DOM API use patterns on the Web,
we want each framework & library to have some measurement impact on the overall benchmark score.
Furthermore, after r221205, we're testing both debug build of Ember.js as well as release build.
Since debug build is 4x slower, using the total time or the arithmetic mean thereof will effectively
give 4x as much weight to debug build of Ember.js relative to release build of Ember.js. Given only
~5% of websites that deploy Ember.js use debug build, this weighting is clearly not right.
This patch, therefore, replaces the arithmetic mean by the geometric mean to compute the final score.
It also moves the code to compute the final score to BenchmarkRunner to be shared between main.js
and InteractiveRunner.html.
* Speedometer/InteractiveRunner.html:
(.didRunSuites): Show geometric mean, arithmetic mean, total, as well as the score for completeness
since this is a debugging page for developers.
* Speedometer/resources/benchmark-runner.js:
(BenchmarkRunner.prototype.step): Added mean, geomean, and score as measuredValues' properties.
(BenchmarkRunner.prototype._runTestAndRecordResults): Removed the dead code.
(BenchmarkRunner.prototype._finalize): Compute and add total, arithmetic mean (just mean in the code),
and geometric mean (geomean) to measuredValues.
* Speedometer/resources/main.js:
(window.benchmarkClient): Replaced testsCount by stepsCount and _timeValues by _measuredValuesList.
(window.benchmarkClient.willRunTest):
(window.benchmarkClient.didRunTest):
(window.benchmarkClient.didRunSuites): Store measuredValues object instead of just the total time.
(window.benchmarkClient.didFinishLastIteration):
(window.benchmarkClient._computeResults):
(window.benchmarkClient._computeResults.valueForUnit): Renamed from totalTimeInDisplayUnit. Now simply
retrieves the values computed by BenchmarkRunner's_finalize.
(startBenchmark):
(computeScore): Deleted.
Canonical link: https://commits.webkit.org/193020@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@221659 268f45cc-cd09-0410-ab3c-d52691b4dbfc
2017-09-06 02:37:41 +00:00
|
|
|
this._measuredValuesList = [];
|
2014-06-02 19:57:39 +00:00
|
|
|
this._finishedTestCount = 0;
|
|
|
|
this._progressCompleted = document.getElementById('progress-completed');
|
|
|
|
document.getElementById('logo-link').onclick = function (event) { event.preventDefault(); return false; }
|
2014-06-01 20:38:55 +00:00
|
|
|
},
|
|
|
|
didFinishLastIteration: function () {
|
2014-06-02 19:57:39 +00:00
|
|
|
document.getElementById('logo-link').onclick = null;
|
|
|
|
|
Compute the final score using geometric mean in Speedometer 2.0
https://bugs.webkit.org/show_bug.cgi?id=172968
Reviewed by Saam Barati.
Make Speedometer 2.0 use the geometric mean of the subtotal of each test suite instead of the total..
In Speedometer 1.0, we used the total time to compute the final score because we wanted to make
the slowest framework and library faster. The fastest suite (FlightJS) still accounted for ~6% and
the slowest case (React) accounted for ~25% so we felt the total time, or the arithmetic mean with
a constant factor, was a good metric to track.
In the latest version of Speedometer 2.0, however, the fastest suite (Preact) runs in ~55ms whereas
the slowest suite (Inferno) takes 1.5s on Safari. Since the total time is 6.5s, Preact's suite only
accounts for ~0.8% of the total score while Inferno's suite accounts for ~23% of the total score.
Since the goal of Speedometer is to approximate different kinds of DOM API use patterns on the Web,
we want each framework & library to have some measurement impact on the overall benchmark score.
Furthermore, after r221205, we're testing both debug build of Ember.js as well as release build.
Since debug build is 4x slower, using the total time or the arithmetic mean thereof will effectively
give 4x as much weight to debug build of Ember.js relative to release build of Ember.js. Given only
~5% of websites that deploy Ember.js use debug build, this weighting is clearly not right.
This patch, therefore, replaces the arithmetic mean by the geometric mean to compute the final score.
It also moves the code to compute the final score to BenchmarkRunner to be shared between main.js
and InteractiveRunner.html.
* Speedometer/InteractiveRunner.html:
(.didRunSuites): Show geometric mean, arithmetic mean, total, as well as the score for completeness
since this is a debugging page for developers.
* Speedometer/resources/benchmark-runner.js:
(BenchmarkRunner.prototype.step): Added mean, geomean, and score as measuredValues' properties.
(BenchmarkRunner.prototype._runTestAndRecordResults): Removed the dead code.
(BenchmarkRunner.prototype._finalize): Compute and add total, arithmetic mean (just mean in the code),
and geometric mean (geomean) to measuredValues.
* Speedometer/resources/main.js:
(window.benchmarkClient): Replaced testsCount by stepsCount and _timeValues by _measuredValuesList.
(window.benchmarkClient.willRunTest):
(window.benchmarkClient.didRunTest):
(window.benchmarkClient.didRunSuites): Store measuredValues object instead of just the total time.
(window.benchmarkClient.didFinishLastIteration):
(window.benchmarkClient._computeResults):
(window.benchmarkClient._computeResults.valueForUnit): Renamed from totalTimeInDisplayUnit. Now simply
retrieves the values computed by BenchmarkRunner's_finalize.
(startBenchmark):
(computeScore): Deleted.
Canonical link: https://commits.webkit.org/193020@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@221659 268f45cc-cd09-0410-ab3c-d52691b4dbfc
2017-09-06 02:37:41 +00:00
|
|
|
var results = this._computeResults(this._measuredValuesList, this.displayUnit);
|
2014-06-02 19:57:39 +00:00
|
|
|
|
|
|
|
this._updateGaugeNeedle(results.mean);
|
|
|
|
document.getElementById('result-number').textContent = results.formattedMean;
|
|
|
|
if (results.formattedDelta)
|
|
|
|
document.getElementById('confidence-number').textContent = '\u00b1 ' + results.formattedDelta;
|
|
|
|
|
|
|
|
this._populateDetailedResults(results.formattedValues);
|
|
|
|
document.getElementById('results-with-statistics').textContent = results.formattedMeanAndDelta;
|
|
|
|
|
2017-08-22 21:00:49 +00:00
|
|
|
if (this.displayUnit == 'ms') {
|
2014-06-02 19:57:39 +00:00
|
|
|
document.getElementById('show-summary').style.display = 'none';
|
|
|
|
showResultDetails();
|
|
|
|
} else
|
|
|
|
showResultsSummary();
|
|
|
|
},
|
Compute the final score using geometric mean in Speedometer 2.0
https://bugs.webkit.org/show_bug.cgi?id=172968
Reviewed by Saam Barati.
Make Speedometer 2.0 use the geometric mean of the subtotal of each test suite instead of the total..
In Speedometer 1.0, we used the total time to compute the final score because we wanted to make
the slowest framework and library faster. The fastest suite (FlightJS) still accounted for ~6% and
the slowest case (React) accounted for ~25% so we felt the total time, or the arithmetic mean with
a constant factor, was a good metric to track.
In the latest version of Speedometer 2.0, however, the fastest suite (Preact) runs in ~55ms whereas
the slowest suite (Inferno) takes 1.5s on Safari. Since the total time is 6.5s, Preact's suite only
accounts for ~0.8% of the total score while Inferno's suite accounts for ~23% of the total score.
Since the goal of Speedometer is to approximate different kinds of DOM API use patterns on the Web,
we want each framework & library to have some measurement impact on the overall benchmark score.
Furthermore, after r221205, we're testing both debug build of Ember.js as well as release build.
Since debug build is 4x slower, using the total time or the arithmetic mean thereof will effectively
give 4x as much weight to debug build of Ember.js relative to release build of Ember.js. Given only
~5% of websites that deploy Ember.js use debug build, this weighting is clearly not right.
This patch, therefore, replaces the arithmetic mean by the geometric mean to compute the final score.
It also moves the code to compute the final score to BenchmarkRunner to be shared between main.js
and InteractiveRunner.html.
* Speedometer/InteractiveRunner.html:
(.didRunSuites): Show geometric mean, arithmetic mean, total, as well as the score for completeness
since this is a debugging page for developers.
* Speedometer/resources/benchmark-runner.js:
(BenchmarkRunner.prototype.step): Added mean, geomean, and score as measuredValues' properties.
(BenchmarkRunner.prototype._runTestAndRecordResults): Removed the dead code.
(BenchmarkRunner.prototype._finalize): Compute and add total, arithmetic mean (just mean in the code),
and geometric mean (geomean) to measuredValues.
* Speedometer/resources/main.js:
(window.benchmarkClient): Replaced testsCount by stepsCount and _timeValues by _measuredValuesList.
(window.benchmarkClient.willRunTest):
(window.benchmarkClient.didRunTest):
(window.benchmarkClient.didRunSuites): Store measuredValues object instead of just the total time.
(window.benchmarkClient.didFinishLastIteration):
(window.benchmarkClient._computeResults):
(window.benchmarkClient._computeResults.valueForUnit): Renamed from totalTimeInDisplayUnit. Now simply
retrieves the values computed by BenchmarkRunner's_finalize.
(startBenchmark):
(computeScore): Deleted.
Canonical link: https://commits.webkit.org/193020@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@221659 268f45cc-cd09-0410-ab3c-d52691b4dbfc
2017-09-06 02:37:41 +00:00
|
|
|
_computeResults: function (measuredValuesList, displayUnit) {
|
2014-06-02 19:57:39 +00:00
|
|
|
var suitesCount = this.suitesCount;
|
Compute the final score using geometric mean in Speedometer 2.0
https://bugs.webkit.org/show_bug.cgi?id=172968
Reviewed by Saam Barati.
Make Speedometer 2.0 use the geometric mean of the subtotal of each test suite instead of the total..
In Speedometer 1.0, we used the total time to compute the final score because we wanted to make
the slowest framework and library faster. The fastest suite (FlightJS) still accounted for ~6% and
the slowest case (React) accounted for ~25% so we felt the total time, or the arithmetic mean with
a constant factor, was a good metric to track.
In the latest version of Speedometer 2.0, however, the fastest suite (Preact) runs in ~55ms whereas
the slowest suite (Inferno) takes 1.5s on Safari. Since the total time is 6.5s, Preact's suite only
accounts for ~0.8% of the total score while Inferno's suite accounts for ~23% of the total score.
Since the goal of Speedometer is to approximate different kinds of DOM API use patterns on the Web,
we want each framework & library to have some measurement impact on the overall benchmark score.
Furthermore, after r221205, we're testing both debug build of Ember.js as well as release build.
Since debug build is 4x slower, using the total time or the arithmetic mean thereof will effectively
give 4x as much weight to debug build of Ember.js relative to release build of Ember.js. Given only
~5% of websites that deploy Ember.js use debug build, this weighting is clearly not right.
This patch, therefore, replaces the arithmetic mean by the geometric mean to compute the final score.
It also moves the code to compute the final score to BenchmarkRunner to be shared between main.js
and InteractiveRunner.html.
* Speedometer/InteractiveRunner.html:
(.didRunSuites): Show geometric mean, arithmetic mean, total, as well as the score for completeness
since this is a debugging page for developers.
* Speedometer/resources/benchmark-runner.js:
(BenchmarkRunner.prototype.step): Added mean, geomean, and score as measuredValues' properties.
(BenchmarkRunner.prototype._runTestAndRecordResults): Removed the dead code.
(BenchmarkRunner.prototype._finalize): Compute and add total, arithmetic mean (just mean in the code),
and geometric mean (geomean) to measuredValues.
* Speedometer/resources/main.js:
(window.benchmarkClient): Replaced testsCount by stepsCount and _timeValues by _measuredValuesList.
(window.benchmarkClient.willRunTest):
(window.benchmarkClient.didRunTest):
(window.benchmarkClient.didRunSuites): Store measuredValues object instead of just the total time.
(window.benchmarkClient.didFinishLastIteration):
(window.benchmarkClient._computeResults):
(window.benchmarkClient._computeResults.valueForUnit): Renamed from totalTimeInDisplayUnit. Now simply
retrieves the values computed by BenchmarkRunner's_finalize.
(startBenchmark):
(computeScore): Deleted.
Canonical link: https://commits.webkit.org/193020@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@221659 268f45cc-cd09-0410-ab3c-d52691b4dbfc
2017-09-06 02:37:41 +00:00
|
|
|
// Picks which aggregate from a measuredValues object to report:
// 'ms' mode shows the geometric mean of the suite subtotals, every
// other display unit shows the runs/min score.
function valueForUnit(measuredValues) {
    return displayUnit == 'ms' ? measuredValues.geomean : measuredValues.score;
}
|
|
|
|
|
|
|
|
// Maps the confidence-interval width (as a percentage of the mean) to a
// significant-figure count: three digits plus one more for every order
// of magnitude the delta falls below 1%.
function sigFigFromPercentDelta(percentDelta) {
    var magnitude = -Math.log(percentDelta) / Math.log(10);
    return Math.ceil(magnitude) + 3;
}
|
|
|
|
|
|
|
|
// Formats a number to the requested significant figures, clamped so the
// output never has fewer digits than the integer part requires and never
// more than 6 significant figures.
function toSigFigPrecision(number, sigFig) {
    // Digits left of the decimal point; 0 for fractions (and anything not >= 1).
    var nonDecimalDigitCount = number < 1 ? 0 : (Math.floor(Math.log(number) / Math.log(10)) + 1);
    var precision = Math.max(nonDecimalDigitCount, Math.min(6, sigFig));
    return number.toPrecision(precision);
}
|
|
|
|
|
Compute the final score using geometric mean in Speedometer 2.0
https://bugs.webkit.org/show_bug.cgi?id=172968
Reviewed by Saam Barati.
Make Speedometer 2.0 use the geometric mean of the subtotal of each test suite instead of the total.
In Speedometer 1.0, we used the total time to compute the final score because we wanted to make
the slowest framework and library faster. The fastest suite (FlightJS) still accounted for ~6% and
the slowest case (React) accounted for ~25% so we felt the total time, or the arithmetic mean with
a constant factor, was a good metric to track.
In the latest version of Speedometer 2.0, however, the fastest suite (Preact) runs in ~55ms whereas
the slowest suite (Inferno) takes 1.5s on Safari. Since the total time is 6.5s, Preact's suite only
accounts for ~0.8% of the total score while Inferno's suite accounts for ~23% of the total score.
Since the goal of Speedometer is to approximate different kinds of DOM API use patterns on the Web,
we want each framework & library to have some measurement impact on the overall benchmark score.
Furthermore, after r221205, we're testing both debug build of Ember.js as well as release build.
Since debug build is 4x slower, using the total time or the arithmetic mean thereof will effectively
give 4x as much weight to debug build of Ember.js relative to release build of Ember.js. Given only
~5% of websites that deploy Ember.js use debug build, this weighting is clearly not right.
This patch, therefore, replaces the arithmetic mean by the geometric mean to compute the final score.
It also moves the code to compute the final score to BenchmarkRunner to be shared between main.js
and InteractiveRunner.html.
* Speedometer/InteractiveRunner.html:
(.didRunSuites): Show geometric mean, arithmetic mean, total, as well as the score for completeness
since this is a debugging page for developers.
* Speedometer/resources/benchmark-runner.js:
(BenchmarkRunner.prototype.step): Added mean, geomean, and score as measuredValues' properties.
(BenchmarkRunner.prototype._runTestAndRecordResults): Removed the dead code.
(BenchmarkRunner.prototype._finalize): Compute and add total, arithmetic mean (just mean in the code),
and geometric mean (geomean) to measuredValues.
* Speedometer/resources/main.js:
(window.benchmarkClient): Replaced testsCount by stepsCount and _timeValues by _measuredValuesList.
(window.benchmarkClient.willRunTest):
(window.benchmarkClient.didRunTest):
(window.benchmarkClient.didRunSuites): Store measuredValues object instead of just the total time.
(window.benchmarkClient.didFinishLastIteration):
(window.benchmarkClient._computeResults):
(window.benchmarkClient._computeResults.valueForUnit): Renamed from totalTimeInDisplayUnit. Now simply
retrieves the values computed by BenchmarkRunner's _finalize.
(startBenchmark):
(computeScore): Deleted.
Canonical link: https://commits.webkit.org/193020@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@221659 268f45cc-cd09-0410-ab3c-d52691b4dbfc
2017-09-06 02:37:41 +00:00
|
|
|
var values = measuredValuesList.map(valueForUnit);
|
2014-06-01 20:38:55 +00:00
|
|
|
var sum = values.reduce(function (a, b) { return a + b; }, 0);
|
|
|
|
var arithmeticMean = sum / values.length;
|
2014-06-02 19:57:39 +00:00
|
|
|
var meanSigFig = 4;
|
|
|
|
var formattedDelta;
|
|
|
|
var formattedPercentDelta;
|
2014-06-01 20:38:55 +00:00
|
|
|
if (window.Statistics) {
|
|
|
|
var delta = Statistics.confidenceIntervalDelta(0.95, values.length, sum, Statistics.squareSum(values));
|
2014-06-02 19:57:39 +00:00
|
|
|
if (!isNaN(delta)) {
|
|
|
|
var percentDelta = delta * 100 / arithmeticMean;
|
|
|
|
meanSigFig = sigFigFromPercentDelta(percentDelta);
|
|
|
|
formattedDelta = toSigFigPrecision(delta, 2);
|
|
|
|
formattedPercentDelta = toSigFigPrecision(percentDelta, 2) + '%';
|
|
|
|
}
|
2014-06-01 20:38:55 +00:00
|
|
|
}
|
2014-06-02 19:57:39 +00:00
|
|
|
|
|
|
|
var formattedMean = toSigFigPrecision(arithmeticMean, Math.max(meanSigFig, 3));
|
|
|
|
|
|
|
|
return {
|
Compute the final score using geometric mean in Speedometer 2.0
https://bugs.webkit.org/show_bug.cgi?id=172968
Reviewed by Saam Barati.
Make Speedometer 2.0 use the geometric mean of the subtotal of each test suite instead of the total..
In Speedometer 1.0, we used the total time to compute the final score because we wanted to make
the slowest framework and library faster. The fastest suite (FlightJS) still accounted for ~6% and
the slowest case (React) accounted for ~25% so we felt the total time, or the arithmetic mean with
a constant factor, was a good metric to track.
In the latest version of Speedometer 2.0, however, the fastest suite (Preact) runs in ~55ms whereas
the slowest suite (Inferno) takes 1.5s on Safari. Since the total time is 6.5s, Preact's suite only
accounts for ~0.8% of the total score while Inferno's suite accounts for ~23% of the total score.
Since the goal of Speedometer is to approximate different kinds of DOM API use patterns on the Web,
we want each framework & library to have some measurement impact on the overall benchmark score.
Furthermore, after r221205, we're testing both debug build of Ember.js as well as release build.
Since debug build is 4x slower, using the total time or the arithmetic mean thereof will effectively
give 4x as much weight to debug build of Ember.js relative to release build of Ember.js. Given only
~5% of websites that deploy Ember.js use debug build, this weighting is clearly not right.
This patch, therefore, replaces the arithmetic mean by the geometric mean to compute the final score.
It also moves the code to compute the final score to BenchmarkRunner to be shared between main.js
and InteractiveRunner.html.
* Speedometer/InteractiveRunner.html:
(.didRunSuites): Show geometric mean, arithmetic mean, total, as well as the score for completeness
since this is a debugging page for developers.
* Speedometer/resources/benchmark-runner.js:
(BenchmarkRunner.prototype.step): Added mean, geomean, and score as measuredValues' properties.
(BenchmarkRunner.prototype._runTestAndRecordResults): Removed the dead code.
(BenchmarkRunner.prototype._finalize): Compute and add total, arithmetic mean (just mean in the code),
and geometric mean (geomean) to measuredValues.
* Speedometer/resources/main.js:
(window.benchmarkClient): Replaced testsCount by stepsCount and _timeValues by _measuredValuesList.
(window.benchmarkClient.willRunTest):
(window.benchmarkClient.didRunTest):
(window.benchmarkClient.didRunSuites): Store measuredValues object instead of just the total time.
(window.benchmarkClient.didFinishLastIteration):
(window.benchmarkClient._computeResults):
(window.benchmarkClient._computeResults.valueForUnit): Renamed from totalTimeInDisplayUnit. Now simply
retrieves the values computed by BenchmarkRunner's_finalize.
(startBenchmark):
(computeScore): Deleted.
Canonical link: https://commits.webkit.org/193020@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@221659 268f45cc-cd09-0410-ab3c-d52691b4dbfc
2017-09-06 02:37:41 +00:00
|
|
|
formattedValues: values.map(function (value) {
|
|
|
|
return toSigFigPrecision(value, 4) + ' ' + displayUnit;
|
2014-06-02 19:57:39 +00:00
|
|
|
}),
|
|
|
|
mean: arithmeticMean,
|
|
|
|
formattedMean: formattedMean,
|
|
|
|
formattedDelta: formattedDelta,
|
|
|
|
formattedMeanAndDelta: formattedMean + (formattedDelta ? ' \xb1 ' + formattedDelta + ' (' + formattedPercentDelta + ')' : ''),
|
|
|
|
};
|
2014-06-01 21:12:00 +00:00
|
|
|
},
|
2014-06-02 19:57:39 +00:00
|
|
|
_addDetailedResultsRow: function (table, iterationNumber, value) {
|
2014-06-01 21:12:00 +00:00
|
|
|
var row = document.createElement('tr');
|
|
|
|
var th = document.createElement('th');
|
2014-06-02 19:57:39 +00:00
|
|
|
th.textContent = 'Iteration ' + (iterationNumber + 1);
|
2014-06-01 21:12:00 +00:00
|
|
|
var td = document.createElement('td');
|
|
|
|
td.textContent = value;
|
|
|
|
row.appendChild(th);
|
|
|
|
row.appendChild(td);
|
2014-06-02 19:57:39 +00:00
|
|
|
table.appendChild(row);
|
|
|
|
},
|
|
|
|
_updateGaugeNeedle: function (rpm) {
|
|
|
|
var needleAngle = Math.max(0, Math.min(rpm, 140)) - 70;
|
|
|
|
var needleRotationValue = 'rotate(' + needleAngle + 'deg)';
|
|
|
|
|
|
|
|
var gaugeNeedleElement = document.querySelector('#summarized-results > .gauge .needle');
|
|
|
|
gaugeNeedleElement.style.setProperty('-webkit-transform', needleRotationValue);
|
|
|
|
gaugeNeedleElement.style.setProperty('-moz-transform', needleRotationValue);
|
|
|
|
gaugeNeedleElement.style.setProperty('-ms-transform', needleRotationValue);
|
|
|
|
gaugeNeedleElement.style.setProperty('transform', needleRotationValue);
|
|
|
|
},
|
|
|
|
_populateDetailedResults: function (formattedValues) {
|
|
|
|
var resultsTables = document.querySelectorAll('.results-table');
|
|
|
|
var i = 0;
|
|
|
|
resultsTables[0].innerHTML = '';
|
|
|
|
for (; i < Math.ceil(formattedValues.length / 2); i++)
|
|
|
|
this._addDetailedResultsRow(resultsTables[0], i, formattedValues[i]);
|
|
|
|
resultsTables[1].innerHTML = '';
|
|
|
|
for (; i < formattedValues.length; i++)
|
|
|
|
this._addDetailedResultsRow(resultsTables[1], i, formattedValues[i]);
|
|
|
|
},
|
|
|
|
prepareUI: function () {
|
|
|
|
window.addEventListener('popstate', function (event) {
|
|
|
|
if (event.state) {
|
|
|
|
var sectionToShow = event.state.section;
|
|
|
|
if (sectionToShow) {
|
|
|
|
var sections = document.querySelectorAll('main > section');
|
|
|
|
for (var i = 0; i < sections.length; i++) {
|
|
|
|
if (sections[i].id === sectionToShow)
|
|
|
|
return showSection(sectionToShow, false);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return showSection('home', false);
|
|
|
|
}, false);
|
|
|
|
|
|
|
|
function updateScreenSize() {
|
|
|
|
// FIXME: Detect when the window size changes during the test.
|
|
|
|
var screenIsTooSmall = window.innerWidth < 850 || window.innerHeight < 650;
|
|
|
|
document.getElementById('screen-size').textContent = window.innerWidth + 'px by ' + window.innerHeight + 'px';
|
|
|
|
document.getElementById('screen-size-warning').style.display = screenIsTooSmall ? null : 'none';
|
|
|
|
}
|
|
|
|
|
|
|
|
window.addEventListener('resize', updateScreenSize);
|
|
|
|
updateScreenSize();
|
2014-06-01 20:38:55 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-23 23:04:13 +00:00
|
|
|
// Disables every suite except the one whose name matches suiteToEnable
// (case-insensitively). Returns whether a matching suite was found.
function enableOneSuite(suites, suiteToEnable)
{
    var target = suiteToEnable.toLowerCase();
    var found = false;
    suites.forEach(function (suite) {
        var matches = suite.name.toLowerCase() == target;
        suite.disabled = !matches;
        if (matches)
            found = true;
    });
    return found;
}
|
|
|
|
|
2014-06-01 20:38:55 +00:00
|
|
|
// Applies ?unit=, ?iterationCount= and ?suite= overrides from the query
// string, computes the progress-bar step count, and kicks off the runner.
// Returns false (without starting) when an unknown suite is requested.
function startBenchmark() {
    if (location.search.length > 1) {
        var parts = location.search.substring(1).split('&');
        for (var i = 0; i < parts.length; i++) {
            var keyValue = parts[i].split('=');
            var key = keyValue[0];
            var value = keyValue[1];
            switch (key) {
            case 'unit':
                if (value == 'ms')
                    benchmarkClient.displayUnit = 'ms';
                else
                    console.error('Invalid unit: ' + value);
                break;
            case 'iterationCount':
                // Explicit radix so values like '010' always parse as decimal.
                var parsedValue = parseInt(value, 10);
                if (!isNaN(parsedValue))
                    benchmarkClient.iterationCount = parsedValue;
                else
                    console.error('Invalid iteration count: ' + value);
                break;
            case 'suite':
                if (!enableOneSuite(Suites, value)) {
                    alert('Suite "' + value + '" does not exist. No tests to run.');
                    return false; // Nothing to run; caller stays on the home view.
                }
                break;
            }
        }
    }

    var enabledSuites = Suites.filter(function (suite) { return !suite.disabled; });
    var totalSubtestsCount = enabledSuites.reduce(function (testsCount, suite) { return testsCount + suite.tests.length; }, 0);
    // One step per subtest per iteration; drives the progress indicator.
    benchmarkClient.stepCount = benchmarkClient.iterationCount * totalSubtestsCount;
    benchmarkClient.suitesCount = enabledSuites.length;
    var runner = new BenchmarkRunner(Suites, benchmarkClient);
    runner.runMultipleIterations(benchmarkClient.iterationCount);

    return true;
}
|
|
|
|
|
2014-06-02 19:57:39 +00:00
|
|
|
// Swaps the 'selected' class from the currently visible <section> to the
// one with the given id, optionally pushing a history entry so the back
// button can restore the previous view.
function showSection(sectionIdentifier, pushState) {
    var previousSection = document.querySelector('section.selected');
    console.assert(previousSection);

    var nextSection = document.getElementById(sectionIdentifier);
    console.assert(nextSection);

    previousSection.classList.remove('selected');
    nextSection.classList.add('selected');

    if (pushState)
        history.pushState({section: sectionIdentifier}, document.title);
}
|
|
|
|
|
|
|
|
// Returns to the landing page, pushing a history entry.
function showHome() {
    showSection('home', true);
}
|
|
|
|
|
|
|
|
// Begins a benchmark run; only switches to the 'running' view when the
// benchmark actually started (startBenchmark() returns false when an
// unknown suite was requested via the query string).
function startTest() {
    if (!startBenchmark())
        return;
    showSection('running');
}
|
|
|
|
|
|
|
|
// Shows the summarized-results view, pushing a history entry.
function showResultsSummary() {
    showSection('summarized-results', true);
}
|
|
|
|
|
|
|
|
// Shows the per-iteration detailed-results view, pushing a history entry.
function showResultDetails() {
    showSection('detailed-results', true);
}
|
|
|
|
|
|
|
|
// Shows the about page, pushing a history entry.
function showAbout() {
    showSection('about', true);
}
|
|
|
|
|
|
|
|
// Defer UI setup until the DOM is ready; prepareUI is optional, so the
// harness also works for clients that do not define it.
window.addEventListener('DOMContentLoaded', function () {
    if (benchmarkClient.prepareUI)
        benchmarkClient.prepareUI();
});
|