diff --git a/lighthouse-core/audits/byte-efficiency/byte-efficiency-audit.js b/lighthouse-core/audits/byte-efficiency/byte-efficiency-audit.js
index 5f968d2f0664..7169865006c7 100644
--- a/lighthouse-core/audits/byte-efficiency/byte-efficiency-audit.js
+++ b/lighthouse-core/audits/byte-efficiency/byte-efficiency-audit.js
@@ -56,16 +56,6 @@ class UnusedBytes extends Audit {
     }
   }
 
-  /**
-   * @param {number} bytes
-   * @param {number} networkThroughput measured in bytes/second
-   * @return {number}
-   */
-  static bytesToMs(bytes, networkThroughput) {
-    const milliseconds = bytes / networkThroughput * 1000;
-    return milliseconds;
-  }
-
   /**
    * Estimates the number of bytes this network record would have consumed on the network based on the
    * uncompressed size (totalBytes). Uses the actual transfer size from the network record if applicable.
diff --git a/lighthouse-core/audits/byte-efficiency/total-byte-weight.js b/lighthouse-core/audits/byte-efficiency/total-byte-weight.js
index fa64d53c283d..af37d25f07d6 100644
--- a/lighthouse-core/audits/byte-efficiency/total-byte-weight.js
+++ b/lighthouse-core/audits/byte-efficiency/total-byte-weight.js
@@ -57,16 +57,13 @@ class TotalByteWeight extends ByteEfficiencyAudit {
    * @return {Promise}
    */
   static async audit(artifacts, context) {
-    const devtoolsLogs = artifacts.devtoolsLogs[ByteEfficiencyAudit.DEFAULT_PASS];
-    const [networkRecords, networkThroughput] = await Promise.all([
-      artifacts.requestNetworkRecords(devtoolsLogs),
-      artifacts.requestNetworkThroughput(devtoolsLogs),
-    ]);
+    const devtoolsLog = artifacts.devtoolsLogs[ByteEfficiencyAudit.DEFAULT_PASS];
+    const records = await artifacts.requestNetworkRecords(devtoolsLog);
 
     let totalBytes = 0;
-    /** @type {Array<{url: string, totalBytes: number, totalMs: number}>} */
+    /** @type {Array<{url: string, totalBytes: number}>} */
     let results = [];
-    networkRecords.forEach(record => {
+    records.forEach(record => {
       // exclude data URIs since their size is reflected in other resources
       // exclude unfinished requests since they won't have transfer size information
       if (record.parsedURL.scheme === 'data' || !record.finished) return;
@@ -74,7 +71,6 @@ class TotalByteWeight extends ByteEfficiencyAudit {
       const result = {
         url: record.url,
         totalBytes: record.transferSize,
-        totalMs: ByteEfficiencyAudit.bytesToMs(record.transferSize, networkThroughput),
       };
 
       totalBytes += result.totalBytes;
diff --git a/lighthouse-core/gather/computed/network-analysis.js b/lighthouse-core/gather/computed/network-analysis.js
index eeb2a514ddd2..5927bc5b2f53 100644
--- a/lighthouse-core/gather/computed/network-analysis.js
+++ b/lighthouse-core/gather/computed/network-analysis.js
@@ -15,7 +15,7 @@ class NetworkAnalysis extends ComputedArtifact {
 
   /**
    * @param {Array} records
-   * @return {LH.Artifacts.NetworkAnalysis}
+   * @return {Omit}
    */
   static computeRTTAndServerResponseTime(records) {
     // First pass compute the estimated observed RTT to each origin's servers.
@@ -45,7 +45,11 @@ class NetworkAnalysis extends ComputedArtifact {
       serverResponseTimeByOrigin.set(origin, summary.median);
     }
 
-    return {rtt: minimumRtt, additionalRttByOrigin, serverResponseTimeByOrigin, throughput: 0};
+    return {
+      rtt: minimumRtt,
+      additionalRttByOrigin,
+      serverResponseTimeByOrigin,
+    };
   }
 
   /**
@@ -55,10 +59,9 @@ class NetworkAnalysis extends ComputedArtifact {
    */
   async compute_(devtoolsLog, computedArtifacts) {
     const records = await computedArtifacts.requestNetworkRecords(devtoolsLog);
-    const throughput = await computedArtifacts.requestNetworkThroughput(devtoolsLog);
+    const throughput = NetworkAnalyzer.estimateThroughput(records);
     const rttAndServerResponseTime = NetworkAnalysis.computeRTTAndServerResponseTime(records);
-    rttAndServerResponseTime.throughput = throughput * 8; // convert from KBps to Kbps
-    return rttAndServerResponseTime;
+    return {records, throughput, ...rttAndServerResponseTime};
   }
 }
 
diff --git a/lighthouse-core/gather/computed/network-throughput.js b/lighthouse-core/gather/computed/network-throughput.js
deleted file mode 100644
index fe3676ddf945..000000000000
--- a/lighthouse-core/gather/computed/network-throughput.js
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * @license Copyright 2017 Google Inc. All Rights Reserved.
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
- */
-'use strict';
-
-const ComputedArtifact = require('./computed-artifact');
-
-class NetworkThroughput extends ComputedArtifact {
-  get name() {
-    return 'NetworkThroughput';
-  }
-
-  /**
-   * Computes the average throughput for the given records in bytes/second.
-   * Excludes data URI, failed or otherwise incomplete, and cached requests.
-   * Returns Infinity if there were no analyzable network records.
-   *
-   * @param {Array} networkRecords
-   * @return {number}
-   */
-  static getThroughput(networkRecords) {
-    let totalBytes = 0;
-    const timeBoundaries = networkRecords.reduce((boundaries, record) => {
-      const scheme = record.parsedURL && record.parsedURL.scheme;
-      if (scheme === 'data' || record.failed || !record.finished ||
-          record.statusCode > 300 || !record.transferSize) {
-        return boundaries;
-      }
-
-      totalBytes += record.transferSize;
-      boundaries.push({time: record.responseReceivedTime, isStart: true});
-      boundaries.push({time: record.endTime, isStart: false});
-      return boundaries;
-    }, /** @type {Array<{time: number, isStart: boolean}>} */([])).sort((a, b) => a.time - b.time);
-
-    if (!timeBoundaries.length) {
-      return Infinity;
-    }
-
-    let inflight = 0;
-    let currentStart = 0;
-    let totalDuration = 0;
-    timeBoundaries.forEach(boundary => {
-      if (boundary.isStart) {
-        if (inflight === 0) {
-          currentStart = boundary.time;
-        }
-        inflight++;
-      } else {
-        inflight--;
-        if (inflight === 0) {
-          totalDuration += boundary.time - currentStart;
-        }
-      }
-    });
-
-    return totalBytes / totalDuration;
-  }
-
-  /**
-   * @param {LH.DevtoolsLog} devtoolsLog
-   * @param {LH.ComputedArtifacts} computedArtifacts
-   * @return {Promise}
-   */
-  compute_(devtoolsLog, computedArtifacts) {
-    // TODO(phulce): migrate this to network-analysis computed artifact
-    return computedArtifacts.requestNetworkRecords(devtoolsLog)
-      .then(NetworkThroughput.getThroughput);
-  }
-}
-
-module.exports = NetworkThroughput;
diff --git a/lighthouse-core/lib/dependency-graph/simulator/network-analyzer.js b/lighthouse-core/lib/dependency-graph/simulator/network-analyzer.js
index c2ad615fa0e8..6b45bbebf76d 100644
--- a/lighthouse-core/lib/dependency-graph/simulator/network-analyzer.js
+++ b/lighthouse-core/lib/dependency-graph/simulator/network-analyzer.js
@@ -385,6 +385,64 @@ class NetworkAnalyzer {
 
     return NetworkAnalyzer.summarize(estimatesByOrigin);
   }
+
+  /**
+   * Computes the average throughput for the given records in bits/second.
+   * Excludes data URI, failed or otherwise incomplete, and cached requests.
+   * Returns Infinity if there were no analyzable network records.
+   *
+   * @param {Array} networkRecords
+   * @return {number}
+   */
+  static estimateThroughput(networkRecords) {
+    let totalBytes = 0;
+
+    // We will measure throughput by summing the total bytes downloaded by the total time spent
+    // downloading those bytes. We slice up all the network records into start/end boundaries, so
+    // it's easier to deal with the gaps in downloading.
+    const timeBoundaries = networkRecords.reduce((boundaries, record) => {
+      const scheme = record.parsedURL && record.parsedURL.scheme;
+      // Requests whose bodies didn't come over the network or didn't completely finish will mess
+      // with the computation, just skip over them.
+      if (scheme === 'data' || record.failed || !record.finished ||
+          record.statusCode > 300 || !record.transferSize) {
+        return boundaries;
+      }
+
+      // If we've made it this far, all the times we need should be valid (i.e. not undefined/-1).
+      totalBytes += record.transferSize;
+      boundaries.push({time: record.responseReceivedTime, isStart: true});
+      boundaries.push({time: record.endTime, isStart: false});
+      return boundaries;
+    }, /** @type {Array<{time: number, isStart: boolean}>} */([])).sort((a, b) => a.time - b.time);
+
+    if (!timeBoundaries.length) {
+      return Infinity;
+    }
+
+    let inflight = 0;
+    let currentStart = 0;
+    let totalDuration = 0;
+
+    timeBoundaries.forEach(boundary => {
+      if (boundary.isStart) {
+        if (inflight === 0) {
+          // We just ended a quiet period, keep track of when the download period started
+          currentStart = boundary.time;
+        }
+        inflight++;
+      } else {
+        inflight--;
+        if (inflight === 0) {
+          // We just entered a quiet period, update our duration with the time we spent downloading
+          totalDuration += boundary.time - currentStart;
+        }
+      }
+    });
+
+    return totalBytes * 8 / totalDuration;
+  }
+
   /**
    * @param {Array} records
    * @return {LH.Artifacts.NetworkRequest}
diff --git a/lighthouse-core/test/audits/byte-efficiency/total-byte-weight-test.js b/lighthouse-core/test/audits/byte-efficiency/total-byte-weight-test.js
index 435b4ce51603..cf6f14f3a3be 100644
--- a/lighthouse-core/test/audits/byte-efficiency/total-byte-weight-test.js
+++ b/lighthouse-core/test/audits/byte-efficiency/total-byte-weight-test.js
@@ -35,7 +35,6 @@ function generateArtifacts(records) {
   return {
     devtoolsLogs: {defaultPass: []},
     requestNetworkRecords: () => Promise.resolve(records),
-    requestNetworkThroughput: () => Promise.resolve(1024),
   };
 }
 
diff --git a/lighthouse-core/test/gather/computed/network-throughput-test.js b/lighthouse-core/test/gather/computed/network-throughput-test.js
deleted file mode 100644
index d479baf86168..000000000000
--- a/lighthouse-core/test/gather/computed/network-throughput-test.js
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * @license Copyright 2017 Google Inc. All Rights Reserved.
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
- */
-'use strict';
-
-/* eslint-env jest */
-
-const NetworkThroughput = require('../../../gather/computed/network-throughput');
-const assert = require('assert');
-
-describe('NetworkThroughput', () => {
-  const compute = NetworkThroughput.getThroughput;
-  function createRecord(responseReceivedTime, endTime, extras) {
-    return Object.assign({
-      responseReceivedTime,
-      endTime,
-      transferSize: 1000,
-      finished: true,
-      failed: false,
-      statusCode: 200,
-      url: 'https://google.com/logo.png',
-      parsedURL: {isValid: true, scheme: 'https'},
-    }, extras);
-  }
-
-  it('should return Infinity for no/missing records', () => {
-    assert.equal(compute([]), Infinity);
-    assert.equal(compute([createRecord(0, 0, {finished: false})]), Infinity);
-  });
-
-  it('should compute correctly for a basic waterfall', () => {
-    const result = compute([
-      createRecord(0, 1),
-      createRecord(1, 2),
-      createRecord(2, 6),
-    ]);
-
-    assert.equal(result, 500);
-  });
-
-  it('should compute correctly for concurrent requests', () => {
-    const result = compute([
-      createRecord(0, 1),
-      createRecord(0.5, 1),
-    ]);
-
-    assert.equal(result, 2000);
-  });
-
-  it('should compute correctly for gaps', () => {
-    const result = compute([
-      createRecord(0, 1),
-      createRecord(3, 4),
-    ]);
-
-    assert.equal(result, 1000);
-  });
-
-  it('should compute correctly for partially overlapping requests', () => {
-    const result = compute([
-      createRecord(0, 1),
-      createRecord(0.5, 1.5),
-      createRecord(1.25, 3),
-      createRecord(1.4, 4),
-      createRecord(5, 9),
-    ]);
-
-    assert.equal(result, 625);
-  });
-
-  it('should exclude failed records', () => {
-    const extras = {failed: true};
-    const result = compute([createRecord(0, 2), createRecord(3, 4, extras)]);
-    assert.equal(result, 500);
-  });
-
-  it('should exclude cached records', () => {
-    const extras = {statusCode: 304};
-    const result = compute([createRecord(0, 2), createRecord(3, 4, extras)]);
-    assert.equal(result, 500);
-  });
-
-  it('should exclude unfinished records', () => {
-    const extras = {finished: false};
-    const result = compute([createRecord(0, 2), createRecord(3, 4, extras)]);
-    assert.equal(result, 500);
-  });
-
-  it('should exclude data URIs', () => {
-    const extras = {parsedURL: {scheme: 'data'}};
-    const result = compute([createRecord(0, 2), createRecord(3, 4, extras)]);
-    assert.equal(result, 500);
-  });
-});
diff --git a/lighthouse-core/test/lib/dependency-graph/simulator/network-analyzer-test.js b/lighthouse-core/test/lib/dependency-graph/simulator/network-analyzer-test.js
index db6444f71c3e..2f52c5d9cdd3 100644
--- a/lighthouse-core/test/lib/dependency-graph/simulator/network-analyzer-test.js
+++ b/lighthouse-core/test/lib/dependency-graph/simulator/network-analyzer-test.js
@@ -273,6 +273,103 @@ describe('DependencyGraph/Simulator/NetworkAnalyzer', () => {
     });
   });
 
+  describe('#estimateThroughput', () => {
+    const estimateThroughput = NetworkAnalyzer.estimateThroughput;
+
+    function createThroughputRecord(responseReceivedTime, endTime, extras) {
+      return Object.assign(
+        {
+          responseReceivedTime,
+          endTime,
+          transferSize: 1000,
+          finished: true,
+          failed: false,
+          statusCode: 200,
+          url: 'https://google.com/logo.png',
+          parsedURL: {isValid: true, scheme: 'https'},
+        },
+        extras
+      );
+    }
+
+    it('should return Infinity for no/missing records', () => {
+      assert.equal(estimateThroughput([]), Infinity);
+      assert.equal(estimateThroughput([createThroughputRecord(0, 0, {finished: false})]), Infinity);
+    });
+
+    it('should compute correctly for a basic waterfall', () => {
+      const result = estimateThroughput([
+        createThroughputRecord(0, 1),
+        createThroughputRecord(1, 2),
+        createThroughputRecord(2, 6),
+      ]);
+
+      assert.equal(result, 500 * 8);
+    });
+
+    it('should compute correctly for concurrent requests', () => {
+      const result = estimateThroughput([
+        createThroughputRecord(0, 1),
+        createThroughputRecord(0.5, 1),
+      ]);
+
+      assert.equal(result, 2000 * 8);
+    });
+
+    it('should compute correctly for gaps', () => {
+      const result = estimateThroughput([
+        createThroughputRecord(0, 1),
+        createThroughputRecord(3, 4),
+      ]);
+
+      assert.equal(result, 1000 * 8);
+    });
+
+    it('should compute correctly for partially overlapping requests', () => {
+      const result = estimateThroughput([
+        createThroughputRecord(0, 1),
+        createThroughputRecord(0.5, 1.5),
+        createThroughputRecord(1.25, 3),
+        createThroughputRecord(1.4, 4),
+        createThroughputRecord(5, 9),
+      ]);
+
+      assert.equal(result, 625 * 8);
+    });
+
+    it('should exclude failed records', () => {
+      const result = estimateThroughput([
+        createThroughputRecord(0, 2),
+        createThroughputRecord(3, 4, {failed: true}),
+      ]);
+      assert.equal(result, 500 * 8);
+    });
+
+    it('should exclude cached records', () => {
+      const result = estimateThroughput([
+        createThroughputRecord(0, 2),
+        createThroughputRecord(3, 4, {statusCode: 304}),
+      ]);
+      assert.equal(result, 500 * 8);
+    });
+
+    it('should exclude unfinished records', () => {
+      const result = estimateThroughput([
+        createThroughputRecord(0, 2),
+        createThroughputRecord(3, 4, {finished: false}),
+      ]);
+      assert.equal(result, 500 * 8);
+    });
+
+    it('should exclude data URIs', () => {
+      const result = estimateThroughput([
+        createThroughputRecord(0, 2),
+        createThroughputRecord(3, 4, {parsedURL: {scheme: 'data'}}),
+      ]);
+      assert.equal(result, 500 * 8);
+    });
+  });
+
   describe('#findMainDocument', () => {
     it('should find the main document', async () => {
       const records = await computedArtifacts.requestNetworkRecords(devtoolsLog);
diff --git a/lighthouse-core/test/results/sample_v2.json b/lighthouse-core/test/results/sample_v2.json
index 284058e3f4bc..cc76cb763a21 100644
--- a/lighthouse-core/test/results/sample_v2.json
+++ b/lighthouse-core/test/results/sample_v2.json
@@ -1829,53 +1829,43 @@
           "items": [
             {
               "url": "http://localhost:10200/zone.js",
-              "totalBytes": 71654,
-              "totalMs": 409.9376550703614
+              "totalBytes": 71654
             },
             {
               "url": "http://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js",
-              "totalBytes": 30174,
-              "totalMs": 172.62761051850677
+              "totalBytes": 30174
             },
             {
               "url": "http://localhost:10200/dobetterweb/lighthouse-480x318.jpg",
-              "totalBytes": 24741,
-              "totalMs": 141.54502922510693
+              "totalBytes": 24741
             },
             {
               "url": "http://localhost:10200/dobetterweb/dbw_tester.html",
-              "totalBytes": 12640,
-              "totalMs": 72.31434337356418
+              "totalBytes": 12640
             },
             {
               "url": "http://localhost:10200/dobetterweb/dbw_tester.html",
-              "totalBytes": 12640,
-              "totalMs": 72.31434337356418
+              "totalBytes": 12640
            },
             {
               "url": "http://localhost:10200/dobetterweb/dbw_tester.js",
-              "totalBytes": 1703,
-              "totalMs": 9.74298471243511
+              "totalBytes": 1703
             },
             {
               "url": "http://localhost:10200/dobetterweb/dbw_disabled.css?delay=200&isdisabled",
-              "totalBytes": 1108,
-              "totalMs": 6.338947188125721
+              "totalBytes": 1108
             },
             {
               "url": "http://localhost:10200/dobetterweb/dbw_tester.css?delay=2200",
-              "totalBytes": 821,
-              "totalMs": 4.696999676400015
+              "totalBytes": 821
             },
             {
               "url": "http://localhost:10200/dobetterweb/dbw_tester.css?delay=3000&async=true",
-              "totalBytes": 821,
-              "totalMs": 4.696999676400015
+              "totalBytes": 821
             },
             {
               "url": "http://localhost:10200/dobetterweb/dbw_tester.css?delay=2000&async=true",
-              "totalBytes": 821,
-              "totalMs": 4.696999676400015
+              "totalBytes": 821
             }
           ]
         }
diff --git a/typings/artifacts.d.ts b/typings/artifacts.d.ts
index 0f4747b9e086..a4110bc9fcfa 100644
--- a/typings/artifacts.d.ts
+++ b/typings/artifacts.d.ts
@@ -126,7 +126,6 @@ declare global {
     requestLoadSimulator(data: {devtoolsLog: DevtoolsLog, settings: Config.Settings}): Promise;
     requestMainResource(data: {devtoolsLog: DevtoolsLog, URL: Artifacts['URL']}): Promise;
     requestNetworkAnalysis(devtoolsLog: DevtoolsLog): Promise;
-    requestNetworkThroughput(devtoolsLog: DevtoolsLog): Promise;
    requestNetworkRecords(devtoolsLog: DevtoolsLog): Promise;
     requestPageDependencyGraph(data: {trace: Trace, devtoolsLog: DevtoolsLog}): Promise;
     requestPushedRequests(devtoolsLogs: DevtoolsLog): Promise;
@@ -351,6 +350,7 @@ declare global {
   }
 
   export interface NetworkAnalysis {
+    records: Array;
     rtt: number;
     additionalRttByOrigin: Map;
     serverResponseTimeByOrigin: Map;