
Performance testing is the discipline of measuring how well applications perform under various conditions, ensuring they meet user expectations and business requirements. In the modern web landscape, performance directly impacts user experience, conversion rates, search engine rankings, and ultimately, business success. A slow application doesn't just frustrate users—it drives them away, with studies showing that a 100ms delay in load time can decrease conversion rates by 7%.
Performance testing encompasses various types of testing including load testing (normal expected traffic), stress testing (beyond normal capacity), spike testing (sudden traffic increases), and endurance testing (sustained loads over time). Core Web Vitals monitoring focuses on user-centric metrics that Google uses for search rankings, while Lighthouse automation provides comprehensive performance auditing.
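These test types differ mainly in how many virtual users you simulate, how quickly you ramp them up, and how long you sustain them. As a rough, illustrative sketch (the numbers are placeholders, not recommendations):

// Illustrative load-profile shapes for the four test types (values are placeholders)
const testProfiles = {
  load:      { virtualUsers: 200,  rampUpSeconds: 120, durationSeconds: 600 },   // normal expected traffic
  stress:    { virtualUsers: 2000, rampUpSeconds: 600, durationSeconds: 1200 },  // push beyond normal capacity
  spike:     { virtualUsers: 1500, rampUpSeconds: 10,  durationSeconds: 120 },   // sudden surge, near-instant ramp
  endurance: { virtualUsers: 300,  rampUpSeconds: 120, durationSeconds: 14400 }  // sustained load over hours
};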
Think of performance testing as being like a fitness trainer for your application—it measures current capabilities, identifies weaknesses, sets performance goals, and validates that optimizations actually improve real-world performance.
Performance testing operates across multiple dimensions and timeframes:
1. Frontend Performance Testing – how quickly the browser downloads, renders, and responds to input
2. Backend Performance Testing – how APIs and servers hold up under concurrent load
3. End-to-End Performance Testing – how complete user journeys perform across the whole stack
Core Web Vitals represent Google's attempt to quantify user experience:
Loading (LCP - Largest Contentful Paint):
Good: < 2.5s | Needs Improvement: 2.5-4s | Poor: > 4s
Interactivity (FID - First Input Delay):
Good: < 100ms | Needs Improvement: 100-300ms | Poor: > 300ms
Visual Stability (CLS - Cumulative Layout Shift):
Good: < 0.1 | Needs Improvement: 0.1-0.25 | Poor: > 0.25
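These thresholds matter most when checked against field data as well as lab runs. The open-source web-vitals library (an assumed dependency here, not part of the framework below) reports the same metrics from real user sessions; a minimal collection sketch, with '/analytics' as a placeholder endpoint:

// rum-web-vitals.js - field-data collection sketch using the web-vitals package (assumed dependency)
import { onLCP, onFID, onCLS, onTTFB } from 'web-vitals'; // newer versions expose onINP in place of onFID

function sendToAnalytics(metric) {
  // sendBeacon survives page unloads; '/analytics' is a placeholder endpoint
  navigator.sendBeacon('/analytics', JSON.stringify({
    name: metric.name,
    value: metric.value,
    rating: metric.rating // 'good' | 'needs-improvement' | 'poor', matching the thresholds above
  }));
}

onLCP(sendToAnalytics);
onFID(sendToAnalytics);
onCLS(sendToAnalytics);
onTTFB(sendToAnalytics);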
// performance-testing-framework.js - Comprehensive Performance Testing System
import puppeteer from 'puppeteer';
import lighthouse from 'lighthouse';
class PerformanceTestingFramework {
constructor(config = {}) {
this.config = {
// Performance thresholds
thresholds: {
lcp: { good: 2500, needsImprovement: 4000 },
fid: { good: 100, needsImprovement: 300 },
cls: { good: 0.1, needsImprovement: 0.25 },
ttfb: { good: 800, needsImprovement: 1800 },
tti: { good: 5000, needsImprovement: 10000 },
tbt: { good: 200, needsImprovement: 600 }
},
// Test environments
environments: {
development: 'http://localhost:3000',
staging: 'https://staging.example.com',
production: 'https://example.com'
},
// Device configurations
devices: {
desktop: { cpu: 1, memory: 4096 }, // cpu = CDP CPU-throttling multiplier (1 = no slowdown), memory in MB
tablet: { cpu: 4, memory: 2048 },
mobile: { cpu: 6, memory: 1024 }
},
// Network conditions (latency in ms, throughput in Kbps)
networks: {
fast3g: { latency: 562.5, downloadThroughput: 1.6 * 1024, uploadThroughput: 0.75 * 1024 },
slow3g: { latency: 2000, downloadThroughput: 0.4 * 1024, uploadThroughput: 0.4 * 1024 },
offline: { latency: 0, downloadThroughput: 0, uploadThroughput: 0 }
},
// Load testing parameters
loadTesting: {
maxVirtualUsers: 1000,
rampUpDuration: 300, // seconds
testDuration: 600, // seconds
thinkTime: { min: 1, max: 5 } // seconds between requests
},
...config
};
this.lighthouseRunner = new LighthouseRunner(this.config);
this.loadTestRunner = new LoadTestRunner(this.config);
this.realUserMonitoring = new RealUserMonitoring(this.config);
this.performanceReporting = new PerformanceReporting(this.config);
}
// Comprehensive performance test suite
async runPerformanceTestSuite(targetUrl, options = {}) {
const testSuite = {
url: targetUrl,
timestamp: new Date(),
environment: options.environment || 'staging',
results: {}
};
try {
// 1. Core Web Vitals Testing
console.log('Running Core Web Vitals tests...');
testSuite.results.coreWebVitals = await this.runCoreWebVitalsTests(targetUrl, options);
// 2. Lighthouse Performance Audits
console.log('Running Lighthouse audits...');
testSuite.results.lighthouse = await this.runLighthouseAudits(targetUrl, options);
// 3. Load Testing
if (options.includeLoadTesting) {
console.log('Running load tests...');
testSuite.results.loadTesting = await this.runLoadTests(targetUrl, options);
}
// 4. Network Condition Testing
console.log('Running network condition tests...');
testSuite.results.networkTesting = await this.runNetworkTests(targetUrl, options);
// 5. Device Performance Testing
console.log('Running device performance tests...');
testSuite.results.deviceTesting = await this.runDeviceTests(targetUrl, options);
// 6. Performance Regression Testing
if (options.baselineResults) {
console.log('Running regression analysis...');
testSuite.results.regression = await this.runRegressionAnalysis(
testSuite.results,
options.baselineResults
);
}
// Generate comprehensive report
const report = await this.performanceReporting.generateReport(testSuite);
// Validate against thresholds
const validation = this.validatePerformance(testSuite.results);
return {
...testSuite,
report,
validation,
passed: validation.overallStatus === 'passed'
};
} catch (error) {
console.error('Performance test suite failed:', error);
throw error;
}
}
async runCoreWebVitalsTests(url, options) {
const devices = options.devices || ['desktop', 'mobile'];
const results = {};
for (const device of devices) {
results[device] = await this.measureCoreWebVitals(url, device);
}
return results;
}
async measureCoreWebVitals(url, deviceType) {
const deviceConfig = this.config.devices[deviceType];
const browser = await this.lighthouseRunner.launchBrowser();
const page = await browser.newPage();
try {
// Configure device emulation
await page.emulate({
viewport: deviceType === 'mobile'
? { width: 375, height: 667 }
: { width: 1920, height: 1080 },
userAgent: deviceType === 'mobile'
? 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_0 like Mac OS X)'
: 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
});
// Apply CPU throttling
const client = await page.target().createCDPSession();
await client.send('Emulation.setCPUThrottlingRate', {
rate: deviceConfig.cpu
});
// Start performance monitoring
await page.evaluateOnNewDocument(() => {
window.performanceMetrics = {
navigationStart: 0,
firstPaint: 0,
firstContentfulPaint: 0,
largestContentfulPaint: 0,
firstInputDelay: 0,
cumulativeLayoutShift: 0,
timeToInteractive: 0,
totalBlockingTime: 0
};
// Capture Core Web Vitals
const observer = new PerformanceObserver((list) => {
list.getEntries().forEach((entry) => {
switch (entry.entryType) {
case 'paint':
if (entry.name === 'first-paint') {
window.performanceMetrics.firstPaint = entry.startTime;
} else if (entry.name === 'first-contentful-paint') {
window.performanceMetrics.firstContentfulPaint = entry.startTime;
}
break;
case 'largest-contentful-paint':
window.performanceMetrics.largestContentfulPaint = entry.startTime;
break;
case 'first-input':
window.performanceMetrics.firstInputDelay = entry.processingStart - entry.startTime;
break;
case 'layout-shift':
if (!entry.hadRecentInput) {
window.performanceMetrics.cumulativeLayoutShift += entry.value;
}
break;
}
});
});
observer.observe({
entryTypes: ['paint', 'largest-contentful-paint', 'first-input', 'layout-shift']
});
// Calculate Time to Interactive
const calculateTTI = () => {
const navigationEntry = performance.getEntriesByType('navigation')[0];
if (navigationEntry) {
window.performanceMetrics.timeToInteractive = navigationEntry.loadEventEnd;
}
};
// Calculate Total Blocking Time (longtask entries are only delivered to a
// PerformanceObserver, not via performance.getEntriesByType)
let blockingTime = 0;
new PerformanceObserver((list) => {
list.getEntries().forEach(task => {
if (task.duration > 50) {
blockingTime += task.duration - 50;
}
});
}).observe({ type: 'longtask', buffered: true });
const calculateTBT = () => {
window.performanceMetrics.totalBlockingTime = blockingTime;
};
window.addEventListener('load', () => {
setTimeout(() => {
calculateTTI();
calculateTBT();
}, 5000); // Wait 5 seconds after load for accurate measurements
});
});
// Navigate to page and wait for load
await page.goto(url, { waitUntil: 'networkidle0', timeout: 60000 });
// Wait for metrics collection
await page.waitForTimeout(10000);
// Extract performance metrics
const metrics = await page.evaluate(() => window.performanceMetrics);
// Get additional navigation timing metrics
const navigationMetrics = await page.evaluate(() => {
const nav = performance.getEntriesByType('navigation')[0];
return {
timeToFirstByte: nav.responseStart - nav.requestStart,
domContentLoaded: nav.domContentLoadedEventEnd - nav.domContentLoadedEventStart,
loadComplete: nav.loadEventEnd - nav.loadEventStart
};
});
return {
...metrics,
...navigationMetrics,
deviceType,
timestamp: new Date(),
url
};
} finally {
await page.close();
await browser.close();
}
}
validatePerformance(results) {
const validation = {
coreWebVitals: {},
overallStatus: 'passed',
failures: [],
warnings: []
};
// Validate Core Web Vitals for each device
for (const [device, metrics] of Object.entries(results.coreWebVitals || {})) {
validation.coreWebVitals[device] = this.validateCoreWebVitals(metrics);
if (validation.coreWebVitals[device].status === 'failed') {
validation.overallStatus = 'failed';
validation.failures.push(`Core Web Vitals failed for ${device}`);
} else if (validation.coreWebVitals[device].status === 'warning') {
validation.warnings.push(`Core Web Vitals need improvement for ${device}`);
}
}
return validation;
}
validateCoreWebVitals(metrics) {
const validation = {
lcp: this.validateMetric('lcp', metrics.largestContentfulPaint),
fid: this.validateMetric('fid', metrics.firstInputDelay),
cls: this.validateMetric('cls', metrics.cumulativeLayoutShift),
ttfb: this.validateMetric('ttfb', metrics.timeToFirstByte),
status: 'passed'
};
const failedMetrics = Object.values(validation).filter(v => v?.status === 'failed');
const warningMetrics = Object.values(validation).filter(v => v?.status === 'warning');
if (failedMetrics.length > 0) {
validation.status = 'failed';
} else if (warningMetrics.length > 0) {
validation.status = 'warning';
}
return validation;
}
validateMetric(metricName, value) {
const thresholds = this.config.thresholds[metricName];
if (value <= thresholds.good) {
return { status: 'passed', value, threshold: 'good' };
} else if (value <= thresholds.needsImprovement) {
return { status: 'warning', value, threshold: 'needs-improvement' };
} else {
return { status: 'failed', value, threshold: 'poor' };
}
}
}
// Lighthouse Automation Runner
class LighthouseRunner {
constructor(config) {
this.config = config;
}
async launchBrowser() {
return await puppeteer.launch({
headless: true,
args: [
'--no-sandbox',
'--disable-setuid-sandbox',
'--disable-dev-shm-usage',
'--disable-background-timer-throttling',
'--disable-backgrounding-occluded-windows',
'--disable-renderer-backgrounding'
]
});
}
async runLighthouseAudit(url, options = {}) {
const browser = await this.launchBrowser();
try {
const { lhr } = await lighthouse(url, {
port: new URL(browser.wsEndpoint()).port,
output: 'json',
logLevel: 'error',
onlyCategories: ['performance'],
skipAudits: options.skipAudits || [],
...options.lighthouseOptions
});
return {
performanceScore: lhr.categories.performance.score * 100,
metrics: {
firstContentfulPaint: lhr.audits['first-contentful-paint'].numericValue,
speedIndex: lhr.audits['speed-index'].numericValue,
largestContentfulPaint: lhr.audits['largest-contentful-paint'].numericValue,
interactiveTime: lhr.audits['interactive'].numericValue,
totalBlockingTime: lhr.audits['total-blocking-time'].numericValue,
cumulativeLayoutShift: lhr.audits['cumulative-layout-shift'].numericValue
},
opportunities: lhr.audits,
timestamp: new Date(),
url
};
} finally {
await browser.close();
}
}
}
// Load Testing Implementation
class LoadTestRunner {
constructor(config) {
this.config = config;
}
async runLoadTest(url, options = {}) {
const {
virtualUsers = 10,
duration = 60,
rampUp = 10
} = options;
const results = {
startTime: new Date(),
endTime: null,
totalRequests: 0,
successfulRequests: 0,
failedRequests: 0,
averageResponseTime: 0,
minResponseTime: Infinity,
maxResponseTime: 0,
requestsPerSecond: 0,
errors: [],
responseTimeDistribution: {},
userJourneys: []
};
const userPromises = [];
// Gradually ramp up virtual users
for (let i = 0; i < virtualUsers; i++) {
const delay = (rampUp * 1000 / virtualUsers) * i;
userPromises.push(
new Promise(resolve =>
setTimeout(() => resolve(this.simulateUser(url, duration, results)), delay)
)
);
}
await Promise.all(userPromises);
results.endTime = new Date();
results.averageResponseTime = results.successfulRequests > 0 ? results.averageResponseTime / results.successfulRequests : 0; // only successful requests contribute response times
results.requestsPerSecond = results.totalRequests / duration;
return results;
}
async simulateUser(url, duration, results) {
const browser = await puppeteer.launch({ headless: true });
const page = await browser.newPage();
const userJourney = {
userId: Math.random().toString(36).substr(2, 9),
startTime: new Date(),
requests: [],
errors: []
};
const endTime = Date.now() + (duration * 1000);
try {
while (Date.now() < endTime) {
const requestStart = Date.now();
try {
await page.goto(url, { waitUntil: 'domcontentloaded', timeout: 30000 });
const requestEnd = Date.now();
const responseTime = requestEnd - requestStart;
// Update results
results.totalRequests++;
results.successfulRequests++;
results.averageResponseTime += responseTime;
results.minResponseTime = Math.min(results.minResponseTime, responseTime);
results.maxResponseTime = Math.max(results.maxResponseTime, responseTime);
// Track response time distribution
const bucket = Math.floor(responseTime / 100) * 100;
results.responseTimeDistribution[bucket] = (results.responseTimeDistribution[bucket] || 0) + 1;
userJourney.requests.push({
url,
responseTime,
status: 'success',
timestamp: new Date(requestStart)
});
// Simulate user think time
const thinkTime = Math.random() *
(this.config.loadTesting.thinkTime.max - this.config.loadTesting.thinkTime.min) +
this.config.loadTesting.thinkTime.min;
await new Promise(resolve => setTimeout(resolve, thinkTime * 1000));
} catch (error) {
results.totalRequests++;
results.failedRequests++;
results.errors.push({
error: error.message,
timestamp: new Date(),
userId: userJourney.userId
});
userJourney.errors.push({
error: error.message,
timestamp: new Date()
});
}
}
} finally {
userJourney.endTime = new Date();
results.userJourneys.push(userJourney);
await page.close();
await browser.close();
}
}
}
export { PerformanceTestingFramework, LighthouseRunner, LoadTestRunner };
Let me explain how each component of the framework addresses real-world performance challenges, starting with Core Web Vitals measurement across devices:
const coreWebVitalsResults = await performanceFramework.runCoreWebVitalsTests(url, {
devices: ['desktop', 'mobile', 'tablet']
});
What's being measured:
- The three Core Web Vitals (LCP, FID, CLS), captured in a real browser via PerformanceObserver
- Supporting metrics such as TTFB, Time to Interactive, and Total Blocking Time
- Everything is collected per device profile (desktop, tablet, mobile), so results reflect the hardware and viewport your users actually have
await page.emulate({
viewport: { width: 375, height: 667 },
userAgent: 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_0 like Mac OS X)'
});
await client.send('Emulation.setCPUThrottlingRate', { rate: 6 }); // 6x slower CPU
Realistic testing conditions:
- Mobile viewports and user agents, so layout and resource loading match real handsets
- CPU throttling (for example, a 6x slowdown for mobile) to approximate mid-range devices rather than a developer workstation
- Network profiles (fast 3G, slow 3G, offline) to expose behavior on constrained connections
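The network profiles defined in config.networks are not applied anywhere in the excerpt above. One way to wire them in, sketched here on the assumption that the throughput values are Kbps, is to reuse a CDP session (as the CPU throttling code does) and call Network.emulateNetworkConditions:

// Hypothetical helper: apply one of this.config.networks through the Chrome DevTools Protocol
async function applyNetworkProfile(page, profile) {
  const client = await page.target().createCDPSession();
  await client.send('Network.emulateNetworkConditions', {
    offline: profile.downloadThroughput === 0 && profile.uploadThroughput === 0,
    latency: profile.latency,                                    // round-trip time in ms
    downloadThroughput: (profile.downloadThroughput * 1024) / 8, // Kbps -> bytes per second
    uploadThroughput: (profile.uploadThroughput * 1024) / 8
  });
}
// Usage: await applyNetworkProfile(page, this.config.networks.slow3g);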
const lighthouseResults = await lighthouseRunner.runLighthouseAudit(url, {
onlyCategories: ['performance'],
skipAudits: ['unused-css-rules'] // Skip specific audits if needed
});
Lighthouse provides:
- An overall performance score (0-100) derived from weighted lab metrics
- Lab measurements for First Contentful Paint, Speed Index, Largest Contentful Paint, Time to Interactive, Total Blocking Time, and Cumulative Layout Shift
- Audit results and improvement opportunities with estimated savings
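The opportunities field returned by runLighthouseAudit is the raw lhr.audits object. A small helper, written against the Lighthouse report format rather than as part of the framework, can surface the audits with the largest estimated savings:

// Sketch: pick the improvement opportunities with the largest estimated savings
function topOpportunities(audits, limit = 5) {
  return Object.values(audits)
    .filter(audit => audit.details && audit.details.type === 'opportunity')
    .sort((a, b) => (b.details.overallSavingsMs || 0) - (a.details.overallSavingsMs || 0))
    .slice(0, limit)
    .map(audit => ({ title: audit.title, estimatedSavingsMs: audit.details.overallSavingsMs || 0 }));
}
// Usage: topOpportunities(lighthouseResults.opportunities);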
Here's how to implement comprehensive performance testing in practice:
// PerformanceTest.js - Practical Performance Testing Implementation
import puppeteer from 'puppeteer';
import { promises as fs } from 'fs';
import { PerformanceTestingFramework } from './performance-testing-framework';
// Initialize performance testing framework
const performanceFramework = new PerformanceTestingFramework({
thresholds: {
// Stricter thresholds for e-commerce site
lcp: { good: 2000, needsImprovement: 3000 }, // 2s for good
fid: { good: 50, needsImprovement: 150 }, // 50ms for good
cls: { good: 0.05, needsImprovement: 0.15 }, // 0.05 for good
ttfb: { good: 500, needsImprovement: 1200 } // keep TTFB defined: the shallow config merge replaces the whole thresholds object
}
});
// Comprehensive performance test suite
async function runPerformanceTests() {
const testResults = {};
try {
// Test homepage performance
console.log('Testing homepage performance...');
testResults.homepage = await performanceFramework.runPerformanceTestSuite(
'https://example.com',
{
environment: 'production',
devices: ['desktop', 'mobile'],
includeLoadTesting: true
}
);
// Test product page performance
console.log('Testing product page performance...');
testResults.productPage = await performanceFramework.runPerformanceTestSuite(
'https://example.com/products/laptop-pro',
{
environment: 'production',
devices: ['mobile'], // Focus on mobile for product pages
includeLoadTesting: false // Skip load testing for content pages
}
);
// Test checkout flow performance
console.log('Testing checkout flow performance...');
testResults.checkoutFlow = await performanceFramework.runPerformanceTestSuite(
'https://example.com/checkout',
{
environment: 'production',
devices: ['desktop', 'mobile'],
includeLoadTesting: true,
loadTestingOptions: {
virtualUsers: 50, // Higher load for critical flow
duration: 300, // 5 minutes
rampUp: 60 // 1 minute ramp up
}
}
);
// Generate comparison report
const comparisonReport = generatePerformanceComparison(testResults);
// Save results and reports
await saveTestResults(testResults, comparisonReport);
// Send notifications if any tests failed
await notifyIfPerformanceIssues(testResults);
return testResults;
} catch (error) {
console.error('Performance test suite failed:', error);
await notifyPerformanceTestFailure(error);
throw error;
}
}
// Advanced performance testing with user flows
async function testUserFlowPerformance() {
const userFlowTests = [
{
name: 'User Registration Flow',
steps: [
{ action: 'visit', url: 'https://example.com/signup' },
{ action: 'fill', selector: '[name="email"]', value: 'test@example.com' },
{ action: 'fill', selector: '[name="password"]', value: 'TestPass123!' },
{ action: 'click', selector: '[type="submit"]' },
{ action: 'waitFor', selector: '[data-testid="welcome-message"]' }
]
},
{
name: 'Product Purchase Flow',
steps: [
{ action: 'visit', url: 'https://example.com/products' },
{ action: 'click', selector: '[data-testid="product-card"]:first-child' },
{ action: 'click', selector: '[data-testid="add-to-cart"]' },
{ action: 'click', selector: '[data-testid="checkout-button"]' },
{ action: 'fill', selector: '[name="shipping-address"]', value: '123 Test St' },
{ action: 'click', selector: '[data-testid="place-order"]' }
]
}
];
const results = {};
for (const userFlow of userFlowTests) {
console.log(`Testing ${userFlow.name}...`);
results[userFlow.name] = await testUserFlow(userFlow);
}
return results;
}
async function testUserFlow(userFlow) {
const browser = await puppeteer.launch({ headless: true });
const page = await browser.newPage();
const flowMetrics = {
steps: [],
totalDuration: 0,
coreWebVitals: {},
errors: []
};
try {
// Setup performance monitoring
await page.evaluateOnNewDocument(() => {
window.performanceMetrics = {
stepMetrics: [],
navigationMetrics: []
};
// Track navigation performance for each step
const observer = new PerformanceObserver((list) => {
const entries = list.getEntries();
entries.forEach(entry => {
if (entry.entryType === 'navigation') {
window.performanceMetrics.navigationMetrics.push({
url: entry.name,
loadTime: entry.loadEventEnd - entry.loadEventStart,
domContentLoaded: entry.domContentLoadedEventEnd - entry.domContentLoadedEventStart,
timeToFirstByte: entry.responseStart - entry.requestStart,
timestamp: Date.now()
});
}
});
});
observer.observe({ entryTypes: ['navigation'] });
});
const flowStartTime = Date.now();
// Execute each step in the user flow
for (let i = 0; i < userFlow.steps.length; i++) {
const step = userFlow.steps[i];
const stepStartTime = Date.now();
try {
await executeFlowStep(page, step);
const stepEndTime = Date.now();
const stepDuration = stepEndTime - stepStartTime;
flowMetrics.steps.push({
stepNumber: i + 1,
action: step.action,
duration: stepDuration,
success: true,
timestamp: stepStartTime
});
// Measure Core Web Vitals after each significant step
if (step.action === 'visit' || step.action === 'waitFor') {
const metrics = await page.evaluate(() => {
const nav = performance.getEntriesByType('navigation')[0];
const paintEntries = performance.getEntriesByType('paint');
return {
timeToFirstByte: nav ? nav.responseStart - nav.requestStart : 0,
firstContentfulPaint: paintEntries.find(p => p.name === 'first-contentful-paint')?.startTime || 0,
domContentLoaded: nav ? nav.domContentLoadedEventEnd - nav.domContentLoadedEventStart : 0
};
});
flowMetrics.coreWebVitals[`step_${i + 1}`] = metrics;
}
} catch (error) {
flowMetrics.errors.push({
step: i + 1,
action: step.action,
error: error.message,
timestamp: Date.now()
});
flowMetrics.steps.push({
stepNumber: i + 1,
action: step.action,
duration: Date.now() - stepStartTime,
success: false,
error: error.message
});
}
}
flowMetrics.totalDuration = Date.now() - flowStartTime;
// Get final navigation metrics
const finalMetrics = await page.evaluate(() => window.performanceMetrics);
flowMetrics.navigationMetrics = finalMetrics.navigationMetrics;
} finally {
await page.close();
await browser.close();
}
return flowMetrics;
}
async function executeFlowStep(page, step) {
switch (step.action) {
case 'visit':
await page.goto(step.url, { waitUntil: 'networkidle2', timeout: 30000 });
break;
case 'click':
await page.click(step.selector);
break;
case 'fill':
await page.type(step.selector, step.value);
break;
case 'waitFor':
await page.waitForSelector(step.selector, { timeout: 30000 });
break;
case 'waitForNavigation':
await page.waitForNavigation({ waitUntil: 'networkidle2' });
break;
default:
throw new Error(`Unknown action: ${step.action}`);
}
// Small delay to ensure action is processed
await page.waitForTimeout(100);
}
function generatePerformanceComparison(testResults) {
const comparison = {
summary: {
totalTests: Object.keys(testResults).length,
passedTests: 0,
failedTests: 0,
warnings: 0
},
pageComparison: {},
recommendations: []
};
// Analyze results for each page
for (const [pageName, results] of Object.entries(testResults)) {
const pageAnalysis = {
overallScore: results.validation.overallStatus,
coreWebVitals: {},
lighthouseScore: results.results.lighthouse?.performanceScore || 0,
issues: [],
recommendations: []
};
// Analyze Core Web Vitals performance across devices
for (const [device, vitals] of Object.entries(results.results.coreWebVitals || {})) {
pageAnalysis.coreWebVitals[device] = {
lcp: vitals.largestContentfulPaint,
fid: vitals.firstInputDelay,
cls: vitals.cumulativeLayoutShift,
ttfb: vitals.timeToFirstByte
};
// Identify performance issues
if (vitals.largestContentfulPaint > 4000) {
pageAnalysis.issues.push(`Slow LCP on ${device}: ${vitals.largestContentfulPaint}ms`);
pageAnalysis.recommendations.push('Optimize images and lazy loading');
}
if (vitals.firstInputDelay > 300) {
pageAnalysis.issues.push(`High FID on ${device}: ${vitals.firstInputDelay}ms`);
pageAnalysis.recommendations.push('Reduce JavaScript execution time');
}
if (vitals.cumulativeLayoutShift > 0.25) {
pageAnalysis.issues.push(`High CLS on ${device}: ${vitals.cumulativeLayoutShift}`);
pageAnalysis.recommendations.push('Reserve space for dynamic content');
}
}
// Update summary counts
if (results.passed) {
comparison.summary.passedTests++;
} else {
comparison.summary.failedTests++;
}
if (pageAnalysis.issues.length > 0) {
comparison.summary.warnings++;
}
comparison.pageComparison[pageName] = pageAnalysis;
}
// Generate overall recommendations
const allIssues = Object.values(comparison.pageComparison)
.flatMap(page => page.issues);
if (allIssues.some(issue => issue.includes('LCP'))) {
comparison.recommendations.push({
priority: 'high',
category: 'loading',
recommendation: 'Implement image optimization and lazy loading strategies'
});
}
if (allIssues.some(issue => issue.includes('FID'))) {
comparison.recommendations.push({
priority: 'high',
category: 'interactivity',
recommendation: 'Optimize JavaScript bundles and reduce main thread blocking'
});
}
if (allIssues.some(issue => issue.includes('CLS'))) {
comparison.recommendations.push({
priority: 'medium',
category: 'visual-stability',
recommendation: 'Define dimensions for images and reserve space for ads'
});
}
return comparison;
}
async function saveTestResults(testResults, comparisonReport) {
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
// Save detailed results
await fs.writeFile(
`performance-results-${timestamp}.json`,
JSON.stringify(testResults, null, 2)
);
// Save comparison report
await fs.writeFile(
`performance-report-${timestamp}.json`,
JSON.stringify(comparisonReport, null, 2)
);
// Generate HTML report
const htmlReport = generateHTMLReport(testResults, comparisonReport);
await fs.writeFile(
`performance-report-${timestamp}.html`,
htmlReport
);
}
function generateHTMLReport(testResults, comparisonReport) {
return `
<!DOCTYPE html>
<html>
<head>
<title>Performance Test Report</title>
<style>
body { font-family: Arial, sans-serif; margin: 20px; }
.summary { background: #f5f5f5; padding: 20px; border-radius: 8px; margin-bottom: 20px; }
.test-result { border: 1px solid #ddd; margin: 10px 0; padding: 15px; border-radius: 5px; }
.passed { border-left: 5px solid #4CAF50; }
.failed { border-left: 5px solid #f44336; }
.warning { border-left: 5px solid #FF9800; }
.metric { display: inline-block; margin: 5px 15px 5px 0; }
.metric-good { color: #4CAF50; }
.metric-warning { color: #FF9800; }
.metric-poor { color: #f44336; }
table { width: 100%; border-collapse: collapse; margin: 10px 0; }
th, td { border: 1px solid #ddd; padding: 8px; text-align: left; }
th { background-color: #f2f2f2; }
</style>
</head>
<body>
<h1>Performance Test Report</h1>
<div class="summary">
<h2>Summary</h2>
<p><strong>Total Tests:</strong> ${comparisonReport.summary.totalTests}</p>
<p><strong>Passed:</strong> <span class="metric-good">${comparisonReport.summary.passedTests}</span></p>
<p><strong>Failed:</strong> <span class="metric-poor">${comparisonReport.summary.failedTests}</span></p>
<p><strong>Warnings:</strong> <span class="metric-warning">${comparisonReport.summary.warnings}</span></p>
<p><strong>Generated:</strong> ${new Date().toLocaleString()}</p>
</div>
<h2>Test Results by Page</h2>
${Object.entries(testResults).map(([pageName, results]) => `
<div class="test-result ${results.passed ? 'passed' : 'failed'}">
<h3>${pageName}</h3>
<p><strong>Overall Status:</strong> ${results.validation.overallStatus}</p>
<h4>Core Web Vitals</h4>
<table>
<tr><th>Device</th><th>LCP</th><th>FID</th><th>CLS</th><th>TTFB</th></tr>
${Object.entries(results.results.coreWebVitals || {}).map(([device, metrics]) => `
<tr>
<td>${device}</td>
<td class="metric-${getMetricClass('lcp', metrics.largestContentfulPaint)}">${metrics.largestContentfulPaint}ms</td>
<td class="metric-${getMetricClass('fid', metrics.firstInputDelay)}">${metrics.firstInputDelay}ms</td>
<td class="metric-${getMetricClass('cls', metrics.cumulativeLayoutShift)}">${metrics.cumulativeLayoutShift}</td>
<td class="metric-${getMetricClass('ttfb', metrics.timeToFirstByte)}">${metrics.timeToFirstByte}ms</td>
</tr>
`).join('')}
</table>
${results.results.lighthouse ? `
<h4>Lighthouse Performance Score</h4>
<p class="metric-${getLighthouseClass(results.results.lighthouse.performanceScore)}">${results.results.lighthouse.performanceScore}/100</p>
` : ''}
</div>
`).join('')}
<h2>Recommendations</h2>
<ul>
${comparisonReport.recommendations.map(rec => `
<li><strong>${rec.category}</strong> (${rec.priority} priority): ${rec.recommendation}</li>
`).join('')}
</ul>
</body>
</html>`;
}
// Utility functions for HTML report generation
function getMetricClass(metric, value) {
const thresholds = {
lcp: { good: 2500, warning: 4000 },
fid: { good: 100, warning: 300 },
cls: { good: 0.1, warning: 0.25 },
ttfb: { good: 800, warning: 1800 }
};
const threshold = thresholds[metric];
if (value <= threshold.good) return 'good';
if (value <= threshold.warning) return 'warning';
return 'poor';
}
function getLighthouseClass(score) {
if (score >= 90) return 'good';
if (score >= 50) return 'warning';
return 'poor';
}
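// Note: notifyIfPerformanceIssues and notifyPerformanceTestFailure are called above but not
// defined in this excerpt. A minimal sketch wired to a Slack incoming webhook follows; the
// SLACK_WEBHOOK_URL environment variable is a placeholder, so swap in your own alerting.
async function notifyIfPerformanceIssues(testResults) {
  const failedPages = Object.entries(testResults)
    .filter(([, result]) => !result.passed)
    .map(([pageName]) => pageName);
  if (failedPages.length === 0) return;
  await sendAlert(`Performance checks failed for: ${failedPages.join(', ')}`);
}
async function notifyPerformanceTestFailure(error) {
  await sendAlert(`Performance test suite crashed: ${error.message}`);
}
async function sendAlert(text) {
  if (!process.env.SLACK_WEBHOOK_URL) return; // no webhook configured, skip silently
  await fetch(process.env.SLACK_WEBHOOK_URL, { // global fetch requires Node 18+
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ text })
  });
}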
// Run the performance tests
runPerformanceTests().then(results => {
console.log('Performance testing completed successfully');
console.log(`Results saved with timestamp: ${new Date().toISOString()}`);
}).catch(error => {
console.error('Performance testing failed:', error);
process.exit(1);
});
// Export for use in CI/CD pipelines
export {
runPerformanceTests,
testUserFlowPerformance,
generatePerformanceComparison,
saveTestResults
};
// performance-ci.js - CI/CD Integration for Performance Testing
const { PerformanceTestingFramework } = require('./performance-testing-framework');
class PerformanceCIIntegration {
constructor(config) {
this.config = config;
this.performanceFramework = new PerformanceTestingFramework(config);
}
async runCIPipeline(environment, options = {}) {
const pipeline = {
stage: 'performance-testing',
environment,
startTime: new Date(),
results: {},
status: 'running'
};
try {
// 1. Pre-deployment performance baseline
if (options.captureBaseline) {
console.log('Capturing performance baseline...');
pipeline.results.baseline = await this.capturePerformanceBaseline(environment);
}
// 2. Run performance tests
console.log('Running performance validation...');
pipeline.results.performanceTests = await this.performanceFramework.runPerformanceTestSuite(
this.getEnvironmentURL(environment),
{
environment,
devices: ['desktop', 'mobile'],
includeLoadTesting: environment !== 'production'
}
);
// 3. Performance regression analysis
if (options.baselineResults) {
console.log('Analyzing performance regression...');
pipeline.results.regression = await this.analyzePerformanceRegression(
pipeline.results.performanceTests,
options.baselineResults
);
}
// 4. Generate performance budget report
console.log('Checking performance budgets...');
pipeline.results.budgetAnalysis = await this.checkPerformanceBudgets(
pipeline.results.performanceTests
);
// 5. Determine pipeline status
pipeline.status = this.determinePipelineStatus(pipeline.results);
pipeline.endTime = new Date();
// 6. Generate and send reports
await this.generateCIReports(pipeline);
await this.sendNotifications(pipeline);
return pipeline;
} catch (error) {
pipeline.status = 'failed';
pipeline.error = error.message;
pipeline.endTime = new Date();
await this.handlePipelineFailure(pipeline, error);
throw error;
}
}
async checkPerformanceBudgets(performanceResults) {
const budgets = this.config.performanceBudgets || {
desktop: {
lcp: 2000, // 2 seconds
fid: 100, // 100ms
cls: 0.1, // 0.1
ttfb: 500, // 500ms
lighthouseScore: 85
},
mobile: {
lcp: 2500, // 2.5 seconds
fid: 100, // 100ms
cls: 0.1, // 0.1
ttfb: 800, // 800ms
lighthouseScore: 80
}
};
const budgetAnalysis = {
passed: true,
violations: [],
warnings: [],
summary: {}
};
for (const [device, deviceBudgets] of Object.entries(budgets)) {
const deviceResults = performanceResults.results.coreWebVitals[device];
const lighthouseResults = performanceResults.results.lighthouse;
budgetAnalysis.summary[device] = {
budgetsMet: 0,
totalBudgets: Object.keys(deviceBudgets).length,
violations: []
};
// Check Core Web Vitals budgets
for (const [metric, budgetValue] of Object.entries(deviceBudgets)) {
if (metric === 'lighthouseScore') {
if (lighthouseResults && lighthouseResults.performanceScore < budgetValue) {
const violation = {
device,
metric,
actual: lighthouseResults.performanceScore,
budget: budgetValue,
severity: 'error'
};
budgetAnalysis.violations.push(violation);
budgetAnalysis.summary[device].violations.push(violation);
budgetAnalysis.passed = false;
} else {
budgetAnalysis.summary[device].budgetsMet++;
}
continue;
}
const actualValue = deviceResults[this.getMetricProperty(metric)];
if (actualValue > budgetValue) {
const overagePercent = ((actualValue - budgetValue) / budgetValue) * 100;
const violation = {
device,
metric,
actual: actualValue,
budget: budgetValue,
overage: `${overagePercent.toFixed(1)}%`,
severity: overagePercent > 50 ? 'error' : 'warning'
};
if (violation.severity === 'error') {
budgetAnalysis.violations.push(violation);
budgetAnalysis.passed = false;
} else {
budgetAnalysis.warnings.push(violation);
}
budgetAnalysis.summary[device].violations.push(violation);
} else {
budgetAnalysis.summary[device].budgetsMet++;
}
}
}
return budgetAnalysis;
}
async analyzePerformanceRegression(currentResults, baselineResults) {
const regression = {
hasRegression: false,
improvements: [],
regressions: [],
summary: {}
};
const regressionThreshold = this.config.regressionThreshold || 0.1; // 10% regression threshold
for (const [device, currentMetrics] of Object.entries(currentResults.results.coreWebVitals)) {
const baselineMetrics = baselineResults.coreWebVitals[device];
if (!baselineMetrics) continue;
regression.summary[device] = {
improvements: 0,
regressions: 0,
noChange: 0
};
const metricsToCompare = ['largestContentfulPaint', 'firstInputDelay', 'cumulativeLayoutShift', 'timeToFirstByte'];
for (const metric of metricsToCompare) {
const currentValue = currentMetrics[metric];
const baselineValue = baselineMetrics[metric];
if (currentValue && baselineValue) {
const change = (currentValue - baselineValue) / baselineValue;
if (Math.abs(change) > regressionThreshold) {
const changePercentage = (change * 100).toFixed(1);
const analysis = {
device,
metric,
current: currentValue,
baseline: baselineValue,
change: changePercentage + '%',
changeValue: currentValue - baselineValue
};
if (change > 0) { // Performance got worse
regression.regressions.push(analysis);
regression.summary[device].regressions++;
regression.hasRegression = true;
} else { // Performance improved
regression.improvements.push(analysis);
regression.summary[device].improvements++;
}
} else {
regression.summary[device].noChange++;
}
}
}
}
return regression;
}
determinePipelineStatus(results) {
// Fail if performance tests failed
if (!results.performanceTests.passed) {
return 'failed';
}
// Fail if performance budgets are violated
if (results.budgetAnalysis && !results.budgetAnalysis.passed) {
return 'failed';
}
// Fail if significant performance regression detected
if (results.regression && results.regression.hasRegression) {
const criticalRegressions = results.regression.regressions.filter(r =>
parseFloat(r.change) > 25 // More than 25% regression
);
if (criticalRegressions.length > 0) {
return 'failed';
}
}
return 'passed';
}
async generateCIReports(pipeline) {
const reportData = {
pipeline,
timestamp: new Date(),
environment: pipeline.environment,
duration: pipeline.endTime - pipeline.startTime
};
// Generate JUnit XML for CI systems
const junitXML = this.generateJUnitReport(reportData);
await this.saveReport('junit-performance-report.xml', junitXML);
// Generate JSON report for dashboards
const jsonReport = JSON.stringify(reportData, null, 2);
await this.saveReport('performance-report.json', jsonReport);
// Generate HTML report for human consumption
const htmlReport = this.generateHTMLCIReport(reportData);
await this.saveReport('performance-report.html', htmlReport);
}
generateJUnitReport(reportData) {
const { pipeline } = reportData;
const testCases = [];
// Create test cases for each performance check
if (pipeline.results.performanceTests) {
for (const [device, results] of Object.entries(pipeline.results.performanceTests.results.coreWebVitals)) {
testCases.push({
classname: 'CoreWebVitals',
name: `${device}_LCP`,
time: results.largestContentfulPaint / 1000,
failure: results.largestContentfulPaint > this.config.thresholds.lcp.needsImprovement ?
`LCP too slow: ${results.largestContentfulPaint}ms` : null
});
testCases.push({
classname: 'CoreWebVitals',
name: `${device}_FID`,
time: results.firstInputDelay / 1000,
failure: results.firstInputDelay > this.config.thresholds.fid.needsImprovement ?
`FID too slow: ${results.firstInputDelay}ms` : null
});
testCases.push({
classname: 'CoreWebVitals',
name: `${device}_CLS`,
time: 0,
failure: results.cumulativeLayoutShift > this.config.thresholds.cls.needsImprovement ?
`CLS too high: ${results.cumulativeLayoutShift}` : null
});
}
}
const totalTests = testCases.length;
const failures = testCases.filter(tc => tc.failure).length;
return `<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="PerformanceTests" tests="${totalTests}" failures="${failures}" time="${reportData.duration / 1000}">
${testCases.map(tc => `
<testcase classname="${tc.classname}" name="${tc.name}" time="${tc.time}">
${tc.failure ? `<failure message="${tc.failure}">${tc.failure}</failure>` : ''}
</testcase>
`).join('')}
</testsuite>`;
}
getMetricProperty(metric) {
const metricMap = {
'lcp': 'largestContentfulPaint',
'fid': 'firstInputDelay',
'cls': 'cumulativeLayoutShift',
'ttfb': 'timeToFirstByte'
};
return metricMap[metric] || metric;
}
getEnvironmentURL(environment) {
return this.config.environments[environment] || this.config.environments.staging;
}
async saveReport(filename, content) {
const fs = require('fs').promises;
await fs.writeFile(`reports/${filename}`, content);
}
}
// Export for CI/CD usage
module.exports = { PerformanceCIIntegration };
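In a pipeline step, the integration might be driven by a small wrapper script like the one below (the file name, thresholds, and staging URL are illustrative); exiting non-zero lets the CI system fail the build on budget violations or regressions:

// ci-run.js - example pipeline entry point (illustrative values)
const { PerformanceCIIntegration } = require('./performance-ci');

const ci = new PerformanceCIIntegration({
  environments: { staging: 'https://staging.example.com' },
  thresholds: {
    lcp: { good: 2500, needsImprovement: 4000 },
    fid: { good: 100, needsImprovement: 300 },
    cls: { good: 0.1, needsImprovement: 0.25 },
    ttfb: { good: 800, needsImprovement: 1800 }
  }
});

ci.runCIPipeline('staging', { captureBaseline: false })
  .then(pipeline => {
    console.log(`Performance stage finished with status: ${pipeline.status}`);
    if (pipeline.status !== 'passed') process.exit(1); // gate the deployment
  })
  .catch(() => process.exit(1));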
Performance testing ensures applications meet user expectations and business requirements across all conditions. Our comprehensive framework provides:
Key Benefits:
- User-centric measurement through Core Web Vitals, validated against explicit thresholds
- Early detection of regressions and budget violations before they reach production
- Realistic conditions through device, CPU, and network emulation plus load testing
Framework Features:
- Core Web Vitals measurement, Lighthouse automation, and load testing in a single suite
- Performance budgets, regression analysis against baselines, and JUnit/JSON/HTML reporting
- CI/CD integration that can gate deployments on performance results
Real-World Applications:
- Page-level audits for homepages, product pages, and checkout flows
- User-flow performance testing for registration and purchase journeys
- Pipeline gates that fail builds when budgets or regression thresholds are exceeded
This performance testing approach ensures your applications deliver consistently fast, responsive experiences that meet user expectations while providing development teams with the data needed to make informed optimization decisions.
