// Hacker News alerts in Slack (incessant pings if you make the front page)
// Performance test for the HN alerts API
// NOTE(review): `$` (Bun shell) appears unused in this chunk — confirm against
// the rest of the file before removing.
import { $ } from "bun";
import chalk from "chalk";

// Configuration
// Base URL of the locally running API under test.
const API_URL = "http://localhost:3000";
// Endpoints exercised by the load test (key = display name, value = path).
const ENDPOINTS = {
  stories: "/api/stories",
  totalStories: "/api/stats/total-stories",
  verifiedUsers: "/api/stats/verified-users",
};

// Test parameters
const CONCURRENCY = 50; // Number of concurrent users
const DURATION = 30; // Test duration in seconds
const RAMP_UP = 5; // Ramp-up time in seconds
const COOLDOWN = 2; // Cooldown between tests in seconds

// Performance metrics — module-level mutable state, written by sendRequest()
// and reset at the start of each runLoadTest() run.
let totalRequests = 0;
let successfulRequests = 0;
let failedRequests = 0;
let totalLatency = 0; // Sum of latencies for SUCCESSFUL requests only
let maxLatency = 0;
let minLatency = Number.POSITIVE_INFINITY;
// Latency histogram for successful requests; despite the cumulative-sounding
// names, each request is counted in exactly one bucket.
let responseTimeDistribution = {
  under50ms: 0,
  under100ms: 0,
  under250ms: 0,
  under500ms: 0,
  under1s: 0,
  under2s: 0,
  over2s: 0,
};
// Format a millisecond value as a human-readable duration string
// (two decimals, "ms" below one second, "s" otherwise).
function formatDuration(ms) {
  const isSubSecond = ms < 1000;
  const value = isSubSecond ? ms : ms / 1000;
  const unit = isSubSecond ? "ms" : "s";
  return `${value.toFixed(2)}${unit}`;
}
// Issue one GET against the API, measure its latency, and fold the outcome
// into the module-level metric counters.
//
// endpoint - path (e.g. "/api/stories") appended to API_URL.
// Returns { success, latency, status } for HTTP responses, or
// { success: false, latency, error } when the request itself failed.
async function sendRequest(endpoint) {
  const start = performance.now();
  try {
    const response = await fetch(`${API_URL}${endpoint}`);
    // Time-to-headers: measured before draining the body so the metric keeps
    // the original "response received" definition.
    const latency = performance.now() - start;
    // Drain the body so keep-alive sockets are released back to the pool
    // during the load test; a broken body counts as a failure (catch below).
    await response.arrayBuffer();

    totalRequests++;
    if (response.status !== 200) {
      failedRequests++;
      return { success: false, latency, status: response.status };
    }

    successfulRequests++;
    totalLatency += latency;
    maxLatency = Math.max(maxLatency, latency);
    minLatency = Math.min(minLatency, latency);

    // Bucket the latency for the response-time distribution report.
    if (latency < 50) responseTimeDistribution.under50ms++;
    else if (latency < 100) responseTimeDistribution.under100ms++;
    else if (latency < 250) responseTimeDistribution.under250ms++;
    else if (latency < 500) responseTimeDistribution.under500ms++;
    else if (latency < 1000) responseTimeDistribution.under1s++;
    else if (latency < 2000) responseTimeDistribution.under2s++;
    else responseTimeDistribution.over2s++;

    return { success: true, latency, status: response.status };
  } catch (error) {
    // Take the timestamp BEFORE mutating the counters so the bookkeeping
    // itself is not included in the reported latency (the original computed
    // `end` after the increments).
    const latency = performance.now() - start;
    totalRequests++;
    failedRequests++;
    return { success: false, latency, error: error.message };
  }
}
// Render an in-place progress bar on stdout for the running test.
// `current`/`total` are in the same unit (here: elapsed vs. total ms).
function showProgress(current, total, testName) {
  const fraction = current / total;
  const filled = Math.floor(fraction * 30);
  // Clamp the unfilled part so an overshoot (current > total) never throws.
  const bar = "█".repeat(filled) + "░".repeat(Math.max(0, 30 - filled));
  const percent = Math.floor(fraction * 100);
  process.stdout.write(`\r${testName}: [${bar}] ${percent}% `);
}
// Print the aggregate metrics collected by sendRequest() for one test run.
function printResults(endpoint) {
  // Guard the ratios: a run can end with zero (successful) requests, and the
  // original expressions produced NaN/Infinity in that case.
  const avgLatency =
    successfulRequests > 0 ? totalLatency / successfulRequests : 0;
  const successRate =
    totalRequests > 0 ? (successfulRequests / totalRequests) * 100 : 0;
  const requestsPerSecond = totalRequests / DURATION;
  // minLatency stays at +Infinity when nothing succeeded; show 0 instead.
  const minShown = Number.isFinite(minLatency) ? minLatency : 0;

  console.log(`\n\n${chalk.green("📈 Test Results:")}`);
  console.log(chalk.bold(`  Endpoint: ${endpoint}`));
  console.log(`  Total Requests: ${chalk.yellow(totalRequests)}`);
  console.log(
    `  Successful: ${chalk.green(successfulRequests)} (${successRate.toFixed(1)}%)`,
  );
  console.log(`  Failed: ${chalk.red(failedRequests)}`);
  console.log(
    `  Requests/second: ${chalk.cyan(requestsPerSecond.toFixed(2))}`,
  );
  console.log(`  Avg Response Time: ${chalk.cyan(formatDuration(avgLatency))}`);
  console.log(`  Min Response Time: ${chalk.cyan(formatDuration(minShown))}`);
  console.log(`  Max Response Time: ${chalk.cyan(formatDuration(maxLatency))}`);

  console.log("\n  Response Time Distribution:");
  // NOTE: buckets only count successful requests, but the percentages are
  // (as before) taken against ALL requests, so they may not sum to 100%.
  const buckets = [
    ["< 50ms", responseTimeDistribution.under50ms, chalk.green],
    ["< 100ms", responseTimeDistribution.under100ms, chalk.green],
    ["< 250ms", responseTimeDistribution.under250ms, chalk.yellow],
    ["< 500ms", responseTimeDistribution.under500ms, chalk.yellow],
    ["< 1s", responseTimeDistribution.under1s, chalk.yellow],
    ["< 2s", responseTimeDistribution.under2s, chalk.red],
    [">= 2s", responseTimeDistribution.over2s, chalk.red],
  ];
  for (const [label, count, color] of buckets) {
    const pct = totalRequests > 0 ? (count / totalRequests) * 100 : 0;
    console.log(`    ${label.padEnd(7)}: ${color(count)} (${pct.toFixed(1)}%)`);
  }
}

// Run a fixed-duration load test against one endpoint: ramp up to CONCURRENCY
// in-flight requests over RAMP_UP seconds, hold that level for DURATION
// seconds, then print the aggregate metrics and observe the COOLDOWN pause.
// Resets and mutates the module-level metric counters.
async function runLoadTest(endpoint, name) {
  console.log(chalk.blue(`\n📊 Starting load test for ${name} (${endpoint})`));
  console.log(
    chalk.gray(`  ${CONCURRENCY} concurrent users for ${DURATION} seconds`),
  );

  // Reset metrics for this run.
  totalRequests = 0;
  successfulRequests = 0;
  failedRequests = 0;
  totalLatency = 0;
  maxLatency = 0;
  minLatency = Number.POSITIVE_INFINITY;
  responseTimeDistribution = {
    under50ms: 0,
    under100ms: 0,
    under250ms: 0,
    under500ms: 0,
    under1s: 0,
    under2s: 0,
    over2s: 0,
  };

  const startTime = Date.now();
  const endTime = startTime + DURATION * 1000;

  // In-flight request promises; each one removes itself when it settles.
  const activePromises = new Set();

  let testInterval;
  try {
    // Scheduler tick: refresh the progress bar and top the pool of in-flight
    // requests up to the current (ramped) concurrency target.
    testInterval = setInterval(() => {
      const elapsedTime = Date.now() - startTime;
      showProgress(elapsedTime, DURATION * 1000, name);

      // Scale concurrency linearly during the ramp-up window.
      let targetConcurrency = CONCURRENCY;
      if (elapsedTime < RAMP_UP * 1000) {
        targetConcurrency = Math.ceil(
          (elapsedTime / (RAMP_UP * 1000)) * CONCURRENCY,
        );
      }

      // Launch new requests until the pool is full and we're still within
      // the test window.
      while (activePromises.size < targetConcurrency && Date.now() < endTime) {
        const promise = sendRequest(endpoint).then(() => {
          activePromises.delete(promise);
        });
        activePromises.add(promise);
      }
    }, 50);

    // Let the test run for its full duration, stop launching, then wait for
    // any stragglers to finish so the metrics are complete.
    await new Promise((resolve) => setTimeout(resolve, DURATION * 1000));
    clearInterval(testInterval);
    await Promise.all(Array.from(activePromises));

    printResults(endpoint);
  } catch (error) {
    console.error(chalk.red(`\nTest failed: ${error.message}`));
  } finally {
    // Safe to call twice; guarantees the interval dies on the error path too.
    clearInterval(testInterval);
  }

  // Give the server a breather before the next endpoint's test.
  if (COOLDOWN > 0) {
    console.log(chalk.gray(`\nCooling down for ${COOLDOWN} seconds...`));
    await new Promise((resolve) => setTimeout(resolve, COOLDOWN * 1000));
  }
}
// Entry point: confirm the API is alive, then run the load-test suite against
// every configured endpoint in sequence. Exits non-zero on any failure.
async function runAllTests() {
  console.log(chalk.bold.blue("\n🔍 HN-ALERTS API PERFORMANCE TEST\n"));

  try {
    // Bail out early if the service is not reachable at all.
    console.log(chalk.gray("Checking API health..."));
    const health = await fetch(`${API_URL}/health`);
    if (!health.ok) {
      throw new Error(`API health check failed with status ${health.status}`);
    }
    console.log(chalk.green("✅ API is healthy and responding\n"));

    // Endpoints are tested one at a time so the runs don't skew each other.
    for (const [name, endpoint] of Object.entries(ENDPOINTS)) {
      await runLoadTest(endpoint, name);
    }

    console.log(chalk.bold.green("\n🎉 All tests completed successfully!\n"));
  } catch (error) {
    console.error(chalk.bold.red(`\n❌ Testing failed: ${error.message}\n`));
    process.exit(1);
  }
}

// Kick everything off.
runAllTests();