// Hacker News alerts in Slack (incessant pings if you make front page)
1import { version } from "../../package.json";
2import * as Sentry from "@sentry/bun";
3
4// Check if we're in production mode to reduce logging
5const isProduction = process.env.NODE_ENV === "production";
6
7/**
8 * Creates consistent cache headers with stable ETags
9 * @param key Cache key for the resource
10 * @param maxAge Max age in seconds for the Cache-Control header
11 * @returns Headers object with proper caching directives
12 */
13export function createCacheHeaders(
14 key: string,
15 maxAge = 300,
16 data?: unknown,
17): Record<string, string> {
18 // Generate stable ETag based on version, cache key, and data hash if available
19 let etag: string;
20
21 if (data) {
22 // Generate based on actual data content for stronger validation
23 const dataStr = JSON.stringify(data);
24 const dataHash = Bun.hash(dataStr).toString(36).slice(0, 12);
25 etag = `"${version}-${key}-${dataHash}"`;
26 } else {
27 // Fallback to time-based for headers without data
28 etag = `"${version}-${key}-${Math.floor(Date.now() / (maxAge * 1000))}"`;
29 }
30
31 return {
32 "Content-Type": "application/json",
33 "Cache-Control": `public, max-age=${maxAge - 10}, stale-while-revalidate=60`,
34 ETag: etag,
35 "X-Cache-Key": key, // Helps with debugging cache issues
36 };
37}
38
39/**
40 * Applies compression to a Response if the client supports it
41 * @param request Original request to check Accept-Encoding
42 * @param response Response to potentially compress
43 * @returns Compressed response if possible, original otherwise
44 */
45export async function compressResponse(
46 request: Request,
47 response: Response,
48 // @ts-expect-error
49): Promise<Response | Bun.Response> {
50 // Skip compression for non-JSON responses or small responses
51 const contentType = response.headers.get("Content-Type");
52 if (!contentType?.includes("application/json")) {
53 return response;
54 }
55
56 // Early exit if compression is not supported by client
57 const acceptEncoding = request.headers.get("Accept-Encoding") || "";
58 const supportsGzip = acceptEncoding.includes("gzip");
59 const supportsDeflate = acceptEncoding.includes("deflate");
60
61 if (!supportsGzip && !supportsDeflate) {
62 return response;
63 }
64
65 // Get response body
66 const body = await response.text();
67
68 // Only compress responses over a certain size (1KB)
69 if (body.length < 1024) {
70 return new Response(body, {
71 status: response.status,
72 headers: response.headers,
73 });
74 }
75
76 // Get headers once
77 const headers = Object.fromEntries(response.headers.entries());
78
79 // Try gzip first as it's more widely supported
80 if (supportsGzip) {
81 try {
82 // Use a lower compression level (4) for speed vs. size tradeoff
83 const compressedBody = Bun.gzipSync(Buffer.from(body), {
84 level: 4, // Medium compression level for better performance
85 });
86
87 // Only compress if it actually reduces size
88 if (compressedBody.length < body.length) {
89 return new Response(compressedBody, {
90 status: response.status,
91 headers: {
92 ...headers,
93 "Content-Encoding": "gzip",
94 "Content-Length": compressedBody.length.toString(),
95 Vary: "Accept-Encoding",
96 },
97 });
98 }
99 } catch (error) {
100 // Fall back to uncompressed if compression fails
101 if (!isProduction) {
102 console.error("Compression error:", error);
103 }
104 }
105 } else if (supportsDeflate) {
106 try {
107 const compressedBody = Bun.deflateSync(Buffer.from(body), {
108 level: 4, // Medium compression level
109 });
110
111 if (compressedBody.length < body.length) {
112 return new Response(compressedBody, {
113 status: response.status,
114 headers: {
115 ...headers,
116 "Content-Encoding": "deflate",
117 "Content-Length": compressedBody.length.toString(),
118 Vary: "Accept-Encoding",
119 },
120 });
121 }
122 } catch (error) {
123 if (!isProduction) {
124 console.error("Deflate compression error:", error);
125 }
126 }
127 }
128
129 // Return original response if compression not possible or not beneficial
130 return new Response(body, {
131 status: response.status,
132 headers: headers,
133 });
134}
135
// Cache system for database queries
export type CacheItem<T> = {
  data: T; // Cached payload returned by the query function
  timestamp: number; // Unix seconds when the entry was stored
  expiresAt: number; // Unix seconds after which the entry is considered stale
};

// Type for registered query functions (zero-arg async producers)
export type QueryFunction<T> = () => Promise<T>;
145
146export class QueryCache {
147 private cache: Map<string, CacheItem<unknown>> = new Map();
148 private defaultTTL: number = 60 * 5; // 5 minutes in seconds
149 private prefetchQueue: Set<string> = new Set();
150 private maxItems = 500; // Maximum cache entries
151 private requestCounter = 0; // Counter for recent requests
152 private lastCounterReset: number = Date.now(); // Last time counter was reset
153 private priorityKeys: Set<string> = new Set(); // High-priority keys that shouldn't be evicted
154 private lowLatencyMode = true; // Whether to optimize for consistent latency
155
156 // Cache hits and misses tracking
157 private hits = 0;
158 private misses = 0;
159 private lastGC: number = Date.now(); // Last garbage collection time
160
161 // Registry to store query functions for reuse during cache warming
162 private queryRegistry: Map<
163 string,
164 { fn: QueryFunction<unknown>; ttl: number; priority: boolean }
165 > = new Map();
166
167 constructor(defaultTTL?: number, maxItems?: number) {
168 if (defaultTTL) {
169 this.defaultTTL = defaultTTL;
170 }
171 if (maxItems) {
172 this.maxItems = maxItems;
173 }
174 if (!isProduction) {
175 console.log(
176 `Initialized query cache with ${this.defaultTTL}s TTL and max ${this.maxItems} items`,
177 );
178 }
179
180 // Set up periodic counter reset for monitoring - less frequent in production
181 setInterval(
182 () => {
183 this.requestCounter = 0;
184 this.lastCounterReset = Date.now();
185 },
186 isProduction ? 30000 : 10000,
187 ); // Reset every 30s in prod, 10s in dev
188
189 // Set up periodic garbage collection for cache health
190 setInterval(
191 () => this.runGarbageCollection(),
192 isProduction ? 300000 : 60000, // 5 min in prod, 1 min in dev
193 );
194 }
195
196 /**
197 * Register a query function for later use in cache warming
198 * @param key Cache key
199 * @param queryFn Function that performs the actual query
200 * @param ttl Cache TTL in seconds
201 * @param priority Whether this is a high-priority key that should resist eviction
202 */
203 register<T>(
204 key: string,
205 queryFn: QueryFunction<T>,
206 ttl: number = this.defaultTTL,
207 priority = false,
208 ): void {
209 this.queryRegistry.set(key, {
210 fn: queryFn as QueryFunction<unknown>,
211 ttl,
212 priority,
213 });
214
215 if (priority) {
216 this.priorityKeys.add(key);
217 } else {
218 // Make sure it's not in priority keys if priority=false
219 this.priorityKeys.delete(key);
220 }
221
222 if (!isProduction) {
223 console.log(
224 `Registered query function for key: ${key} with TTL: ${ttl}s${priority ? " (priority)" : ""}`,
225 );
226 }
227 }
228
229 /**
230 * Get all registered cache keys
231 * @returns Array of registered cache keys
232 */
233 getRegisteredKeys(): string[] {
234 return Array.from(this.queryRegistry.keys());
235 }
236
237 /**
238 * Get all non-priority registered cache keys
239 * @returns Array of non-priority cache keys
240 */
241 getNonPriorityKeys(): string[] {
242 return Array.from(this.queryRegistry.entries())
243 .filter(([_, details]) => !details.priority)
244 .map(([key]) => key);
245 }
246
247 /**
248 * Get registration details for a specific key
249 * @param key Cache key to look up
250 * @returns Registration details or undefined if not found
251 */
252 getQueryRegistration(
253 key: string,
254 ):
255 | { fn: QueryFunction<unknown>; ttl: number; priority: boolean }
256 | undefined {
257 return this.queryRegistry.get(key);
258 }
259
260 /**
261 * Get data from cache or execute the query function
262 * @param key Cache key
263 * @param queryFn Function that performs the actual query
264 * @param ttl Cache TTL in seconds
265 * @returns Query result
266 */
267 async get<T>(
268 key: string,
269 queryFn: QueryFunction<T>,
270 ttl: number = this.defaultTTL,
271 ): Promise<T> {
272 // Track request load
273 this.requestCounter++;
274
275 const now = Math.floor(Date.now() / 1000);
276 const cached = this.cache.get(key);
277
278 // Fast path: Return cached value if it exists and is not expired
279 if (cached && cached.expiresAt > now) {
280 // Track hit rate
281 this.hits++;
282
283 if (!isProduction) {
284 console.log(
285 `Cache hit for ${key} (expires in ${cached.expiresAt - now}s)`,
286 );
287 }
288
289 // Aggressive prefetching for frequently accessed keys
290 // Only prefetch if not already in queue and approaching expiry
291 const timeToExpiry = cached.expiresAt - now;
292 const isPriority = this.priorityKeys.has(key);
293 // More aggressive prefetching for priority keys (15% vs 5%) and for dev (25% vs 15%)
294 const prefetchThreshold = isPriority
295 ? isProduction
296 ? ttl * 0.15
297 : ttl * 0.25 // Priority keys
298 : isProduction
299 ? ttl * 0.05
300 : ttl * 0.15; // Regular keys
301
302 if (timeToExpiry < prefetchThreshold && !this.prefetchQueue.has(key)) {
303 // Schedule prefetch in background
304 this.prefetch(key, queryFn, ttl);
305 }
306
307 return cached.data as T;
308 }
309
310 // Track miss rate
311 this.misses++;
312
313 // Execute the query (cache miss)
314 if (!isProduction) {
315 console.log(`Cache miss for ${key}, fetching from database...`);
316 }
317
318 // Execute query and store result
319 try {
320 const data = await queryFn();
321
322 // Cache the result
323 this.cache.set(key, {
324 data,
325 timestamp: now,
326 expiresAt: now + ttl,
327 });
328
329 // Register this key if it's not already registered
330 if (!this.queryRegistry.has(key)) {
331 this.register(key, queryFn, ttl, this.priorityKeys.has(key));
332 }
333
334 // Check cache size asynchronously to avoid blocking the response
335 if (this.cache.size > this.maxItems * 0.9) {
336 // At 90% capacity
337 queueMicrotask(() => this.pruneCache());
338 }
339
340 return data;
341 } catch (error) {
342 // If query fails but we have stale data, return it with a warning (stale-while-error)
343 if (cached) {
344 console.warn(
345 `Query failed for ${key}, returning stale data from ${new Date(cached.timestamp * 1000).toISOString()}`,
346 );
347 Sentry.captureException(
348 new Error(`Query failed for ${key}, returning stale data`, {
349 cause: error,
350 }),
351 );
352 return cached.data as T;
353 }
354 // No stale data to fall back to
355 throw error;
356 }
357 }
358
359 // Background prefetch to refresh cache before expiration
360 private prefetch<T>(
361 key: string,
362 queryFn: QueryFunction<T>,
363 ttl: number,
364 ): void {
365 this.prefetchQueue.add(key);
366
367 // Use adaptive delay based on system load and key priority
368 const isPriority = this.priorityKeys.has(key);
369 const queueSize = this.prefetchQueue.size;
370 let delay: number;
371
372 if (isPriority) {
373 // Priority keys get lower delay
374 delay = isProduction ? 10 : 0;
375 } else if (queueSize > 10) {
376 // Under heavy prefetch load, increase delay for non-priority keys
377 delay = isProduction ? 200 + queueSize * 10 : 100;
378 } else {
379 // Normal delay
380 delay = isProduction ? 50 : 0;
381 }
382
383 setTimeout(async () => {
384 try {
385 const startTime = Date.now();
386 if (!isProduction) {
387 console.log(`Prefetching ${key} before expiration`);
388 }
389
390 const data = await queryFn();
391 const queryTime = Date.now() - startTime;
392 const now = Math.floor(Date.now() / 1000);
393
394 // Adjust TTL based on query performance if in low latency mode
395 let adjustedTtl = ttl;
396 if (this.lowLatencyMode && queryTime > 200) {
397 // For slow queries, extend the cache TTL to reduce frequency of expensive operations
398 const slowQueryMultiplier = Math.min(3, 1 + queryTime / 1000);
399 adjustedTtl = Math.floor(ttl * slowQueryMultiplier);
400 if (!isProduction) {
401 console.log(
402 `Slow query (${queryTime}ms) for ${key}, extending TTL by ${slowQueryMultiplier}x`,
403 );
404 }
405 }
406
407 this.cache.set(key, {
408 data,
409 timestamp: now,
410 expiresAt: now + adjustedTtl,
411 });
412
413 if (!isProduction) {
414 console.log(`Successfully prefetched ${key} in ${queryTime}ms`);
415 }
416 } catch (error) {
417 console.error(`Error prefetching ${key}:`, error);
418 Sentry.captureException(error);
419 } finally {
420 this.prefetchQueue.delete(key);
421 }
422 }, delay);
423 }
424
425 /**
426 * Warm a specific cache entry using its registered query function
427 * @param key Cache key to warm
428 * @returns Promise resolving to the cached data or null if key not registered
429 */
430 async warmCache<T>(key: string): Promise<T | null> {
431 const registration = this.queryRegistry.get(key);
432 if (!registration) {
433 if (!isProduction) {
434 console.warn(
435 `Cannot warm cache for ${key}: No registered query function`,
436 );
437 }
438 return null;
439 }
440
441 try {
442 if (!isProduction) {
443 console.log(`Warming cache for ${key} using registered function`);
444 }
445
446 const data = await this.get(
447 key,
448 registration.fn as QueryFunction<T>,
449 registration.ttl,
450 );
451 return data;
452 } catch (error) {
453 console.error(`Error warming cache for ${key}:`, error);
454 Sentry.captureException(error);
455 return null;
456 }
457 }
458
459 invalidate(key: string): void {
460 // Fast path - only log if actually invalidating
461 if (this.cache.has(key)) {
462 if (!isProduction) {
463 console.log(`Invalidating cache for ${key}`);
464 }
465 this.cache.delete(key);
466 }
467 }
468
469 invalidateAll(): void {
470 if (!isProduction) {
471 console.log("Invalidating entire cache");
472 }
473
474 // Preserve data for priority keys by keeping their entries
475 if (this.priorityKeys.size > 0) {
476 const priorityEntries: [string, CacheItem<unknown>][] = [];
477
478 // First collect all priority entries
479 for (const [key, value] of this.cache.entries()) {
480 if (this.priorityKeys.has(key)) {
481 priorityEntries.push([key, value]);
482 }
483 }
484
485 // Clear everything
486 this.cache.clear();
487
488 // Restore priority entries
489 for (const [key, value] of priorityEntries) {
490 this.cache.set(key, value);
491 }
492
493 if (!isProduction) {
494 console.log(
495 `Preserved ${priorityEntries.length} priority cache entries during invalidation`,
496 );
497 }
498 } else {
499 this.cache.clear();
500 }
501 }
502
503 // Prune cache when it exceeds max size using smart eviction policy
504 private pruneCache(): void {
505 if (this.cache.size <= this.maxItems) return;
506
507 // Get entries as array for sorting
508 const entries = Array.from(this.cache.entries());
509 const now = Math.floor(Date.now() / 1000);
510
511 // First, check for any expired entries we can remove
512 const expiredEntries = entries.filter(
513 ([key, item]) => item.expiresAt <= now && !this.priorityKeys.has(key),
514 );
515
516 // If we have expired entries, remove those first
517 if (expiredEntries.length > 0) {
518 for (const entry of expiredEntries) {
519 this.cache.delete(entry[0]);
520 }
521
522 if (!isProduction) {
523 console.log(`Pruned ${expiredEntries.length} expired items from cache`);
524 }
525
526 // If removing expired entries was enough, we're done
527 if (this.cache.size <= this.maxItems * 0.9) {
528 return;
529 }
530 }
531
532 // If we still need to remove more, use a smarter eviction policy
533 // Filter out priority keys that should never be evicted
534 const evictableEntries = entries.filter(
535 ([key]) => !this.priorityKeys.has(key),
536 );
537
538 if (evictableEntries.length === 0) {
539 console.warn(
540 "Cache full but all items are priority - consider increasing cache size",
541 );
542 return;
543 }
544
545 // Sort by timestamp (oldest first)
546 evictableEntries.sort((a, b) => a[1].timestamp - b[1].timestamp);
547
548 // Calculate how many to remove - more aggressive cleanup (down to 70%)
549 const removeCount = Math.ceil(this.cache.size - this.maxItems * 0.7);
550 // Remove oldest non-priority entries
551 const entriesToRemove = evictableEntries.slice(0, removeCount);
552 for (const [key] of entriesToRemove) {
553 this.cache.delete(key);
554 }
555
556 if (!isProduction) {
557 console.log(`Pruned ${removeCount} oldest non-priority items from cache`);
558 }
559 }
560
561 /**
562 * Runs a full garbage collection cycle to clean expired entries
563 * and optimize memory usage
564 */
565 private runGarbageCollection(): void {
566 const now = Math.floor(Date.now() / 1000);
567 let expiredCount = 0;
568
569 // Clean expired entries
570 for (const [key, item] of this.cache.entries()) {
571 if (item.expiresAt <= now && !this.priorityKeys.has(key)) {
572 this.cache.delete(key);
573 expiredCount++;
574 }
575 }
576
577 if (!isProduction && expiredCount > 0) {
578 console.log(`GC: Removed ${expiredCount} expired entries`);
579 }
580
581 this.lastGC = Date.now();
582 }
583
584 // Get cache stats for monitoring
585 getStats(): {
586 size: number;
587 keys: string[];
588 registeredKeys: string[];
589 requestRate: number;
590 hitRate: number;
591 } {
592 const elapsedSeconds = (Date.now() - this.lastCounterReset) / 1000;
593 const requestRate =
594 elapsedSeconds > 0 ? this.requestCounter / elapsedSeconds : 0;
595
596 const totalRequests = this.hits + this.misses;
597 const hitRate = totalRequests > 0 ? this.hits / totalRequests : 0;
598
599 return {
600 size: this.cache.size,
601 keys: Array.from(this.cache.keys()),
602 registeredKeys: Array.from(this.queryRegistry.keys()),
603 requestRate: Math.round(requestRate * 100) / 100,
604 hitRate: Math.round(hitRate * 100) / 100,
605 };
606 }
607}
608
// Pre-prepared error response body to avoid re-serializing on every failure
const ERROR_RESPONSE = JSON.stringify({
  error: "An error occurred processing your request",
  code: "INTERNAL_SERVER_ERROR",
});

// Create a global (module-singleton) cache instance with optimized settings
export const queryCache = new QueryCache(
  // Default TTL of 10 minutes in production, 5 minutes in dev
  isProduction ? 600 : 300,
  // Larger cache size to reduce evictions
  isProduction ? 1500 : 800,
);
622
623/**
624 * Factory function for creating consistent API endpoint handlers
625 * @param cacheKey Key for caching the response
626 * @param queryFn Function that performs the actual database query
627 * @param ttl Cache TTL in seconds
628 * @param isPriority Whether this endpoint should have priority in cache
629 */
630export function createCachedEndpoint<T>(
631 cacheKey: string,
632 queryFn: () => Promise<T>,
633 ttl = 300,
634 isPriority = false,
635) {
636 // Register the query function for cache warming with priority flag
637 // Set frequently accessed endpoints as priority by default
638 const defaultToPriority = cacheKey === "leaderboard_stories" || isPriority;
639 queryCache.register(cacheKey, queryFn, ttl, defaultToPriority);
640
641 // Prepare common response headers
642 const errorHeaders = {
643 "Content-Type": "application/json",
644 "Cache-Control": "no-cache, no-store",
645 "X-Error": "true",
646 };
647
648 // Pre-build common responses for reuse
649 const errorResponse = new Response(ERROR_RESPONSE, {
650 status: 500,
651 headers: errorHeaders,
652 });
653
654 return async (request: Request) => {
655 // Start request timing for potential performance monitoring
656 const requestStart = isProduction ? 0 : performance.now();
657
658 try {
659 // Get data from cache or execute query first
660 const data = await queryCache.get(cacheKey, queryFn, ttl);
661
662 // Generate data-based ETag for better validation
663 const headers = createCacheHeaders(cacheKey, ttl, data);
664
665 // Check client ETag after we have our data
666 const clientETag = request.headers.get("if-none-match");
667
668 // Return 304 if client's ETag matches our data-based ETag
669 if (clientETag && clientETag === headers.ETag) {
670 return new Response(null, {
671 status: 304,
672 headers: {
673 ETag: headers.ETag,
674 "Cache-Control": headers["Cache-Control"] as string,
675 },
676 });
677 }
678
679 // Add server timing header in development
680 if (!isProduction && requestStart > 0) {
681 const requestTime = Math.round(performance.now() - requestStart);
682 headers["Server-Timing"] = `cache;dur=${requestTime}`;
683 }
684
685 const response = new Response(JSON.stringify(data), { headers });
686
687 // Apply compression and return
688 return compressResponse(request, response);
689 } catch (error) {
690 // Minimal logging in production
691 if (!isProduction) {
692 console.error(`Error in endpoint ${cacheKey}:`, error);
693 }
694
695 // Report to Sentry without blocking
696 Sentry.captureException(error);
697
698 // Return pre-built error response
699 return errorResponse.clone();
700 }
701 };
702}