// Hacker News alerts in Slack (incessant pings if you make the front page)
1import * as Sentry from "@sentry/bun";
2import { db } from "./db";
3import { queryCache } from "./cache";
4
// Check if we're in production mode to reduce logging
// (also tunes warming concurrency and stagger delays in preloadCaches below)
const isProduction = process.env.NODE_ENV === 'production';
7
8/**
9 * Proactively warms the cache by loading commonly accessed data using registered query functions
10 * Call this after cron jobs update the database or at server startup
11 */
12export async function preloadCaches(): Promise<void> {
13 const startTime = performance.now();
14 if (!isProduction) {
15 console.log("Preloading all caches for optimal performance...");
16 }
17
18 try {
19 // Get all registered cache keys
20 const registeredKeys = queryCache.getRegisteredKeys();
21
22 if (registeredKeys.length === 0) {
23 if (!isProduction) {
24 console.warn("No registered cache keys found. Cache warming skipped.");
25 }
26 return;
27 }
28
29 if (!isProduction) {
30 console.log(`Found ${registeredKeys.length} registered cache keys to warm`);
31 }
32
33 // Define critical, high and regular priority endpoints
34 const criticalKeys = [
35 "leaderboard_stories" // Most important endpoint - load first
36 ];
37
38 const highPriorityKeys = [
39 "total_stories_count",
40 "verified_users_stats"
41 ];
42
43 // Sort keys into priority tiers
44 const sortedCriticalKeys = criticalKeys.filter(key => registeredKeys.includes(key));
45 const sortedHighPriorityKeys = highPriorityKeys.filter(key => registeredKeys.includes(key));
46 const regularPriorityKeys = registeredKeys.filter(key =>
47 !criticalKeys.includes(key) && !highPriorityKeys.includes(key)
48 );
49
50 // Step 1: Load critical endpoints sequentially for most predictable performance
51 for (const key of sortedCriticalKeys) {
52 if (!isProduction) {
53 console.log(`Warming CRITICAL cache for ${key}...`);
54 }
55 await queryCache.warmCache(key);
56
57 // Register as priority in cache system to prevent eviction
58 const registration = queryCache.getQueryRegistration(key);
59 if (registration) {
60 queryCache.register(key, registration.fn, registration.ttl, true);
61 }
62 }
63
64 // Step 2: Load high priority keys in parallel
65 await Promise.all(sortedHighPriorityKeys.map(async (key) => {
66 if (!isProduction) {
67 console.log(`Warming HIGH PRIORITY cache for ${key}...`);
68 }
69 await queryCache.warmCache(key);
70
71 // Register these as priority too
72 const registration = queryCache.getQueryRegistration(key);
73 if (registration) {
74 queryCache.register(key, registration.fn, registration.ttl, true);
75 }
76 }));
77
78 // Step 3: For regular priority, use staggered loading with limited concurrency
79 const concurrencyLimit = isProduction ? 3 : 5;
80 const chunkSize = Math.min(concurrencyLimit, regularPriorityKeys.length);
81
82 // Process in smaller chunks to prevent overwhelming the database
83 for (let i = 0; i < regularPriorityKeys.length; i += chunkSize) {
84 const chunk = regularPriorityKeys.slice(i, i + chunkSize);
85 const chunkPromises = chunk.map(async (key) => {
86 if (!isProduction) {
87 console.log(`Warming regular cache for ${key}...`);
88 }
89 return queryCache.warmCache(key);
90 });
91
92 await Promise.all(chunkPromises);
93
94 // Small breather between chunks to avoid CPU spikes
95 if (i + chunkSize < regularPriorityKeys.length) {
96 await new Promise(resolve => setTimeout(resolve, isProduction ? 50 : 20));
97 }
98 }
99
100 // Preload snapshots for top stories - this requires custom handling
101 // since these use dynamic keys (story_snapshots_{id})
102 if (!isProduction) {
103 console.log("Preloading top story snapshots...");
104 }
105
106 // Get IDs of top 5 stories in production (more likely to be viewed), or top 3 in dev
107 const topStoriesLimit = isProduction ? 5 : 3;
108
109 // Use optimized query with only necessary columns
110 const topStories = await db.query.stories.findMany({
111 columns: { id: true }, // Only retrieve the ID field to minimize memory use
112 where: (stories, { eq }) => eq(stories.isOnLeaderboard, true),
113 orderBy: (stories, { asc }) => [asc(stories.position)],
114 limit: topStoriesLimit,
115 });
116
117 // Warm story snapshots with limited concurrency to prevent DB overload
118 for (const story of topStories) {
119 const snapshotKey = `story_snapshots_${story.id}`;
120 await queryCache.warmCache(snapshotKey);
121
122 // Short delay between each story to minimize server load spikes
123 if (story !== topStories[topStories.length - 1]) {
124 await new Promise(resolve => setTimeout(resolve, isProduction ? 100 : 50));
125 }
126 }
127
128 const totalTime = Math.round(performance.now() - startTime);
129 if (!isProduction) {
130 console.log(`Cache preloading completed successfully in ${totalTime}ms`);
131 }
132 } catch (error) {
133 console.error("Error during cache preloading:", error);
134 Sentry.captureException(error);
135 }
136}
137
138/**
139 * Invalidates all caches and then immediately reloads them
140 * Call this after data updates (like the HN check cron job)
141 */
142export function invalidateAndRefreshCaches(): void {
143 if (!isProduction) {
144 console.log("Invalidating all query caches and refreshing data");
145 }
146
147 // Don't invalidate everything - be selective to maintain performance
148 // First get a list of keys to invalidate (non-priority ones)
149 const allKeys = queryCache.getRegisteredKeys();
150 const nonPriorityKeys = queryCache.getNonPriorityKeys();
151
152 // Only invalidate non-priority keys first
153 for (const key of nonPriorityKeys) {
154 queryCache.invalidate(key);
155 }
156
157 // Gradually refresh caches with staggered starts
158 setTimeout(() => {
159 preloadCaches().catch((err) => {
160 console.error("Error during cache preloading after invalidation:", err);
161 Sentry.captureException(err);
162 });
163 }, isProduction ? 100 : 200); // Slightly longer delay to ensure system stability
164}