// Ollama resource-usage benchmark for the Aesthetic Computer monorepo (aesthetic.computer).
1import { MongoClient } from 'mongodb';
2import { exec } from 'child_process';
3import { promisify } from 'util';
4
// Promisified exec so process stats can be gathered with async/await.
const execAsync = promisify(exec);

// Mongo connection string, read from the environment (may be undefined if unset).
const MONGODB_URI = process.env.MONGODB_CONNECTION_STRING;
// Local Ollama HTTP endpoint (non-streaming /api/generate).
const OLLAMA_API = 'http://localhost:11434/api/generate';
// Model used for the PG-13 moderation check.
const MODEL = 'gemma2:2b';
// How many random chat messages to sample from MongoDB for the benchmark run.
const MESSAGE_COUNT = 200;

// ANSI colors
const colors = {
  reset: '\x1b[0m',
  bright: '\x1b[1m',
  cyan: '\x1b[36m',
  yellow: '\x1b[33m',
  green: '\x1b[32m',
  red: '\x1b[31m',
  magenta: '\x1b[35m',
};
22
// Get Ollama process stats.
// Resolves to { pid, rss, rssGB, cpu } for the running `ollama serve` process.
// rss is in KB (as reported by ps), rssGB is a pre-formatted string.
// On any failure (process missing, pgrep/ps error) resolves to
// { rss: 0, rssGB: '0.00', cpu: 0, error } — rssGB is always present so
// callers can interpolate it without an undefined check.
async function getOllamaStats() {
  try {
    // Get PID of ollama serve process (first match only).
    const { stdout: psOut } = await execAsync("pgrep -f 'ollama serve' | head -1");
    const pid = psOut.trim();

    if (!pid) {
      return { rss: 0, rssGB: '0.00', cpu: 0, error: 'Ollama process not found' };
    }

    // Get memory and CPU usage using ps (--no-headers is GNU ps / Linux).
    const { stdout } = await execAsync(`ps -p ${pid} -o rss,pcpu --no-headers`);
    const [rss, cpu] = stdout.trim().split(/\s+/);
    const rssKB = Number.parseInt(rss, 10);

    return {
      pid: Number.parseInt(pid, 10),
      rss: rssKB, // RSS in KB
      rssGB: (rssKB / 1024 / 1024).toFixed(2), // Convert to GB
      cpu: Number.parseFloat(cpu),
    };
  } catch (error) {
    return { rss: 0, rssGB: '0.00', cpu: 0, error: error.message };
  }
}
48
// PG-13 content filter prompt sent ahead of every message.
// NOTE: this template literal is the runtime prompt itself — editing the text
// below changes the model's moderation behavior, not just documentation.
const systemPrompt = `ALWAYS REPLY 't' FOR URLS (http://, https://, www., or domain.tld patterns).

You are a PG-13 content filter for a chat room. Reply ONLY with 't' (true/allow) or 'f' (false/block).

Block (reply 'f') if the message contains:
- Sexual content or innuendo
- Body functions (bathroom humor, gross-out content)
- Profanity or explicit language
- Violence or threats
- Drug references
- Hate speech or slurs

Allow (reply 't') for:
- Normal conversation
- URLs and links
- Questions and discussions
- Greetings and casual chat

Reply with just 't' or 'f', nothing else.`;
69
// Classify one chat message through the local Ollama model.
// @param {string} message - raw chat text to moderate.
// @returns {{ decision: 't'|'f', responseTime: number }} decision plus the
//   model-reported total duration converted from nanoseconds to seconds.
// @throws {Error} when the Ollama HTTP request fails (non-2xx response).
async function testMessage(message) {
  const prompt = `${systemPrompt}\n\nMessage: "${message}"\n\nReply (t/f):`;

  const response = await fetch(OLLAMA_API, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: MODEL,
      prompt: prompt,
      stream: false,
    }),
  });

  // Ollama error bodies carry no `response` field; fail loudly instead of
  // crashing later with an opaque TypeError on `.trim()`.
  if (!response.ok) {
    throw new Error(`Ollama request failed: HTTP ${response.status}`);
  }

  const data = await response.json();
  const result = (data.response ?? '').trim().toLowerCase();

  return {
    // The model is instructed to answer with a bare 't' or 'f'; match only the
    // leading character. The previous `includes('t')` treated ANY reply
    // containing a 't' anywhere (e.g. "f - can't allow this") as an allow.
    decision: result.startsWith('t') ? 't' : 'f',
    responseTime: data.total_duration / 1e9, // Convert ns to seconds
  };
}
91
// Entry point: sample MESSAGE_COUNT random chat messages from MongoDB, run
// each through the Ollama PG-13 filter, and report pass/block counts,
// throughput, and the `ollama serve` process's RAM/CPU usage over the run.
async function main() {
  console.log(`${colors.bright}${colors.cyan}=== Ollama Resource Usage Test ===${colors.reset}\n`);
  console.log(`Model: ${colors.yellow}${MODEL}${colors.reset}`);
  console.log(`Messages: ${colors.yellow}${MESSAGE_COUNT}${colors.reset}\n`);

  // Get baseline stats before starting
  console.log(`${colors.cyan}📊 Baseline Stats${colors.reset}`);
  const baselineStats = await getOllamaStats();
  if (baselineStats.error) {
    console.log(`${colors.red}Error: ${baselineStats.error}${colors.reset}`);
    return;
  }
  console.log(` PID: ${baselineStats.pid}`);
  console.log(` RAM: ${colors.yellow}${baselineStats.rssGB} GB${colors.reset} (${baselineStats.rss.toLocaleString()} KB)`);
  console.log(` CPU: ${colors.yellow}${baselineStats.cpu}%${colors.reset}\n`);

  // Fail fast with a readable message when the env var is missing, instead of
  // letting MongoClient throw a cryptic "uri must be a string" error.
  if (!MONGODB_URI) {
    console.log(`${colors.red}Error: MONGODB_CONNECTION_STRING is not set${colors.reset}`);
    return;
  }

  // Connect to MongoDB
  const client = new MongoClient(MONGODB_URI);
  await client.connect();

  try {
    const db = client.db('aesthetic');
    const collection = db.collection('chat-system');

    // Get random messages
    const messages = await collection
      .aggregate([{ $sample: { size: MESSAGE_COUNT } }])
      .toArray();

    console.log(`${colors.cyan}🚀 Starting test with ${MESSAGE_COUNT} messages...${colors.reset}\n`);

    const stats = {
      total: 0,
      passed: 0,
      failed: 0,
      totalResponseTime: 0,
      peakRSS: baselineStats.rss,
      peakCPU: baselineStats.cpu,
      samples: [], // resource snapshots, taken every 10 messages
    };

    const startTime = Date.now();

    for (let i = 0; i < messages.length; i++) {
      const msg = messages[i];
      const text = msg.text || '';

      // Test the message
      const result = await testMessage(text);
      stats.total++;
      stats.totalResponseTime += result.responseTime;

      if (result.decision === 't') {
        stats.passed++;
      } else {
        stats.failed++;
      }

      // Sample resource usage every 10 messages
      if (i % 10 === 0 || i === messages.length - 1) {
        const currentStats = await getOllamaStats();
        stats.samples.push({
          messageNum: i + 1,
          rss: currentStats.rss,
          cpu: currentStats.cpu,
        });

        if (currentStats.rss > stats.peakRSS) stats.peakRSS = currentStats.rss;
        if (currentStats.cpu > stats.peakCPU) stats.peakCPU = currentStats.cpu;

        // Progress indicator
        const progress = ((i + 1) / messages.length * 100).toFixed(1);
        const bar = '█'.repeat(Math.floor(progress / 5)) + '░'.repeat(20 - Math.floor(progress / 5));
        process.stdout.write(`\r${colors.cyan}Progress:${colors.reset} [${bar}] ${progress}% (${i + 1}/${messages.length}) - RAM: ${currentStats.rssGB}GB, CPU: ${currentStats.cpu}%`);
      }
    }

    const endTime = Date.now();
    const totalTime = (endTime - startTime) / 1000;

    console.log(`\n\n${colors.bright}${colors.green}=== Test Complete ===${colors.reset}\n`);

    // Results summary
    console.log(`${colors.cyan}📈 Processing Stats${colors.reset}`);
    console.log(` Total messages: ${stats.total}`);
    console.log(` Passed (allowed): ${colors.green}${stats.passed}${colors.reset}`);
    console.log(` Failed (blocked): ${colors.red}${stats.failed}${colors.reset}`);
    console.log(` Total time: ${colors.yellow}${totalTime.toFixed(2)}s${colors.reset}`);
    console.log(` Avg time/message: ${colors.yellow}${(stats.totalResponseTime / stats.total).toFixed(3)}s${colors.reset}`);
    console.log(` Throughput: ${colors.yellow}${(stats.total / totalTime).toFixed(2)} msg/s${colors.reset}\n`);

    // Resource usage summary
    console.log(`${colors.cyan}💾 Resource Usage${colors.reset}`);
    console.log(` Baseline RAM: ${colors.yellow}${baselineStats.rssGB} GB${colors.reset}`);
    console.log(` Peak RAM: ${colors.yellow}${(stats.peakRSS / 1024 / 1024).toFixed(2)} GB${colors.reset} (${colors.magenta}+${((stats.peakRSS - baselineStats.rss) / 1024 / 1024).toFixed(2)} GB${colors.reset})`);
    console.log(` Peak CPU: ${colors.yellow}${stats.peakCPU.toFixed(1)}%${colors.reset}\n`);

    // Final stats
    const finalStats = await getOllamaStats();
    console.log(`${colors.cyan}📊 Final Stats${colors.reset}`);
    console.log(` Current RAM: ${colors.yellow}${finalStats.rssGB} GB${colors.reset}`);
    console.log(` Current CPU: ${colors.yellow}${finalStats.cpu}%${colors.reset}\n`);

    // Show resource usage over time
    console.log(`${colors.cyan}📉 Resource Usage Over Time${colors.reset}`);
    console.log(` ${'Message'.padEnd(10)} ${'RAM (GB)'.padEnd(10)} ${'CPU (%)'.padEnd(10)}`);
    console.log(` ${'-'.repeat(35)}`);
    for (const sample of stats.samples) {
      const msgNum = `#${sample.messageNum}`.padEnd(10);
      const ram = (sample.rss / 1024 / 1024).toFixed(2).padEnd(10);
      const cpu = sample.cpu.toFixed(1).padEnd(10);
      console.log(` ${msgNum} ${colors.yellow}${ram}${colors.reset} ${colors.yellow}${cpu}${colors.reset}`);
    }
  } finally {
    // Always release the Mongo connection, even when a fetch or stat call
    // throws mid-run; previously an error leaked it and kept the process alive.
    await client.close();
  }
}
206
207main().catch(console.error);