//! Edge-case tests for capabilities, IPC (endpoints/notifications), and the
//! process table: queue overflow, revocation, destruction cleanup, priorities.
1use crate::cap::cnode;
2use crate::cap::object::ObjectTag;
3use crate::cap::ops;
4use crate::cap::pool::POOL;
5use crate::cap::table::{CapRef, Rights};
6use crate::error::KernelError;
7use crate::ipc::{IpcOutcome, endpoint, notification};
8use crate::proc::context::IpcMessage;
9use crate::proc::{BlockedReason, PROCESSES, ProcessState};
10use crate::tests::helpers::{alloc_endpoint_cap, alloc_notification_cap};
11use crate::types::Priority;
12use lancer_core::object_layout::{EndpointObject, NotificationObject};
13
// Maximum number of processes a notification object can queue as waiters.
// NOTE(review): this mirrors a kernel-side limit — the overflow test below
// assumes the 5th concurrent waiter is rejected; TODO confirm it stays in
// sync with the notification implementation.
const MAX_NOTIFICATION_WAITERS: usize = 4;
15
16crate::kernel_test!(
17 fn notification_waiter_overflow() {
18 let (id, generation, cap) = alloc_notification_cap();
19 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
20 let mut ptable = PROCESSES.lock();
21
22 let pids: [crate::types::Pid; 5] = core::array::from_fn(|_| {
23 let created = ptable.allocate(&mut allocator).expect("alloc");
24 ptable.start(created).expect("start");
25 let pid = created.pid();
26 ptable.simulate_dispatch(pid);
27 pid
28 });
29
30 (0..MAX_NOTIFICATION_WAITERS).for_each(|i| {
31 let result = notification::do_wait(&cap, pids[i], &mut ptable);
32 assert!(
33 matches!(result, Ok(IpcOutcome::Blocked)),
34 "waiter {} should block",
35 i
36 );
37 });
38
39 let overflow_result = notification::do_wait(&cap, pids[4], &mut ptable);
40 assert!(
41 matches!(overflow_result, Err(KernelError::ResourceExhausted)),
42 "5th waiter should fail with ResourceExhausted"
43 );
44 assert!(
45 ptable[pids[4]].state() != ProcessState::Blocked,
46 "5th process should NOT be left in Blocked state after rollback"
47 );
48
49 pids.iter().for_each(|&pid| {
50 ptable.destroy(pid, &mut allocator);
51 });
52 let _ = POOL.lock().dec_ref_phys(id, generation);
53 }
54);
55
56crate::kernel_test!(
57 fn signal_zero_bits_preserves_word() {
58 let (id, generation, cap) = alloc_notification_cap();
59 let mut ptable = PROCESSES.lock();
60
61 {
62 let mut pool = POOL.lock();
63 let notif = pool
64 .write_as::<NotificationObject>(id, generation)
65 .expect("write notification");
66 notif.word = 0x0F;
67 }
68
69 notification::do_signal(&cap, 0x00, &mut ptable).expect("signal zero bits");
70
71 let pool = POOL.lock();
72 let notif = pool
73 .read_as::<NotificationObject>(id, generation)
74 .expect("read notification");
75 assert!(
76 notif.word == 0x0F,
77 "word should be preserved after signal(0)"
78 );
79 drop(pool);
80 drop(ptable);
81 let _ = POOL.lock().dec_ref_phys(id, generation);
82 }
83);
84
85crate::kernel_test!(
86 fn signal_u64_max_sets_all_bits() {
87 let (id, generation, cap) = alloc_notification_cap();
88 let mut ptable = PROCESSES.lock();
89
90 notification::do_signal(&cap, u64::MAX, &mut ptable).expect("signal max");
91
92 let pool = POOL.lock();
93 let notif = pool
94 .read_as::<NotificationObject>(id, generation)
95 .expect("read notification");
96 assert!(
97 notif.word == u64::MAX,
98 "word should be u64::MAX after signal(u64::MAX)"
99 );
100 drop(pool);
101 drop(ptable);
102 let _ = POOL.lock().dec_ref_phys(id, generation);
103 }
104);
105
crate::kernel_test!(
    fn revoke_unblocks_mixed_caller_and_sender() {
        // Scenario: an owner holds an endpoint cap; one process is blocked
        // Sending on the endpoint and another is blocked Calling on it.
        // Revoking the cap must wake both with an InvalidObject error in rax.
        let mut allocator = crate::mem::phys::BitmapFrameAllocator;
        let mut ptable = PROCESSES.lock();

        // Three live processes: the cap owner plus the two blocked parties.
        let owner_created = ptable.allocate(&mut allocator).expect("alloc owner");
        let sender_created = ptable.allocate(&mut allocator).expect("alloc sender");
        let caller_created = ptable.allocate(&mut allocator).expect("alloc caller");
        ptable.start(owner_created).expect("start owner");
        ptable.start(sender_created).expect("start sender");
        ptable.start(caller_created).expect("start caller");
        let owner_pid = owner_created.pid();
        let sender_pid = sender_created.pid();
        let caller_pid = caller_created.pid();

        // Give the owner a CNode so it can hold the endpoint capability.
        crate::tests::helpers::bootstrap_test_cnode(owner_pid, &mut ptable);

        // Install an all-rights endpoint cap at CNode address 0.
        let address = 0u64;
        let (cnode_id, cnode_gen, depth, gv, gb) =
            cnode::cnode_coords(owner_pid, &ptable).expect("cnode coords");
        let (ep_id, ep_gen) = {
            // lock_after: presumably enforces POOL-after-PROCESSES lock
            // ordering while `ptable` is held — TODO confirm.
            let mut pool = POOL.lock_after(&ptable);
            let (ep_id, ep_gen) =
                crate::tests::helpers::alloc_endpoint(&mut pool).expect("alloc ep");
            let cap = CapRef::new(ObjectTag::Endpoint, ep_id, Rights::ALL, ep_gen);
            cnode::resolve_and_insert(&pool, cnode_id, cnode_gen, address, depth, gv, gb, cap)
                .expect("insert cap");
            (ep_id, ep_gen)
        };

        // Block the sender on the endpoint with a pending message (0x5E tag).
        ptable.simulate_dispatch(sender_pid);
        ptable.exec_mut(sender_pid).unwrap().ipc_message =
            IpcMessage::from_regs([0x5E, 0, 0, 0, 0, 0]);
        let sender_blocked = ptable[sender_pid]
            .block_on(BlockedReason::Sending(ep_id, ep_gen))
            .expect("block sender");

        // Block the caller on the same endpoint (call semantics, 0xCA tag).
        ptable.simulate_dispatch(caller_pid);
        ptable.exec_mut(caller_pid).unwrap().ipc_message =
            IpcMessage::from_regs([0xCA, 0, 0, 0, 0, 0]);
        let caller_blocked = ptable[caller_pid]
            .block_on(BlockedReason::Calling(ep_id, ep_gen))
            .expect("block caller");

        // Manually place both blocked processes on the endpoint's sender
        // queue: load the queue snapshot, enqueue, then re-fetch the object
        // and store the updated queue (the second write_as is needed because
        // enqueue borrows `ptable` mutably in between).
        {
            let mut pool = POOL.lock_after(&ptable);
            let ep = pool
                .write_as::<EndpointObject>(ep_id, ep_gen)
                .expect("get ep");
            let mut senders = endpoint::load_senders(ep);
            endpoint::enqueue(&mut senders, sender_blocked, &mut ptable).expect("enqueue sender");
            endpoint::enqueue(&mut senders, caller_blocked, &mut ptable).expect("enqueue caller");
            let ep = pool
                .write_as::<EndpointObject>(ep_id, ep_gen)
                .expect("get ep for store");
            endpoint::store_senders(ep, &senders);
        }

        // Revoke the endpoint cap through the owner's CNode.
        ops::revoke_via_cnode(owner_pid, address, &mut ptable).expect("revoke");

        // Both queued processes must be runnable again with the syscall
        // error code (InvalidObject) delivered in their saved rax.
        assert!(
            ptable[sender_pid].state() == ProcessState::Ready,
            "sender should be unblocked after revoke"
        );
        assert!(
            ptable.exec(sender_pid).unwrap().saved_context.rax
                == KernelError::InvalidObject.to_errno() as u64,
            "sender rax should be InvalidObject errno"
        );
        assert!(
            ptable[caller_pid].state() == ProcessState::Ready,
            "caller should be unblocked after revoke"
        );
        assert!(
            ptable.exec(caller_pid).unwrap().saved_context.rax
                == KernelError::InvalidObject.to_errno() as u64,
            "caller rax should be InvalidObject errno"
        );

        // Cleanup: destroying the processes tears down their caps/objects.
        ptable.destroy(owner_pid, &mut allocator);
        ptable.destroy(sender_pid, &mut allocator);
        ptable.destroy(caller_pid, &mut allocator);
    }
);
190
191crate::kernel_test!(
192 fn revoke_empty_slot_returns_error() {
193 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
194 let mut ptable = PROCESSES.lock();
195
196 let created = ptable.allocate(&mut allocator).expect("alloc");
197 ptable.start(created).expect("start");
198 let pid = created.pid();
199 crate::tests::helpers::bootstrap_test_cnode(pid, &mut ptable);
200
201 let result = ops::revoke_via_cnode(pid, 100, &mut ptable);
202 assert!(
203 matches!(result, Err(KernelError::SlotEmpty)),
204 "revoke on empty slot should return SlotEmpty"
205 );
206
207 ptable.destroy(pid, &mut allocator);
208 }
209);
210
211crate::kernel_test!(
212 fn double_destroy_is_idempotent() {
213 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
214 let mut ptable = PROCESSES.lock();
215
216 let created = ptable.allocate(&mut allocator).expect("alloc");
217 ptable.start(created).expect("start");
218 let pid = created.pid();
219
220 ptable.destroy(pid, &mut allocator);
221 ptable.destroy(pid, &mut allocator);
222
223 assert!(
224 ptable.get(pid).is_none(),
225 "slot should be freed after double destroy"
226 );
227 }
228);
229
crate::kernel_test!(
    fn destroy_cleans_up_endpoint_queues() {
        // Scenario: a process is queued as a sender on an endpoint; when that
        // process is destroyed, the endpoint's sender queue must not retain a
        // dangling entry for it.
        let (ep_id, ep_gen, cap) = alloc_endpoint_cap();
        let mut allocator = crate::mem::phys::BitmapFrameAllocator;
        let mut ptable = PROCESSES.lock();

        // Two processes: the one we will destroy, and an owner holding a cap
        // to the endpoint so the object stays referenced.
        let sender_created = ptable.allocate(&mut allocator).expect("alloc sender");
        let owner_created = ptable.allocate(&mut allocator).expect("alloc owner");
        ptable.start(sender_created).expect("start sender");
        ptable.start(owner_created).expect("start owner");
        let sender_pid = sender_created.pid();
        let owner_pid = owner_created.pid();

        // Install the endpoint cap at address 0 in the owner's CNode.
        crate::tests::helpers::bootstrap_test_cnode(owner_pid, &mut ptable);
        let (cnode_id, cnode_gen, depth, gv, gb) =
            cnode::cnode_coords(owner_pid, &ptable).expect("cnode coords");
        {
            // lock_after: presumably enforces POOL-after-PROCESSES lock
            // ordering while `ptable` is held — TODO confirm.
            let pool = POOL.lock_after(&ptable);
            cnode::resolve_and_insert(&pool, cnode_id, cnode_gen, 0, depth, gv, gb, cap)
                .expect("insert");
        }

        // Block the sender on the endpoint with a pending message (0xDE tag).
        ptable.simulate_dispatch(sender_pid);
        ptable.exec_mut(sender_pid).unwrap().ipc_message =
            IpcMessage::from_regs([0xDE, 0, 0, 0, 0, 0]);
        let blocked = ptable[sender_pid]
            .block_on(BlockedReason::Sending(ep_id, ep_gen))
            .expect("block sender");

        // Manually enqueue the blocked sender: load the queue snapshot,
        // enqueue, then re-fetch the object and store the updated queue (the
        // second write_as is needed because enqueue mutably borrows `ptable`
        // in between).
        {
            let mut pool = POOL.lock_after(&ptable);
            let ep = pool
                .write_as::<EndpointObject>(ep_id, ep_gen)
                .expect("get ep");
            let mut senders = endpoint::load_senders(ep);
            endpoint::enqueue(&mut senders, blocked, &mut ptable).expect("enqueue");
            let ep = pool
                .write_as::<EndpointObject>(ep_id, ep_gen)
                .expect("get ep for store");
            endpoint::store_senders(ep, &senders);
        }

        // Destroy the queued sender; this must scrub it from the endpoint.
        ptable.destroy(sender_pid, &mut allocator);

        // Verify the endpoint's sender queue is empty afterwards.
        {
            let pool = POOL.lock_after(&ptable);
            let ep = pool
                .read_as::<EndpointObject>(ep_id, ep_gen)
                .expect("read ep");
            let senders = endpoint::load_senders(ep);
            assert!(
                senders.is_empty(),
                "endpoint sender queue should be empty after destroying the sender"
            );
        }

        // Destroying the owner releases its cap (and thus the endpoint).
        ptable.destroy(owner_pid, &mut allocator);
    }
);
289
290crate::kernel_test!(
291 fn process_table_exhaustion() {
292 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
293 let mut ptable = PROCESSES.lock();
294
295 let mut pids = crate::static_vec::StaticVec::<crate::types::Pid, 64>::new();
296 let mut count = 0usize;
297 (0..crate::types::MAX_PIDS).for_each(|_| {
298 if let Some(created) = ptable.allocate(&mut allocator) {
299 ptable.start(created).expect("start");
300 let _ = pids.push(created.pid());
301 count += 1;
302 }
303 });
304 assert!(count > 0, "should have allocated at least 1 process");
305
306 let overflow = ptable.allocate(&mut allocator);
307 assert!(
308 overflow.is_none(),
309 "should return None when process table is full"
310 );
311
312 pids.as_slice().iter().for_each(|&pid| {
313 ptable.destroy(pid, &mut allocator);
314 });
315 }
316);
317
318crate::kernel_test!(
319 fn block_already_blocked_fails() {
320 let (ep_id, ep_gen, _cap) = alloc_endpoint_cap();
321 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
322 let mut ptable = PROCESSES.lock();
323
324 let created = ptable.allocate(&mut allocator).expect("alloc");
325 ptable.start(created).expect("start");
326 let pid = created.pid();
327 ptable.simulate_dispatch(pid);
328
329 let _blocked = ptable[pid]
330 .block_on(BlockedReason::Sending(ep_id, ep_gen))
331 .expect("first block");
332
333 let second = ptable[pid].block_on(BlockedReason::Receiving(ep_id, ep_gen));
334 assert!(
335 matches!(second, Err(KernelError::BadState)),
336 "blocking an already-blocked process should fail with BadState"
337 );
338
339 ptable.destroy(pid, &mut allocator);
340 let _ = POOL.lock().dec_ref_phys(ep_id, ep_gen);
341 }
342);
343
344crate::kernel_test!(
345 fn revoke_without_revoke_right_fails() {
346 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
347 let mut ptable = PROCESSES.lock();
348
349 let created = ptable.allocate(&mut allocator).expect("alloc");
350 ptable.start(created).expect("start");
351 let pid = created.pid();
352 crate::tests::helpers::bootstrap_test_cnode(pid, &mut ptable);
353
354 let address = 50u64;
355 let (cnode_id, cnode_gen, depth, gv, gb) =
356 cnode::cnode_coords(pid, &ptable).expect("cnode coords");
357 {
358 let mut pool = POOL.lock_after(&ptable);
359 ops::create_via_cnode(
360 &mut pool,
361 cnode_id,
362 cnode_gen,
363 address,
364 depth,
365 gv,
366 gb,
367 ObjectTag::Endpoint,
368 )
369 .expect("create endpoint");
370 }
371
372 {
373 let pool = POOL.lock_after(&ptable);
374 let old_cap =
375 cnode::resolve_and_clear(&pool, cnode_id, cnode_gen, address, depth, gv, gb)
376 .expect("clear slot");
377 let modified = old_cap.with_rights(Rights::READ | Rights::WRITE);
378 cnode::resolve_and_insert(&pool, cnode_id, cnode_gen, address, depth, gv, gb, modified)
379 .expect("re-insert");
380 }
381
382 let result = ops::revoke_via_cnode(pid, address, &mut ptable);
383 assert!(
384 matches!(result, Err(KernelError::InsufficientRights)),
385 "revoke without REVOKE right should fail"
386 );
387
388 ptable.destroy(pid, &mut allocator);
389 }
390);
391
392crate::kernel_test!(
393 fn derive_self_slot_returns_occupied() {
394 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
395 let mut ptable = PROCESSES.lock();
396
397 let created = ptable.allocate(&mut allocator).expect("alloc");
398 ptable.start(created).expect("start");
399 let pid = created.pid();
400 crate::tests::helpers::bootstrap_test_cnode(pid, &mut ptable);
401
402 let address = 60u64;
403 let (cnode_id, cnode_gen, depth, gv, gb) =
404 cnode::cnode_coords(pid, &ptable).expect("cnode coords");
405 {
406 let mut pool = POOL.lock_after(&ptable);
407 ops::create_via_cnode(
408 &mut pool,
409 cnode_id,
410 cnode_gen,
411 address,
412 depth,
413 gv,
414 gb,
415 ObjectTag::Endpoint,
416 )
417 .expect("create source");
418 }
419
420 {
421 let mut pool = POOL.lock_after(&ptable);
422 let result = ops::derive_via_cnode(
423 &mut pool,
424 cnode_id,
425 cnode_gen,
426 address,
427 address,
428 depth,
429 gv,
430 gb,
431 Rights::READ,
432 );
433 assert!(
434 matches!(result, Err(KernelError::SlotOccupied)),
435 "derive into same slot should return SlotOccupied, got {:?}",
436 result
437 );
438 }
439
440 {
441 let pool = POOL.lock_after(&ptable);
442 let cap = cnode::resolve_and_read(&pool, cnode_id, cnode_gen, address, depth, gv, gb)
443 .expect("read slot");
444 assert!(
445 cap.tag() == ObjectTag::Endpoint,
446 "original cap should still be intact"
447 );
448 }
449
450 ptable.destroy(pid, &mut allocator);
451 }
452);
453
454crate::kernel_test!(
455 fn identify_after_revoke_fails() {
456 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
457 let mut ptable = PROCESSES.lock();
458
459 let created = ptable.allocate(&mut allocator).expect("alloc");
460 ptable.start(created).expect("start");
461 let pid = created.pid();
462 crate::tests::helpers::bootstrap_test_cnode(pid, &mut ptable);
463
464 let address = 70u64;
465 let (cnode_id, cnode_gen, depth, gv, gb) =
466 cnode::cnode_coords(pid, &ptable).expect("cnode coords");
467 {
468 let mut pool = POOL.lock_after(&ptable);
469 ops::create_via_cnode(
470 &mut pool,
471 cnode_id,
472 cnode_gen,
473 address,
474 depth,
475 gv,
476 gb,
477 ObjectTag::Notification,
478 )
479 .expect("create notification");
480 }
481
482 ops::revoke_via_cnode(pid, address, &mut ptable).expect("revoke");
483
484 {
485 let pool = POOL.lock_after(&ptable);
486 let result =
487 ops::identify_via_cnode(&pool, cnode_id, cnode_gen, address, depth, gv, gb);
488 assert!(
489 matches!(result, Err(KernelError::SlotEmpty)),
490 "identify after revoke should return SlotEmpty"
491 );
492 }
493
494 ptable.destroy(pid, &mut allocator);
495 }
496);
497
498crate::kernel_test!(
499 fn priority_boost_only_increases() {
500 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
501 let mut ptable = PROCESSES.lock();
502
503 let created = ptable.allocate(&mut allocator).expect("alloc");
504 ptable.start(created).expect("start");
505 let pid = created.pid();
506
507 ptable[pid].boost_effective_priority(Priority::new(200));
508 assert!(
509 ptable[pid].effective_priority() == Priority::new(200),
510 "priority should be boosted to 200"
511 );
512
513 ptable[pid].boost_effective_priority(Priority::new(150));
514 assert!(
515 ptable[pid].effective_priority() == Priority::new(200),
516 "lower boost should not decrease effective priority"
517 );
518
519 ptable.destroy(pid, &mut allocator);
520 }
521);
522
523crate::kernel_test!(
524 fn reset_effective_restores_base() {
525 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
526 let mut ptable = PROCESSES.lock();
527
528 let created = ptable.allocate(&mut allocator).expect("alloc");
529 ptable.start(created).expect("start");
530 let pid = created.pid();
531
532 let base = ptable[pid].effective_priority();
533 ptable[pid].boost_effective_priority(Priority::new(200));
534 assert!(
535 ptable[pid].effective_priority() == Priority::new(200),
536 "should be boosted"
537 );
538
539 ptable[pid].reset_effective_priority();
540 assert!(
541 ptable[pid].effective_priority() == base,
542 "reset should restore base priority"
543 );
544
545 ptable.destroy(pid, &mut allocator);
546 }
547);
548
549crate::kernel_test!(
550 fn reply_target_cleared_on_destroy() {
551 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
552 let mut ptable = PROCESSES.lock();
553
554 let a_created = ptable.allocate(&mut allocator).expect("alloc A");
555 let b_created = ptable.allocate(&mut allocator).expect("alloc B");
556 ptable.start(a_created).expect("start A");
557 ptable.start(b_created).expect("start B");
558 let a_pid = a_created.pid();
559 let b_pid = b_created.pid();
560
561 ptable.exec_mut(a_pid).unwrap().reply_target = Some(b_pid);
562
563 ptable.destroy(b_pid, &mut allocator);
564
565 assert!(
566 ptable.exec(a_pid).unwrap().reply_target.is_none(),
567 "reply_target for pid {} should be None after target pid {} was destroyed",
568 a_pid.raw(),
569 b_pid.raw()
570 );
571
572 ptable.destroy(a_pid, &mut allocator);
573 }
574);