//! Capability-operation kernel tests: create, derive, identify, and revoke.
1use crate::cap::cnode;
2use crate::cap::ops;
3use crate::cap::pool::POOL;
4use crate::cap::table::{CapRef, Rights};
5use crate::error::KernelError;
6use crate::ipc::endpoint;
7use crate::proc::{BlockedReason, PROCESSES, ProcessState};
8use crate::tests::helpers;
9use crate::types::Priority;
10use lancer_core::header::KernelObjectHeader;
11use lancer_core::object_layout::{
12 EndpointObject, KernelObject, NotificationObject, ProcessObject, SchedContextObject,
13};
14use lancer_core::object_tag::ObjectTag;
15
16crate::kernel_test!(
17 fn create_endpoint_inserts_cap() {
18 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
19 let mut ptable = PROCESSES.lock();
20 let created = ptable.allocate(&mut allocator).expect("alloc");
21 ptable.start(created).expect("start");
22 let pid = created.pid();
23 helpers::bootstrap_test_cnode(pid, &mut ptable);
24
25 let address = 10u64;
26 let (cnode_id, cnode_gen, depth, gv, gb) =
27 cnode::cnode_coords(pid, &ptable).expect("coords");
28 let oid = {
29 let mut pool = POOL.lock_after(&ptable);
30 ops::create_via_cnode(
31 &mut pool,
32 cnode_id,
33 cnode_gen,
34 address,
35 depth,
36 gv,
37 gb,
38 ObjectTag::Endpoint,
39 )
40 .expect("create endpoint")
41 };
42
43 {
44 let pool = POOL.lock_after(&ptable);
45 let cap = cnode::resolve_and_read(&pool, cnode_id, cnode_gen, address, depth, gv, gb)
46 .expect("read slot");
47 assert!(cap.tag() == ObjectTag::Endpoint);
48 assert!(cap.rights().contains(Rights::ALL));
49 assert!(cap.phys() == oid);
50 }
51
52 ptable.destroy(pid, &mut allocator);
53 }
54);
55
56crate::kernel_test!(
57 fn create_notification_inserts_cap() {
58 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
59 let mut ptable = PROCESSES.lock();
60 let created = ptable.allocate(&mut allocator).expect("alloc");
61 ptable.start(created).expect("start");
62 let pid = created.pid();
63 helpers::bootstrap_test_cnode(pid, &mut ptable);
64
65 let address = 11u64;
66 let (cnode_id, cnode_gen, depth, gv, gb) =
67 cnode::cnode_coords(pid, &ptable).expect("coords");
68 let oid = {
69 let mut pool = POOL.lock_after(&ptable);
70 ops::create_via_cnode(
71 &mut pool,
72 cnode_id,
73 cnode_gen,
74 address,
75 depth,
76 gv,
77 gb,
78 ObjectTag::Notification,
79 )
80 .expect("create notification")
81 };
82
83 {
84 let pool = POOL.lock_after(&ptable);
85 let cap = cnode::resolve_and_read(&pool, cnode_id, cnode_gen, address, depth, gv, gb)
86 .expect("read slot");
87 assert!(cap.tag() == ObjectTag::Notification);
88 assert!(cap.phys() == oid);
89 }
90
91 ptable.destroy(pid, &mut allocator);
92 }
93);
94
95crate::kernel_test!(
96 fn create_invalid_tag_rejected() {
97 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
98 let mut ptable = PROCESSES.lock();
99 let created = ptable.allocate(&mut allocator).expect("alloc");
100 ptable.start(created).expect("start");
101 let pid = created.pid();
102 helpers::bootstrap_test_cnode(pid, &mut ptable);
103
104 let address = 13u64;
105 let (cnode_id, cnode_gen, depth, gv, gb) =
106 cnode::cnode_coords(pid, &ptable).expect("coords");
107 let result = {
108 let mut pool = POOL.lock_after(&ptable);
109 ops::create_via_cnode(
110 &mut pool,
111 cnode_id,
112 cnode_gen,
113 address,
114 depth,
115 gv,
116 gb,
117 ObjectTag::Process,
118 )
119 };
120 assert!(
121 matches!(result, Err(KernelError::InvalidType)),
122 "creating a Process cap via ops::create must fail"
123 );
124
125 ptable.destroy(pid, &mut allocator);
126 }
127);
128
129crate::kernel_test!(
130 fn create_occupied_slot_rolls_back() {
131 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
132 let mut ptable = PROCESSES.lock();
133 let created = ptable.allocate(&mut allocator).expect("alloc");
134 ptable.start(created).expect("start");
135 let pid = created.pid();
136 helpers::bootstrap_test_cnode(pid, &mut ptable);
137
138 let address = 14u64;
139 let (cnode_id, cnode_gen, depth, gv, gb) =
140 cnode::cnode_coords(pid, &ptable).expect("coords");
141 {
142 let mut pool = POOL.lock_after(&ptable);
143 ops::create_via_cnode(
144 &mut pool,
145 cnode_id,
146 cnode_gen,
147 address,
148 depth,
149 gv,
150 gb,
151 ObjectTag::Endpoint,
152 )
153 .expect("first create");
154 }
155
156 let result = {
157 let mut pool = POOL.lock_after(&ptable);
158 ops::create_via_cnode(
159 &mut pool,
160 cnode_id,
161 cnode_gen,
162 address,
163 depth,
164 gv,
165 gb,
166 ObjectTag::Notification,
167 )
168 };
169 assert!(
170 matches!(result, Err(KernelError::SlotOccupied)),
171 "second create into occupied slot must fail"
172 );
173
174 {
175 let pool = POOL.lock_after(&ptable);
176 let cap = cnode::resolve_and_read(&pool, cnode_id, cnode_gen, address, depth, gv, gb)
177 .expect("read slot");
178 assert!(
179 cap.tag() == ObjectTag::Endpoint,
180 "original cap should still be in the slot"
181 );
182 }
183
184 ptable.destroy(pid, &mut allocator);
185 }
186);
187
188crate::kernel_test!(
189 fn derive_attenuates_rights() {
190 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
191 let mut ptable = PROCESSES.lock();
192 let created = ptable.allocate(&mut allocator).expect("alloc");
193 ptable.start(created).expect("start");
194 let pid = created.pid();
195 helpers::bootstrap_test_cnode(pid, &mut ptable);
196
197 let src_addr = 20u64;
198 let dest_addr = 21u64;
199 let (cnode_id, cnode_gen, depth, gv, gb) =
200 cnode::cnode_coords(pid, &ptable).expect("coords");
201
202 {
203 let mut pool = POOL.lock_after(&ptable);
204 ops::create_via_cnode(
205 &mut pool,
206 cnode_id,
207 cnode_gen,
208 src_addr,
209 depth,
210 gv,
211 gb,
212 ObjectTag::Endpoint,
213 )
214 .expect("create source");
215 }
216
217 {
218 let mut pool = POOL.lock_after(&ptable);
219 ops::derive_via_cnode(
220 &mut pool,
221 cnode_id,
222 cnode_gen,
223 src_addr,
224 dest_addr,
225 depth,
226 gv,
227 gb,
228 Rights::READ,
229 )
230 .expect("derive");
231 }
232
233 {
234 let pool = POOL.lock_after(&ptable);
235 let cap = cnode::resolve_and_read(&pool, cnode_id, cnode_gen, dest_addr, depth, gv, gb)
236 .expect("read derived slot");
237 assert!(cap.rights().contains(Rights::READ));
238 assert!(!cap.rights().contains(Rights::WRITE));
239 assert!(!cap.rights().contains(Rights::REVOKE));
240 }
241
242 ptable.destroy(pid, &mut allocator);
243 }
244);
245
crate::kernel_test!(
    fn derive_without_grant_fails() {
        // Derive requires GRANT on the source cap. This test creates a
        // full-rights cap, rewrites it down to READ|WRITE (dropping GRANT),
        // and expects the subsequent derive to fail with InsufficientRights.
        let mut allocator = crate::mem::phys::BitmapFrameAllocator;
        let mut ptable = PROCESSES.lock();
        let created = ptable.allocate(&mut allocator).expect("alloc");
        ptable.start(created).expect("start");
        let pid = created.pid();
        helpers::bootstrap_test_cnode(pid, &mut ptable);

        let src_addr = 30u64;
        let dest_addr = 31u64;
        let (cnode_id, cnode_gen, depth, gv, gb) =
            cnode::cnode_coords(pid, &ptable).expect("coords");

        // Source cap starts with full rights from the create path.
        {
            let mut pool = POOL.lock_after(&ptable);
            ops::create_via_cnode(
                &mut pool,
                cnode_id,
                cnode_gen,
                src_addr,
                depth,
                gv,
                gb,
                ObjectTag::Endpoint,
            )
            .expect("create source");
        }

        // Swap the source cap for an attenuated copy: clear the slot, then
        // reinsert the same cap restricted to READ|WRITE (no GRANT).
        {
            let pool = POOL.lock_after(&ptable);
            let old_cap =
                cnode::resolve_and_clear(&pool, cnode_id, cnode_gen, src_addr, depth, gv, gb)
                    .expect("clear slot");
            let new_cap = old_cap.with_rights(Rights::READ | Rights::WRITE);
            cnode::resolve_and_insert(&pool, cnode_id, cnode_gen, src_addr, depth, gv, gb, new_cap)
                .expect("reinsert cap");
        }

        // With GRANT gone, derive must be rejected.
        let result = {
            let mut pool = POOL.lock_after(&ptable);
            ops::derive_via_cnode(
                &mut pool,
                cnode_id,
                cnode_gen,
                src_addr,
                dest_addr,
                depth,
                gv,
                gb,
                Rights::READ,
            )
        };
        assert!(
            matches!(result, Err(KernelError::InsufficientRights)),
            "derive without GRANT right must fail"
        );

        ptable.destroy(pid, &mut allocator);
    }
);
307
308crate::kernel_test!(
309 fn identify_returns_tag_and_rights() {
310 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
311 let mut ptable = PROCESSES.lock();
312 let created = ptable.allocate(&mut allocator).expect("alloc");
313 ptable.start(created).expect("start");
314 let pid = created.pid();
315 helpers::bootstrap_test_cnode(pid, &mut ptable);
316
317 let address = 40u64;
318 let (cnode_id, cnode_gen, depth, gv, gb) =
319 cnode::cnode_coords(pid, &ptable).expect("coords");
320
321 {
322 let mut pool = POOL.lock_after(&ptable);
323 ops::create_via_cnode(
324 &mut pool,
325 cnode_id,
326 cnode_gen,
327 address,
328 depth,
329 gv,
330 gb,
331 ObjectTag::Notification,
332 )
333 .expect("create notification");
334 }
335
336 let (tag, rights) = {
337 let pool = POOL.lock_after(&ptable);
338 ops::identify_via_cnode(&pool, cnode_id, cnode_gen, address, depth, gv, gb)
339 .expect("identify")
340 };
341
342 assert!(tag == ObjectTag::Notification);
343 assert!(rights.contains(Rights::ALL));
344
345 ptable.destroy(pid, &mut allocator);
346 }
347);
348
crate::kernel_test!(
    fn endpoint_revoke_unblocks_senders() {
        // Revoking an Endpoint cap must wake processes blocked sending on that
        // endpoint and hand them an InvalidObject errno in rax.
        let mut allocator = crate::mem::phys::BitmapFrameAllocator;
        let mut ptable = PROCESSES.lock();

        // Owner holds the cap; sender will be queued on the endpoint.
        let owner_created = ptable.allocate(&mut allocator).expect("alloc owner");
        let sender_created = ptable.allocate(&mut allocator).expect("alloc sender");
        ptable.start(owner_created).expect("start owner");
        ptable.start(sender_created).expect("start sender");
        let owner_pid = owner_created.pid();
        let sender_pid = sender_created.pid();
        helpers::bootstrap_test_cnode(owner_pid, &mut ptable);

        let (ep_id, ep_gen) =
            crate::tests::helpers::alloc_endpoint(&mut POOL.lock_after(&ptable)).expect("alloc ep");

        // Install a full-rights Endpoint cap in the owner's CNode at slot 0.
        let cap = CapRef::new(ObjectTag::Endpoint, ep_id, Rights::ALL, ep_gen);
        let address = 0u64;
        let (cnode_id, cnode_gen, depth, gv, gb) =
            cnode::cnode_coords(owner_pid, &ptable).expect("coords");
        {
            let pool = POOL.lock_after(&ptable);
            cnode::resolve_and_insert(&pool, cnode_id, cnode_gen, address, depth, gv, gb, cap)
                .expect("insert cap");
        }

        // Simulate the sender being dispatched and then blocking on a send.
        ptable.simulate_dispatch(sender_pid);
        let blocked = ptable[sender_pid]
            .block_on(BlockedReason::Sending(ep_id, ep_gen))
            .expect("block sender");

        // Manually enqueue the blocked sender on the endpoint's sender queue,
        // mimicking what the IPC send path would do: load the queue, enqueue,
        // then write the queue back (the pool write is re-fetched because
        // enqueue needs &mut ptable in between).
        {
            let mut pool = POOL.lock_after(&ptable);
            let ep = pool
                .write_as::<EndpointObject>(ep_id, ep_gen)
                .expect("get ep");
            let mut senders = endpoint::load_senders(ep);
            endpoint::enqueue(&mut senders, blocked, &mut ptable).expect("enqueue");
            let ep = pool
                .write_as::<EndpointObject>(ep_id, ep_gen)
                .expect("get ep again");
            endpoint::store_senders(ep, &senders);
        }

        ops::revoke_via_cnode(owner_pid, address, &mut ptable).expect("revoke");

        // Revoke must have drained the sender queue back to Ready...
        assert!(
            ptable[sender_pid].state() == ProcessState::Ready,
            "sender should be unblocked after endpoint revoke"
        );
        // ...and stored the InvalidObject errno as the sender's syscall result.
        assert!(
            ptable.exec(sender_pid).unwrap().saved_context.rax
                == KernelError::InvalidObject.to_errno() as u64,
            "sender's rax should contain InvalidObject errno after endpoint revoke"
        );

        ptable.destroy(owner_pid, &mut allocator);
        ptable.destroy(sender_pid, &mut allocator);
    }
);
409
crate::kernel_test!(
    fn notification_revoke_unblocks_waiters() {
        // Revoking a Notification cap must wake processes blocked waiting on
        // that notification and hand them an InvalidObject errno in rax.
        let mut allocator = crate::mem::phys::BitmapFrameAllocator;
        let mut ptable = PROCESSES.lock();

        // Owner holds the cap; waiter will be parked on the notification.
        let owner_created = ptable.allocate(&mut allocator).expect("alloc owner");
        let waiter_created = ptable.allocate(&mut allocator).expect("alloc waiter");
        ptable.start(owner_created).expect("start owner");
        ptable.start(waiter_created).expect("start waiter");
        let owner_pid = owner_created.pid();
        let waiter_pid = waiter_created.pid();
        helpers::bootstrap_test_cnode(owner_pid, &mut ptable);

        let (notif_id, notif_gen) =
            crate::tests::helpers::alloc_notification(&mut POOL.lock_after(&ptable))
                .expect("alloc notif");

        // Install a full-rights Notification cap in the owner's CNode, slot 0.
        let cap = CapRef::new(ObjectTag::Notification, notif_id, Rights::ALL, notif_gen);
        let address = 0u64;
        let (cnode_id, cnode_gen, depth, gv, gb) =
            cnode::cnode_coords(owner_pid, &ptable).expect("coords");
        {
            let pool = POOL.lock_after(&ptable);
            cnode::resolve_and_insert(&pool, cnode_id, cnode_gen, address, depth, gv, gb, cap)
                .expect("insert cap");
        }

        // Block the waiter on the notification (dispatch first so the block
        // transition is legal); the returned token is not needed here.
        ptable.simulate_dispatch(waiter_pid);
        let _ = ptable[waiter_pid]
            .block_on(BlockedReason::WaitingNotification(notif_id, notif_gen))
            .expect("block waiter");

        // Register the waiter directly in the notification object's waiter
        // array, mimicking what the wait path would do.
        {
            let mut pool = POOL.lock_after(&ptable);
            let notif = pool
                .write_as::<NotificationObject>(notif_id, notif_gen)
                .expect("get notif");
            notif.waiters[notif.waiter_count as usize] = waiter_pid.raw();
            notif.waiter_count += 1;
        }

        ops::revoke_via_cnode(owner_pid, address, &mut ptable).expect("revoke");

        // Revoke must release the waiter back to Ready...
        assert!(
            ptable[waiter_pid].state() == ProcessState::Ready,
            "waiter should be unblocked after notification revoke"
        );
        // ...and deliver InvalidObject as the waiter's syscall result.
        assert!(
            ptable.exec(waiter_pid).unwrap().saved_context.rax
                == KernelError::InvalidObject.to_errno() as u64,
            "waiter's rax should contain InvalidObject errno after notification revoke"
        );

        ptable.destroy(owner_pid, &mut allocator);
        ptable.destroy(waiter_pid, &mut allocator);
    }
);
467
crate::kernel_test!(
    fn proc_destroy_decrements_pool_refcount() {
        // Exercises destroying a process that is referenced by a Process
        // object held in a parent's CNode.
        // NOTE(review): despite the test name, nothing here asserts the pool
        // refcount (or object liveness) AFTER the destroy/dec_ref sequence —
        // only liveness beforehand. Consider asserting that get_tag fails
        // once the final reference is dropped; verify against pool semantics.
        let mut allocator = crate::mem::phys::BitmapFrameAllocator;
        let mut ptable = PROCESSES.lock();

        let parent_created = ptable.allocate(&mut allocator).expect("alloc parent");
        let child_created = ptable.allocate(&mut allocator).expect("alloc child");
        ptable.start(parent_created).expect("start parent");
        ptable.start(child_created).expect("start child");
        let parent_pid = parent_created.pid();
        let child_pid = child_created.pid();
        helpers::bootstrap_test_cnode(parent_pid, &mut ptable);

        // Build a Process object describing the child and give the parent a
        // full-rights cap to it at slot 0.
        let header = KernelObjectHeader::new(ObjectTag::Process, 0, 64);
        let mut proc_obj = ProcessObject::init_default(header);
        proc_obj.pid = child_pid.raw();
        let (obj_id, generation) = crate::tests::helpers::alloc_typed(
            &mut POOL.lock_after(&ptable),
            ObjectTag::Process,
            proc_obj,
        )
        .expect("alloc process object");
        let cap = CapRef::new(ObjectTag::Process, obj_id, Rights::ALL, generation);
        let address = 0u64;
        let (cnode_id, cnode_gen, depth, gv, gb) =
            cnode::cnode_coords(parent_pid, &ptable).expect("coords");
        {
            let pool = POOL.lock_after(&ptable);
            cnode::resolve_and_insert(&pool, cnode_id, cnode_gen, address, depth, gv, gb, cap)
                .expect("insert cap");
        }

        // Take an extra reference so the object survives the child's destroy,
        // then confirm it is still resolvable before the destroy.
        POOL.lock_after(&ptable)
            .inc_ref(obj_id, generation)
            .expect("inc_ref");
        let refcount_before = POOL
            .lock_after(&ptable)
            .get_tag(obj_id, generation)
            .is_ok();
        assert!(refcount_before, "object should be alive before destroy");

        ptable.destroy(child_pid, &mut allocator);

        // Drop the extra reference taken above and clear the parent's slot.
        POOL.lock_after(&ptable).dec_ref_phys(obj_id, generation);
        {
            let pool = POOL.lock_after(&ptable);
            let _ = cnode::resolve_and_clear(&pool, cnode_id, cnode_gen, address, depth, gv, gb);
        }

        ptable.destroy(parent_pid, &mut allocator);
    }
);
520
crate::kernel_test!(
    fn sched_context_revoke_detaches_process() {
        // Revoking a SchedContext cap must detach the context from the
        // process it is bound to.
        let mut allocator = crate::mem::phys::BitmapFrameAllocator;
        let mut ptable = PROCESSES.lock();

        let owner_created = ptable.allocate(&mut allocator).expect("alloc owner");
        ptable.start(owner_created).expect("start owner");
        let owner_pid = owner_created.pid();
        helpers::bootstrap_test_cnode(owner_pid, &mut ptable);

        // Build a sched-context object with concrete budget/period/priority.
        let header = KernelObjectHeader::new(ObjectTag::SchedContext, 0, 64);
        let mut sc_obj = SchedContextObject::init_default(header);
        sc_obj.budget_us = 1000;
        sc_obj.period_us = 10000;
        sc_obj.priority = 100;
        let (sc_id, sc_gen) = crate::tests::helpers::alloc_typed(
            &mut POOL.lock_after(&ptable),
            ObjectTag::SchedContext,
            sc_obj,
        )
        .expect("alloc sched context");

        // Record the binding on the object side (attached_pid -> owner).
        {
            let mut pool = POOL.lock_after(&ptable);
            let sc = pool
                .write_as::<SchedContextObject>(sc_id, sc_gen)
                .expect("get sc");
            sc.attached_pid = owner_pid.raw();
        }

        // Give the owner a full-rights cap to the sched context at slot 0.
        let cap = CapRef::new(ObjectTag::SchedContext, sc_id, Rights::ALL, sc_gen);
        let address = 0u64;
        let (cnode_id, cnode_gen, depth, gv, gb) =
            cnode::cnode_coords(owner_pid, &ptable).expect("coords");
        {
            let pool = POOL.lock_after(&ptable);
            cnode::resolve_and_insert(&pool, cnode_id, cnode_gen, address, depth, gv, gb, cap)
                .expect("insert cap");
        }

        // Record the binding on the process side as well.
        ptable[owner_pid].attach_sched_context(sc_id, sc_gen, Priority::new(100));
        assert!(
            ptable[owner_pid].sched_context().is_some(),
            "sched_context should be attached before revoke"
        );

        ops::revoke_via_cnode(owner_pid, address, &mut ptable).expect("revoke sched context");

        assert!(
            ptable[owner_pid].sched_context().is_none(),
            "sched_context must be detached after SchedContext revoke"
        );

        ptable.destroy(owner_pid, &mut allocator);
    }
);
577
578crate::kernel_test!(
579 fn sched_context_revoke_prevents_scheduling() {
580 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
581 let mut ptable = PROCESSES.lock();
582
583 let owner_created = ptable.allocate(&mut allocator).expect("alloc owner");
584 ptable.start(owner_created).expect("start owner");
585 let owner_pid = owner_created.pid();
586 helpers::bootstrap_test_cnode(owner_pid, &mut ptable);
587
588 let header = KernelObjectHeader::new(ObjectTag::SchedContext, 0, 64);
589 let mut sc_obj = SchedContextObject::init_default(header);
590 sc_obj.budget_us = 1000;
591 sc_obj.period_us = 10000;
592 sc_obj.priority = 200;
593 let address = 0u64;
594 let (sc_id, sc_gen) = crate::tests::helpers::alloc_typed(
595 &mut POOL.lock_after(&ptable),
596 ObjectTag::SchedContext,
597 sc_obj,
598 )
599 .expect("alloc sc");
600
601 {
602 let mut pool = POOL.lock_after(&ptable);
603 let sc = pool
604 .write_as::<SchedContextObject>(sc_id, sc_gen)
605 .expect("get sc");
606 sc.attached_pid = owner_pid.raw();
607 }
608
609 let cap = CapRef::new(ObjectTag::SchedContext, sc_id, Rights::ALL, sc_gen);
610 let (cnode_id, cnode_gen, depth, gv, gb) =
611 cnode::cnode_coords(owner_pid, &ptable).expect("coords");
612 {
613 let pool = POOL.lock_after(&ptable);
614 cnode::resolve_and_insert(&pool, cnode_id, cnode_gen, address, depth, gv, gb, cap)
615 .expect("insert cap");
616 }
617 ptable[owner_pid].attach_sched_context(sc_id, sc_gen, Priority::new(200));
618
619 assert!(
620 ptable[owner_pid].is_runnable(),
621 "process should be runnable before revoke"
622 );
623
624 ops::revoke_via_cnode(owner_pid, address, &mut ptable).expect("revoke");
625
626 assert!(
627 ptable[owner_pid].sched_context().is_none(),
628 "sched_context detached after revoke"
629 );
630
631 ptable.destroy(owner_pid, &mut allocator);
632 }
633);
634
635crate::kernel_test!(
636 fn cleanup_sched_context_with_no_attached_process() {
637 let mut allocator = crate::mem::phys::BitmapFrameAllocator;
638 let mut ptable = PROCESSES.lock();
639
640 let owner_created = ptable.allocate(&mut allocator).expect("alloc owner");
641 ptable.start(owner_created).expect("start owner");
642 let owner_pid = owner_created.pid();
643 helpers::bootstrap_test_cnode(owner_pid, &mut ptable);
644
645 let header = KernelObjectHeader::new(ObjectTag::SchedContext, 0, 64);
646 let mut sc_obj = SchedContextObject::init_default(header);
647 sc_obj.budget_us = 500;
648 sc_obj.period_us = 5000;
649 sc_obj.priority = 50;
650 let address = 0u64;
651 let (sc_id, sc_gen) = crate::tests::helpers::alloc_typed(
652 &mut POOL.lock_after(&ptable),
653 ObjectTag::SchedContext,
654 sc_obj,
655 )
656 .expect("alloc sc");
657 let cap = CapRef::new(ObjectTag::SchedContext, sc_id, Rights::ALL, sc_gen);
658 let (cnode_id, cnode_gen, depth, gv, gb) =
659 cnode::cnode_coords(owner_pid, &ptable).expect("coords");
660 {
661 let pool = POOL.lock_after(&ptable);
662 cnode::resolve_and_insert(&pool, cnode_id, cnode_gen, address, depth, gv, gb, cap)
663 .expect("insert cap");
664 }
665
666 ops::revoke_via_cnode(owner_pid, address, &mut ptable).expect("revoke unattached sc");
667
668 ptable.destroy(owner_pid, &mut allocator);
669 }
670);
671
crate::kernel_test!(
    fn sched_context_cleanup_different_holder_and_attached() {
        // The process holding the SchedContext cap (holder) and the process
        // the context is attached to (target) are different; revoking from
        // the holder's CNode must still detach the context from the target.
        let mut allocator = crate::mem::phys::BitmapFrameAllocator;
        let mut ptable = PROCESSES.lock();

        let holder_created = ptable.allocate(&mut allocator).expect("alloc holder");
        let target_created = ptable.allocate(&mut allocator).expect("alloc target");
        ptable.start(holder_created).expect("start holder");
        ptable.start(target_created).expect("start target");
        let holder_pid = holder_created.pid();
        let target_pid = target_created.pid();
        helpers::bootstrap_test_cnode(holder_pid, &mut ptable);

        // Build a sched-context object with concrete budget/period/priority.
        let header = KernelObjectHeader::new(ObjectTag::SchedContext, 0, 64);
        let mut sc_obj = SchedContextObject::init_default(header);
        sc_obj.budget_us = 1000;
        sc_obj.period_us = 10000;
        sc_obj.priority = 100;
        let (sc_id, sc_gen) = crate::tests::helpers::alloc_typed(
            &mut POOL.lock_after(&ptable),
            ObjectTag::SchedContext,
            sc_obj,
        )
        .expect("alloc sc");

        // Bind the context to the TARGET on the object side...
        {
            let mut pool = POOL.lock_after(&ptable);
            let sc = pool
                .write_as::<SchedContextObject>(sc_id, sc_gen)
                .expect("get sc");
            sc.attached_pid = target_pid.raw();
        }

        // ...and on the target's process side.
        ptable[target_pid].attach_sched_context(sc_id, sc_gen, Priority::new(100));
        assert!(
            ptable[target_pid].sched_context().is_some(),
            "target should have sched context"
        );

        // The HOLDER (not the target) gets the cap at slot 0.
        let cap = CapRef::new(ObjectTag::SchedContext, sc_id, Rights::ALL, sc_gen);
        let address = 0u64;
        let (cnode_id, cnode_gen, depth, gv, gb) =
            cnode::cnode_coords(holder_pid, &ptable).expect("coords");
        {
            let pool = POOL.lock_after(&ptable);
            cnode::resolve_and_insert(&pool, cnode_id, cnode_gen, address, depth, gv, gb, cap)
                .expect("insert cap");
        }

        ops::revoke_via_cnode(holder_pid, address, &mut ptable).expect("revoke");

        // Cleanup must follow attached_pid to the target, not the holder.
        assert!(
            ptable[target_pid].sched_context().is_none(),
            "target's sched_context must be cleared when holder revokes the SchedContext"
        );

        ptable.destroy(holder_pid, &mut allocator);
        ptable.destroy(target_pid, &mut allocator);
    }
);