//! Endpoint IPC: intrusive sender/receiver wait queues plus the
//! send / receive / call paths and queue-cleanup on process teardown.
1use crate::cap::object::PidQueue;
2use crate::cap::pool::POOL;
3use crate::cap::table::CapRef;
4use crate::ipc::{AlwaysBlocked, CallResult, IpcOutcome, IpcResult, message};
5use crate::proc::{BlockedReason, ProcessManager};
6use crate::types::{BlockedPid, MAX_PIDS, Pid};
7use lancer_core::header::NONE_SENTINEL;
8use lancer_core::object_layout::EndpointObject;
9
10pub(crate) fn load_senders(ep: &EndpointObject) -> PidQueue {
11 PidQueue::from_repr_c(ep.sender_head, ep.sender_tail, ep.sender_len)
12}
13
14pub(crate) fn load_receivers(ep: &EndpointObject) -> PidQueue {
15 PidQueue::from_repr_c(ep.receiver_head, ep.receiver_tail, ep.receiver_len)
16}
17
18pub(crate) fn store_senders(ep: &mut EndpointObject, q: &PidQueue) {
19 let (h, t, l) = q.to_repr_c();
20 ep.sender_head = h;
21 ep.sender_tail = t;
22 ep.sender_len = l;
23}
24
25pub(crate) fn store_receivers(ep: &mut EndpointObject, q: &PidQueue) {
26 let (h, t, l) = q.to_repr_c();
27 ep.receiver_head = h;
28 ep.receiver_tail = t;
29 ep.receiver_len = l;
30}
31
32pub(crate) fn load_holder(ep: &EndpointObject) -> Option<Pid> {
33 Pid::try_new(ep.holder)
34}
35
36pub(crate) fn store_holder(ep: &mut EndpointObject, holder: Option<Pid>) {
37 ep.holder = holder.map_or(NONE_SENTINEL, |p| p.raw());
38}
39
40fn assert_queue_valid(queue: &PidQueue, ptable: &ProcessManager) {
41 let valid = queue.validate(|pid| ptable[pid].next_ipc);
42 debug_assert!(valid, "PidQueue invariant violation detected");
43}
44
45pub(crate) fn enqueue(
46 queue: &mut PidQueue,
47 blocked: BlockedPid,
48 ptable: &mut ProcessManager,
49) -> Result<(), (crate::error::KernelError, BlockedPid)> {
50 let pid = blocked.pid();
51 if ptable[pid].next_ipc.is_some() {
52 return Err((crate::error::KernelError::BadState, blocked));
53 }
54 queue.check_capacity().map_err(|e| (e, blocked))?;
55 match queue.tail {
56 Some(tail) => {
57 ptable[tail].next_ipc = Some(pid);
58 queue.tail = Some(pid);
59 }
60 None => {
61 queue.head = Some(pid);
62 queue.tail = Some(pid);
63 }
64 }
65 let r = queue.inc_len();
66 debug_assert!(r.is_ok(), "inc_len cannot fail after check_capacity");
67 assert_queue_valid(queue, ptable);
68 Ok(())
69}
70
71pub(crate) fn dequeue(queue: &mut PidQueue, ptable: &mut ProcessManager) -> Option<BlockedPid> {
72 queue.head.map(|pid| {
73 queue.head = ptable[pid].next_ipc;
74 ptable[pid].next_ipc = None;
75 if queue.head.is_none() {
76 queue.tail = None;
77 }
78 queue.dec_len();
79 assert_queue_valid(queue, ptable);
80 ptable[pid].blocked_proof()
81 })
82}
83
84pub(crate) fn dequeue_genuine_receiver(
85 queue: &mut PidQueue,
86 ptable: &mut ProcessManager,
87) -> Option<BlockedPid> {
88 let candidate = core::iter::successors(queue.head, |&cur| ptable[cur].next_ipc)
89 .take(MAX_PIDS)
90 .find(|&pid| {
91 !matches!(
92 ptable[pid].blocked_reason(),
93 Some(BlockedReason::Calling(_, _))
94 )
95 })?;
96 remove_pid(queue, candidate, ptable);
97 Some(ptable[candidate].blocked_proof())
98}
99
/// Unlink `target` from anywhere inside `queue`, fixing head, tail and the
/// predecessor's `next_ipc` link. No-op if the queue is empty or `target`
/// is not actually on it.
fn remove_pid(queue: &mut PidQueue, target: Pid, ptable: &mut ProcessManager) {
    // Empty queue: nothing to remove.
    let Some(h) = queue.head else { return };

    if h == target {
        // Target is the head: pop it exactly as `dequeue` would.
        queue.head = ptable[target].next_ipc;
        ptable[target].next_ipc = None;
        if queue.head.is_none() {
            queue.tail = None;
        }
        queue.dec_len();
        assert_queue_valid(queue, ptable);
        return;
    }

    // Find the node whose `next_ipc` points at `target`. The walk is
    // bounded by MAX_PIDS so a corrupted cycle cannot hang the kernel.
    let Some(prev) = core::iter::successors(queue.head, |&cur| ptable[cur].next_ipc)
        .take(MAX_PIDS)
        .find(|&cur| ptable[cur].next_ipc == Some(target))
    else {
        // Target not present: leave the queue (and its length) untouched.
        return;
    };

    // Splice target out of the chain.
    let next_after_target = ptable[target].next_ipc;
    ptable[prev].next_ipc = next_after_target;
    ptable[target].next_ipc = None;
    if next_after_target.is_none() {
        // Target was the tail; its predecessor becomes the new tail.
        queue.tail = Some(prev);
    }
    queue.dec_len();
    assert_queue_valid(queue, ptable);
}
130
/// Send on an endpoint: deliver directly to a waiting genuine receiver,
/// or block the sender on the endpoint's sender queue.
///
/// Priority: a woken receiver is boosted with the sender's effective
/// priority; a blocked sender donates its priority to the endpoint holder.
pub fn do_send(cap: &CapRef, sender_pid: Pid, ptable: &mut ProcessManager) -> IpcResult<()> {
    let sender_msg = ptable.exec(sender_pid).unwrap().ipc_message;

    // Scope the pool lock: pop a genuine receiver (skipping parked callers)
    // and record it as the endpoint's holder before releasing the lock.
    let blocked_recv = {
        let mut pool = POOL.lock();
        let ep = pool.write_as::<EndpointObject>(cap.phys(), cap.generation())?;
        let mut receivers = load_receivers(ep);
        let dequeued = dequeue_genuine_receiver(&mut receivers, ptable);
        if let Some(br) = &dequeued {
            store_holder(ep, Some(br.pid()));
        }
        store_receivers(ep, &receivers);
        dequeued
    };

    match blocked_recv {
        Some(blocked_recv) => {
            // Fast path: a receiver was waiting. Wake it and deliver.
            let recv_pid = blocked_recv.pid();
            let sender_prio = ptable[sender_pid].effective_priority();

            ptable.unblock_and_enqueue(recv_pid, blocked_recv)?;
            let recv_exec = ptable.exec_mut(recv_pid).unwrap();
            recv_exec.ipc_message = sender_msg;
            recv_exec.ipc_badge = sender_pid.raw() as u64;
            message::inject_into_context(&mut recv_exec.saved_context, &sender_msg);
            // The receiver's syscall returns the sender's pid.
            recv_exec.saved_context.rax = sender_pid.raw() as u64;
            recv_exec.seal_context();

            crate::sched::boost_effective(ptable, recv_pid, sender_prio);

            Ok(IpcOutcome::Done(()))
        }
        None => {
            // Slow path: no receiver available. Block the sender on the
            // endpoint and donate its priority to the current holder.
            let mut pool = POOL.lock();
            let (holder, donor_prio) = {
                let ep = pool.write_as::<EndpointObject>(cap.phys(), cap.generation())?;
                let mut senders = load_senders(ep);
                let blocked_sender = ptable[sender_pid]
                    .block_on(BlockedReason::Sending(cap.phys(), cap.generation()))?;
                enqueue(&mut senders, blocked_sender, ptable).map_err(|(e, proof)| {
                    // Enqueue failed: undo the block so the sender stays runnable.
                    let _ = ptable.unblock_and_enqueue(sender_pid, proof);
                    e
                })?;
                store_senders(ep, &senders);
                let holder = load_holder(ep);
                (holder, ptable[sender_pid].effective_priority())
            };
            if let Some(h) = holder {
                crate::sched::propagate_priority(ptable, &pool, h, donor_prio);
            }
            Ok(IpcOutcome::Blocked)
        }
    }
}
185
186fn dequeue_sender(
187 cap: &CapRef,
188 recv_pid: Pid,
189 ptable: &mut ProcessManager,
190) -> Result<Option<BlockedPid>, crate::error::KernelError> {
191 let mut pool = POOL.lock();
192 let ep = pool.write_as::<EndpointObject>(cap.phys(), cap.generation())?;
193 let mut senders = load_senders(ep);
194 let dequeued = dequeue(&mut senders, ptable);
195 if dequeued.is_some() {
196 store_holder(ep, Some(recv_pid));
197 }
198 store_senders(ep, &senders);
199 Ok(dequeued)
200}
201
/// Complete a receive after a sender has been popped off the queue.
///
/// - A sender blocked with `Calling` is re-parked on the RECEIVER queue to
///   await the reply, and the receiver's `reply_target` is set.
/// - Any other sender is simply unblocked after its message is copied.
///
/// Finally the receiver's effective priority is recalculated from the
/// remaining senders and boosted by the popped sender's priority.
fn handle_dequeued_sender(
    cap: &CapRef,
    recv_pid: Pid,
    blocked_sender: BlockedPid,
    ptable: &mut ProcessManager,
) -> IpcResult<Pid> {
    let sender_pid = blocked_sender.pid();
    let sender_msg = ptable.exec(sender_pid).unwrap().ipc_message;
    let sender_prio = ptable[sender_pid].effective_priority();

    match ptable[sender_pid].blocked_reason() {
        Some(BlockedReason::Calling(_, _)) => {
            let mut pool = POOL.lock();
            match pool.write_as::<EndpointObject>(cap.phys(), cap.generation()) {
                Ok(ep) => {
                    let mut receivers = load_receivers(ep);
                    match enqueue(&mut receivers, blocked_sender, ptable) {
                        Ok(()) => {
                            // Caller parked awaiting reply; deliver its message
                            // and remember who to reply to.
                            store_receivers(ep, &receivers);
                            let recv_exec = ptable.exec_mut(recv_pid).unwrap();
                            recv_exec.ipc_message = sender_msg;
                            recv_exec.ipc_badge = sender_pid.raw() as u64;
                            recv_exec.reply_target = Some(sender_pid);
                        }
                        Err((e, proof)) => {
                            // Could not park the caller: fail its call with
                            // ResourceExhausted and make it runnable again.
                            store_receivers(ep, &receivers);
                            crate::show!(ipc, error, "recv re-enqueue caller failed {:?}", e);
                            let sender_exec = ptable.exec_mut(sender_pid).unwrap();
                            sender_exec.saved_context.rax =
                                crate::error::KernelError::ResourceExhausted.to_errno() as u64;
                            sender_exec.seal_context();
                            {
                                let r = ptable.unblock_and_enqueue(sender_pid, proof);
                                debug_assert!(r.is_ok());
                            }
                        }
                    }
                }
                Err(e) => {
                    // Endpoint vanished (stale generation): fail the caller.
                    let sender_exec = ptable.exec_mut(sender_pid).unwrap();
                    sender_exec.saved_context.rax = e.to_errno() as u64;
                    sender_exec.seal_context();
                    {
                        let r = ptable.unblock_and_enqueue(sender_pid, blocked_sender);
                        debug_assert!(r.is_ok());
                    }
                }
            }
        }
        Some(BlockedReason::Sending(_, _))
        | Some(BlockedReason::Receiving(_, _))
        | Some(BlockedReason::WaitingNotification(_, _))
        | None => {
            // Plain send: copy the message and release the sender.
            let recv_exec = ptable.exec_mut(recv_pid).unwrap();
            recv_exec.ipc_message = sender_msg;
            recv_exec.ipc_badge = sender_pid.raw() as u64;
            ptable.unblock_and_enqueue(sender_pid, blocked_sender)?;
        }
    }

    {
        // Recompute donated priority from whoever is still queued; if the
        // endpoint is gone, fall back to resetting before the boost.
        let pool = POOL.lock();
        match pool.read_as::<EndpointObject>(cap.phys(), cap.generation()) {
            Ok(ep) => {
                let senders = load_senders(ep);
                crate::sched::recalculate_effective(ptable, recv_pid, &senders);
                crate::sched::boost_effective(ptable, recv_pid, sender_prio);
            }
            Err(_) => {
                crate::sched::reset_effective(ptable, recv_pid);
                crate::sched::boost_effective(ptable, recv_pid, sender_prio);
            }
        }
    }

    Ok(IpcOutcome::Done(sender_pid))
}
279
/// Blocking receive: take a queued sender if one exists, otherwise block
/// this receiver on the endpoint's receiver queue and claim holdership.
pub fn do_recv(cap: &CapRef, recv_pid: Pid, ptable: &mut ProcessManager) -> IpcResult<Pid> {
    match dequeue_sender(cap, recv_pid, ptable)? {
        Some(blocked_sender) => handle_dequeued_sender(cap, recv_pid, blocked_sender, ptable),
        None => {
            // No sender waiting: park the receiver on the endpoint.
            let mut pool = POOL.lock();
            let ep = pool.write_as::<EndpointObject>(cap.phys(), cap.generation())?;
            let mut receivers = load_receivers(ep);
            let blocked_recv = ptable[recv_pid]
                .block_on(BlockedReason::Receiving(cap.phys(), cap.generation()))?;
            enqueue(&mut receivers, blocked_recv, ptable).map_err(|(e, proof)| {
                // Enqueue failed: undo the block so the receiver stays runnable.
                let _ = ptable.unblock_and_enqueue(recv_pid, proof);
                e
            })?;
            store_receivers(ep, &receivers);
            // A waiting receiver drops any previously donated priority.
            crate::sched::reset_effective(ptable, recv_pid);
            store_holder(ep, Some(recv_pid));
            Ok(IpcOutcome::Blocked)
        }
    }
}
300
301pub fn do_try_recv(cap: &CapRef, recv_pid: Pid, ptable: &mut ProcessManager) -> IpcResult<Pid> {
302 match dequeue_sender(cap, recv_pid, ptable)? {
303 Some(blocked_sender) => handle_dequeued_sender(cap, recv_pid, blocked_sender, ptable),
304 None => Err(crate::error::KernelError::WouldBlock),
305 }
306}
307
/// Call (send + await reply) on an endpoint. The caller ALWAYS blocks:
/// either parked on the receiver queue awaiting a reply (fast path, a
/// receiver was waiting) or on the sender queue (slow path, none was).
///
/// The fast path performs careful rollback: if the caller cannot be parked,
/// the just-dequeued receiver is re-enqueued and the old holder restored.
pub fn do_call(cap: &CapRef, caller_pid: Pid, ptable: &mut ProcessManager) -> CallResult {
    let caller_msg = ptable.exec(caller_pid).unwrap().ipc_message;

    let blocked_recv = {
        let mut pool = POOL.lock();
        let ep = pool.write_as::<EndpointObject>(cap.phys(), cap.generation())?;
        let mut receivers = load_receivers(ep);
        let dequeued = dequeue(&mut receivers, ptable);
        if let Some(br) = &dequeued {
            let recv_pid = br.pid();
            // Remember the old holder so every failure path can restore it.
            let old_holder = load_holder(ep);
            store_holder(ep, Some(recv_pid));
            match ptable[caller_pid]
                .block_on(BlockedReason::Calling(cap.phys(), cap.generation()))
            {
                Ok(blocked_caller) => {
                    if let Err((enq_err, caller_proof)) =
                        enqueue(&mut receivers, blocked_caller, ptable)
                    {
                        // Rollback: unblock the caller, report the error in
                        // rax, re-enqueue the receiver, restore the holder.
                        crate::show!(
                            ipc,
                            error,
                            "call enqueue caller pid {} failed {:?}",
                            caller_pid.raw(),
                            enq_err
                        );
                        {
                            let r = ptable.unblock_and_enqueue(caller_pid, caller_proof);
                            debug_assert!(r.is_ok());
                        }
                        let caller_exec = ptable.exec_mut(caller_pid).unwrap();
                        caller_exec.saved_context.rax = enq_err.to_errno() as u64;
                        caller_exec.seal_context();
                        let recv_proof = ptable[recv_pid].blocked_proof();
                        let re_enq = enqueue(&mut receivers, recv_proof, ptable);
                        debug_assert!(
                            re_enq.is_ok(),
                            "re-enqueue of just-dequeued receiver must succeed"
                        );
                        if let Err((_, recv_recovery)) = re_enq {
                            // Last resort: wake the receiver with an error.
                            let recv_exec = ptable.exec_mut(recv_pid).unwrap();
                            recv_exec.saved_context.rax =
                                crate::error::KernelError::ResourceExhausted.to_errno() as u64;
                            recv_exec.seal_context();
                            {
                                let r = ptable.unblock_and_enqueue(recv_pid, recv_recovery);
                                debug_assert!(r.is_ok());
                            }
                        }
                        store_holder(ep, old_holder);
                        store_receivers(ep, &receivers);
                        return Err(enq_err);
                    }
                }
                Err(e) => {
                    // Caller could not block at all: put the receiver back
                    // and restore the previous holder before bailing out.
                    let proof = ptable[recv_pid].blocked_proof();
                    let re_enq = enqueue(&mut receivers, proof, ptable);
                    debug_assert!(
                        re_enq.is_ok(),
                        "re-enqueue of just-dequeued receiver must succeed"
                    );
                    if let Err((enq_err, recv_recovery)) = re_enq {
                        crate::show!(
                            ipc,
                            error,
                            "call re-enqueue recv pid {} failed {:?}",
                            recv_pid.raw(),
                            enq_err
                        );
                        let recv_exec = ptable.exec_mut(recv_pid).unwrap();
                        recv_exec.saved_context.rax =
                            crate::error::KernelError::ResourceExhausted.to_errno() as u64;
                        recv_exec.seal_context();
                        {
                            let r = ptable.unblock_and_enqueue(recv_pid, recv_recovery);
                            debug_assert!(r.is_ok());
                        }
                    }
                    store_holder(ep, old_holder);
                    store_receivers(ep, &receivers);
                    return Err(e);
                }
            }
        }
        store_receivers(ep, &receivers);
        dequeued
    };

    match blocked_recv {
        Some(blocked_recv) => {
            // Fast path: wake the receiver with the caller's message and
            // point its reply at the (now parked) caller.
            let recv_pid = blocked_recv.pid();
            let caller_prio = ptable[caller_pid].effective_priority();

            ptable.unblock_and_enqueue(recv_pid, blocked_recv)?;
            let recv_exec = ptable.exec_mut(recv_pid).unwrap();
            recv_exec.ipc_message = caller_msg;
            recv_exec.ipc_badge = caller_pid.raw() as u64;
            message::inject_into_context(&mut recv_exec.saved_context, &caller_msg);
            recv_exec.saved_context.rax = caller_pid.raw() as u64;
            recv_exec.reply_target = Some(caller_pid);
            recv_exec.seal_context();

            crate::sched::boost_effective(ptable, recv_pid, caller_prio);

            Ok(AlwaysBlocked)
        }
        None => {
            // Slow path: queue the caller as a sender and donate priority
            // to the current endpoint holder.
            let mut pool = POOL.lock();
            let (holder, donor_prio) = {
                let ep = pool.write_as::<EndpointObject>(cap.phys(), cap.generation())?;
                let mut senders = load_senders(ep);
                let blocked_caller = ptable[caller_pid]
                    .block_on(BlockedReason::Calling(cap.phys(), cap.generation()))?;
                enqueue(&mut senders, blocked_caller, ptable).map_err(|(e, proof)| {
                    let _ = ptable.unblock_and_enqueue(caller_pid, proof);
                    e
                })?;
                store_senders(ep, &senders);
                let holder = load_holder(ep);
                (holder, ptable[caller_pid].effective_priority())
            };
            if let Some(h) = holder {
                crate::sched::propagate_priority(ptable, &pool, h, donor_prio);
            }

            Ok(AlwaysBlocked)
        }
    }
}
437
438pub fn remove_from_recv(ep: &mut EndpointObject, pid: Pid, ptable: &mut ProcessManager) {
439 let mut receivers = load_receivers(ep);
440 remove_pid(&mut receivers, pid, ptable);
441 store_receivers(ep, &receivers);
442}
443
444fn unblock_callers_in_receivers(queue: &mut PidQueue, ptable: &mut ProcessManager) {
445 let mut new_head: Option<Pid> = None;
446 let mut new_tail: Option<Pid> = None;
447 let mut new_len: u16 = 0;
448 let mut cursor = queue.head;
449 let mut steps = 0u32;
450
451 core::iter::from_fn(|| {
452 cursor.filter(|_| steps < MAX_PIDS as u32).inspect(|&pid| {
453 steps += 1;
454 let next = ptable[pid].next_ipc;
455 ptable[pid].next_ipc = None;
456 cursor = next;
457
458 if matches!(
459 ptable[pid].blocked_reason(),
460 Some(BlockedReason::Calling(_, _))
461 ) {
462 let proof = ptable[pid].blocked_proof();
463 match ptable.unblock_and_enqueue(pid, proof) {
464 Ok(()) => {
465 let exec = ptable.exec_mut(pid).unwrap();
466 exec.saved_context.rax =
467 crate::error::KernelError::InvalidObject.to_errno() as u64;
468 exec.seal_context();
469 }
470 Err(e) => {
471 crate::show!(
472 ipc,
473 error,
474 "unblock caller pid {} on holder death failed {:?}",
475 pid.raw(),
476 e
477 );
478 }
479 }
480 } else {
481 if let Some(tail) = new_tail {
482 ptable[tail].next_ipc = Some(pid);
483 } else {
484 new_head = Some(pid);
485 }
486 new_tail = Some(pid);
487 new_len = new_len.saturating_add(1);
488 }
489 })
490 })
491 .count();
492
493 *queue = PidQueue::from_parts(new_head, new_tail, new_len);
494
495 assert_queue_valid(queue, ptable);
496}
497
/// Detach `pid` from whatever IPC wait structure its blocked reason says it
/// is on, and clean up endpoint holdership if the pid was the holder.
///
/// Stale capability generations make `write_as` fail, in which case the
/// endpoint/notification no longer exists and nothing needs cleanup.
pub fn remove_from_queues(
    pid: Pid,
    pool: &mut crate::cap::pool::ObjectPool,
    ptable: &mut ProcessManager,
) {
    let blocked = ptable.get(pid).and_then(|s| s.blocked_reason());

    match blocked {
        Some(BlockedReason::Sending(phys, generation)) | Some(BlockedReason::Calling(phys, generation)) => {
            if let Ok(ep) = pool.write_as::<EndpointObject>(phys, generation) {
                // Senders and callers both wait on the sender queue.
                let mut senders = load_senders(ep);
                remove_pid(&mut senders, pid, ptable);
                store_senders(ep, &senders);

                let mut receivers = load_receivers(ep);
                let holder = load_holder(ep);
                match holder {
                    Some(h) if h == pid => {
                        // The departing pid held the endpoint: clear holdership
                        // and fail out any callers parked among the receivers.
                        store_holder(ep, None);
                        unblock_callers_in_receivers(&mut receivers, ptable);
                        store_receivers(ep, &receivers);
                    }
                    _ => {
                        store_receivers(ep, &receivers);
                    }
                }
            }
        }
        Some(BlockedReason::Receiving(phys, generation)) => {
            if let Ok(ep) = pool.write_as::<EndpointObject>(phys, generation) {
                let mut receivers = load_receivers(ep);
                remove_pid(&mut receivers, pid, ptable);
                store_receivers(ep, &receivers);

                let holder = load_holder(ep);
                match holder {
                    Some(h) if h == pid => {
                        store_holder(ep, None);
                        // Reload so the rebuild starts from the queue state
                        // just written back above.
                        let mut receivers = load_receivers(ep);
                        unblock_callers_in_receivers(&mut receivers, ptable);
                        store_receivers(ep, &receivers);
                    }
                    _ => {}
                }
            }
        }
        Some(BlockedReason::WaitingNotification(phys, generation)) => {
            if let Ok(notif) = pool
                .write_as::<lancer_core::object_layout::NotificationObject>(phys, generation)
            {
                // Compact the fixed-size waiter array in place: keep every
                // slot that is not this pid, then pad the tail with the
                // sentinel and shrink the count.
                let raw = pid.raw();
                let old_waiters = notif.waiters;
                let retained_count = (0..notif.waiter_count as usize)
                    .filter(|&i| old_waiters[i] != raw)
                    .fold(0u8, |dst_idx, i| {
                        notif.waiters[dst_idx as usize] = old_waiters[i];
                        dst_idx + 1
                    });
                (retained_count as usize..notif.waiters.len()).for_each(|i| {
                    notif.waiters[i] = NONE_SENTINEL;
                });
                notif.waiter_count = retained_count;
            }
        }
        None => {}
    }
}
564}