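//! io_uring-style submission/completion rings for batched kernel operations.
//!
//! Userspace posts `SubmissionEntry` records into a shared-memory submission
//! queue and invokes `ring_enter`; the kernel drains a bounded batch,
//! dispatches each opcode, and writes a `CompletionEntry` per submission.
//! Locks are always taken in PROCESSES-then-POOL order (`lock_after`).
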
use core::sync::atomic::Ordering;

use crate::cap::cnode;
use crate::cap::object::ObjectTag;
use crate::cap::ops;
use crate::cap::pool::POOL;
use crate::cap::table::Rights;
use crate::error::KernelError;
use crate::ipc::message;
use crate::ipc::{endpoint, notification};
use crate::mem::addr;
use crate::proc::context::IpcMessage;
use crate::proc::{BlockedReason, PROCESSES};
use crate::ring::{
    CompletionEntry, MAX_CQ_ENTRIES, MAX_SQ_ENTRIES, RingHeader, RingIndex, RingOpcode,
    SubmissionEntry,
};
use crate::types::Pid;

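/// Upper bound on submissions drained per `ring_enter` call; a backlog larger
/// than this is worked off over successive calls.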
const MAX_RING_BATCH: u32 = 16;

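/// Compile-time assertion that the ring entry types may be materialized from
/// raw bytes, since they are copied out of user-shared memory.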
fn _assert_from_bytes<T: zerocopy::FromBytes>() {}
#[allow(dead_code)]
fn _assert_ring_types_are_zerocopy() {
    _assert_from_bytes::<SubmissionEntry>();
    _assert_from_bytes::<CompletionEntry>();
}

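/// Snapshot of the header fields that userspace owns (`sq_tail`, `cq_head`),
/// taken once before processing a batch.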
struct UserSnapshot {
    sq_tail: RingIndex,
    cq_head: RingIndex,
}

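/// Reads the user-written indices with `Acquire` ordering, so that submission
/// entries published before the `sq_tail` update are visible to the kernel.
///
/// # Safety
/// `ring_base` must point to a live, correctly mapped `RingHeader`.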
unsafe fn snapshot_user_fields(ring_base: *const u8) -> UserSnapshot {
    let header = unsafe { &*(ring_base as *const RingHeader) };
    UserSnapshot {
        sq_tail: RingIndex::new(header.sq_tail.load(Ordering::Acquire)),
        cq_head: RingIndex::new(header.cq_head.load(Ordering::Acquire)),
    }
}

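/// Publishes the kernel-owned indices with `Release` ordering so completion
/// entries written earlier become visible to userspace first.
///
/// # Safety
/// `ring_base` must point to a live, correctly mapped `RingHeader`.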
unsafe fn commit_header(ring_base: *mut u8, new_sq_head: RingIndex, new_cq_tail: RingIndex) {
    let header = unsafe { &*(ring_base as *const RingHeader) };
    header.sq_head.store(new_sq_head.raw(), Ordering::Release);
    header.cq_tail.store(new_cq_tail.raw(), Ordering::Release);
}

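/// Copies one `SubmissionEntry` out of the submission queue. The read is
/// volatile because userspace can concurrently rewrite the shared mapping;
/// the kernel only ever validates its private copy.
///
/// # Safety
/// `ring_base` must point to a live ring mapping and `index` must be in
/// bounds for the submission queue.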
unsafe fn read_sq_entry(ring_base: *const u8, index: u32) -> SubmissionEntry {
    let sq_base = unsafe { ring_base.add(super::ring_sq_offset()) };
    let entry_ptr =
        unsafe { sq_base.add((index as usize) * core::mem::size_of::<SubmissionEntry>()) };
    unsafe { core::ptr::read_volatile(entry_ptr as *const SubmissionEntry) }
}

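/// Writes one `CompletionEntry` into the completion queue (volatile, for the
/// same shared-mapping reasons as `read_sq_entry`).
///
/// # Safety
/// `ring_base` must point to a live ring mapping and `index` must be in
/// bounds for the completion queue.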
unsafe fn write_cq_entry(ring_base: *mut u8, index: u32, entry: CompletionEntry) {
    let cq_base = unsafe { ring_base.add(super::ring_cq_offset()) };
    let entry_ptr =
        unsafe { cq_base.add((index as usize) * core::mem::size_of::<CompletionEntry>()) };
    unsafe { core::ptr::write_volatile(entry_ptr as *mut CompletionEntry, entry) };
}

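/// Successful outcome of one submission: the primary `value` plus an
/// opcode-specific `extra` payload (the first message word for `IpcRecv`,
/// the polled bits for `NotifyPoll`).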
struct CqeResult {
    value: i64,
    extra: u64,
}

impl CqeResult {
    const fn ok() -> Self {
        Self { value: 0, extra: 0 }
    }

    const fn success(value: i64) -> Self {
        Self { value, extra: 0 }
    }

    const fn with_extra(value: i64, extra: u64) -> Self {
        Self { value, extra }
    }
}

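/// `CapCreate`: allocates an object of the tag given in `arg0` and installs
/// a capability for it at the CNode address in `cap_slot`; the new object's
/// physical address is returned as the result value.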
fn ring_cap_create(sqe: &SubmissionEntry, pid: Pid) -> Result<CqeResult, KernelError> {
    let address = sqe.cap_slot as u64;
    let tag = ObjectTag::try_from(sqe.arg0)?;

    let ptable = PROCESSES.lock();
    let (cnode_id, cnode_gen, depth, gv, gb) = cnode::cnode_coords(pid, &ptable)?;
    let mut pool = POOL.lock_after(&ptable);
    ops::create_via_cnode(&mut pool, cnode_id, cnode_gen, address, depth, gv, gb, tag)
        .map(|phys| CqeResult::success(phys.raw() as i64))
}

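/// `CapDerive`: derives the capability at CNode address `cap_slot` into the
/// destination address in `arg0`, applying the rights mask from `arg1`.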
fn ring_cap_derive(sqe: &SubmissionEntry, pid: Pid) -> Result<CqeResult, KernelError> {
    let src_addr = sqe.cap_slot as u64;
    let dest_addr = sqe.arg0;
    let rights_mask = Rights::from_bits(sqe.arg1 as u16);

    let ptable = PROCESSES.lock();
    let (cnode_id, cnode_gen, depth, gv, gb) = cnode::cnode_coords(pid, &ptable)?;
    let mut pool = POOL.lock_after(&ptable);
    ops::derive_via_cnode(
        &mut pool,
        cnode_id,
        cnode_gen,
        src_addr,
        dest_addr,
        depth,
        gv,
        gb,
        rights_mask,
    )
    .map(|()| CqeResult::ok())
}

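/// `IpcSend`: non-blocking endpoint send. If a receiver is already blocked on
/// the endpoint, the message is handed off directly, the receiver is woken,
/// and its effective priority is boosted to the sender's. With no waiting
/// receiver the submission completes with `WouldBlock` rather than blocking
/// the submitting process.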
fn ring_ipc_send(sqe: &SubmissionEntry, pid: Pid) -> Result<CqeResult, KernelError> {
    let address = sqe.cap_slot as u64;
    let msg = IpcMessage::from_regs([sqe.arg0, sqe.arg1, sqe.arg2, 0, 0, 0]);

    let mut ptable = PROCESSES.lock();
    let cap = {
        let pool = POOL.lock_after(&ptable);
        cnode::resolve_caller_validate(
            pid,
            address,
            ObjectTag::Endpoint,
            Rights::WRITE,
            &ptable,
            &pool,
        )?
    };

    ptable.exec_mut(pid).unwrap().ipc_message = msg;

    let recv_pid = {
        let mut pool = POOL.lock_after(&ptable);
        let ep = pool.write_as::<lancer_core::object_layout::EndpointObject>(
            cap.phys(),
            cap.generation(),
        )?;
        let mut receivers = endpoint::load_receivers(ep);
        let dequeued = endpoint::dequeue_genuine_receiver(&mut receivers, &mut ptable);
        if let Some(ref br) = dequeued {
            endpoint::store_holder(ep, Some(br.pid()));
        }
        endpoint::store_receivers(ep, &receivers);
        dequeued
    };

    match recv_pid {
        Some(blocked_recv) => {
            let recv_pid_val = blocked_recv.pid();
            let sender_prio = ptable[pid].effective_priority();

            ptable.unblock_and_enqueue(recv_pid_val, blocked_recv)?;
            let recv_exec = ptable.exec_mut(recv_pid_val).unwrap();
            recv_exec.ipc_message = msg;
            recv_exec.ipc_badge = pid.raw() as u64;
            message::inject_into_context(&mut recv_exec.saved_context, &msg);
            recv_exec.saved_context.rax = pid.raw() as u64;
            recv_exec.seal_context();

            crate::sched::boost_effective(&mut ptable, recv_pid_val, sender_prio);

            Ok(CqeResult::ok())
        }
        None => Err(KernelError::WouldBlock),
    }
}

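/// `IpcRecv`: non-blocking endpoint receive. If a sender is queued, its
/// message is delivered immediately; the CQE carries the sender's PID as the
/// result value and the first message word in `extra`. With no queued sender
/// the submission completes with `WouldBlock`.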
fn ring_ipc_recv(sqe: &SubmissionEntry, pid: Pid) -> Result<CqeResult, KernelError> {
    let address = sqe.cap_slot as u64;

    let mut ptable = PROCESSES.lock();
    let cap = {
        let pool = POOL.lock_after(&ptable);
        cnode::resolve_caller_validate(
            pid,
            address,
            ObjectTag::Endpoint,
            Rights::READ,
            &ptable,
            &pool,
        )?
    };

    let sender_pid = {
        let mut pool = POOL.lock_after(&ptable);
        let ep = pool.write_as::<lancer_core::object_layout::EndpointObject>(
            cap.phys(),
            cap.generation(),
        )?;
        let mut senders = endpoint::load_senders(ep);
        let dequeued = endpoint::dequeue(&mut senders, &mut ptable);
        if dequeued.is_some() {
            endpoint::store_holder(ep, Some(pid));
        }
        endpoint::store_senders(ep, &senders);
        dequeued
    };

    match sender_pid {
        Some(blocked_sender) => {
            let sender_pid = blocked_sender.pid();
            let sender_msg = ptable.exec(sender_pid).unwrap().ipc_message;
            let recv_exec = ptable.exec_mut(pid).unwrap();
            recv_exec.ipc_message = sender_msg;
            recv_exec.ipc_badge = sender_pid.raw() as u64;

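            // A sender blocked in a call expects a reply: park it on the
            // endpoint's receiver queue and record it as this process's
            // reply target. A plain sender is simply woken and rescheduled.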
            match ptable[sender_pid].blocked_reason() {
                Some(BlockedReason::Calling(_, _)) => {
                    let mut pool = POOL.lock_after(&ptable);
                    match pool.write_as::<lancer_core::object_layout::EndpointObject>(
                        cap.phys(),
                        cap.generation(),
                    ) {
                        Ok(ep) => {
                            let mut receivers = endpoint::load_receivers(ep);
                            match endpoint::enqueue(&mut receivers, blocked_sender, &mut ptable) {
                                Ok(()) => {
                                    endpoint::store_receivers(ep, &receivers);
                                    ptable.exec_mut(pid).unwrap().reply_target = Some(sender_pid);
                                }
                                Err(_) => {
                                    endpoint::store_receivers(ep, &receivers);
                                    let sender_exec = ptable.exec_mut(sender_pid).unwrap();
                                    sender_exec.saved_context.rax =
                                        crate::error::KernelError::ResourceExhausted.to_errno()
                                            as u64;
                                    sender_exec.seal_context();
                                    {
                                        let proof = ptable[sender_pid].blocked_proof();
                                        let r = ptable.unblock_and_enqueue(sender_pid, proof);
                                        debug_assert!(r.is_ok());
                                    }
                                }
                            }
                        }
                        Err(e) => {
                            let sender_exec = ptable.exec_mut(sender_pid).unwrap();
                            sender_exec.saved_context.rax = e.to_errno() as u64;
                            sender_exec.seal_context();
                            {
                                let r = ptable.unblock_and_enqueue(sender_pid, blocked_sender);
                                debug_assert!(r.is_ok());
                            }
                        }
                    }
                }
                _ => {
                    ptable.unblock_and_enqueue(sender_pid, blocked_sender)?;
                }
            }

            let sender_prio = ptable[sender_pid].effective_priority();
            crate::sched::reset_effective(&mut ptable, pid);
            crate::sched::boost_effective(&mut ptable, pid, sender_prio);

            Ok(CqeResult::with_extra(
                sender_pid.raw() as i64,
                sender_msg.regs[0],
            ))
        }
        None => Err(KernelError::WouldBlock),
    }
}

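/// `NotifySignal`: posts the bits in `arg0` to the notification object via
/// `notification::do_signal`.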
fn ring_notify_signal(sqe: &SubmissionEntry, pid: Pid) -> Result<CqeResult, KernelError> {
    let address = sqe.cap_slot as u64;
    let bits = sqe.arg0;

    let mut ptable = PROCESSES.lock();
    let cap = {
        let pool = POOL.lock_after(&ptable);
        cnode::resolve_caller_validate(
            pid,
            address,
            ObjectTag::Notification,
            Rights::WRITE,
            &ptable,
            &pool,
        )?
    };

    notification::do_signal(&cap, bits, &mut ptable).map(|_| CqeResult::ok())
}

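/// `NotifyPoll`: non-blocking read of the notification object; the polled
/// bits are returned in the CQE's `extra` field.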
fn ring_notify_poll(sqe: &SubmissionEntry, pid: Pid) -> Result<CqeResult, KernelError> {
    let address = sqe.cap_slot as u64;

    let ptable = PROCESSES.lock();
    let cap = {
        let pool = POOL.lock_after(&ptable);
        cnode::resolve_caller_validate(
            pid,
            address,
            ObjectTag::Notification,
            Rights::READ,
            &ptable,
            &pool,
        )?
    };

    drop(ptable);
    notification::do_poll(&cap).map(|val| CqeResult::with_extra(0, val))
}

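/// Dispatches one submission to its opcode handler and folds the outcome into
/// a `CompletionEntry`. Unknown opcodes and handler errors are encoded via
/// `KernelError::to_errno`; `user_data` is echoed back unchanged so userspace
/// can match completions to submissions.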
fn process_submission(sqe: &SubmissionEntry, pid: Pid) -> CompletionEntry {
    let result = match RingOpcode::from_u8(sqe.opcode) {
        Some(RingOpcode::Nop) => Ok(CqeResult::ok()),
        Some(RingOpcode::CapCreate) => ring_cap_create(sqe, pid),
        Some(RingOpcode::CapDerive) => ring_cap_derive(sqe, pid),
        Some(RingOpcode::IpcSend) => ring_ipc_send(sqe, pid),
        Some(RingOpcode::IpcRecv) => ring_ipc_recv(sqe, pid),
        Some(RingOpcode::NotifySignal) => ring_notify_signal(sqe, pid),
        Some(RingOpcode::NotifyPoll) => ring_notify_poll(sqe, pid),
        None => Err(KernelError::InvalidParameter),
    };

    match result {
        Ok(cqe) => CompletionEntry {
            result: cqe.value,
            user_data: sqe.user_data as u64,
            extra: cqe.extra,
        },
        Err(e) => CompletionEntry {
            result: e.to_errno(),
            user_data: sqe.user_data as u64,
            extra: 0,
        },
    }
}

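/// Ring syscall entry point: validates the ring's physical placement,
/// snapshots the user-written indices, drains up to `MAX_RING_BATCH`
/// submissions (further bounded by free completion-queue space), writes one
/// completion per submission, and publishes the new `sq_head`/`cq_tail`.
/// Returns the number of submissions processed; `_min_complete` is currently
/// unused.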
pub fn ring_enter(
    ring_phys_base: x86_64::PhysAddr,
    pid: Pid,
    _min_complete: u32,
) -> Result<i64, KernelError> {
    if !ring_phys_base.as_u64().is_multiple_of(4096) {
        return Err(KernelError::InvalidAddress);
    }

    let max_phys = (crate::mem::phys::BitmapFrameAllocator::total_frames() as u64) * 4096;
    let ring_end = ring_phys_base
        .as_u64()
        .checked_add(super::ring_total_size() as u64)
        .ok_or(KernelError::InvalidAddress)?;
    if ring_end > max_phys {
        return Err(KernelError::InvalidAddress);
    }

    let hhdm = addr::hhdm_offset();
    let ring_virt = (ring_phys_base.as_u64() + hhdm) as *mut u8;

    let mut ptable = PROCESSES.lock();
    let exec = ptable.exec_mut(pid).ok_or(KernelError::InvalidObject)?;

    let sq_head = exec.ring_sq_head;
    let cq_tail = exec.ring_cq_tail;
    let user_snap = unsafe { snapshot_user_fields(ring_virt) };

    let pending = user_snap.sq_tail.distance(sq_head);
    if pending > MAX_SQ_ENTRIES {
        return Err(KernelError::InvalidParameter);
    }

    let cq_used = cq_tail.distance(user_snap.cq_head);
    let cq_available = MAX_CQ_ENTRIES.saturating_sub(cq_used);
    let to_process = pending.min(cq_available).min(MAX_RING_BATCH);

    if to_process == 0 {
        return Ok(0);
    }
    drop(ptable);

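    // The process table is released for the batch itself: every opcode
    // handler re-acquires PROCESSES (and POOL) on its own.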
    let (completed, cq_write) = (0..to_process).fold((0u32, cq_tail), |(done, cq_w), i| {
        let sq_slot = sq_head.advance(i).slot(MAX_SQ_ENTRIES);
        let sqe = unsafe { read_sq_entry(ring_virt, sq_slot) };
        let cqe = process_submission(&sqe, pid);

        let cq_slot = cq_w.slot(MAX_CQ_ENTRIES);
        unsafe { write_cq_entry(ring_virt, cq_slot, cqe) };
        (done + 1, cq_w.advance(1))
    });

    let new_sq_head = sq_head.advance(to_process);
    unsafe { commit_header(ring_virt, new_sq_head, cq_write) };

    let mut ptable = PROCESSES.lock();
    if let Some(exec) = ptable.exec_mut(pid) {
        exec.ring_sq_head = new_sq_head;
        exec.ring_cq_tail = cq_write;
    }

    Ok(completed as i64)
}