1// SPDX-License-Identifier: GPL-2.0
2
3// Copyright (C) 2025 Google LLC.
4
5use kernel::{
6 list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
7 prelude::*,
8 seq_file::SeqFile,
9 seq_print,
10 sync::lock::{spinlock::SpinLockBackend, Guard},
11 sync::{Arc, LockedBy, SpinLock},
12};
13
14use crate::{
15 defs::*,
16 error::BinderError,
17 process::{NodeRefInfo, Process, ProcessInner},
18 thread::Thread,
19 transaction::Transaction,
20 BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
21};
22
23use core::mem;
24
25mod wrapper;
26pub(crate) use self::wrapper::CritIncrWrapper;
27
28#[derive(Debug)]
29pub(crate) struct CouldNotDeliverCriticalIncrement;
30
31/// Keeps track of how this node is scheduled.
32///
33/// There are two ways to schedule a node to a work list. Just schedule the node itself, or
34/// allocate a wrapper that references the node and schedule the wrapper. These wrappers exist to
35/// make it possible to "move" a node from one list to another - when `do_work` is called directly
36/// on the `Node`, then it's a no-op if there's also a pending wrapper.
37///
38/// Wrappers are generally only needed for zero-to-one refcount increments, and there are two cases
39/// of this: weak increments and strong increments. We call such increments "critical" because it
40/// is critical that they are delivered to the thread doing the increment. Some examples:
41///
42/// * One thread makes a zero-to-one strong increment, and another thread makes a zero-to-one weak
43/// increment. Delivering the node to the thread doing the weak increment is wrong, since the
44/// thread doing the strong increment may have ended a long time ago when the command is actually
45/// processed by userspace.
46///
47/// * We have a weak reference and are about to drop it on one thread. But then another thread does
48/// a zero-to-one strong increment. If the strong increment gets sent to the thread that was
49/// about to drop the weak reference, then the strong increment could be processed after the
50/// other thread has already exited, which would be too late.
51///
52/// Note that trying to create a `ListArc` to the node can succeed even if `has_pushed_node` is
53/// set. This is because another thread might just have popped the node from a todo list, but not
54/// yet called `do_work`. However, if `has_pushed_node` is false, then creating a `ListArc` should
55/// always succeed.
56///
57/// Like the other fields in `NodeInner`, the delivery state is protected by the process lock.
58struct DeliveryState {
59 /// Is the `Node` currently scheduled?
60 has_pushed_node: bool,
61
62 /// Is a wrapper currently scheduled?
63 ///
64 /// The wrapper is used only for strong zero2one increments.
65 has_pushed_wrapper: bool,
66
67 /// Is the currently scheduled `Node` scheduled due to a weak zero2one increment?
68 ///
69 /// Weak zero2one operations are always scheduled using the `Node`.
70 has_weak_zero2one: bool,
71
72 /// Is the currently scheduled wrapper/`Node` scheduled due to a strong zero2one increment?
73 ///
74 /// If `has_pushed_wrapper` is set, then the strong zero2one increment was scheduled using the
75 /// wrapper. Otherwise, `has_pushed_node` must be set and it was scheduled using the `Node`.
76 has_strong_zero2one: bool,
77}
78
79impl DeliveryState {
80 fn should_normal_push(&self) -> bool {
81 !self.has_pushed_node && !self.has_pushed_wrapper
82 }
83
84 fn did_normal_push(&mut self) {
85 assert!(self.should_normal_push());
86 self.has_pushed_node = true;
87 }
88
89 fn should_push_weak_zero2one(&self) -> bool {
90 !self.has_weak_zero2one && !self.has_strong_zero2one
91 }
92
93 fn can_push_weak_zero2one_normally(&self) -> bool {
94 !self.has_pushed_node
95 }
96
97 fn did_push_weak_zero2one(&mut self) {
98 assert!(self.should_push_weak_zero2one());
99 assert!(self.can_push_weak_zero2one_normally());
100 self.has_pushed_node = true;
101 self.has_weak_zero2one = true;
102 }
103
104 fn should_push_strong_zero2one(&self) -> bool {
105 !self.has_strong_zero2one
106 }
107
108 fn can_push_strong_zero2one_normally(&self) -> bool {
109 !self.has_pushed_node
110 }
111
112 fn did_push_strong_zero2one(&mut self) {
113 assert!(self.should_push_strong_zero2one());
114 assert!(self.can_push_strong_zero2one_normally());
115 self.has_pushed_node = true;
116 self.has_strong_zero2one = true;
117 }
118
119 fn did_push_strong_zero2one_wrapper(&mut self) {
120 assert!(self.should_push_strong_zero2one());
121 assert!(!self.can_push_strong_zero2one_normally());
122 self.has_pushed_wrapper = true;
123 self.has_strong_zero2one = true;
124 }
125}
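// Illustrative sketch (not driver code) of how a caller is expected to combine the predicates
// above when performing a strong zero-to-one increment; `schedule_node` and `schedule_wrapper`
// are hypothetical stand-ins for pushing the resulting work item to the process todo list:
//
//     if delivery_state.should_push_strong_zero2one() {
//         if delivery_state.can_push_strong_zero2one_normally() {
//             // The `Node` itself is not scheduled yet, so it can carry the increment.
//             delivery_state.did_push_strong_zero2one();
//             schedule_node();
//         } else {
//             // The `Node` is already scheduled (e.g. for a weak zero2one increment), so a
//             // wrapper is needed to guarantee delivery of the strong increment.
//             delivery_state.did_push_strong_zero2one_wrapper();
//             schedule_wrapper();
//         }
//     }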
126
127struct CountState {
128 /// The reference count.
129 count: usize,
130 /// Whether the process that owns this node thinks that we hold a refcount on it. (Note that
131 /// even if count is greater than one, we only increment it once in the owning process.)
132 has_count: bool,
133}
134
135impl CountState {
136 fn new() -> Self {
137 Self {
138 count: 0,
139 has_count: false,
140 }
141 }
142}
143
144struct NodeInner {
145 /// Strong refcounts held on this node by `NodeRef` objects.
146 strong: CountState,
147 /// Weak refcounts held on this node by `NodeRef` objects.
148 weak: CountState,
149 delivery_state: DeliveryState,
150 /// The binder driver guarantees that oneway transactions sent to the same node are serialized,
151 /// that is, userspace will not be given the next one until it has finished processing the
152 /// previous oneway transaction. This is done to avoid the case where two oneway transactions
153 /// arrive in opposite order from the order in which they were sent. (E.g., they could be
154 /// delivered to two different threads, which could appear as if they were sent in opposite
155 /// order.)
156 ///
157 /// To fix that, we store pending oneway transactions in a separate list in the node, and don't
158 /// deliver the next oneway transaction until userspace signals that it has finished processing
159 /// the previous oneway transaction by calling the `BC_FREE_BUFFER` ioctl.
160 oneway_todo: List<DTRWrap<Transaction>>,
161 /// Keeps track of whether this node has a pending oneway transaction.
162 ///
163 /// When this is true, incoming oneway transactions are stored in `oneway_todo`, instead of
164 /// being delivered directly to the process.
165 has_oneway_transaction: bool,
166 /// List of processes to deliver a notification to when this node is destroyed (usually due to
167 /// the process dying).
168 death_list: List<DTRWrap<NodeDeath>, 1>,
169 /// List of processes to deliver freeze notifications to.
170 freeze_list: KVVec<Arc<Process>>,
171 /// The number of active BR_INCREFS or BR_ACQUIRE operations. (This should be at most two.)
172 ///
173 /// If this is non-zero, then we postpone any BR_RELEASE or BR_DECREFS notifications until the
174 /// active operations have ended. This avoids the situation where an increment and a decrement get
175 /// reordered from userspace's perspective.
176 active_inc_refs: u8,
177 /// List of `NodeRefInfo` objects that reference this node.
178 refs: List<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
179}
180
181#[pin_data]
182pub(crate) struct Node {
183 pub(crate) debug_id: usize,
184 ptr: u64,
185 pub(crate) cookie: u64,
186 pub(crate) flags: u32,
187 pub(crate) owner: Arc<Process>,
188 inner: LockedBy<NodeInner, ProcessInner>,
189 #[pin]
190 links_track: AtomicTracker,
191}
192
193kernel::list::impl_list_arc_safe! {
194 impl ListArcSafe<0> for Node {
195 tracked_by links_track: AtomicTracker;
196 }
197}
198
199// Make `oneway_todo` work.
200kernel::list::impl_list_item! {
201 impl ListItem<0> for DTRWrap<Transaction> {
202 using ListLinks { self.links.inner };
203 }
204}
205
206impl Node {
207 pub(crate) fn new(
208 ptr: u64,
209 cookie: u64,
210 flags: u32,
211 owner: Arc<Process>,
212 ) -> impl PinInit<Self> {
213 pin_init!(Self {
214 inner: LockedBy::new(
215 &owner.inner,
216 NodeInner {
217 strong: CountState::new(),
218 weak: CountState::new(),
219 delivery_state: DeliveryState {
220 has_pushed_node: false,
221 has_pushed_wrapper: false,
222 has_weak_zero2one: false,
223 has_strong_zero2one: false,
224 },
225 death_list: List::new(),
226 oneway_todo: List::new(),
227 freeze_list: KVVec::new(),
228 has_oneway_transaction: false,
229 active_inc_refs: 0,
230 refs: List::new(),
231 },
232 ),
233 debug_id: super::next_debug_id(),
234 ptr,
235 cookie,
236 flags,
237 owner,
238 links_track <- AtomicTracker::new(),
239 })
240 }
241
242 pub(crate) fn has_oneway_transaction(&self, owner_inner: &mut ProcessInner) -> bool {
243 let inner = self.inner.access_mut(owner_inner);
244 inner.has_oneway_transaction
245 }
246
247 #[inline(never)]
248 pub(crate) fn full_debug_print(
249 &self,
250 m: &SeqFile,
251 owner_inner: &mut ProcessInner,
252 ) -> Result<()> {
253 let inner = self.inner.access_mut(owner_inner);
254 seq_print!(
255 m,
256 " node {}: u{:016x} c{:016x} hs {} hw {} cs {} cw {}",
257 self.debug_id,
258 self.ptr,
259 self.cookie,
260 inner.strong.has_count,
261 inner.weak.has_count,
262 inner.strong.count,
263 inner.weak.count,
264 );
265 if !inner.refs.is_empty() {
266 seq_print!(m, " proc");
267 for node_ref in &inner.refs {
268 seq_print!(m, " {}", node_ref.process.task.pid());
269 }
270 }
271 seq_print!(m, "\n");
272 for t in &inner.oneway_todo {
273 t.debug_print_inner(m, " pending async transaction ");
274 }
275 Ok(())
276 }
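// With the format string above, a node with debug_id 4, two strong and one weak count, both
// `has_count` flags set, and `NodeRefInfo` entries from pids 1234 and 5678 would print roughly as
// follows (values are illustrative only):
//
//       node 4: u00000000deadbeef c00000000000beef hs true hw true cs 2 cw 1 proc 1234 5678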
277
278 /// Insert the `NodeRef` into this `refs` list.
279 ///
280 /// # Safety
281 ///
282 /// It must be the case that `info.node_ref.node` is this node.
283 pub(crate) unsafe fn insert_node_info(
284 &self,
285 info: ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
286 ) {
287 self.inner
288 .access_mut(&mut self.owner.inner.lock())
289 .refs
290 .push_front(info);
291 }
292
293 /// Remove the `NodeRef` from this `refs` list.
294 ///
295 /// # Safety
296 ///
297 /// It must be the case that `info.node_ref.node` is this node.
298 pub(crate) unsafe fn remove_node_info(
299 &self,
300 info: &NodeRefInfo,
301 ) -> Option<ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>> {
302 // SAFETY: We always insert `NodeRefInfo` objects into the `refs` list of the node that it
303 // references in `info.node_ref.node`. That is this node, so `info` cannot possibly be in
304 // the `refs` list of another node.
305 unsafe {
306 self.inner
307 .access_mut(&mut self.owner.inner.lock())
308 .refs
309 .remove(info)
310 }
311 }
312
313 /// An id that is unique across all binder nodes on the system. Used as the key in the
314 /// `by_node` map.
315 pub(crate) fn global_id(&self) -> usize {
316 self as *const Node as usize
317 }
318
319 pub(crate) fn get_id(&self) -> (u64, u64) {
320 (self.ptr, self.cookie)
321 }
322
323 pub(crate) fn add_death(
324 &self,
325 death: ListArc<DTRWrap<NodeDeath>, 1>,
326 guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
327 ) {
328 self.inner.access_mut(guard).death_list.push_back(death);
329 }
330
331 pub(crate) fn inc_ref_done_locked(
332 self: &DArc<Node>,
333 _strong: bool,
334 owner_inner: &mut ProcessInner,
335 ) -> Option<DLArc<Node>> {
336 let inner = self.inner.access_mut(owner_inner);
337 if inner.active_inc_refs == 0 {
338 pr_err!("inc_ref_done called when no active inc_refs");
339 return None;
340 }
341
342 inner.active_inc_refs -= 1;
343 if inner.active_inc_refs == 0 {
344 // Having active inc_refs can inhibit dropping of ref-counts. Calculate whether we
345 // would send a refcount decrement, and if so, tell the caller to schedule us.
346 let strong = inner.strong.count > 0;
347 let has_strong = inner.strong.has_count;
348 let weak = strong || inner.weak.count > 0;
349 let has_weak = inner.weak.has_count;
350
351 let should_drop_weak = !weak && has_weak;
352 let should_drop_strong = !strong && has_strong;
353
354 // If we want to drop the ref-count again, tell the caller to schedule a work node for
355 // that.
356 let need_push = should_drop_weak || should_drop_strong;
357
358 if need_push && inner.delivery_state.should_normal_push() {
359 let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
360 inner.delivery_state.did_normal_push();
361 Some(list_arc)
362 } else {
363 None
364 }
365 } else {
366 None
367 }
368 }
369
370 pub(crate) fn update_refcount_locked(
371 self: &DArc<Node>,
372 inc: bool,
373 strong: bool,
374 count: usize,
375 owner_inner: &mut ProcessInner,
376 ) -> Option<DLArc<Node>> {
377 let is_dead = owner_inner.is_dead;
378 let inner = self.inner.access_mut(owner_inner);
379
380 // Get a reference to the state we'll update.
381 let state = if strong {
382 &mut inner.strong
383 } else {
384 &mut inner.weak
385 };
386
387 // Update the count and determine whether we need to push work.
388 let need_push = if inc {
389 state.count += count;
390 // TODO: This method shouldn't be used for zero-to-one increments.
391 !is_dead && !state.has_count
392 } else {
393 if state.count < count {
394 pr_err!("Failure: refcount underflow!");
395 return None;
396 }
397 state.count -= count;
398 !is_dead && state.count == 0 && state.has_count
399 };
400
401 if need_push && inner.delivery_state.should_normal_push() {
402 let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
403 inner.delivery_state.did_normal_push();
404 Some(list_arc)
405 } else {
406 None
407 }
408 }
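// A minimal caller sketch (hypothetical surrounding code; the real call sites live in the process
// and thread handling): the owner's lock is taken, the count is updated, and any returned
// `DLArc<Node>` must actually be queued so that userspace sees the change:
//
//     let mut owner_inner = node.owner.inner.lock();
//     if let Some(work) = node.update_refcount_locked(false, true, 1, &mut owner_inner) {
//         // Coerce to `DLArc<dyn DeliverToRead>` and push it to the owner's todo list.
//         let _ = owner_inner.push_work(work as _);
//     }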
409
410 pub(crate) fn incr_refcount_allow_zero2one(
411 self: &DArc<Self>,
412 strong: bool,
413 owner_inner: &mut ProcessInner,
414 ) -> Result<Option<DLArc<Node>>, CouldNotDeliverCriticalIncrement> {
415 let is_dead = owner_inner.is_dead;
416 let inner = self.inner.access_mut(owner_inner);
417
418 // Get a reference to the state we'll update.
419 let state = if strong {
420 &mut inner.strong
421 } else {
422 &mut inner.weak
423 };
424
425 // Update the count and determine whether we need to push work.
426 state.count += 1;
427 if is_dead || state.has_count {
428 return Ok(None);
429 }
430
431 // Userspace needs to be notified of this.
432 if !strong && inner.delivery_state.should_push_weak_zero2one() {
433 assert!(inner.delivery_state.can_push_weak_zero2one_normally());
434 let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
435 inner.delivery_state.did_push_weak_zero2one();
436 Ok(Some(list_arc))
437 } else if strong && inner.delivery_state.should_push_strong_zero2one() {
438 if inner.delivery_state.can_push_strong_zero2one_normally() {
439 let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
440 inner.delivery_state.did_push_strong_zero2one();
441 Ok(Some(list_arc))
442 } else {
443 state.count -= 1;
444 Err(CouldNotDeliverCriticalIncrement)
445 }
446 } else {
447 // Work is already pushed, and we don't need to push again.
448 Ok(None)
449 }
450 }
451
452 pub(crate) fn incr_refcount_allow_zero2one_with_wrapper(
453 self: &DArc<Self>,
454 strong: bool,
455 wrapper: CritIncrWrapper,
456 owner_inner: &mut ProcessInner,
457 ) -> Option<DLArc<dyn DeliverToRead>> {
458 match self.incr_refcount_allow_zero2one(strong, owner_inner) {
459 Ok(Some(node)) => Some(node as _),
460 Ok(None) => None,
461 Err(CouldNotDeliverCriticalIncrement) => {
462 assert!(strong);
463 let inner = self.inner.access_mut(owner_inner);
464 inner.strong.count += 1;
465 inner.delivery_state.did_push_strong_zero2one_wrapper();
466 Some(wrapper.init(self.clone()))
467 }
468 }
469 }
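// Intended usage (a sketch only; the actual retry loop and the `CritIncrWrapper` allocation live
// elsewhere in the driver): try the plain variant first, and only when it reports
// `CouldNotDeliverCriticalIncrement` allocate a wrapper outside the lock and retry with the
// `_with_wrapper` variant:
//
//     let mut inner = node.owner.inner.lock();
//     match node.incr_refcount_allow_zero2one(true, &mut inner) {
//         Ok(work) => { /* queue `work` if it is `Some` */ }
//         Err(CouldNotDeliverCriticalIncrement) => {
//             drop(inner);
//             let wrapper = /* allocate a CritIncrWrapper; this may sleep */;
//             let mut inner = node.owner.inner.lock();
//             let work = node.incr_refcount_allow_zero2one_with_wrapper(true, wrapper, &mut inner);
//             /* queue `work` if it is `Some` */
//         }
//     }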
470
471 pub(crate) fn update_refcount(self: &DArc<Self>, inc: bool, count: usize, strong: bool) {
472 self.owner
473 .inner
474 .lock()
475 .update_node_refcount(self, inc, strong, count, None);
476 }
477
478 pub(crate) fn populate_counts(
479 &self,
480 out: &mut BinderNodeInfoForRef,
481 guard: &Guard<'_, ProcessInner, SpinLockBackend>,
482 ) {
483 let inner = self.inner.access(guard);
484 out.strong_count = inner.strong.count as _;
485 out.weak_count = inner.weak.count as _;
486 }
487
488 pub(crate) fn populate_debug_info(
489 &self,
490 out: &mut BinderNodeDebugInfo,
491 guard: &Guard<'_, ProcessInner, SpinLockBackend>,
492 ) {
493 out.ptr = self.ptr as _;
494 out.cookie = self.cookie as _;
495 let inner = self.inner.access(guard);
496 if inner.strong.has_count {
497 out.has_strong_ref = 1;
498 }
499 if inner.weak.has_count {
500 out.has_weak_ref = 1;
501 }
502 }
503
504 pub(crate) fn force_has_count(&self, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) {
505 let inner = self.inner.access_mut(guard);
506 inner.strong.has_count = true;
507 inner.weak.has_count = true;
508 }
509
510 fn write(&self, writer: &mut BinderReturnWriter<'_>, code: u32) -> Result {
511 writer.write_code(code)?;
512 writer.write_payload(&self.ptr)?;
513 writer.write_payload(&self.cookie)?;
514 Ok(())
515 }
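// `write` emits the command code followed by `ptr` and `cookie`, which is the payload userspace
// expects after BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS. Expressed as a Rust sketch of the
// equivalent C UAPI layout (an interpretation given here for illustration, not a definition used
// by this file):
//
//     #[repr(C)]
//     struct BinderPtrCookie {
//         ptr: u64,    // binder_uintptr_t
//         cookie: u64, // binder_uintptr_t
//     }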
516
517 pub(crate) fn submit_oneway(
518 &self,
519 transaction: DLArc<Transaction>,
520 guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
521 ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
522 if guard.is_dead {
523 return Err((BinderError::new_dead(), transaction));
524 }
525
526 let inner = self.inner.access_mut(guard);
527 if inner.has_oneway_transaction {
528 inner.oneway_todo.push_back(transaction);
529 } else {
530 inner.has_oneway_transaction = true;
531 guard.push_work(transaction)?;
532 }
533 Ok(())
534 }
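// End-to-end oneway serialization, as an illustrative sequence (not code from this file):
//
//     // First oneway transaction: `has_oneway_transaction` is false, so it is pushed directly.
//     let _ = node.submit_oneway(t1, &mut guard);
//     // Second oneway transaction: queued on `oneway_todo` instead of being delivered.
//     let _ = node.submit_oneway(t2, &mut guard);
//     drop(guard);
//     // Later: userspace finishes processing t1 and issues BC_FREE_BUFFER; freeing the buffer
//     // ends up calling `pending_oneway_finished`, which pops t2 from `oneway_todo` and
//     // delivers it.
//     node.pending_oneway_finished();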
535
536 pub(crate) fn release(&self) {
537 let mut guard = self.owner.inner.lock();
538 while let Some(work) = self.inner.access_mut(&mut guard).oneway_todo.pop_front() {
539 drop(guard);
540 work.into_arc().cancel();
541 guard = self.owner.inner.lock();
542 }
543
544 while let Some(death) = self.inner.access_mut(&mut guard).death_list.pop_front() {
545 drop(guard);
546 death.into_arc().set_dead();
547 guard = self.owner.inner.lock();
548 }
549 }
550
551 pub(crate) fn pending_oneway_finished(&self) {
552 let mut guard = self.owner.inner.lock();
553 if guard.is_dead {
554 // Cleanup will happen in `Process::deferred_release`.
555 return;
556 }
557
558 let inner = self.inner.access_mut(&mut guard);
559
560 let transaction = inner.oneway_todo.pop_front();
561 inner.has_oneway_transaction = transaction.is_some();
562 if let Some(transaction) = transaction {
563 match guard.push_work(transaction) {
564 Ok(()) => {}
565 Err((_err, work)) => {
566 // Process is dead.
567 // This shouldn't happen due to the `is_dead` check, but if it does, just drop
568 // the transaction and return.
569 drop(guard);
570 drop(work);
571 }
572 }
573 }
574 }
575
576 /// Finds an outdated transaction that the given transaction can replace.
577 ///
578 /// If one is found, it is removed from the list and returned.
579 pub(crate) fn take_outdated_transaction(
580 &self,
581 new: &Transaction,
582 guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
583 ) -> Option<DLArc<Transaction>> {
584 let inner = self.inner.access_mut(guard);
585 let mut cursor = inner.oneway_todo.cursor_front();
586 while let Some(next) = cursor.peek_next() {
587 if new.can_replace(&next) {
588 return Some(next.remove());
589 }
590 cursor.move_next();
591 }
592 None
593 }
594
595 /// This is split into a separate function since it's called by both `Node::do_work` and
596 /// `NodeWrapper::do_work`.
597 fn do_work_locked(
598 &self,
599 writer: &mut BinderReturnWriter<'_>,
600 mut guard: Guard<'_, ProcessInner, SpinLockBackend>,
601 ) -> Result<bool> {
602 let inner = self.inner.access_mut(&mut guard);
603 let strong = inner.strong.count > 0;
604 let has_strong = inner.strong.has_count;
605 let weak = strong || inner.weak.count > 0;
606 let has_weak = inner.weak.has_count;
607
608 if weak && !has_weak {
609 inner.weak.has_count = true;
610 inner.active_inc_refs += 1;
611 }
612
613 if strong && !has_strong {
614 inner.strong.has_count = true;
615 inner.active_inc_refs += 1;
616 }
617
618 let no_active_inc_refs = inner.active_inc_refs == 0;
619 let should_drop_weak = no_active_inc_refs && (!weak && has_weak);
620 let should_drop_strong = no_active_inc_refs && (!strong && has_strong);
621 if should_drop_weak {
622 inner.weak.has_count = false;
623 }
624 if should_drop_strong {
625 inner.strong.has_count = false;
626 }
627 if no_active_inc_refs && !weak {
628 // Remove the node if there are no references to it.
629 guard.remove_node(self.ptr);
630 }
631 drop(guard);
632
633 if weak && !has_weak {
634 self.write(writer, BR_INCREFS)?;
635 }
636 if strong && !has_strong {
637 self.write(writer, BR_ACQUIRE)?;
638 }
639 if should_drop_strong {
640 self.write(writer, BR_RELEASE)?;
641 }
642 if should_drop_weak {
643 self.write(writer, BR_DECREFS)?;
644 }
645
646 Ok(true)
647 }
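// A worked trace of the logic above for the case where the last strong count was just dropped
// while a weak count remains and `active_inc_refs` is zero:
//
//     // on entry:   strong = false, has_strong = true, weak = true, has_weak = true
//     // decisions:  should_drop_strong = true, should_drop_weak = false, node is kept (weak ref remains)
//     // commands:   BR_RELEASE only
//     // on exit:    strong.has_count = false, weak.has_count unchanged
//
// Commands are always emitted in the order BR_INCREFS, BR_ACQUIRE, BR_RELEASE, BR_DECREFS,
// matching the order of the `write` calls above.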
648
649 pub(crate) fn add_freeze_listener(
650 &self,
651 process: &Arc<Process>,
652 flags: kernel::alloc::Flags,
653 ) -> Result {
654 let mut vec_alloc = KVVec::<Arc<Process>>::new();
655 loop {
656 let mut guard = self.owner.inner.lock();
657 // Do not check `guard.is_dead` here. The `is_dead` flag that matters is that of the owner
658 // of the listener, not of the target.
659 let inner = self.inner.access_mut(&mut guard);
660 let len = inner.freeze_list.len();
661 if len >= inner.freeze_list.capacity() {
662 if len >= vec_alloc.capacity() {
663 drop(guard);
664 vec_alloc = KVVec::with_capacity((1 + len).next_power_of_two(), flags)?;
665 continue;
666 }
667 mem::swap(&mut inner.freeze_list, &mut vec_alloc);
668 for elem in vec_alloc.drain_all() {
669 inner.freeze_list.push_within_capacity(elem)?;
670 }
671 }
672 inner.freeze_list.push_within_capacity(process.clone())?;
673 return Ok(());
674 }
675 }
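// The loop above is an "allocate outside the lock" pattern: capacity growth (which may sleep)
// only happens after the spinlock is dropped, and the lock is retaken before the elements are
// moved into the larger buffer. In distilled form (hypothetical helper names, kept here only to
// show the shape of the loop):
//
//     loop {
//         let guard = lock();
//         if has_capacity(&guard) {
//             push_within_capacity(guard, item);   // cannot fail: capacity was just checked
//             return Ok(());
//         }
//         if spare_too_small(&guard) {
//             drop(guard);                         // never allocate under the spinlock
//             grow_spare()?;                       // may sleep
//             continue;
//         }
//         swap_in_spare_and_copy(guard);           // still under the lock, no allocation
//     }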
676
677 pub(crate) fn remove_freeze_listener(&self, p: &Arc<Process>) {
678 let _unused_capacity;
679 let mut guard = self.owner.inner.lock();
680 let inner = self.inner.access_mut(&mut guard);
681 let len = inner.freeze_list.len();
682 inner.freeze_list.retain(|proc| !Arc::ptr_eq(proc, p));
683 if len == inner.freeze_list.len() {
684 pr_warn!(
685 "Could not remove freeze listener for {}\n",
686 p.pid_in_current_ns()
687 );
688 }
689 if inner.freeze_list.is_empty() {
690 _unused_capacity = mem::take(&mut inner.freeze_list);
691 }
692 }
693
694 pub(crate) fn freeze_list<'a>(&'a self, guard: &'a ProcessInner) -> &'a [Arc<Process>] {
695 &self.inner.access(guard).freeze_list
696 }
697}
698
699impl DeliverToRead for Node {
700 fn do_work(
701 self: DArc<Self>,
702 _thread: &Thread,
703 writer: &mut BinderReturnWriter<'_>,
704 ) -> Result<bool> {
705 let mut owner_inner = self.owner.inner.lock();
706 let inner = self.inner.access_mut(&mut owner_inner);
707
708 assert!(inner.delivery_state.has_pushed_node);
709 if inner.delivery_state.has_pushed_wrapper {
710 // If the wrapper is scheduled, then we are either a normal push or weak zero2one
711 // increment, and the wrapper is a strong zero2one increment, so the wrapper always
712 // takes precedence over us.
713 assert!(inner.delivery_state.has_strong_zero2one);
714 inner.delivery_state.has_pushed_node = false;
715 inner.delivery_state.has_weak_zero2one = false;
716 return Ok(true);
717 }
718
719 inner.delivery_state.has_pushed_node = false;
720 inner.delivery_state.has_weak_zero2one = false;
721 inner.delivery_state.has_strong_zero2one = false;
722
723 self.do_work_locked(writer, owner_inner)
724 }
725
726 fn cancel(self: DArc<Self>) {}
727
728 fn should_sync_wakeup(&self) -> bool {
729 false
730 }
731
732 #[inline(never)]
733 fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
734 seq_print!(
735 m,
736 "{}node work {}: u{:016x} c{:016x}\n",
737 prefix,
738 self.debug_id,
739 self.ptr,
740 self.cookie,
741 );
742 Ok(())
743 }
744}
745
746/// Represents something that holds one or more ref-counts to a `Node`.
747///
748/// Whenever process A holds a refcount to a node owned by a different process B, then process A
749/// will store a `NodeRef` that refers to the `Node` in process B. When process A releases the
750/// refcount, we destroy the `NodeRef`, which decrements the node's ref-count in process B.
751///
752/// This type is also used for some other cases. For example, a transaction allocation holds a
753/// refcount on the target node, and this is implemented by storing a `NodeRef` in the allocation
754/// so that the destructor of the allocation will drop a refcount of the `Node`.
755pub(crate) struct NodeRef {
756 pub(crate) node: DArc<Node>,
757 /// How many times does this NodeRef hold a refcount on the Node?
758 strong_node_count: usize,
759 weak_node_count: usize,
760 /// How many times does userspace hold a refcount on this NodeRef?
761 strong_count: usize,
762 weak_count: usize,
763}
764
765impl NodeRef {
766 pub(crate) fn new(node: DArc<Node>, strong_count: usize, weak_count: usize) -> Self {
767 Self {
768 node,
769 strong_node_count: strong_count,
770 weak_node_count: weak_count,
771 strong_count,
772 weak_count,
773 }
774 }
775
776 pub(crate) fn absorb(&mut self, mut other: Self) {
777 assert!(
778 Arc::ptr_eq(&self.node, &other.node),
779 "absorb called with differing nodes"
780 );
781 self.strong_node_count += other.strong_node_count;
782 self.weak_node_count += other.weak_node_count;
783 self.strong_count += other.strong_count;
784 self.weak_count += other.weak_count;
785 other.strong_count = 0;
786 other.weak_count = 0;
787 other.strong_node_count = 0;
788 other.weak_node_count = 0;
789
790 if self.strong_node_count >= 2 || self.weak_node_count >= 2 {
791 let mut guard = self.node.owner.inner.lock();
792 let inner = self.node.inner.access_mut(&mut guard);
793
794 if self.strong_node_count >= 2 {
795 inner.strong.count -= self.strong_node_count - 1;
796 self.strong_node_count = 1;
797 assert_ne!(inner.strong.count, 0);
798 }
799 if self.weak_node_count >= 2 {
800 inner.weak.count -= self.weak_node_count - 1;
801 self.weak_node_count = 1;
802 assert_ne!(inner.weak.count, 0);
803 }
804 }
805 }
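// Worked example of the count collapsing above: absorbing a `NodeRef` with strong_node_count == 1
// into another `NodeRef` with strong_node_count == 1 momentarily yields 2, but only one count
// needs to be held against the `Node`, so one is handed back:
//
//     // before: self.strong_node_count == 1, other.strong_node_count == 1, node strong.count == N
//     self.absorb(other);
//     // after:  self.strong_node_count == 1, node strong.count == N - 1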
806
807 pub(crate) fn get_count(&self) -> (usize, usize) {
808 (self.strong_count, self.weak_count)
809 }
810
811 pub(crate) fn clone(&self, strong: bool) -> Result<NodeRef> {
812 if strong && self.strong_count == 0 {
813 return Err(EINVAL);
814 }
815 Ok(self
816 .node
817 .owner
818 .inner
819 .lock()
820 .new_node_ref(self.node.clone(), strong, None))
821 }
822
823 /// Updates (increments or decrements) the number of references held against the node. If the
824 /// count being updated transitions from 0 to 1 or from 1 to 0, the node is notified by having
825 /// its `update_refcount` function called.
826 ///
827 /// Returns whether `self` should be removed (when both counts are zero).
828 pub(crate) fn update(&mut self, inc: bool, strong: bool) -> bool {
829 if strong && self.strong_count == 0 {
830 return false;
831 }
832 let (count, node_count, other_count) = if strong {
833 (
834 &mut self.strong_count,
835 &mut self.strong_node_count,
836 self.weak_count,
837 )
838 } else {
839 (
840 &mut self.weak_count,
841 &mut self.weak_node_count,
842 self.strong_count,
843 )
844 };
845 if inc {
846 if *count == 0 {
847 *node_count = 1;
848 self.node.update_refcount(true, 1, strong);
849 }
850 *count += 1;
851 } else {
852 if *count == 0 {
853 pr_warn!(
854 "pid {} performed invalid decrement on ref\n",
855 kernel::current!().pid()
856 );
857 return false;
858 }
859 *count -= 1;
860 if *count == 0 {
861 self.node.update_refcount(false, *node_count, strong);
862 *node_count = 0;
863 return other_count == 0;
864 }
865 }
866 false
867 }
868}
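// How `update` maps onto the userspace refcount commands (a sketch of the expected call pattern;
// the actual command dispatch lives outside this file):
//
//     // BC_ACQUIRE on a handle   => node_ref.update(true, true)
//     //   the first strong acquire (strong_count 0 -> 1) also takes one count on the Node itself
//     // BC_RELEASE on a handle   => node_ref.update(false, true)
//     //   the last strong release (strong_count 1 -> 0) gives that count back via
//     //   `update_refcount`, and `update` returns true once both counts hit zero so the caller
//     //   knows to drop this NodeRef entirely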
869
870impl Drop for NodeRef {
871 // This destructor is called conditionally from `Allocation::drop`. That branch is often
872 // mispredicted. Inlining this method call reduces the cost of those branch mispredictions.
873 #[inline(always)]
874 fn drop(&mut self) {
875 if self.strong_node_count > 0 {
876 self.node
877 .update_refcount(false, self.strong_node_count, true);
878 }
879 if self.weak_node_count > 0 {
880 self.node
881 .update_refcount(false, self.weak_node_count, false);
882 }
883 }
884}
885
886struct NodeDeathInner {
887 dead: bool,
888 cleared: bool,
889 notification_done: bool,
890 /// Indicates whether the normal flow was interrupted by removing the handle. In this case, we
891 /// need to behave as if the death notification didn't exist (i.e., we don't deliver anything to
892 /// the user).
893 aborted: bool,
894}
895
896/// Used to deliver notifications when a process dies.
897///
898/// A process can request to be notified when a process dies using `BC_REQUEST_DEATH_NOTIFICATION`.
899/// This will make the driver send a `BR_DEAD_BINDER` to userspace when the process dies (or
900/// immediately if it is already dead). Userspace is supposed to respond with `BC_DEAD_BINDER_DONE`
901/// once it has processed the notification.
902///
903/// Userspace can unregister from death notifications using the `BC_CLEAR_DEATH_NOTIFICATION`
904/// command. In this case, the kernel will respond with `BR_CLEAR_DEATH_NOTIFICATION_DONE` once the
905/// notification has been removed. Note that if the remote process dies before the kernel has
906/// responded with `BR_CLEAR_DEATH_NOTIFICATION_DONE`, then the kernel will still send a
907/// `BR_DEAD_BINDER`, which userspace must be able to process. In this case, the kernel will wait
908/// for the `BC_DEAD_BINDER_DONE` command before it sends `BR_CLEAR_DEATH_NOTIFICATION_DONE`.
909///
910/// Note that even if the kernel sends a `BR_DEAD_BINDER`, this does not remove the death
911/// notification. Userspace must still remove it manually using `BC_CLEAR_DEATH_NOTIFICATION`.
912///
913/// If a process uses `BC_RELEASE` to destroy its last refcount on a node that has an active death
914/// registration, then the death registration is immediately deleted (we implement this using the
915/// `aborted` field). However, userspace is not supposed to delete a `NodeRef` without first
916/// deregistering death notifications, so this codepath is not executed under normal circumstances.
917#[pin_data]
918pub(crate) struct NodeDeath {
919 node: DArc<Node>,
920 process: Arc<Process>,
921 pub(crate) cookie: u64,
922 #[pin]
923 links_track: AtomicTracker<0>,
924 /// Used by the owner `Node` to store a list of registered death notifications.
925 ///
926 /// # Invariants
927 ///
928 /// Only ever used with the `death_list` list of `self.node`.
929 #[pin]
930 death_links: ListLinks<1>,
931 /// Used by the process to keep track of the death notifications for which we have sent a
932 /// `BR_DEAD_BINDER` but not yet received a `BC_DEAD_BINDER_DONE`.
933 ///
934 /// # Invariants
935 ///
936 /// Only ever used with the `delivered_deaths` list of `self.process`.
937 #[pin]
938 delivered_links: ListLinks<2>,
939 #[pin]
940 delivered_links_track: AtomicTracker<2>,
941 #[pin]
942 inner: SpinLock<NodeDeathInner>,
943}
944
945impl NodeDeath {
946 /// Constructs a new node death notification object.
947 pub(crate) fn new(
948 node: DArc<Node>,
949 process: Arc<Process>,
950 cookie: u64,
951 ) -> impl PinInit<DTRWrap<Self>> {
952 DTRWrap::new(pin_init!(
953 Self {
954 node,
955 process,
956 cookie,
957 links_track <- AtomicTracker::new(),
958 death_links <- ListLinks::new(),
959 delivered_links <- ListLinks::new(),
960 delivered_links_track <- AtomicTracker::new(),
961 inner <- kernel::new_spinlock!(NodeDeathInner {
962 dead: false,
963 cleared: false,
964 notification_done: false,
965 aborted: false,
966 }, "NodeDeath::inner"),
967 }
968 ))
969 }
970
971 /// Sets the cleared flag to `true`.
972 ///
973 /// It removes `self` from the node's death notification list if needed.
974 ///
975 /// Returns whether it needs to be queued.
976 pub(crate) fn set_cleared(self: &DArc<Self>, abort: bool) -> bool {
977 let (needs_removal, needs_queueing) = {
978 // Update state and determine if we need to queue a work item. We only need to do it
979 // when the node is not dead or if the user already completed the death notification.
980 let mut inner = self.inner.lock();
981 if abort {
982 inner.aborted = true;
983 }
984 if inner.cleared {
985 // Already cleared.
986 return false;
987 }
988 inner.cleared = true;
989 (!inner.dead, !inner.dead || inner.notification_done)
990 };
991
992 // Remove death notification from node.
993 if needs_removal {
994 let mut owner_inner = self.node.owner.inner.lock();
995 let node_inner = self.node.inner.access_mut(&mut owner_inner);
996 // SAFETY: A `NodeDeath` is never inserted into the death list of any node other than
997 // its owner, so it is either in this death list or in no death list.
998 unsafe { node_inner.death_list.remove(self) };
999 }
1000 needs_queueing
1001 }
1002
1003 /// Sets the 'notification done' flag to `true`.
1004 pub(crate) fn set_notification_done(self: DArc<Self>, thread: &Thread) {
1005 let needs_queueing = {
1006 let mut inner = self.inner.lock();
1007 inner.notification_done = true;
1008 inner.cleared
1009 };
1010 if needs_queueing {
1011 if let Some(death) = ListArc::try_from_arc_or_drop(self) {
1012 let _ = thread.push_work_if_looper(death);
1013 }
1014 }
1015 }
1016
1017 /// Sets the 'dead' flag to `true` and queues work item if needed.
1018 pub(crate) fn set_dead(self: DArc<Self>) {
1019 let needs_queueing = {
1020 let mut inner = self.inner.lock();
1021 if inner.cleared {
1022 false
1023 } else {
1024 inner.dead = true;
1025 true
1026 }
1027 };
1028 if needs_queueing {
1029 // Push the death notification to the target process. There is nothing else to do if
1030 // it's already dead.
1031 if let Some(death) = ListArc::try_from_arc_or_drop(self) {
1032 let process = death.process.clone();
1033 let _ = process.push_work(death);
1034 }
1035 }
1036 }
1037}
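// The full death-notification handshake described above, shown as an illustrative command flow
// between userspace and the driver (the case where the remote process dies before the
// registration is cleared):
//
//     userspace: BC_REQUEST_DEATH_NOTIFICATION(handle, cookie)
//     ...          remote process dies          ...
//     driver:    BR_DEAD_BINDER(cookie)                      // queued by set_dead()
//     userspace: BC_DEAD_BINDER_DONE(cookie)                 // handled by set_notification_done()
//     userspace: BC_CLEAR_DEATH_NOTIFICATION(handle, cookie) // handled by set_cleared()
//     driver:    BR_CLEAR_DEATH_NOTIFICATION_DONE(cookie)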
1038
1039kernel::list::impl_list_arc_safe! {
1040 impl ListArcSafe<0> for NodeDeath {
1041 tracked_by links_track: AtomicTracker;
1042 }
1043}
1044
1045kernel::list::impl_list_arc_safe! {
1046 impl ListArcSafe<1> for DTRWrap<NodeDeath> { untracked; }
1047}
1048kernel::list::impl_list_item! {
1049 impl ListItem<1> for DTRWrap<NodeDeath> {
1050 using ListLinks { self.wrapped.death_links };
1051 }
1052}
1053
1054kernel::list::impl_list_arc_safe! {
1055 impl ListArcSafe<2> for DTRWrap<NodeDeath> {
1056 tracked_by wrapped: NodeDeath;
1057 }
1058}
1059kernel::list::impl_list_arc_safe! {
1060 impl ListArcSafe<2> for NodeDeath {
1061 tracked_by delivered_links_track: AtomicTracker<2>;
1062 }
1063}
1064kernel::list::impl_list_item! {
1065 impl ListItem<2> for DTRWrap<NodeDeath> {
1066 using ListLinks { self.wrapped.delivered_links };
1067 }
1068}
1069
1070impl DeliverToRead for NodeDeath {
1071 fn do_work(
1072 self: DArc<Self>,
1073 _thread: &Thread,
1074 writer: &mut BinderReturnWriter<'_>,
1075 ) -> Result<bool> {
1076 let done = {
1077 let inner = self.inner.lock();
1078 if inner.aborted {
1079 return Ok(true);
1080 }
1081 inner.cleared && (!inner.dead || inner.notification_done)
1082 };
1083
1084 let cookie = self.cookie;
1085 let cmd = if done {
1086 BR_CLEAR_DEATH_NOTIFICATION_DONE
1087 } else {
1088 let process = self.process.clone();
1089 let mut process_inner = process.inner.lock();
1090 let inner = self.inner.lock();
1091 if inner.aborted {
1092 return Ok(true);
1093 }
1094 // We're still holding the inner lock, so it cannot be aborted while we insert it into
1095 // the delivered list.
1096 process_inner.death_delivered(self.clone());
1097 BR_DEAD_BINDER
1098 };
1099
1100 writer.write_code(cmd)?;
1101 writer.write_payload(&cookie)?;
1102 // DEAD_BINDER notifications can cause transactions, so stop processing work items when we
1103 // get to a death notification.
1104 Ok(cmd != BR_DEAD_BINDER)
1105 }
1106
1107 fn cancel(self: DArc<Self>) {}
1108
1109 fn should_sync_wakeup(&self) -> bool {
1110 false
1111 }
1112
1113 #[inline(never)]
1114 fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
1115 let inner = self.inner.lock();
1116
1117 let dead_binder = inner.dead && !inner.notification_done;
1118
1119 if dead_binder {
1120 if inner.cleared {
1121 seq_print!(m, "{}has cleared dead binder\n", prefix);
1122 } else {
1123 seq_print!(m, "{}has dead binder\n", prefix);
1124 }
1125 } else {
1126 seq_print!(m, "{}has cleared death notification\n", prefix);
1127 }
1128
1129 Ok(())
1130 }
1131}