// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

//! This module defines the `Thread` type, which represents a userspace thread that is using
//! binder.
//!
//! The `Process` object stores all of the threads in an rb tree.

use kernel::{
    bindings,
    fs::{File, LocalFile},
    list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
    prelude::*,
    security,
    seq_file::SeqFile,
    seq_print,
    sync::poll::{PollCondVar, PollTable},
    sync::{Arc, SpinLock},
    task::Task,
    types::ARef,
    uaccess::{UserPtr, UserSlice},
    uapi,
};

use crate::{
    allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
    defs::*,
    error::BinderResult,
    process::{GetWorkOrRegister, Process},
    ptr_align,
    stats::GLOBAL_STATS,
    transaction::Transaction,
    BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
};

use core::{
    mem::size_of,
    sync::atomic::{AtomicU32, Ordering},
};

/// Stores the layout of the scatter-gather entries. This is used during the `translate_object`
/// calls and is discarded once translation is done.
struct ScatterGatherState {
    /// A struct that tracks the amount of unused buffer space.
    unused_buffer_space: UnusedBufferSpace,
    /// Scatter-gather entries to copy.
    sg_entries: KVec<ScatterGatherEntry>,
    /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
    /// was processed and all of its ancestors. The array is in sorted order.
    ancestors: KVec<usize>,
}

/// This entry specifies an additional buffer that should be copied using the scatter-gather
/// mechanism.
struct ScatterGatherEntry {
    /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
    obj_index: usize,
    /// Offset in target buffer.
    offset: usize,
    /// User address in source buffer.
    sender_uaddr: usize,
    /// Number of bytes to copy.
    length: usize,
    /// The minimum offset of the next fixup in this buffer.
    fixup_min_offset: usize,
    /// The offsets within this buffer that contain pointers which should be translated.
    pointer_fixups: KVec<PointerFixupEntry>,
}

/// This entry specifies that a fixup should happen at `target_offset` of the
/// buffer. If `skip` is nonzero, then the fixup is for a `binder_fd_array_object`
/// and is applied later. Otherwise, if `skip` is zero, the size of the
/// fixup is `size_of::<u64>()` and `pointer_value` is written to the buffer.
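///
/// For example (a sketch with illustrative values only): translating a
/// `binder_buffer_object` that points into its parent records a fixup like
///
/// ```ignore
/// PointerFixupEntry { skip: 0, pointer_value: translated_uaddr, target_offset: 16 }
/// ```
///
/// which overwrites eight bytes at offset 16 of the parent with the translated
/// pointer, whereas a `binder_fd_array_object` holding four fds records
///
/// ```ignore
/// PointerFixupEntry { skip: 16, pointer_value: 0, target_offset: 16 }
/// ```
///
/// which leaves a 16-byte gap to be filled with the translated fds on delivery.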
struct PointerFixupEntry {
    /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
    skip: usize,
    /// The translated pointer to write when `skip` is zero.
    pointer_value: u64,
    /// The offset at which the value should be written. The offset is relative
    /// to the original buffer.
    target_offset: usize,
}

/// Return type of [`ScatterGatherState::validate_parent_fixup`].
struct ParentFixupInfo {
    /// The index of the parent buffer in `sg_entries`.
    parent_sg_index: usize,
    /// The number of ancestors of the buffer.
    ///
    /// The buffer is considered an ancestor of itself, so this is always at
    /// least one.
    num_ancestors: usize,
    /// New value of `fixup_min_offset` if this fixup is applied.
    new_min_offset: usize,
    /// The offset of the fixup in the target buffer.
    target_offset: usize,
}

impl ScatterGatherState {
    /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
    /// to access a region in its parent buffer. These accesses have various
    /// restrictions, which this method verifies.
    ///
    /// The `parent_offset` and `length` arguments describe the offset and
    /// length of the access in the parent buffer.
    ///
    /// # Detailed restrictions
    ///
    /// Obviously the fixup must be in-bounds for the parent buffer.
    ///
    /// For safety reasons, we only allow fixups inside a buffer to happen
    /// at increasing offsets; additionally, we only allow fixup on the last
    /// buffer object that was verified, or one of its parents.
    ///
    /// Example of what is allowed:
    ///
    /// A
    ///   B (parent = A, offset = 0)
    ///   C (parent = A, offset = 16)
    ///     D (parent = C, offset = 0)
    ///   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
    ///
    /// Examples of what is not allowed:
    ///
    /// Decreasing offsets within the same parent:
    /// A
    ///   C (parent = A, offset = 16)
    ///   B (parent = A, offset = 0) // decreasing offset within A
    ///
    /// Referring to a parent that wasn't the last object or any of its parents:
    /// A
    ///   B (parent = A, offset = 0)
    ///   C (parent = A, offset = 0)
    ///   C (parent = A, offset = 16)
    ///     D (parent = B, offset = 0) // B is not A or any of A's parents
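    ///
    /// Worked example (a sketch of the bookkeeping for the allowed tree above): after D is
    /// verified, `ancestors` holds the `sg_entries` indices of `[A, C, D]`. Verifying E then
    /// calls `validate_parent_fixup(parent = A, parent_offset = 32, length = 8)`, which locates
    /// A via `rposition` and reports `num_ancestors == 1`, so the caller truncates the chain
    /// back to `[A]` before pushing E onto it.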
    fn validate_parent_fixup(
        &self,
        parent: usize,
        parent_offset: usize,
        length: usize,
    ) -> Result<ParentFixupInfo> {
        // Using `position` would also be correct, but `rposition` avoids
        // quadratic running times.
        let ancestors_i = self
            .ancestors
            .iter()
            .copied()
            .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
            .ok_or(EINVAL)?;
        let sg_idx = self.ancestors[ancestors_i];
        let sg_entry = match self.sg_entries.get(sg_idx) {
            Some(sg_entry) => sg_entry,
            None => {
                pr_err!(
                    "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
                    ancestors_i,
                    sg_idx,
                    self.sg_entries.len()
                );
                return Err(EINVAL);
            }
        };
        if sg_entry.fixup_min_offset > parent_offset {
            pr_warn!(
                "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
                sg_entry.fixup_min_offset,
                parent_offset
            );
            return Err(EINVAL);
        }
        let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
        if new_min_offset > sg_entry.length {
            pr_warn!(
                "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
                new_min_offset,
                sg_entry.length
            );
            return Err(EINVAL);
        }
        let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
        // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
        // most `self.ancestors.len()`, which also fits in a usize.
        Ok(ParentFixupInfo {
            parent_sg_index: sg_idx,
            num_ancestors: ancestors_i + 1,
            new_min_offset,
            target_offset,
        })
    }
}

/// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
/// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
/// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
struct UnusedBufferSpace {
    /// The start of the remaining space.
    offset: usize,
    /// The end of the remaining space.
    limit: usize,
}
impl UnusedBufferSpace {
    /// Claim the next `size` bytes from the unused buffer space. The offset for the claimed chunk
    /// into the buffer is returned.
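    ///
    /// Worked example (hypothetical values): with `offset == 0` and `limit == 24`, `claim_next(8)`
    /// returns `Ok(0)` and advances `offset` to 8, a second `claim_next(8)` returns `Ok(8)`, and a
    /// further `claim_next(16)` fails with `EINVAL` because only 8 bytes remain.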
    fn claim_next(&mut self, size: usize) -> Result<usize> {
        // We require every chunk to be aligned.
        let size = ptr_align(size).ok_or(EINVAL)?;
        let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;

        if new_offset <= self.limit {
            let offset = self.offset;
            self.offset = new_offset;
            Ok(offset)
        } else {
            Err(EINVAL)
        }
    }
}

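/// The result of pushing a work item to a thread: either the item was queued, or the thread is
/// dead and ownership of the item is returned to the caller via `FailedDead`.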
pub(crate) enum PushWorkRes {
    Ok,
    FailedDead(DLArc<dyn DeliverToRead>),
}

impl PushWorkRes {
    fn is_ok(&self) -> bool {
        match self {
            PushWorkRes::Ok => true,
            PushWorkRes::FailedDead(_) => false,
        }
    }
}

/// The fields of `Thread` protected by the spinlock.
struct InnerThread {
    /// Determines the looper state of the thread. It is a bit-wise combination of the constants
    /// prefixed with `LOOPER_`.
    looper_flags: u32,

    /// Determines whether the looper should return.
    looper_need_return: bool,

    /// Determines if thread is dead.
    is_dead: bool,

    /// Work item used to deliver error codes to the thread that started a transaction. Stored here
    /// so that it can be reused.
    reply_work: DArc<ThreadError>,

    /// Work item used to deliver error codes to the current thread. Stored here so that it can be
    /// reused.
    return_work: DArc<ThreadError>,

    /// Determines whether the work list below should be processed. When set to false, `work_list`
    /// is treated as if it were empty.
    process_work_list: bool,
    /// List of work items to deliver to userspace.
    work_list: List<DTRWrap<dyn DeliverToRead>>,
    current_transaction: Option<DArc<Transaction>>,

    /// Extended error information for this thread.
    extended_error: ExtendedError,
}

const LOOPER_REGISTERED: u32 = 0x01;
const LOOPER_ENTERED: u32 = 0x02;
const LOOPER_EXITED: u32 = 0x04;
const LOOPER_INVALID: u32 = 0x08;
const LOOPER_WAITING: u32 = 0x10;
const LOOPER_WAITING_PROC: u32 = 0x20;
const LOOPER_POLL: u32 = 0x40;
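
// How the `LOOPER_` flags are driven (see `looper_register`/`looper_enter` below and the
// `BC_REGISTER_LOOPER`/`BC_ENTER_LOOPER` handlers in `write`): using both commands on the same
// thread, or registering when `Process::register_thread` reports the registration as unexpected,
// marks the thread `LOOPER_INVALID`.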

impl InnerThread {
    fn new() -> Result<Self> {
        fn next_err_id() -> u32 {
            static EE_ID: AtomicU32 = AtomicU32::new(0);
            EE_ID.fetch_add(1, Ordering::Relaxed)
        }

        Ok(Self {
            looper_flags: 0,
            looper_need_return: false,
            is_dead: false,
            process_work_list: false,
            reply_work: ThreadError::try_new()?,
            return_work: ThreadError::try_new()?,
            work_list: List::new(),
            current_transaction: None,
            extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
        })
    }

    fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
        if !self.process_work_list {
            return None;
        }

        let ret = self.work_list.pop_front();
        self.process_work_list = !self.work_list.is_empty();
        ret
    }

    fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
        if self.is_dead {
            PushWorkRes::FailedDead(work)
        } else {
            self.work_list.push_back(work);
            self.process_work_list = true;
            PushWorkRes::Ok
        }
    }

    fn push_reply_work(&mut self, code: u32) {
        if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
            work.set_error_code(code);
            self.push_work(work);
        } else {
            pr_warn!("Thread reply work is already in use.");
        }
    }

    fn push_return_work(&mut self, reply: u32) {
        if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
            work.set_error_code(reply);
            self.push_work(work);
        } else {
            pr_warn!("Thread return work is already in use.");
        }
    }

    /// Used to push work items that do not need to be processed immediately and can wait until the
    /// thread gets another work item.
    fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
        self.work_list.push_back(work);
    }

    /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
    /// (that it could respond to) but it has also issued a transaction, it must first wait for the
    /// previously-issued transaction to complete.
    ///
    /// The `thread` parameter should be the thread containing this `InnerThread`.
    fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
        let transaction = self.current_transaction.take().ok_or(EINVAL)?;
        if core::ptr::eq(thread, transaction.from.as_ref()) {
            self.current_transaction = Some(transaction);
            return Err(EINVAL);
        }
        // Find a new current transaction for this thread.
        self.current_transaction = transaction.find_from(thread).cloned();
        Ok(transaction)
    }

    fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
        match self.current_transaction.take() {
            None => false,
            Some(old) => {
                if !Arc::ptr_eq(transaction, &old) {
                    self.current_transaction = Some(old);
                    return false;
                }
                self.current_transaction = old.clone_next();
                true
            }
        }
    }

    fn looper_enter(&mut self) {
        self.looper_flags |= LOOPER_ENTERED;
        if self.looper_flags & LOOPER_REGISTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    fn looper_register(&mut self, valid: bool) {
        self.looper_flags |= LOOPER_REGISTERED;
        if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    fn looper_exit(&mut self) {
        self.looper_flags |= LOOPER_EXITED;
    }

    /// Determines whether the thread is part of a pool, i.e., if it is a looper.
    fn is_looper(&self) -> bool {
        self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
    }

    /// Determines whether the thread should attempt to fetch work items from the process queue.
    /// This is generally the case when the thread is registered as a looper and not part of a
    /// transaction stack. But if there is local work, we want to return to userspace before we
    /// deliver any remote work.
    fn should_use_process_work_queue(&self) -> bool {
        self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
    }

    fn poll(&mut self) -> u32 {
        self.looper_flags |= LOOPER_POLL;
        if self.process_work_list || self.looper_need_return {
            bindings::POLLIN
        } else {
            0
        }
    }
}

/// This represents a thread that's used with binder.
#[pin_data]
pub(crate) struct Thread {
    pub(crate) id: i32,
    pub(crate) process: Arc<Process>,
    pub(crate) task: ARef<Task>,
    #[pin]
    inner: SpinLock<InnerThread>,
    #[pin]
    work_condvar: PollCondVar,
    /// Used to insert this thread into the process' `ready_threads` list.
    ///
    /// INVARIANT: May never be used for any other list than the `self.process.ready_threads`.
    #[pin]
    links: ListLinks,
    #[pin]
    links_track: AtomicTracker,
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for Thread {
        tracked_by links_track: AtomicTracker;
    }
}
kernel::list::impl_list_item! {
    impl ListItem<0> for Thread {
        using ListLinks { self.links };
    }
}

impl Thread {
    pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
        let inner = InnerThread::new()?;

        Arc::pin_init(
            try_pin_init!(Thread {
                id,
                process,
                task: ARef::from(&**kernel::current!()),
                inner <- kernel::new_spinlock!(inner, "Thread::inner"),
                work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
                links <- ListLinks::new(),
                links_track <- AtomicTracker::new(),
            }),
            GFP_KERNEL,
        )
    }

    #[inline(never)]
    pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
        let inner = self.inner.lock();

        if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
            seq_print!(
                m,
                "  thread {}: l {:02x} need_return {}\n",
                self.id,
                inner.looper_flags,
                inner.looper_need_return,
            );
        }

        let mut t_opt = inner.current_transaction.as_ref();
        while let Some(t) = t_opt {
            if Arc::ptr_eq(&t.from, self) {
                t.debug_print_inner(m, "    outgoing transaction ");
                t_opt = t.from_parent.as_ref();
            } else if Arc::ptr_eq(&t.to, &self.process) {
                t.debug_print_inner(m, "    incoming transaction ");
                t_opt = t.find_from(self);
            } else {
                t.debug_print_inner(m, "    bad transaction ");
                t_opt = None;
            }
        }

        for work in &inner.work_list {
            work.debug_print(m, "    ", "    pending transaction ")?;
        }
        Ok(())
    }

    pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
        let mut writer = data.writer();
        let ee = self.inner.lock().extended_error;
        writer.write(&ee)?;
        Ok(())
    }

    pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
        self.inner.lock().current_transaction = Some(transaction);
    }

    pub(crate) fn has_current_transaction(&self) -> bool {
        self.inner.lock().current_transaction.is_some()
    }

    /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
    /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
    /// signal); otherwise it returns indicating that none is available.
    fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
        {
            let mut inner = self.inner.lock();
            if inner.looper_need_return {
                return Ok(inner.pop_work());
            }
        }

        // Try once if the caller does not want to wait.
        if !wait {
            return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
        }

        // Loop waiting only on the local queue (i.e., not registering with the process queue).
        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }

            inner.looper_flags |= LOOPER_WAITING;
            let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
            inner.looper_flags &= !LOOPER_WAITING;

            if signal_pending {
                return Err(EINTR);
            }
            if inner.looper_need_return {
                return Ok(None);
            }
        }
    }

    /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
    /// queue if none is available locally.
    ///
    /// This must only be called when the thread is not participating in a transaction chain. If it
    /// is, the local version (`get_work_local`) should be used instead.
    fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
        // Try to get work from the thread's work queue, using only a local lock.
        {
            let mut inner = self.inner.lock();
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }
            if inner.looper_need_return {
                drop(inner);
                return Ok(self.process.get_work());
            }
        }

        // If the caller doesn't want to wait, try to grab work from the process queue.
        //
        // We know nothing will have been queued directly to the thread queue because it is not in
        // a transaction and it is not in the process' ready list.
        if !wait {
            return self.process.get_work().ok_or(EAGAIN).map(Some);
        }

        // Get work from the process queue. If none is available, atomically register as ready.
        let reg = match self.process.get_work_or_register(self) {
            GetWorkOrRegister::Work(work) => return Ok(Some(work)),
            GetWorkOrRegister::Register(reg) => reg,
        };

        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }

            inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
            let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
            inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);

            if signal_pending || inner.looper_need_return {
                // We need to return now. We need to pull the thread off the list of ready threads
                // (by dropping `reg`), then check the state again after it's off the list to
                // ensure that something was not queued in the meantime. If something has been
                // queued, we just return it (instead of the error).
                drop(inner);
                drop(reg);

                let res = match self.inner.lock().pop_work() {
                    Some(work) => Ok(Some(work)),
                    None if signal_pending => Err(EINTR),
                    None => Ok(None),
                };
                return res;
            }
        }
    }

    /// Push the provided work item to be delivered to user space via this thread.
    ///
    /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
    pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
        let sync = work.should_sync_wakeup();

        let res = self.inner.lock().push_work(work);

        if res.is_ok() {
            if sync {
                self.work_condvar.notify_sync();
            } else {
                self.work_condvar.notify_one();
            }
        }

        res
    }

    /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
    /// part of a thread pool) and is alive. Otherwise, pushes the work item to the process instead.
    pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
        let mut inner = self.inner.lock();
        if inner.is_looper() && !inner.is_dead {
            inner.push_work(work);
            Ok(())
        } else {
            drop(inner);
            self.process.push_work(work)
        }
    }

    pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
        self.inner.lock().push_work_deferred(work);
    }

    pub(crate) fn push_return_work(&self, reply: u32) {
        self.inner.lock().push_return_work(reply);
    }

    fn translate_object(
        &self,
        obj_index: usize,
        offset: usize,
        object: BinderObjectRef<'_>,
        view: &mut AllocationView<'_>,
        allow_fds: bool,
        sg_state: &mut ScatterGatherState,
    ) -> BinderResult {
        match object {
            BinderObjectRef::Binder(obj) => {
                let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
                // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
                // representation.
                let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
                let cookie = obj.cookie as _;
                let flags = obj.flags as _;
                let node = self
                    .process
                    .as_arc_borrow()
                    .get_node(ptr, cookie, flags, strong, self)?;
                security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
                view.transfer_binder_object(offset, obj, strong, node)?;
            }
            BinderObjectRef::Handle(obj) => {
                let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
                // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
                let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
                let node = self.process.get_node_from_handle(handle, strong)?;
                security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
                view.transfer_binder_object(offset, obj, strong, node)?;
            }
            BinderObjectRef::Fd(obj) => {
                if !allow_fds {
                    return Err(EPERM.into());
                }

                // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
                let fd = unsafe { obj.__bindgen_anon_1.fd };
                let file = LocalFile::fget(fd)?;
                // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
                // ioctl, so there are no active calls to `fdget_pos` on this thread.
                let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
                security::binder_transfer_file(
                    &self.process.cred,
                    &view.alloc.process.cred,
                    &file,
                )?;

                let mut obj_write = BinderFdObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_FD;
                // This will be overwritten with the actual fd when the transaction is received.
                obj_write.__bindgen_anon_1.fd = u32::MAX;
                obj_write.cookie = obj.cookie;
                view.write::<BinderFdObject>(offset, &obj_write)?;

                const FD_FIELD_OFFSET: usize =
                    core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);

                let field_offset = offset + FD_FIELD_OFFSET;

                view.alloc.info_add_fd(file, field_offset, false)?;
            }
            BinderObjectRef::Ptr(obj) => {
                let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
                let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
                    Ok(alloc_offset) => alloc_offset,
                    Err(err) => {
                        pr_warn!(
                            "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
                            sg_state.unused_buffer_space.offset,
                            sg_state.unused_buffer_space.limit,
                            obj_length,
                        );
                        return Err(err.into());
                    }
                };

                let sg_state_idx = sg_state.sg_entries.len();
                sg_state.sg_entries.push(
                    ScatterGatherEntry {
                        obj_index,
                        offset: alloc_offset,
                        sender_uaddr: obj.buffer as _,
                        length: obj_length,
                        pointer_fixups: KVec::new(),
                        fixup_min_offset: 0,
                    },
                    GFP_KERNEL,
                )?;

                let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;

                if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
                    sg_state.ancestors.clear();
                    sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
                } else {
                    // Another buffer also has a pointer to this buffer, and we need to fixup that
                    // pointer too.

                    let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
                    let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;

                    let info = sg_state.validate_parent_fixup(
                        parent_index,
                        parent_offset,
                        size_of::<u64>(),
                    )?;

                    sg_state.ancestors.truncate(info.num_ancestors);
                    sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;

                    let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
                        Some(parent_entry) => parent_entry,
                        None => {
                            pr_err!(
                                "validate_parent_fixup returned index out of bounds for sg.entries"
                            );
                            return Err(EINVAL.into());
                        }
                    };

                    parent_entry.fixup_min_offset = info.new_min_offset;
                    parent_entry.pointer_fixups.push(
                        PointerFixupEntry {
                            skip: 0,
                            pointer_value: buffer_ptr_in_user_space,
                            target_offset: info.target_offset,
                        },
                        GFP_KERNEL,
                    )?;
                }

                let mut obj_write = BinderBufferObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_PTR;
                obj_write.flags = obj.flags;
                obj_write.buffer = buffer_ptr_in_user_space;
                obj_write.length = obj.length;
                obj_write.parent = obj.parent;
                obj_write.parent_offset = obj.parent_offset;
                view.write::<BinderBufferObject>(offset, &obj_write)?;
            }
            BinderObjectRef::Fda(obj) => {
                if !allow_fds {
                    return Err(EPERM.into());
                }
                let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
                let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
                let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
                let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;

                let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
                view.alloc.info_add_fd_reserve(num_fds)?;

                sg_state.ancestors.truncate(info.num_ancestors);
                let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
                    Some(parent_entry) => parent_entry,
                    None => {
                        pr_err!(
                            "validate_parent_fixup returned index out of bounds for sg.entries"
                        );
                        return Err(EINVAL.into());
                    }
                };

                parent_entry.fixup_min_offset = info.new_min_offset;
                parent_entry
                    .pointer_fixups
                    .push(
                        PointerFixupEntry {
                            skip: fds_len,
                            pointer_value: 0,
                            target_offset: info.target_offset,
                        },
                        GFP_KERNEL,
                    )
                    .map_err(|_| ENOMEM)?;

                let fda_uaddr = parent_entry
                    .sender_uaddr
                    .checked_add(parent_offset)
                    .ok_or(EINVAL)?;
                let mut fda_bytes = KVec::new();
                UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
                    .read_all(&mut fda_bytes, GFP_KERNEL)?;

                if fds_len != fda_bytes.len() {
                    pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
                    return Err(EINVAL.into());
                }

                for i in (0..fds_len).step_by(size_of::<u32>()) {
                    let fd = {
                        let mut fd_bytes = [0u8; size_of::<u32>()];
                        fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
                        u32::from_ne_bytes(fd_bytes)
                    };

                    let file = LocalFile::fget(fd)?;
                    // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
                    // ioctl, so there are no active calls to `fdget_pos` on this thread.
                    let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
                    security::binder_transfer_file(
                        &self.process.cred,
                        &view.alloc.process.cred,
                        &file,
                    )?;

                    // The `validate_parent_fixup` call ensures that this addition will not
                    // overflow.
                    view.alloc.info_add_fd(file, info.target_offset + i, true)?;
                }
                drop(fda_bytes);

                let mut obj_write = BinderFdArrayObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_FDA;
                obj_write.num_fds = obj.num_fds;
                obj_write.parent = obj.parent;
                obj_write.parent_offset = obj.parent_offset;
                view.write::<BinderFdArrayObject>(offset, &obj_write)?;
            }
        }
        Ok(())
    }

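    /// Copies the scatter-gather buffers from the sender into the target allocation, applying the
    /// recorded pointer fixups along the way: the regions between fixups are copied verbatim, a
    /// fixup with `skip == 0` writes the translated 8-byte pointer at its target offset, and a
    /// fixup with `skip > 0` skips that many bytes (they are filled with translated fds when the
    /// transaction is delivered).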
    fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
        for sg_entry in &mut sg_state.sg_entries {
            let mut end_of_previous_fixup = sg_entry.offset;
            let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;

            let mut reader =
                UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
            for fixup in &mut sg_entry.pointer_fixups {
                let fixup_len = if fixup.skip == 0 {
                    size_of::<u64>()
                } else {
                    fixup.skip
                };

                let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
                if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
                    pr_warn!(
                        "Fixups oob {} {} {} {}",
                        fixup.target_offset,
                        end_of_previous_fixup,
                        offset_end,
                        target_offset_end
                    );
                    return Err(EINVAL.into());
                }

                let copy_off = end_of_previous_fixup;
                let copy_len = fixup.target_offset - end_of_previous_fixup;
                if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
                    pr_warn!("Failed copying into alloc: {:?}", err);
                    return Err(err.into());
                }
                if fixup.skip == 0 {
                    let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
                    if let Err(err) = res {
                        pr_warn!("Failed copying ptr into alloc: {:?}", err);
                        return Err(err.into());
                    }
                }
                if let Err(err) = reader.skip(fixup_len) {
                    pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
                    return Err(err.into());
                }
                end_of_previous_fixup = target_offset_end;
            }
            let copy_off = end_of_previous_fixup;
            let copy_len = offset_end - end_of_previous_fixup;
            if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
                pr_warn!("Failed copying remainder into alloc: {:?}", err);
                return Err(err.into());
            }
        }
        Ok(())
    }

    /// This method copies the payload of a transaction into the target process.
    ///
    /// The resulting payload will have several different components, which will be stored next to
    /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
    /// and those objects have to be translated so that they make sense to the target process.
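    ///
    /// A sketch of the resulting layout (each component's size is rounded up with `ptr_align`,
    /// and the security context is present only when one was requested):
    ///
    /// ```text
    /// | data | offsets | scatter-gather buffers | security context |
    /// ^0     ^aligned_data_size                 ^secctx_off
    /// ```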
    pub(crate) fn copy_transaction_data(
        &self,
        to_process: Arc<Process>,
        tr: &BinderTransactionDataSg,
        debug_id: usize,
        allow_fds: bool,
        txn_security_ctx_offset: Option<&mut usize>,
    ) -> BinderResult<NewAllocation> {
        let trd = &tr.transaction_data;
        let is_oneway = trd.flags & TF_ONE_WAY != 0;
        let mut secctx = if let Some(offset) = txn_security_ctx_offset {
            let secid = self.process.cred.get_secid();
            let ctx = match security::SecurityCtx::from_secid(secid) {
                Ok(ctx) => ctx,
                Err(err) => {
                    pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
                    return Err(err.into());
                }
            };
            Some((offset, ctx))
        } else {
            None
        };

        let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
        let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
        let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
        let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
        let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
        let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
        let aligned_secctx_size = match secctx.as_ref() {
            Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
            None => 0,
        };

        // This guarantees that at least `size_of::<usize>()` bytes will be allocated.
        let len = usize::max(
            aligned_data_size
                .checked_add(aligned_offsets_size)
                .and_then(|sum| sum.checked_add(aligned_buffers_size))
                .and_then(|sum| sum.checked_add(aligned_secctx_size))
                .ok_or(ENOMEM)?,
            size_of::<usize>(),
        );
        let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
        let mut alloc =
            match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
                Ok(alloc) => alloc,
                Err(err) => {
                    pr_warn!(
                        "Failed to allocate buffer. len:{}, is_oneway:{}",
                        len,
                        is_oneway
                    );
                    return Err(err);
                }
            };

        // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
        // all bit-patterns.
        let trd_data_ptr = unsafe { &trd.data.ptr };
        let mut buffer_reader =
            UserSlice::new(UserPtr::from_addr(trd_data_ptr.buffer as _), data_size).reader();
        let mut end_of_previous_object = 0;
        let mut sg_state = None;

        // Copy offsets if there are any.
        if offsets_size > 0 {
            {
                let mut reader =
                    UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
                        .reader();
                alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
            }

            let offsets_start = aligned_data_size;
            let offsets_end = aligned_data_size + aligned_offsets_size;

            // This state is used for BINDER_TYPE_PTR objects.
            let sg_state = sg_state.insert(ScatterGatherState {
                unused_buffer_space: UnusedBufferSpace {
                    offset: offsets_end,
                    limit: len,
                },
                sg_entries: KVec::new(),
                ancestors: KVec::new(),
            });

            // Traverse the objects specified.
            let mut view = AllocationView::new(&mut alloc, data_size);
            for (index, index_offset) in (offsets_start..offsets_end)
                .step_by(size_of::<usize>())
                .enumerate()
            {
                let offset = view.alloc.read(index_offset)?;

                if offset < end_of_previous_object {
                    pr_warn!("Got transaction with invalid offset.");
                    return Err(EINVAL.into());
                }

                // Copy data between two objects.
                if end_of_previous_object < offset {
                    view.copy_into(
                        &mut buffer_reader,
                        end_of_previous_object,
                        offset - end_of_previous_object,
                    )?;
                }

                let mut object = BinderObject::read_from(&mut buffer_reader)?;

                match self.translate_object(
                    index,
                    offset,
                    object.as_ref(),
                    &mut view,
                    allow_fds,
                    sg_state,
                ) {
                    Ok(()) => end_of_previous_object = offset + object.size(),
                    Err(err) => {
                        pr_warn!("Error while translating object.");
                        return Err(err);
                    }
                }

                // Update the indexes containing objects to clean up.
                let offset_after_object = index_offset + size_of::<usize>();
                view.alloc
                    .set_info_offsets(offsets_start..offset_after_object);
            }
        }

        // Copy remaining raw data.
        alloc.copy_into(
            &mut buffer_reader,
            end_of_previous_object,
            data_size - end_of_previous_object,
        )?;

        if let Some(sg_state) = sg_state.as_mut() {
            if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
                pr_warn!("Failure in apply_sg: {:?}", err);
                return Err(err);
            }
        }

        if let Some((off_out, secctx)) = secctx.as_mut() {
            if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
                pr_warn!("Failed to write security context: {:?}", err);
                return Err(err.into());
            }
            **off_out = secctx_off;
        }
        Ok(alloc)
    }

    fn unwind_transaction_stack(self: &Arc<Self>) {
        let mut thread = self.clone();
        while let Ok(transaction) = {
            let mut inner = thread.inner.lock();
            inner.pop_transaction_to_reply(thread.as_ref())
        } {
            let reply = Err(BR_DEAD_REPLY);
            if !transaction.from.deliver_single_reply(reply, &transaction) {
                break;
            }

            thread = transaction.from.clone();
        }
    }

    pub(crate) fn deliver_reply(
        &self,
        reply: Result<DLArc<Transaction>, u32>,
        transaction: &DArc<Transaction>,
    ) {
        if self.deliver_single_reply(reply, transaction) {
            transaction.from.unwind_transaction_stack();
        }
    }

    /// Delivers a reply to the thread that started a transaction. The reply can either be a
    /// reply-transaction or an error code to be delivered instead.
    ///
    /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
    /// transaction stack by completing transactions for threads that are dead.
    fn deliver_single_reply(
        &self,
        reply: Result<DLArc<Transaction>, u32>,
        transaction: &DArc<Transaction>,
    ) -> bool {
        if let Ok(transaction) = &reply {
            transaction.set_outstanding(&mut self.process.inner.lock());
        }

        {
            let mut inner = self.inner.lock();
            if !inner.pop_transaction_replied(transaction) {
                return false;
            }

            if inner.is_dead {
                return true;
            }

            match reply {
                Ok(work) => {
                    inner.push_work(work);
                }
                Err(code) => inner.push_reply_work(code),
            }
        }

        // Notify the thread now that we've released the inner lock.
        self.work_condvar.notify_sync();
        false
    }

    /// Determines if the given transaction is the current transaction for this thread.
    fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
        let inner = self.inner.lock();
        match &inner.current_transaction {
            None => false,
            Some(current) => Arc::ptr_eq(current, transaction),
        }
    }

    /// Determines the current top of the transaction stack. It fails if the top is in another
    /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
    /// [`None`] if the thread is not currently participating in a transaction stack.
    fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
        let inner = self.inner.lock();
        if let Some(cur) = &inner.current_transaction {
            if core::ptr::eq(self, cur.from.as_ref()) {
                pr_warn!("got new transaction with bad transaction stack");
                return Err(EINVAL);
            }
            Ok(Some(cur.clone()))
        } else {
            Ok(None)
        }
    }

    fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
    where
        T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
    {
        if let Err(err) = inner(self, tr) {
            if err.should_pr_warn() {
                {
                    // Record the failure in the thread's extended error state so that a
                    // subsequent `BINDER_GET_EXTENDED_ERROR` ioctl can report it.
                    let mut inner = self.inner.lock();
                    inner.extended_error.command = err.reply;
                    inner.extended_error.param = err.as_errno();
                }
                pr_warn!(
                    "Transaction failed: {:?} my_pid:{}",
                    err,
                    self.process.pid_in_current_ns()
                );
            }

            self.push_return_work(err.reply);
        }
    }

    fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
        // SAFETY: Handle's type has no invalid bit patterns.
        let handle = unsafe { tr.transaction_data.target.handle };
        let node_ref = self.process.get_transaction_node(handle)?;
        security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
        // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
        // could this happen?
        let top = self.top_of_transaction_stack()?;
        let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
        let completion = list_completion.clone_arc();
        let transaction = Transaction::new(node_ref, top, self, tr)?;

        // Check that the transaction stack hasn't changed while the lock was released, then update
        // it with the new transaction.
        {
            let mut inner = self.inner.lock();
            if !transaction.is_stacked_on(&inner.current_transaction) {
                pr_warn!("Transaction stack changed during transaction!");
                return Err(EINVAL.into());
            }
            inner.current_transaction = Some(transaction.clone_arc());
            // We push the completion as a deferred work so that we wait for the reply before
            // returning to userland.
            inner.push_work_deferred(list_completion);
        }

        if let Err(e) = transaction.submit() {
            completion.skip();
            // Define `transaction` first to drop it after `inner`.
            let transaction;
            let mut inner = self.inner.lock();
            transaction = inner.current_transaction.take().unwrap();
            inner.current_transaction = transaction.clone_next();
            Err(e)
        } else {
            Ok(())
        }
    }

    fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
        let orig = self.inner.lock().pop_transaction_to_reply(self)?;
        if !orig.from.is_current_transaction(&orig) {
            return Err(EINVAL.into());
        }

        // We need to complete the transaction even if we cannot complete building the reply.
        let out = (|| -> BinderResult<_> {
            let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
            let process = orig.from.process.clone();
            let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
            let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
            self.inner.lock().push_work(completion);
            orig.from.deliver_reply(Ok(reply), &orig);
            Ok(())
        })()
        .map_err(|mut err| {
            // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must
            // let the sender know that the transaction has completed (with an error in this
            // case).
            pr_warn!(
                "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
                err
            );
            let reply = Err(BR_FAILED_REPLY);
            orig.from.deliver_reply(reply, &orig);
            err.reply = BR_TRANSACTION_COMPLETE;
            err
        });

        out
    }

    fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
        // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
        // union is okay.
        let handle = unsafe { tr.transaction_data.target.handle };
        let node_ref = self.process.get_transaction_node(handle)?;
        security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
        let transaction = Transaction::new(node_ref, None, self, tr)?;
        let code = if self.process.is_oneway_spam_detection_enabled()
            && transaction.oneway_spam_detected
        {
            BR_ONEWAY_SPAM_SUSPECT
        } else {
            BR_TRANSACTION_COMPLETE
        };
        let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
        let completion = list_completion.clone_arc();
        self.inner.lock().push_work(list_completion);
        match transaction.submit() {
            Ok(()) => Ok(()),
            Err(err) => {
                completion.skip();
                Err(err)
            }
        }
    }

    fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
        let write_start = req.write_buffer.wrapping_add(req.write_consumed);
        let write_len = req.write_size.saturating_sub(req.write_consumed);
        let mut reader =
            UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();

        while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
            let before = reader.len();
            let cmd = reader.read::<u32>()?;
            GLOBAL_STATS.inc_bc(cmd);
            self.process.stats.inc_bc(cmd);
            match cmd {
                BC_TRANSACTION => {
                    let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
                    if tr.transaction_data.flags & TF_ONE_WAY != 0 {
                        self.transaction(&tr, Self::oneway_transaction_inner);
                    } else {
                        self.transaction(&tr, Self::transaction_inner);
                    }
                }
                BC_TRANSACTION_SG => {
                    let tr = reader.read::<BinderTransactionDataSg>()?;
                    if tr.transaction_data.flags & TF_ONE_WAY != 0 {
                        self.transaction(&tr, Self::oneway_transaction_inner);
                    } else {
                        self.transaction(&tr, Self::transaction_inner);
                    }
                }
                BC_REPLY => {
                    let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
                    self.transaction(&tr, Self::reply_inner)
                }
                BC_REPLY_SG => {
                    let tr = reader.read::<BinderTransactionDataSg>()?;
                    self.transaction(&tr, Self::reply_inner)
                }
                BC_FREE_BUFFER => {
                    let buffer = self.process.buffer_get(reader.read()?);
                    if let Some(buffer) = buffer {
                        if buffer.looper_need_return_on_free() {
                            self.inner.lock().looper_need_return = true;
                        }
                        drop(buffer);
                    }
                }
                BC_INCREFS => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, true, false)?
                }
                BC_ACQUIRE => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, true, true)?
                }
                BC_RELEASE => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, false, true)?
                }
                BC_DECREFS => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, false, false)?
                }
                BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
                BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
                BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
                BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
                BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
                BC_REGISTER_LOOPER => {
                    let valid = self.process.register_thread();
                    self.inner.lock().looper_register(valid);
                }
                BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
                BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
                BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
                BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
                BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,

                // Fail if given an unknown command code.
                // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
                _ => return Err(EINVAL),
            }
            // Update the number of write bytes consumed.
            req.write_consumed += (before - reader.len()) as u64;
        }

        Ok(())
    }

    fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
        let read_start = req.read_buffer.wrapping_add(req.read_consumed);
        let read_len = req.read_size.saturating_sub(req.read_consumed);
        let mut writer = BinderReturnWriter::new(
            UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
            self,
        );
        let (in_pool, use_proc_queue) = {
            let inner = self.inner.lock();
            (inner.is_looper(), inner.should_use_process_work_queue())
        };

        let getter = if use_proc_queue {
            Self::get_work
        } else {
            Self::get_work_local
        };

        // Reserve some room at the beginning of the read buffer so that we can send a
        // BR_SPAWN_LOOPER if we need to.
        let mut has_noop_placeholder = false;
        if req.read_consumed == 0 {
            if let Err(err) = writer.write_code(BR_NOOP) {
                pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
                return Err(err);
            }
            has_noop_placeholder = true;
        }

        // Loop doing work while there is room in the buffer.
        let initial_len = writer.len();
        while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
            match getter(self, wait && initial_len == writer.len()) {
                Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
                    Ok(true) => {}
                    Ok(false) => break,
                    Err(err) => {
                        return Err(err);
                    }
                },
                Ok(None) => {
                    break;
                }
                Err(err) => {
                    // Propagate the error if we haven't written anything else.
                    if err != EINTR && err != EAGAIN {
                        pr_warn!("Failure in work getter: {:?}", err);
                    }
                    if initial_len == writer.len() {
                        return Err(err);
                    } else {
                        break;
                    }
                }
            }
        }

        req.read_consumed += read_len - writer.len() as u64;

        // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
        if has_noop_placeholder && in_pool && self.process.needs_thread() {
            let mut writer =
                UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
                    .writer();
            writer.write(&BR_SPAWN_LOOPER)?;
        }
        Ok(())
    }

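    /// Implements the `BINDER_WRITE_READ` ioctl for this thread: the write buffer is processed
    /// first, then the read buffer is filled with work items, and the updated consumed counts are
    /// written back so they are visible to the caller.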
    pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
        let (mut reader, mut writer) = data.reader_writer();
        let mut req = reader.read::<BinderWriteRead>()?;

        // Go through the write buffer.
        let mut ret = Ok(());
        if req.write_size > 0 {
            ret = self.write(&mut req);
            if let Err(err) = ret {
                pr_warn!(
                    "Write failure {:?} in pid:{}",
                    err,
                    self.process.pid_in_current_ns()
                );
                req.read_consumed = 0;
                writer.write(&req)?;
                self.inner.lock().looper_need_return = false;
                return ret;
            }
        }

        // Go through the work queue.
        if req.read_size > 0 {
            ret = self.read(&mut req, wait);
            if ret.is_err() && ret != Err(EINTR) {
                pr_warn!(
                    "Read failure {:?} in pid:{}",
                    ret,
                    self.process.pid_in_current_ns()
                );
            }
        }

        // Write the request back so that the consumed fields are visible to the caller.
        writer.write(&req)?;

        self.inner.lock().looper_need_return = false;

        ret
    }

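    /// Registers the file with the given poll table against this thread's `work_condvar`, and
    /// returns whether the thread currently takes work from the process-wide queue together with
    /// the poll mask for the thread.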
    pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
        table.register_wait(file, &self.work_condvar);
        let mut inner = self.inner.lock();
        (inner.should_use_process_work_queue(), inner.poll())
    }

    /// Makes any in-progress call to `get_work` or `get_work_local` on this thread return
    /// immediately.
    pub(crate) fn exit_looper(&self) {
        let mut inner = self.inner.lock();
        let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
        if should_notify {
            inner.looper_need_return = true;
        }
        drop(inner);

        if should_notify {
            self.work_condvar.notify_one();
        }
    }

    pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
        // Determine if we need to notify. This requires the lock.
        let inner = self.inner.lock();
        let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
        drop(inner);

        // Now that the lock is no longer held, notify the waiters if we have to.
        if notify {
            if sync {
                self.work_condvar.notify_sync();
            } else {
                self.work_condvar.notify_one();
            }
        }
    }

    pub(crate) fn release(self: &Arc<Self>) {
        self.inner.lock().is_dead = true;

        //self.work_condvar.clear();
        self.unwind_transaction_stack();

        // Cancel all pending work items.
        while let Ok(Some(work)) = self.get_work_local(false) {
            work.into_arc().cancel();
        }
    }
}

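/// A reusable work item that delivers a single return code (for example `BR_FAILED_REPLY` or
/// `BR_TRANSACTION_COMPLETE`) to userspace. Each thread preallocates two of these (`reply_work`
/// and `return_work` in `InnerThread`); an item counts as unused while its `error_code` is
/// `BR_OK`, and delivering it resets it to that state.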
#[pin_data]
struct ThreadError {
    error_code: AtomicU32,
    #[pin]
    links_track: AtomicTracker,
}

impl ThreadError {
    fn try_new() -> Result<DArc<Self>> {
        DTRWrap::arc_pin_init(pin_init!(Self {
            error_code: AtomicU32::new(BR_OK),
            links_track <- AtomicTracker::new(),
        }))
        .map(ListArc::into_arc)
    }

    fn set_error_code(&self, code: u32) {
        self.error_code.store(code, Ordering::Relaxed);
    }

    fn is_unused(&self) -> bool {
        self.error_code.load(Ordering::Relaxed) == BR_OK
    }
}

impl DeliverToRead for ThreadError {
    fn do_work(
        self: DArc<Self>,
        _thread: &Thread,
        writer: &mut BinderReturnWriter<'_>,
    ) -> Result<bool> {
        let code = self.error_code.load(Ordering::Relaxed);
        self.error_code.store(BR_OK, Ordering::Relaxed);
        writer.write_code(code)?;
        Ok(true)
    }

    fn cancel(self: DArc<Self>) {}

    fn should_sync_wakeup(&self) -> bool {
        false
    }

    fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
        seq_print!(
            m,
            "{}transaction error: {}\n",
            prefix,
            self.error_code.load(Ordering::Relaxed)
        );
        Ok(())
    }
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for ThreadError {
        tracked_by links_track: AtomicTracker;
    }
}