Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock to be held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
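
/*
 * For example, a path that needs all three locks for the same proc must
 * take them strictly in the order above (a sketch, not a real call site):
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */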

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <kunit/visibility.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_netlink.h"
#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static DEFINE_SPINLOCK(binder_devices_lock);

static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}
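
/*
 * Since debug_mask is exposed via module_param_named() with mode 0644,
 * debug classes can also be toggled at runtime from userspace, for
 * example (the parameter path follows from KBUILD_MODNAME "binder"):
 *
 *	echo 0x200 > /sys/module/binder/parameters/debug_mask
 *
 * where 0x200 is BINDER_DEBUG_TRANSACTION (1U << 9) from the enum above.
 */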

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
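
/*
 * These container_of() wrappers recover the enclosing object from its
 * embedded struct binder_object_header, letting parsing code dispatch on
 * hdr->type. A minimal sketch of the pattern used later in this file:
 *
 *	switch (hdr->type) {
 *	case BINDER_TYPE_BINDER:
 *	case BINDER_TYPE_WEAK_BINDER: {
 *		struct flat_binder_object *fp = to_flat_binder_object(hdr);
 *		... translate fp->binder / fp->handle ...
 *		break;
 *	}
 *	case BINDER_TYPE_FD: {
 *		struct binder_fd_object *fp = to_binder_fd_object(hdr);
 *		... fix up fp->fd for the target process ...
 *		break;
 *	}
 *	}
 */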

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
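
/*
 * The matching read side must pair this barrier: load e->debug_id_done
 * first, issue smp_rmb(), read the other fields, then re-check
 * e->debug_id_done to detect a torn entry. A sketch of such a reader
 * (the debugfs dump of this log follows the same pattern):
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	... print the fields of *e ...
 *	if (!done || done != READ_ONCE(e->debug_id_done))
 *		... mark the entry as incomplete ...
 */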

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is set, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without this wakeup, threads
	 * risk waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo);
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 *         returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
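
/*
 * A typical caller pairs the selection with a wakeup while holding the
 * inner lock, e.g. (sketch):
 *
 *	binder_inner_proc_lock(proc);
 *	thread = binder_select_thread_ilocked(proc);
 *	binder_wakeup_thread_ilocked(proc, thread, sync);
 *	binder_inner_proc_unlock(proc);
 *
 * binder_wakeup_proc_ilocked() below is exactly this pairing for callers
 * that already hold the inner lock.
 */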

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc: process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync: whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
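
/*
 * Since the lookup takes an implicit tmp_ref, callers must pair it with
 * binder_put_node() (defined below), e.g. (sketch):
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		... node cannot be freed while the tmp_ref is held ...
 *		binder_put_node(node);
 *	}
 */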

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
			binder_enqueue_work_ilocked(&node->work, target_list);
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/* Find the smallest unused descriptor the "slow way" */
static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
{
	struct binder_ref *ref;
	struct rb_node *n;
	u32 desc;

	desc = offset;
	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > desc)
			break;
		desc = ref->data.desc + 1;
	}

	return desc;
}
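
/*
 * For example, with descriptors {0, 1, 2, 4} in use and offset == 1, the
 * walk above advances desc to 3 and stops at the gap before 4; with a
 * dense set {0, 1, 2, 3, 4} it runs off the end and returns 5.
 */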

/*
 * Find an available reference descriptor ID. The proc->outer_lock might
 * be released in the process, in which case -EAGAIN is returned and the
 * @desc should be considered invalid.
 */
static int get_ref_desc_olocked(struct binder_proc *proc,
				struct binder_node *node,
				u32 *desc)
{
	struct dbitmap *dmap = &proc->dmap;
	unsigned int nbits, offset;
	unsigned long *new, bit;

	/* 0 is reserved for the context manager */
	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;

	if (!dbitmap_enabled(dmap)) {
		*desc = slow_desc_lookup_olocked(proc, offset);
		return 0;
	}

	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
		*desc = bit;
		return 0;
	}

	/*
	 * The dbitmap is full and needs to grow. The proc->outer_lock
	 * is briefly released to allocate the new bitmap safely.
	 */
	nbits = dbitmap_grow_nbits(dmap);
	binder_proc_unlock(proc);
	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_grow(dmap, new, nbits);

	return -EAGAIN;
}
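
/*
 * Callers must therefore be prepared to restart their lookup whenever
 * -EAGAIN is returned, since dropping proc->outer_lock invalidates any
 * rb-tree position held across the call, e.g. (sketch):
 *
 *	retry:
 *		... walk the tree under proc->outer_lock ...
 *		if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
 *			goto retry;
 *
 * binder_get_ref_for_node_olocked() below follows this pattern.
 */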

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc: binder_proc that owns the ref
 * @node: binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 *         allocated/initialized the ref first in which case the
 *         returned ref would be different from the passed-in
 *         new_ref. new_ref must be kfree'd by the caller in
 *         this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_ref *ref;
	struct rb_node *parent;
	struct rb_node **p;
	u32 desc;

retry:
	p = &proc->refs_by_node.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	/* might release the proc->outer_lock */
	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
		goto retry;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = desc;
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	struct dbitmap *dmap = &ref->proc->dmap;
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	if (dbitmap_enabled(dmap))
		dbitmap_clear_bit(dmap, ref->data.desc);
	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}

	if (ref->freeze) {
		binder_dequeue_work(ref->proc, &ref->freeze->work);
		binder_stats_deleted(BINDER_STAT_FREEZE);
	}

	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref: ref to be incremented
 * @strong: if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref: ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref->freeze);
	kfree(ref);
}

/* shrink descriptor bitmap if needed */
static void try_shrink_dmap(struct binder_proc *proc)
{
	unsigned long *new;
	int nbits;

	binder_proc_lock(proc);
	nbits = dbitmap_shrink_nbits(&proc->dmap);
	binder_proc_unlock(proc);

	if (!nbits)
		return;

	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_shrink(&proc->dmap, new, nbits);
	binder_proc_unlock(proc);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref) {
		binder_free_ref(ref);
		try_shrink_dmap(proc);
	}
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc: proc containing the ref
 * @node: target node
 * @strong: true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t: binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	guard(spinlock)(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	return from;
}
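
/*
 * The returned thread must be released with binder_thread_dec_tmpref()
 * once the caller is done with it, e.g. (sketch):
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		... from cannot be freed while the tmp_ref is held ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */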

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t: binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t: binder transaction whose fd fixups should be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t: transaction that needs to be cleaned up
 * @reason: reason the transaction wasn't delivered
 * @error_code: error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc: binder_proc owning the buffer
 * @u: sender's user pointer to base of buffer
 * @buffer: binder_buffer that we're parsing.
 * @offset: offset in the @buffer at which to validate an object.
 * @object: struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return: If there's a valid metadata object at @offset, the
 *         size of that object. Otherwise, it returns zero. The object
 *         is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
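
/*
 * As a worked example (assuming the 64-bit UAPI layout, where a
 * struct binder_fd_object is 24 bytes): for a buffer with
 * data_size == 64 and offset == 48, read_size is clamped to 16, which
 * is enough for the header; but the final check requires
 * offset <= 64 - 24 == 40, so the call returns 0 and the object is
 * rejected as truncated.
 */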
1831
1832/**
1833 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1834 * @proc: binder_proc owning the buffer
1835 * @b: binder_buffer containing the object
1836 * @object: struct binder_object to read into
1837 * @index: index in offset array at which the binder_buffer_object is
1838 * located
1839 * @start_offset: points to the start of the offset array
1840 * @object_offsetp: offset of @object read from @b
1841 * @num_valid: the number of valid offsets in the offset array
1842 *
1843 * Return: If @index is within the valid range of the offset array
1844 * described by @start and @num_valid, and if there's a valid
1845 * binder_buffer_object at the offset found in index @index
1846 * of the offset array, that object is returned. Otherwise,
1847 * %NULL is returned.
1848 * Note that the offset found in index @index itself is not
1849 * verified; this function assumes that @num_valid elements
1850 * from @start_offset were previously verified to have valid offsets.
1851 * If @object_offsetp is non-NULL, then the offset within
1852 * @b is written to it.
1853 */
1854static struct binder_buffer_object *binder_validate_ptr(
1855 struct binder_proc *proc,
1856 struct binder_buffer *b,
1857 struct binder_object *object,
1858 binder_size_t index,
1859 binder_size_t start_offset,
1860 binder_size_t *object_offsetp,
1861 binder_size_t num_valid)
1862{
1863 size_t object_size;
1864 binder_size_t object_offset;
1865 unsigned long buffer_offset;
1866
1867 if (index >= num_valid)
1868 return NULL;
1869
1870 buffer_offset = start_offset + sizeof(binder_size_t) * index;
1871 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1872 b, buffer_offset,
1873 sizeof(object_offset)))
1874 return NULL;
1875 object_size = binder_get_object(proc, NULL, b, object_offset, object);
1876 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1877 return NULL;
1878 if (object_offsetp)
1879 *object_offsetp = object_offset;
1880
1881 return &object->bbo;
1882}
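
/*
 * As a rough sketch, the lookup above is equivalent to:
 *
 *	object_offset = offsets[index];
 *	object = *(struct binder_object *)(buffer_data + object_offset);
 *	return object.hdr.type == BINDER_TYPE_PTR ? &object.bbo : NULL;
 *
 * except that the driver never dereferences buffer memory directly: both
 * reads go through binder_alloc_copy_from_buffer()/binder_get_object(),
 * which apply the bounds and alignment checks.
 */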
1883
1884/**
1885 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1886 * @proc: binder_proc owning the buffer
1887 * @b: transaction buffer
1888 * @objects_start_offset: offset to start of objects buffer
1889 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
1890 * @fixup_offset: start offset in @b to fix up
1891 * @last_obj_offset: offset to last binder_buffer_object that we fixed
1892 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
1893 *
1894 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
1895 * allowed.
1896 *
1897 * For safety reasons, we only allow fixups inside a buffer to happen
1898 * at increasing offsets; additionally, we only allow fixup on the last
1899 * buffer object that was verified, or one of its parents.
1900 *
1901 * Example of what is allowed:
1902 *
1903 * A
1904 * B (parent = A, offset = 0)
1905 * C (parent = A, offset = 16)
1906 * D (parent = C, offset = 0)
1907 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1908 *
1909 * Examples of what is not allowed:
1910 *
1911 * Decreasing offsets within the same parent:
1912 * A
1913 * C (parent = A, offset = 16)
1914 * B (parent = A, offset = 0) // decreasing offset within A
1915 *
1916 * Referring to a parent that wasn't the last object or any of its parents:
1917 * A
1918 * B (parent = A, offset = 0)
1919 * C (parent = A, offset = 0)
1920 * C (parent = A, offset = 16)
1921 * D (parent = B, offset = 0) // B is not A or any of A's parents
1922 */
1923static bool binder_validate_fixup(struct binder_proc *proc,
1924 struct binder_buffer *b,
1925 binder_size_t objects_start_offset,
1926 binder_size_t buffer_obj_offset,
1927 binder_size_t fixup_offset,
1928 binder_size_t last_obj_offset,
1929 binder_size_t last_min_offset)
1930{
1931 if (!last_obj_offset) {
1932 /* No object has been verified yet, so no fixup is allowed */
1933 return false;
1934 }
1935
1936 while (last_obj_offset != buffer_obj_offset) {
1937 unsigned long buffer_offset;
1938 struct binder_object last_object;
1939 struct binder_buffer_object *last_bbo;
1940 size_t object_size = binder_get_object(proc, NULL, b,
1941 last_obj_offset,
1942 &last_object);
1943 if (object_size != sizeof(*last_bbo))
1944 return false;
1945
1946 last_bbo = &last_object.bbo;
1947 /*
1948 * Safe to retrieve the parent of last_obj, since it
1949 * was already previously verified by the driver.
1950 */
1951 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1952 return false;
1953 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1954 buffer_offset = objects_start_offset +
1955 sizeof(binder_size_t) * last_bbo->parent;
1956 if (binder_alloc_copy_from_buffer(&proc->alloc,
1957 &last_obj_offset,
1958 b, buffer_offset,
1959 sizeof(last_obj_offset)))
1960 return false;
1961 }
1962 return (fixup_offset >= last_min_offset);
1963}
1964
1965/**
1966 * struct binder_task_work_cb - for deferred close
1967 *
1968 * @twork: callback_head for task work
1969 * @file: file to close
1970 *
1971 * Structure to pass task work to be handled after
1972 * returning from binder_ioctl() via task_work_add().
1973 */
1974struct binder_task_work_cb {
1975 struct callback_head twork;
1976 struct file *file;
1977};
1978
1979/**
1980 * binder_do_fd_close() - close list of file descriptors
1981 * @twork: callback head for task work
1982 *
1983 * It is not safe to call ksys_close() during the binder_ioctl()
1984 * function if there is a chance that binder's own file descriptor
1985 * might be closed. This is to meet the requirements for using
1986 * fdget() (see comments for __fget_light()). Therefore use
1987 * task_work_add() to schedule the close operation once we have
1988 * returned from binder_ioctl(). This function is a callback
1989 * for that mechanism and does the actual ksys_close() on the
1990 * given file descriptor.
1991 */
1992static void binder_do_fd_close(struct callback_head *twork)
1993{
1994 struct binder_task_work_cb *twcb = container_of(twork,
1995 struct binder_task_work_cb, twork);
1996
1997 fput(twcb->file);
1998 kfree(twcb);
1999}
2000
2001/**
2002 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2003 * @fd: file-descriptor to close
2004 *
2005 * See comments in binder_do_fd_close(). This function is used to schedule
2006 * a file-descriptor to be closed after returning from binder_ioctl().
2007 */
2008static void binder_deferred_fd_close(int fd)
2009{
2010 struct binder_task_work_cb *twcb;
2011
2012 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2013 if (!twcb)
2014 return;
2015 init_task_work(&twcb->twork, binder_do_fd_close);
2016 twcb->file = file_close_fd(fd);
2017 if (twcb->file) {
2018 // pin it until binder_do_fd_close(); see comments there
2019 get_file(twcb->file);
2020 filp_close(twcb->file, current->files);
2021 task_work_add(current, &twcb->twork, TWA_RESUME);
2022 } else {
2023 kfree(twcb);
2024 }
2025}
2026
2027static void binder_transaction_buffer_release(struct binder_proc *proc,
2028 struct binder_thread *thread,
2029 struct binder_buffer *buffer,
2030 binder_size_t off_end_offset,
2031 bool is_failure)
2032{
2033 int debug_id = buffer->debug_id;
2034 binder_size_t off_start_offset, buffer_offset;
2035
2036 binder_debug(BINDER_DEBUG_TRANSACTION,
2037 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2038 proc->pid, buffer->debug_id,
2039 buffer->data_size, buffer->offsets_size,
2040 (unsigned long long)off_end_offset);
2041
2042 if (buffer->target_node)
2043 binder_dec_node(buffer->target_node, 1, 0);
2044
2045 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2046
2047 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2048 buffer_offset += sizeof(binder_size_t)) {
2049 struct binder_object_header *hdr;
2050 size_t object_size = 0;
2051 struct binder_object object;
2052 binder_size_t object_offset;
2053
2054 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2055 buffer, buffer_offset,
2056 sizeof(object_offset)))
2057 object_size = binder_get_object(proc, NULL, buffer,
2058 object_offset, &object);
2059 if (object_size == 0) {
2060 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2061 debug_id, (u64)object_offset, buffer->data_size);
2062 continue;
2063 }
2064 hdr = &object.hdr;
2065 switch (hdr->type) {
2066 case BINDER_TYPE_BINDER:
2067 case BINDER_TYPE_WEAK_BINDER: {
2068 struct flat_binder_object *fp;
2069 struct binder_node *node;
2070
2071 fp = to_flat_binder_object(hdr);
2072 node = binder_get_node(proc, fp->binder);
2073 if (node == NULL) {
2074 pr_err("transaction release %d bad node %016llx\n",
2075 debug_id, (u64)fp->binder);
2076 break;
2077 }
2078 binder_debug(BINDER_DEBUG_TRANSACTION,
2079 " node %d u%016llx\n",
2080 node->debug_id, (u64)node->ptr);
2081 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2082 0);
2083 binder_put_node(node);
2084 } break;
2085 case BINDER_TYPE_HANDLE:
2086 case BINDER_TYPE_WEAK_HANDLE: {
2087 struct flat_binder_object *fp;
2088 struct binder_ref_data rdata;
2089 int ret;
2090
2091 fp = to_flat_binder_object(hdr);
2092 ret = binder_dec_ref_for_handle(proc, fp->handle,
2093 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2094
2095 if (ret) {
2096 pr_err("transaction release %d bad handle %d, ret = %d\n",
2097 debug_id, fp->handle, ret);
2098 break;
2099 }
2100 binder_debug(BINDER_DEBUG_TRANSACTION,
2101 " ref %d desc %d\n",
2102 rdata.debug_id, rdata.desc);
2103 } break;
2104
2105 case BINDER_TYPE_FD: {
2106 /*
2107 * No need to close the file here since user-space
2108 * closes it for successfully delivered
2109 * transactions. For transactions that weren't
2110 * delivered, the new fd was never allocated so
2111 * there is no need to close and the fput on the
2112 * file is done when the transaction is torn
2113 * down.
2114 */
2115 } break;
2116 case BINDER_TYPE_PTR:
2117 /*
2118 * Nothing to do here, this will get cleaned up when the
2119 * transaction buffer gets freed
2120 */
2121 break;
2122 case BINDER_TYPE_FDA: {
2123 struct binder_fd_array_object *fda;
2124 struct binder_buffer_object *parent;
2125 struct binder_object ptr_object;
2126 binder_size_t fda_offset;
2127 size_t fd_index;
2128 binder_size_t fd_buf_size;
2129 binder_size_t num_valid;
2130
2131 if (is_failure) {
2132 /*
2133 * The fd fixups have not been applied so no
2134 * fds need to be closed.
2135 */
2136 continue;
2137 }
2138
2139 num_valid = (buffer_offset - off_start_offset) /
2140 sizeof(binder_size_t);
2141 fda = to_binder_fd_array_object(hdr);
2142 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2143 fda->parent,
2144 off_start_offset,
2145 NULL,
2146 num_valid);
2147 if (!parent) {
2148 pr_err("transaction release %d bad parent offset\n",
2149 debug_id);
2150 continue;
2151 }
2152 fd_buf_size = sizeof(u32) * fda->num_fds;
2153 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2154 pr_err("transaction release %d invalid number of fds (%lld)\n",
2155 debug_id, (u64)fda->num_fds);
2156 continue;
2157 }
2158 if (fd_buf_size > parent->length ||
2159 fda->parent_offset > parent->length - fd_buf_size) {
2160 /* No space for all file descriptors here. */
2161 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2162 debug_id, (u64)fda->num_fds);
2163 continue;
2164 }
2165 /*
2166 * the source data for binder_buffer_object is visible
2167 * to user-space and the @buffer element is the user
2168 * pointer to the buffer_object containing the fd_array.
2169 * Convert the address to an offset relative to
2170 * the base of the transaction buffer.
2171 */
2172 fda_offset = parent->buffer - buffer->user_data +
2173 fda->parent_offset;
2174 for (fd_index = 0; fd_index < fda->num_fds;
2175 fd_index++) {
2176 u32 fd;
2177 int err;
2178 binder_size_t offset = fda_offset +
2179 fd_index * sizeof(fd);
2180
2181 err = binder_alloc_copy_from_buffer(
2182 &proc->alloc, &fd, buffer,
2183 offset, sizeof(fd));
2184 WARN_ON(err);
2185 if (!err) {
2186 binder_deferred_fd_close(fd);
2187 /*
2188 * Need to make sure the thread goes
2189 * back to userspace to complete the
2190 * deferred close
2191 */
2192 if (thread)
2193 thread->looper_need_return = true;
2194 }
2195 }
2196 } break;
2197 default:
2198 pr_err("transaction release %d bad object type %x\n",
2199 debug_id, hdr->type);
2200 break;
2201 }
2202 }
2203}
2204
2205/* Clean up all the objects in the buffer */
2206static inline void binder_release_entire_buffer(struct binder_proc *proc,
2207 struct binder_thread *thread,
2208 struct binder_buffer *buffer,
2209 bool is_failure)
2210{
2211 binder_size_t off_end_offset;
2212
2213 off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2214 off_end_offset += buffer->offsets_size;
2215
2216 binder_transaction_buffer_release(proc, thread, buffer,
2217 off_end_offset, is_failure);
2218}
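
/*
 * Buffer layout assumed by the two functions above: the offsets array is
 * placed immediately after the data area, aligned to sizeof(void *):
 *
 *	[ data (data_size bytes) ][ pad ][ offsets[0..N-1] ]
 *	^0                        ^ALIGN(data_size, sizeof(void *))
 *
 * so ALIGN(buffer->data_size, sizeof(void *)) + buffer->offsets_size is
 * one past the last offset entry.
 */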
2219
2220static int binder_translate_binder(struct flat_binder_object *fp,
2221 struct binder_transaction *t,
2222 struct binder_thread *thread)
2223{
2224 struct binder_node *node;
2225 struct binder_proc *proc = thread->proc;
2226 struct binder_proc *target_proc = t->to_proc;
2227 struct binder_ref_data rdata;
2228 int ret = 0;
2229
2230 node = binder_get_node(proc, fp->binder);
2231 if (!node) {
2232 node = binder_new_node(proc, fp);
2233 if (!node)
2234 return -ENOMEM;
2235 }
2236 if (fp->cookie != node->cookie) {
2237 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2238 proc->pid, thread->pid, (u64)fp->binder,
2239 node->debug_id, (u64)fp->cookie,
2240 (u64)node->cookie);
2241 ret = -EINVAL;
2242 goto done;
2243 }
2244 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2245 ret = -EPERM;
2246 goto done;
2247 }
2248
2249 ret = binder_inc_ref_for_node(target_proc, node,
2250 fp->hdr.type == BINDER_TYPE_BINDER,
2251 &thread->todo, &rdata);
2252 if (ret)
2253 goto done;
2254
2255 if (fp->hdr.type == BINDER_TYPE_BINDER)
2256 fp->hdr.type = BINDER_TYPE_HANDLE;
2257 else
2258 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2259 fp->binder = 0;
2260 fp->handle = rdata.desc;
2261 fp->cookie = 0;
2262
2263 trace_binder_transaction_node_to_ref(t, node, &rdata);
2264 binder_debug(BINDER_DEBUG_TRANSACTION,
2265 " node %d u%016llx -> ref %d desc %d\n",
2266 node->debug_id, (u64)node->ptr,
2267 rdata.debug_id, rdata.desc);
2268done:
2269 binder_put_node(node);
2270 return ret;
2271}
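
/*
 * Example of the rewrite above, with made-up values: the sender passes
 * BINDER_TYPE_BINDER with binder == 0xb400dead (an address in its own
 * space) and a matching cookie. The copy placed in the target buffer
 * becomes BINDER_TYPE_HANDLE with handle == rdata.desc (say 5) and with
 * binder and cookie zeroed, so the sender's raw pointers never reach the
 * target process.
 */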
2272
2273static int binder_translate_handle(struct flat_binder_object *fp,
2274 struct binder_transaction *t,
2275 struct binder_thread *thread)
2276{
2277 struct binder_proc *proc = thread->proc;
2278 struct binder_proc *target_proc = t->to_proc;
2279 struct binder_node *node;
2280 struct binder_ref_data src_rdata;
2281 int ret = 0;
2282
2283 node = binder_get_node_from_ref(proc, fp->handle,
2284 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2285 if (!node) {
2286 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2287 proc->pid, thread->pid, fp->handle);
2288 return -EINVAL;
2289 }
2290 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2291 ret = -EPERM;
2292 goto done;
2293 }
2294
2295 binder_node_lock(node);
2296 if (node->proc == target_proc) {
2297 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2298 fp->hdr.type = BINDER_TYPE_BINDER;
2299 else
2300 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2301 fp->binder = node->ptr;
2302 fp->cookie = node->cookie;
2303 if (node->proc)
2304 binder_inner_proc_lock(node->proc);
2305 else
2306 __acquire(&node->proc->inner_lock);
2307 binder_inc_node_nilocked(node,
2308 fp->hdr.type == BINDER_TYPE_BINDER,
2309 0, NULL);
2310 if (node->proc)
2311 binder_inner_proc_unlock(node->proc);
2312 else
2313 __release(&node->proc->inner_lock);
2314 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2315 binder_debug(BINDER_DEBUG_TRANSACTION,
2316 " ref %d desc %d -> node %d u%016llx\n",
2317 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2318 (u64)node->ptr);
2319 binder_node_unlock(node);
2320 } else {
2321 struct binder_ref_data dest_rdata;
2322
2323 binder_node_unlock(node);
2324 ret = binder_inc_ref_for_node(target_proc, node,
2325 fp->hdr.type == BINDER_TYPE_HANDLE,
2326 NULL, &dest_rdata);
2327 if (ret)
2328 goto done;
2329
2330 fp->binder = 0;
2331 fp->handle = dest_rdata.desc;
2332 fp->cookie = 0;
2333 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2334 &dest_rdata);
2335 binder_debug(BINDER_DEBUG_TRANSACTION,
2336 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2337 src_rdata.debug_id, src_rdata.desc,
2338 dest_rdata.debug_id, dest_rdata.desc,
2339 node->debug_id);
2340 }
2341done:
2342 binder_put_node(node);
2343 return ret;
2344}
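
/*
 * The two branches above give handles round-trip behavior: a handle whose
 * node lives in the target process is converted back into the original
 * BINDER_TYPE_BINDER object (binder and cookie restored), while a handle
 * to a node in a third process is re-mapped to a descriptor in the
 * target's own ref tree via binder_inc_ref_for_node().
 */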
2345
2346static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2347 struct binder_transaction *t,
2348 struct binder_thread *thread,
2349 struct binder_transaction *in_reply_to)
2350{
2351 struct binder_proc *proc = thread->proc;
2352 struct binder_proc *target_proc = t->to_proc;
2353 struct binder_txn_fd_fixup *fixup;
2354 struct file *file;
2355 int ret = 0;
2356 bool target_allows_fd;
2357
2358 if (in_reply_to)
2359 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2360 else
2361 target_allows_fd = t->buffer->target_node->accept_fds;
2362 if (!target_allows_fd) {
2363 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2364 proc->pid, thread->pid,
2365 in_reply_to ? "reply" : "transaction",
2366 fd);
2367 ret = -EPERM;
2368 goto err_fd_not_accepted;
2369 }
2370
2371 file = fget(fd);
2372 if (!file) {
2373 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2374 proc->pid, thread->pid, fd);
2375 ret = -EBADF;
2376 goto err_fget;
2377 }
2378 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2379 if (ret < 0) {
2380 ret = -EPERM;
2381 goto err_security;
2382 }
2383
2384 /*
2385 * Add fixup record for this transaction. The allocation
2386 * of the fd in the target needs to be done from a
2387 * target thread.
2388 */
2389 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2390 if (!fixup) {
2391 ret = -ENOMEM;
2392 goto err_alloc;
2393 }
2394 fixup->file = file;
2395 fixup->offset = fd_offset;
2396 fixup->target_fd = -1;
2397 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2398 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2399
2400 return ret;
2401
2402err_alloc:
2403err_security:
2404 fput(file);
2405err_fget:
2406err_fd_not_accepted:
2407 return ret;
2408}
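
/*
 * Note that no fd is installed in the target here: the fixup queued on
 * t->fd_fixups is consumed later, in the context of a target thread, by
 * binder_apply_fd_fixups(), which allocates the real descriptor and
 * writes it into the transaction buffer at fixup->offset.
 */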
2409
2410/**
2411 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2412 * @offset: offset in target buffer to fixup
2413 * @skip_size: bytes to skip in copy (fixup will be written later)
2414 * @fixup_data: data to write at fixup offset
2415 * @node: list node
2416 *
2417 * This is used for the pointer fixup list (pf) which is created and consumed
2418 * during binder_transaction() and is only accessed locally. No
2419 * locking is necessary.
2420 *
2421 * The list is ordered by @offset.
2422 */
2423struct binder_ptr_fixup {
2424 binder_size_t offset;
2425 size_t skip_size;
2426 binder_uintptr_t fixup_data;
2427 struct list_head node;
2428};
2429
2430/**
2431 * struct binder_sg_copy - scatter-gather data to be copied
2432 * @offset: offset in target buffer
2433 * @sender_uaddr: user address in source buffer
2434 * @length: bytes to copy
2435 * @node: list node
2436 *
2437 * This is used for the sg copy list (sgc) which is created and consumed
2438 * during binder_transaction() and is only accessed locally. No
2439 * locking is necessary.
2440 *
2441 * The list is ordered by @offset.
2442 */
2443struct binder_sg_copy {
2444 binder_size_t offset;
2445 const void __user *sender_uaddr;
2446 size_t length;
2447 struct list_head node;
2448};
2449
2450/**
2451 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2452 * @alloc: binder_alloc associated with @buffer
2453 * @buffer: binder buffer in target process
2454 * @sgc_head: list_head of scatter-gather copy list
2455 * @pf_head: list_head of pointer fixup list
2456 *
2457 * Processes all elements of @sgc_head, applying fixups from @pf_head
2458 * and copying the scatter-gather data from the source process' user
2459 * buffer to the target's buffer. It is expected that list creation
2460 * and processing all occur during binder_transaction(), so these lists
2461 * are only accessed in local context.
2462 *
2463 * Return: 0=success, else -errno
2464 */
2465static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2466 struct binder_buffer *buffer,
2467 struct list_head *sgc_head,
2468 struct list_head *pf_head)
2469{
2470 int ret = 0;
2471 struct binder_sg_copy *sgc, *tmpsgc;
2472 struct binder_ptr_fixup *tmppf;
2473 struct binder_ptr_fixup *pf =
2474 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2475 node);
2476
2477 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2478 size_t bytes_copied = 0;
2479
2480 while (bytes_copied < sgc->length) {
2481 size_t copy_size;
2482 size_t bytes_left = sgc->length - bytes_copied;
2483 size_t offset = sgc->offset + bytes_copied;
2484
2485 /*
2486 * We copy up to the fixup (pointed to by pf)
2487 */
2488 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2489 : bytes_left;
2490 if (!ret && copy_size)
2491 ret = binder_alloc_copy_user_to_buffer(
2492 alloc, buffer,
2493 offset,
2494 sgc->sender_uaddr + bytes_copied,
2495 copy_size);
2496 bytes_copied += copy_size;
2497 if (copy_size != bytes_left) {
2498 BUG_ON(!pf);
2499 /* we stopped at a fixup offset */
2500 if (pf->skip_size) {
2501 /*
2502 * we are just skipping. This is for
2503 * BINDER_TYPE_FDA where the translated
2504 * fds will be fixed up when we get
2505 * to target context.
2506 */
2507 bytes_copied += pf->skip_size;
2508 } else {
2509 /* apply the fixup indicated by pf */
2510 if (!ret)
2511 ret = binder_alloc_copy_to_buffer(
2512 alloc, buffer,
2513 pf->offset,
2514 &pf->fixup_data,
2515 sizeof(pf->fixup_data));
2516 bytes_copied += sizeof(pf->fixup_data);
2517 }
2518 list_del(&pf->node);
2519 kfree(pf);
2520 pf = list_first_entry_or_null(pf_head,
2521 struct binder_ptr_fixup, node);
2522 }
2523 }
2524 list_del(&sgc->node);
2525 kfree(sgc);
2526 }
2527 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2528 BUG_ON(pf->skip_size == 0);
2529 list_del(&pf->node);
2530 kfree(pf);
2531 }
2532 BUG_ON(!list_empty(sgc_head));
2533
2534 return ret > 0 ? -EINVAL : ret;
2535}
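
/*
 * Worked example of the interleaving above, with made-up values and an
 * 8-byte binder_uintptr_t assumed: given one sg block
 * {offset = 0, length = 32} and one fixup {offset = 16, skip_size = 0},
 * the loop copies sender bytes [0, 16), writes the 8 bytes of fixup_data
 * at offset 16, then copies sender bytes [24, 32). The sender's original
 * pointer at [16, 24) is never exposed to the target.
 */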
2536
2537/**
2538 * binder_cleanup_deferred_txn_lists() - free specified lists
2539 * @sgc_head: list_head of scatter-gather copy list
2540 * @pf_head: list_head of pointer fixup list
2541 *
2542 * Called to clean up @sgc_head and @pf_head if there is an
2543 * error.
2544 */
2545static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2546 struct list_head *pf_head)
2547{
2548 struct binder_sg_copy *sgc, *tmpsgc;
2549 struct binder_ptr_fixup *pf, *tmppf;
2550
2551 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2552 list_del(&sgc->node);
2553 kfree(sgc);
2554 }
2555 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2556 list_del(&pf->node);
2557 kfree(pf);
2558 }
2559}
2560
2561/**
2562 * binder_defer_copy() - queue a scatter-gather buffer for copy
2563 * @sgc_head: list_head of scatter-gather copy list
2564 * @offset: binder buffer offset in target process
2565 * @sender_uaddr: user address in source process
2566 * @length: bytes to copy
2567 *
2568 * Specify a scatter-gather block to be copied. The actual copy must
2569 * be deferred until all the needed fixups are identified and queued.
2570 * Then the copy and fixups are done together so un-translated values
2571 * from the source are never visible in the target buffer.
2572 *
2573 * We are guaranteed that repeated calls to this function will have
2574 * monotonically increasing @offset values so the list will naturally
2575 * be ordered.
2576 *
2577 * Return: 0=success, else -errno
2578 */
2579static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2580 const void __user *sender_uaddr, size_t length)
2581{
2582 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2583
2584 if (!bc)
2585 return -ENOMEM;
2586
2587 bc->offset = offset;
2588 bc->sender_uaddr = sender_uaddr;
2589 bc->length = length;
2590 INIT_LIST_HEAD(&bc->node);
2591
2592 /*
2593 * We are guaranteed that the deferred copies are in-order
2594 * so just add to the tail.
2595 */
2596 list_add_tail(&bc->node, sgc_head);
2597
2598 return 0;
2599}
2600
2601/**
2602 * binder_add_fixup() - queue a fixup to be applied to sg copy
2603 * @pf_head: list_head of binder ptr fixup list
2604 * @offset: binder buffer offset in target process
2605 * @fixup: bytes to be copied for fixup
2606 * @skip_size: bytes to skip when copying (fixup will be applied later)
2607 *
2608 * Add the specified fixup to a list ordered by @offset. When copying
2609 * the scatter-gather buffers, the fixup will be copied instead of
2610 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2611 * will be applied later (in target process context), so we just skip
2612 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2613 * value in @fixup.
2614 *
2615 * This function is called *mostly* in @offset order, but there are
2616 * exceptions. Since out-of-order inserts are relatively uncommon,
2617 * we insert the new element by searching backward from the tail of
2618 * the list.
2619 *
2620 * Return: 0=success, else -errno
2621 */
2622static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2623 binder_uintptr_t fixup, size_t skip_size)
2624{
2625 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2626 struct binder_ptr_fixup *tmppf;
2627
2628 if (!pf)
2629 return -ENOMEM;
2630
2631 pf->offset = offset;
2632 pf->fixup_data = fixup;
2633 pf->skip_size = skip_size;
2634 INIT_LIST_HEAD(&pf->node);
2635
2636 /* Fixups are *mostly* added in-order, but there are some
2637 * exceptions. Look backwards through list for insertion point.
2638 */
2639 list_for_each_entry_reverse(tmppf, pf_head, node) {
2640 if (tmppf->offset < pf->offset) {
2641 list_add(&pf->node, &tmppf->node);
2642 return 0;
2643 }
2644 }
2645 /*
2646 * if we get here, then the new offset is the lowest so
2647 * insert at the head
2648 */
2649 list_add(&pf->node, pf_head);
2650 return 0;
2651}
2652
2653static int binder_translate_fd_array(struct list_head *pf_head,
2654 struct binder_fd_array_object *fda,
2655 const void __user *sender_ubuffer,
2656 struct binder_buffer_object *parent,
2657 struct binder_buffer_object *sender_uparent,
2658 struct binder_transaction *t,
2659 struct binder_thread *thread,
2660 struct binder_transaction *in_reply_to)
2661{
2662 binder_size_t fdi, fd_buf_size;
2663 binder_size_t fda_offset;
2664 const void __user *sender_ufda_base;
2665 struct binder_proc *proc = thread->proc;
2666 int ret;
2667
2668 if (fda->num_fds == 0)
2669 return 0;
2670
2671 fd_buf_size = sizeof(u32) * fda->num_fds;
2672 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2673 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2674 proc->pid, thread->pid, (u64)fda->num_fds);
2675 return -EINVAL;
2676 }
2677 if (fd_buf_size > parent->length ||
2678 fda->parent_offset > parent->length - fd_buf_size) {
2679 /* No space for all file descriptors here. */
2680 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2681 proc->pid, thread->pid, (u64)fda->num_fds);
2682 return -EINVAL;
2683 }
2684 /*
2685 * the source data for binder_buffer_object is visible
2686 * to user-space and the @buffer element is the user
2687 * pointer to the buffer_object containing the fd_array.
2688 * Convert the address to an offset relative to
2689 * the base of the transaction buffer.
2690 */
2691 fda_offset = parent->buffer - t->buffer->user_data +
2692 fda->parent_offset;
2693 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2694 fda->parent_offset;
2695
2696 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2697 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2698 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2699 proc->pid, thread->pid);
2700 return -EINVAL;
2701 }
2702 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2703 if (ret)
2704 return ret;
2705
2706 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2707 u32 fd;
2708 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2709 binder_size_t sender_uoffset = fdi * sizeof(fd);
2710
2711 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2712 if (!ret)
2713 ret = binder_translate_fd(fd, offset, t, thread,
2714 in_reply_to);
2715 if (ret)
2716 return ret > 0 ? -EINVAL : ret;
2717 }
2718 return 0;
2719}
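
/*
 * The fd array region itself is deliberately not copied from the sender:
 * binder_add_fixup() above records a skip of num_fds * sizeof(u32) bytes,
 * and each binder_translate_fd() call queues a per-fd fixup, so the
 * target-side descriptors are written into that region later, in target
 * context.
 */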
2720
2721static int binder_fixup_parent(struct list_head *pf_head,
2722 struct binder_transaction *t,
2723 struct binder_thread *thread,
2724 struct binder_buffer_object *bp,
2725 binder_size_t off_start_offset,
2726 binder_size_t num_valid,
2727 binder_size_t last_fixup_obj_off,
2728 binder_size_t last_fixup_min_off)
2729{
2730 struct binder_buffer_object *parent;
2731 struct binder_buffer *b = t->buffer;
2732 struct binder_proc *proc = thread->proc;
2733 struct binder_proc *target_proc = t->to_proc;
2734 struct binder_object object;
2735 binder_size_t buffer_offset;
2736 binder_size_t parent_offset;
2737
2738 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2739 return 0;
2740
2741 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2742 off_start_offset, &parent_offset,
2743 num_valid);
2744 if (!parent) {
2745 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2746 proc->pid, thread->pid);
2747 return -EINVAL;
2748 }
2749
2750 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2751 parent_offset, bp->parent_offset,
2752 last_fixup_obj_off,
2753 last_fixup_min_off)) {
2754 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2755 proc->pid, thread->pid);
2756 return -EINVAL;
2757 }
2758
2759 if (parent->length < sizeof(binder_uintptr_t) ||
2760 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2761 /* No space for a pointer here! */
2762 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2763 proc->pid, thread->pid);
2764 return -EINVAL;
2765 }
2766
2767 buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2768
2769 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2770}
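
/*
 * By the time this fixup is queued, the BINDER_TYPE_PTR handling in
 * binder_transaction() has rewritten @bp->buffer to the child buffer's
 * address in the target's view, so the fixup replaces the sender-side
 * pointer stored at bp->parent_offset within the parent with that
 * translated address.
 */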
2771
2772/**
2773 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2774 * @t1: the pending async txn in the frozen process
2775 * @t2: the new async txn to supersede the outdated pending one
2776 *
2777 * Return: true if t2 can supersede t1
2778 * false if t2 can not supersede t1
2779 */
2780static bool binder_can_update_transaction(struct binder_transaction *t1,
2781 struct binder_transaction *t2)
2782{
2783 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2784 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2785 return false;
2786 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2787 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2788 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2789 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2790 return true;
2791 return false;
2792}
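
/*
 * Illustrative match: two TF_ONE_WAY | TF_UPDATE_TXN transactions with
 * the same code, the same flags and the same sender pid, addressed to the
 * same node (same ptr and cookie) in the same target task, count as
 * duplicates; the already-queued one may then be superseded while the
 * target is frozen.
 */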
2793
2794/**
2795 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2796 * @t: new async transaction
2797 * @target_list: list to find outdated transaction
2798 *
2799 * Return: the outdated transaction if found
2800 * NULL if no outdated transaction can be found
2801 *
2802 * Requires the proc->inner_lock to be held.
2803 */
2804static struct binder_transaction *
2805binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2806 struct list_head *target_list)
2807{
2808 struct binder_work *w;
2809
2810 list_for_each_entry(w, target_list, entry) {
2811 struct binder_transaction *t_queued;
2812
2813 if (w->type != BINDER_WORK_TRANSACTION)
2814 continue;
2815 t_queued = container_of(w, struct binder_transaction, work);
2816 if (binder_can_update_transaction(t_queued, t))
2817 return t_queued;
2818 }
2819 return NULL;
2820}
2821
2822/**
2823 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2824 * @t: transaction to send
2825 * @proc: process to send the transaction to
2826 * @thread: thread in @proc to send the transaction to (may be NULL)
2827 *
2828 * This function queues a transaction to the specified process. It will try
2829 * to find a thread in the target process to handle the transaction and
2830 * wake it up. If no thread is found, the work is queued to the proc
2831 * waitqueue.
2832 *
2833 * If the @thread parameter is not NULL, the transaction is always queued
2834 * to the waitlist of that specific thread.
2835 *
2836 * Return: 0 if the transaction was successfully queued
2837 * BR_DEAD_REPLY if the target process or thread is dead
2838 * BR_FROZEN_REPLY if the target process or thread is frozen and
2839 * the sync transaction was rejected
2840 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2841 * and the async transaction was successfully queued
2842 */
2843static int binder_proc_transaction(struct binder_transaction *t,
2844 struct binder_proc *proc,
2845 struct binder_thread *thread)
2846{
2847 struct binder_node *node = t->buffer->target_node;
2848 bool oneway = !!(t->flags & TF_ONE_WAY);
2849 bool pending_async = false;
2850 struct binder_transaction *t_outdated = NULL;
2851 bool frozen = false;
2852
2853 BUG_ON(!node);
2854 binder_node_lock(node);
2855 if (oneway) {
2856 BUG_ON(thread);
2857 if (node->has_async_transaction)
2858 pending_async = true;
2859 else
2860 node->has_async_transaction = true;
2861 }
2862
2863 binder_inner_proc_lock(proc);
2864 if (proc->is_frozen) {
2865 frozen = true;
2866 proc->sync_recv |= !oneway;
2867 proc->async_recv |= oneway;
2868 }
2869
2870 if ((frozen && !oneway) || proc->is_dead ||
2871 (thread && thread->is_dead)) {
2872 binder_inner_proc_unlock(proc);
2873 binder_node_unlock(node);
2874 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2875 }
2876
2877 if (!thread && !pending_async)
2878 thread = binder_select_thread_ilocked(proc);
2879
2880 if (thread) {
2881 binder_enqueue_thread_work_ilocked(thread, &t->work);
2882 } else if (!pending_async) {
2883 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2884 } else {
2885 if ((t->flags & TF_UPDATE_TXN) && frozen) {
2886 t_outdated = binder_find_outdated_transaction_ilocked(t,
2887 &node->async_todo);
2888 if (t_outdated) {
2889 binder_debug(BINDER_DEBUG_TRANSACTION,
2890 "txn %d supersedes %d\n",
2891 t->debug_id, t_outdated->debug_id);
2892 list_del_init(&t_outdated->work.entry);
2893 proc->outstanding_txns--;
2894 }
2895 }
2896 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2897 }
2898
2899 if (!pending_async)
2900 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2901
2902 proc->outstanding_txns++;
2903 binder_inner_proc_unlock(proc);
2904 binder_node_unlock(node);
2905
2906 /*
2907 * To reduce potential contention, free the outdated transaction and
2908 * buffer after releasing the locks.
2909 */
2910 if (t_outdated) {
2911 struct binder_buffer *buffer = t_outdated->buffer;
2912
2913 t_outdated->buffer = NULL;
2914 buffer->transaction = NULL;
2915 trace_binder_transaction_update_buffer_release(buffer);
2916 binder_release_entire_buffer(proc, NULL, buffer, false);
2917 binder_alloc_free_buf(&proc->alloc, buffer);
2918 kfree(t_outdated);
2919 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2920 }
2921
2922 if (oneway && frozen)
2923 return BR_TRANSACTION_PENDING_FROZEN;
2924
2925 return 0;
2926}
2927
2928/**
2929 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2930 * @node: struct binder_node for which to get refs
2931 * @procp: returns @node->proc if valid
2932 * @error: set to BR_DEAD_REPLY if the node's proc is NULL
2933 *
2934 * User-space normally keeps the node alive when creating a transaction
2935 * since it has a reference to the target. The local strong ref keeps it
2936 * alive if the sending process dies before the target process processes
2937 * the transaction. If the source process is malicious or has a reference
2938 * counting bug, relying on the local strong ref can fail.
2939 *
2940 * Since user-space can cause the local strong ref to go away, we also take
2941 * a tmpref on the node to ensure it survives while we are constructing
2942 * the transaction. We also need a tmpref on the proc while we are
2943 * constructing the transaction, so we take that here as well.
2944 *
2945 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2946 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2947 * target proc has died, @error is set to BR_DEAD_REPLY.
2948 */
2949static struct binder_node *binder_get_node_refs_for_txn(
2950 struct binder_node *node,
2951 struct binder_proc **procp,
2952 uint32_t *error)
2953{
2954 struct binder_node *target_node = NULL;
2955
2956 binder_node_inner_lock(node);
2957 if (node->proc) {
2958 target_node = node;
2959 binder_inc_node_nilocked(node, 1, 0, NULL);
2960 binder_inc_node_tmpref_ilocked(node);
2961 node->proc->tmp_ref++;
2962 *procp = node->proc;
2963 } else
2964 *error = BR_DEAD_REPLY;
2965 binder_node_inner_unlock(node);
2966
2967 return target_node;
2968}
2969
2970static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2971 uint32_t command, int32_t param)
2972{
2973 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2974
2975 if (!from) {
2976 /* annotation for sparse */
2977 __release(&from->proc->inner_lock);
2978 return;
2979 }
2980
2981 /* don't override existing errors */
2982 if (from->ee.command == BR_OK)
2983 binder_set_extended_error(&from->ee, id, command, param);
2984 binder_inner_proc_unlock(from->proc);
2985 binder_thread_dec_tmpref(from);
2986}
2987
2988/**
2989 * binder_netlink_report() - report a transaction failure via netlink
2990 * @proc: the binder proc sending the transaction
2991 * @t: the binder transaction that failed
2992 * @data_size: the user provided data size for the transaction
2993 * @error: enum binder_driver_return_protocol returned to sender
2994 *
2995 * Note that t->buffer is not safe to access here, as it may have been
2996 * released (or not yet allocated). Callers should guarantee all the
2997 * transaction items used here are safe to access.
2998 */
2999static void binder_netlink_report(struct binder_proc *proc,
3000 struct binder_transaction *t,
3001 u32 data_size,
3002 u32 error)
3003{
3004 const char *context = proc->context->name;
3005 struct sk_buff *skb;
3006 void *hdr;
3007
3008 if (!genl_has_listeners(&binder_nl_family, &init_net,
3009 BINDER_NLGRP_REPORT))
3010 return;
3011
3012 trace_binder_netlink_report(context, t, data_size, error);
3013
3014 skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
3015 if (!skb)
3016 return;
3017
3018 hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
3019 if (!hdr)
3020 goto free_skb;
3021
3022 if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) ||
3023 nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) ||
3024 nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) ||
3025 nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid))
3026 goto cancel_skb;
3027
3028 if (t->to_proc &&
3029 nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid))
3030 goto cancel_skb;
3031
3032 if (t->to_thread &&
3033 nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid))
3034 goto cancel_skb;
3035
3036 if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY))
3037 goto cancel_skb;
3038
3039 if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) ||
3040 nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) ||
3041 nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size))
3042 goto cancel_skb;
3043
3044 genlmsg_end(skb, hdr);
3045 genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT,
3046 GFP_KERNEL);
3047 return;
3048
3049cancel_skb:
3050 genlmsg_cancel(skb, hdr);
3051free_skb:
3052 nlmsg_free(skb);
3053}
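
/*
 * A userspace listener receives these reports by resolving the binder
 * generic netlink family and joining its report multicast group; the
 * exact family and group names are defined by the spec behind
 * binder_netlink.h and are not repeated here. Each message carries the
 * BINDER_A_REPORT_* attributes filled in above.
 */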
3054
3055static void binder_transaction(struct binder_proc *proc,
3056 struct binder_thread *thread,
3057 struct binder_transaction_data *tr, int reply,
3058 binder_size_t extra_buffers_size)
3059{
3060 int ret;
3061 struct binder_transaction *t;
3062 struct binder_work *w;
3063 struct binder_work *tcomplete;
3064 binder_size_t buffer_offset = 0;
3065 binder_size_t off_start_offset, off_end_offset;
3066 binder_size_t off_min;
3067 binder_size_t sg_buf_offset, sg_buf_end_offset;
3068 binder_size_t user_offset = 0;
3069 struct binder_proc *target_proc = NULL;
3070 struct binder_thread *target_thread = NULL;
3071 struct binder_node *target_node = NULL;
3072 struct binder_transaction *in_reply_to = NULL;
3073 struct binder_transaction_log_entry *e;
3074 uint32_t return_error = 0;
3075 uint32_t return_error_param = 0;
3076 uint32_t return_error_line = 0;
3077 binder_size_t last_fixup_obj_off = 0;
3078 binder_size_t last_fixup_min_off = 0;
3079 struct binder_context *context = proc->context;
3080 int t_debug_id = atomic_inc_return(&binder_last_id);
3081 ktime_t t_start_time = ktime_get();
3082 struct lsm_context lsmctx = { };
3083 struct list_head sgc_head;
3084 struct list_head pf_head;
3085 const void __user *user_buffer = (const void __user *)
3086 (uintptr_t)tr->data.ptr.buffer;
3087 INIT_LIST_HEAD(&sgc_head);
3088 INIT_LIST_HEAD(&pf_head);
3089
3090 e = binder_transaction_log_add(&binder_transaction_log);
3091 e->debug_id = t_debug_id;
3092 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3093 e->from_proc = proc->pid;
3094 e->from_thread = thread->pid;
3095 e->target_handle = tr->target.handle;
3096 e->data_size = tr->data_size;
3097 e->offsets_size = tr->offsets_size;
3098 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3099
3100 binder_inner_proc_lock(proc);
3101 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3102 binder_inner_proc_unlock(proc);
3103
3104 t = kzalloc(sizeof(*t), GFP_KERNEL);
3105 if (!t) {
3106 binder_txn_error("%d:%d cannot allocate transaction\n",
3107 thread->pid, proc->pid);
3108 return_error = BR_FAILED_REPLY;
3109 return_error_param = -ENOMEM;
3110 return_error_line = __LINE__;
3111 goto err_alloc_t_failed;
3112 }
3113 INIT_LIST_HEAD(&t->fd_fixups);
3114 binder_stats_created(BINDER_STAT_TRANSACTION);
3115 spin_lock_init(&t->lock);
3116 t->debug_id = t_debug_id;
3117 t->start_time = t_start_time;
3118 t->from_pid = proc->pid;
3119 t->from_tid = thread->pid;
3120 t->sender_euid = task_euid(proc->tsk);
3121 t->code = tr->code;
3122 t->flags = tr->flags;
3123 t->priority = task_nice(current);
3124 t->work.type = BINDER_WORK_TRANSACTION;
3125 t->is_async = !reply && (tr->flags & TF_ONE_WAY);
3126 t->is_reply = reply;
3127 if (!reply && !(tr->flags & TF_ONE_WAY))
3128 t->from = thread;
3129
3130 if (reply) {
3131 binder_inner_proc_lock(proc);
3132 in_reply_to = thread->transaction_stack;
3133 if (in_reply_to == NULL) {
3134 binder_inner_proc_unlock(proc);
3135 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3136 proc->pid, thread->pid);
3137 return_error = BR_FAILED_REPLY;
3138 return_error_param = -EPROTO;
3139 return_error_line = __LINE__;
3140 goto err_empty_call_stack;
3141 }
3142 if (in_reply_to->to_thread != thread) {
3143 spin_lock(&in_reply_to->lock);
3144 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3145 proc->pid, thread->pid, in_reply_to->debug_id,
3146 in_reply_to->to_proc ?
3147 in_reply_to->to_proc->pid : 0,
3148 in_reply_to->to_thread ?
3149 in_reply_to->to_thread->pid : 0);
3150 spin_unlock(&in_reply_to->lock);
3151 binder_inner_proc_unlock(proc);
3152 return_error = BR_FAILED_REPLY;
3153 return_error_param = -EPROTO;
3154 return_error_line = __LINE__;
3155 in_reply_to = NULL;
3156 goto err_bad_call_stack;
3157 }
3158 thread->transaction_stack = in_reply_to->to_parent;
3159 binder_inner_proc_unlock(proc);
3160 binder_set_nice(in_reply_to->saved_priority);
3161 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3162 if (target_thread == NULL) {
3163 /* annotation for sparse */
3164 __release(&target_thread->proc->inner_lock);
3165 binder_txn_error("%d:%d reply target not found\n",
3166 thread->pid, proc->pid);
3167 return_error = BR_DEAD_REPLY;
3168 return_error_line = __LINE__;
3169 goto err_dead_binder;
3170 }
3171 if (target_thread->transaction_stack != in_reply_to) {
3172 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3173 proc->pid, thread->pid,
3174 target_thread->transaction_stack ?
3175 target_thread->transaction_stack->debug_id : 0,
3176 in_reply_to->debug_id);
3177 binder_inner_proc_unlock(target_thread->proc);
3178 return_error = BR_FAILED_REPLY;
3179 return_error_param = -EPROTO;
3180 return_error_line = __LINE__;
3181 in_reply_to = NULL;
3182 target_thread = NULL;
3183 goto err_dead_binder;
3184 }
3185 target_proc = target_thread->proc;
3186 target_proc->tmp_ref++;
3187 binder_inner_proc_unlock(target_thread->proc);
3188 } else {
3189 if (tr->target.handle) {
3190 struct binder_ref *ref;
3191
3192 /*
3193 * There must already be a strong ref
3194 * on this node. If so, do a strong
3195 * increment on the node to ensure it
3196 * stays alive until the transaction is
3197 * done.
3198 */
3199 binder_proc_lock(proc);
3200 ref = binder_get_ref_olocked(proc, tr->target.handle,
3201 true);
3202 if (ref) {
3203 target_node = binder_get_node_refs_for_txn(
3204 ref->node, &target_proc,
3205 &return_error);
3206 } else {
3207 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3208 proc->pid, thread->pid, tr->target.handle);
3209 return_error = BR_FAILED_REPLY;
3210 }
3211 binder_proc_unlock(proc);
3212 } else {
3213 mutex_lock(&context->context_mgr_node_lock);
3214 target_node = context->binder_context_mgr_node;
3215 if (target_node)
3216 target_node = binder_get_node_refs_for_txn(
3217 target_node, &target_proc,
3218 &return_error);
3219 else
3220 return_error = BR_DEAD_REPLY;
3221 mutex_unlock(&context->context_mgr_node_lock);
3222 if (target_node && target_proc->pid == proc->pid) {
3223 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3224 proc->pid, thread->pid);
3225 return_error = BR_FAILED_REPLY;
3226 return_error_param = -EINVAL;
3227 return_error_line = __LINE__;
3228 goto err_invalid_target_handle;
3229 }
3230 }
3231 if (!target_node) {
3232 binder_txn_error("%d:%d cannot find target node\n",
3233 proc->pid, thread->pid);
3234 /* return_error is set above */
3235 return_error_param = -EINVAL;
3236 return_error_line = __LINE__;
3237 goto err_dead_binder;
3238 }
3239 e->to_node = target_node->debug_id;
3240 if (WARN_ON(proc == target_proc)) {
3241 binder_txn_error("%d:%d self transactions not allowed\n",
3242 thread->pid, proc->pid);
3243 return_error = BR_FAILED_REPLY;
3244 return_error_param = -EINVAL;
3245 return_error_line = __LINE__;
3246 goto err_invalid_target_handle;
3247 }
3248 if (security_binder_transaction(proc->cred,
3249 target_proc->cred) < 0) {
3250 binder_txn_error("%d:%d transaction credentials failed\n",
3251 thread->pid, proc->pid);
3252 return_error = BR_FAILED_REPLY;
3253 return_error_param = -EPERM;
3254 return_error_line = __LINE__;
3255 goto err_invalid_target_handle;
3256 }
3257 binder_inner_proc_lock(proc);
3258
3259 w = list_first_entry_or_null(&thread->todo,
3260 struct binder_work, entry);
3261 if (!(tr->flags & TF_ONE_WAY) && w &&
3262 w->type == BINDER_WORK_TRANSACTION) {
3263 /*
3264 * Do not allow new outgoing transaction from a
3265 * thread that has a transaction at the head of
3266 * its todo list. Only need to check the head
3267 * because binder_select_thread_ilocked picks a
3268 * thread from proc->waiting_threads to enqueue
3269 * the transaction, and nothing is queued to the
3270 * todo list while the thread is on waiting_threads.
3271 */
3272 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3273 proc->pid, thread->pid);
3274 binder_inner_proc_unlock(proc);
3275 return_error = BR_FAILED_REPLY;
3276 return_error_param = -EPROTO;
3277 return_error_line = __LINE__;
3278 goto err_bad_todo_list;
3279 }
3280
3281 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3282 struct binder_transaction *tmp;
3283
3284 tmp = thread->transaction_stack;
3285 if (tmp->to_thread != thread) {
3286 spin_lock(&tmp->lock);
3287 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3288 proc->pid, thread->pid, tmp->debug_id,
3289 tmp->to_proc ? tmp->to_proc->pid : 0,
3290 tmp->to_thread ?
3291 tmp->to_thread->pid : 0);
3292 spin_unlock(&tmp->lock);
3293 binder_inner_proc_unlock(proc);
3294 return_error = BR_FAILED_REPLY;
3295 return_error_param = -EPROTO;
3296 return_error_line = __LINE__;
3297 goto err_bad_call_stack;
3298 }
3299 while (tmp) {
3300 struct binder_thread *from;
3301
3302 spin_lock(&tmp->lock);
3303 from = tmp->from;
3304 if (from && from->proc == target_proc) {
3305 atomic_inc(&from->tmp_ref);
3306 target_thread = from;
3307 spin_unlock(&tmp->lock);
3308 break;
3309 }
3310 spin_unlock(&tmp->lock);
3311 tmp = tmp->from_parent;
3312 }
3313 }
3314 binder_inner_proc_unlock(proc);
3315 }
3316
3317 t->to_proc = target_proc;
3318 t->to_thread = target_thread;
3319 if (target_thread)
3320 e->to_thread = target_thread->pid;
3321 e->to_proc = target_proc->pid;
3322
3323 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3324 if (tcomplete == NULL) {
3325 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3326 thread->pid, proc->pid);
3327 return_error = BR_FAILED_REPLY;
3328 return_error_param = -ENOMEM;
3329 return_error_line = __LINE__;
3330 goto err_alloc_tcomplete_failed;
3331 }
3332 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3333
3334 if (reply)
3335 binder_debug(BINDER_DEBUG_TRANSACTION,
3336 "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
3337 proc->pid, thread->pid, t->debug_id,
3338 target_proc->pid, target_thread->pid,
3339 (u64)tr->data_size, (u64)tr->offsets_size,
3340 (u64)extra_buffers_size);
3341 else
3342 binder_debug(BINDER_DEBUG_TRANSACTION,
3343 "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
3344 proc->pid, thread->pid, t->debug_id,
3345 target_proc->pid, target_node->debug_id,
3346 (u64)tr->data_size, (u64)tr->offsets_size,
3347 (u64)extra_buffers_size);
3348
3349 if (target_node && target_node->txn_security_ctx) {
3350 u32 secid;
3351 size_t added_size;
3352
3353 security_cred_getsecid(proc->cred, &secid);
3354 ret = security_secid_to_secctx(secid, &lsmctx);
3355 if (ret < 0) {
3356 binder_txn_error("%d:%d failed to get security context\n",
3357 thread->pid, proc->pid);
3358 return_error = BR_FAILED_REPLY;
3359 return_error_param = ret;
3360 return_error_line = __LINE__;
3361 goto err_get_secctx_failed;
3362 }
3363 added_size = ALIGN(lsmctx.len, sizeof(u64));
3364 extra_buffers_size += added_size;
3365 if (extra_buffers_size < added_size) {
3366 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3367 thread->pid, proc->pid);
3368 return_error = BR_FAILED_REPLY;
3369 return_error_param = -EINVAL;
3370 return_error_line = __LINE__;
3371 goto err_bad_extra_size;
3372 }
3373 }
3374
3375 trace_binder_transaction(reply, t, target_node);
3376
3377 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3378 tr->offsets_size, extra_buffers_size,
3379 !reply && (t->flags & TF_ONE_WAY));
3380 if (IS_ERR(t->buffer)) {
3381 char *s;
3382
3383 ret = PTR_ERR(t->buffer);
3384 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3385 : (ret == -ENOSPC) ? ": no space left"
3386 : (ret == -ENOMEM) ? ": memory allocation failed"
3387 : "";
3388 binder_txn_error("cannot allocate buffer%s", s);
3389
3390 return_error_param = PTR_ERR(t->buffer);
3391 return_error = return_error_param == -ESRCH ?
3392 BR_DEAD_REPLY : BR_FAILED_REPLY;
3393 return_error_line = __LINE__;
3394 t->buffer = NULL;
3395 goto err_binder_alloc_buf_failed;
3396 }
3397 if (lsmctx.context) {
3398 int err;
3399 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3400 ALIGN(tr->offsets_size, sizeof(void *)) +
3401 ALIGN(extra_buffers_size, sizeof(void *)) -
3402 ALIGN(lsmctx.len, sizeof(u64));
3403
3404 t->security_ctx = t->buffer->user_data + buf_offset;
3405 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3406 t->buffer, buf_offset,
3407 lsmctx.context, lsmctx.len);
3408 if (err) {
3409 t->security_ctx = 0;
3410 WARN_ON(1);
3411 }
3412 security_release_secctx(&lsmctx);
3413 lsmctx.context = NULL;
3414 }
3415 t->buffer->debug_id = t->debug_id;
3416 t->buffer->transaction = t;
3417 t->buffer->target_node = target_node;
3418 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3419 trace_binder_transaction_alloc_buf(t->buffer);
3420
3421 if (binder_alloc_copy_user_to_buffer(
3422 &target_proc->alloc,
3423 t->buffer,
3424 ALIGN(tr->data_size, sizeof(void *)),
3425 (const void __user *)
3426 (uintptr_t)tr->data.ptr.offsets,
3427 tr->offsets_size)) {
3428 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3429 proc->pid, thread->pid);
3430 return_error = BR_FAILED_REPLY;
3431 return_error_param = -EFAULT;
3432 return_error_line = __LINE__;
3433 goto err_copy_data_failed;
3434 }
3435 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3436 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3437 proc->pid, thread->pid, (u64)tr->offsets_size);
3438 return_error = BR_FAILED_REPLY;
3439 return_error_param = -EINVAL;
3440 return_error_line = __LINE__;
3441 goto err_bad_offset;
3442 }
3443 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3444 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3445 proc->pid, thread->pid,
3446 (u64)extra_buffers_size);
3447 return_error = BR_FAILED_REPLY;
3448 return_error_param = -EINVAL;
3449 return_error_line = __LINE__;
3450 goto err_bad_offset;
3451 }
3452 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3453 buffer_offset = off_start_offset;
3454 off_end_offset = off_start_offset + tr->offsets_size;
3455 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3456 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3457 ALIGN(lsmctx.len, sizeof(u64));
3458 off_min = 0;
3459 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3460 buffer_offset += sizeof(binder_size_t)) {
3461 struct binder_object_header *hdr;
3462 size_t object_size;
3463 struct binder_object object;
3464 binder_size_t object_offset;
3465 binder_size_t copy_size;
3466
3467 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3468 &object_offset,
3469 t->buffer,
3470 buffer_offset,
3471 sizeof(object_offset))) {
3472 binder_txn_error("%d:%d copy offset from buffer failed\n",
3473 thread->pid, proc->pid);
3474 return_error = BR_FAILED_REPLY;
3475 return_error_param = -EINVAL;
3476 return_error_line = __LINE__;
3477 goto err_bad_offset;
3478 }
3479
3480 /*
3481 * Copy the source user buffer up to the next object
3482 * that will be processed.
3483 */
3484 copy_size = object_offset - user_offset;
3485 if (copy_size && (user_offset > object_offset ||
3486 object_offset > tr->data_size ||
3487 binder_alloc_copy_user_to_buffer(
3488 &target_proc->alloc,
3489 t->buffer, user_offset,
3490 user_buffer + user_offset,
3491 copy_size))) {
3492 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3493 proc->pid, thread->pid);
3494 return_error = BR_FAILED_REPLY;
3495 return_error_param = -EFAULT;
3496 return_error_line = __LINE__;
3497 goto err_copy_data_failed;
3498 }
3499 object_size = binder_get_object(target_proc, user_buffer,
3500 t->buffer, object_offset, &object);
3501 if (object_size == 0 || object_offset < off_min) {
3502 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3503 proc->pid, thread->pid,
3504 (u64)object_offset,
3505 (u64)off_min,
3506 (u64)t->buffer->data_size);
3507 return_error = BR_FAILED_REPLY;
3508 return_error_param = -EINVAL;
3509 return_error_line = __LINE__;
3510 goto err_bad_offset;
3511 }
3512 /*
3513 * Set offset to the next buffer fragment to be
3514 * copied
3515 */
3516 user_offset = object_offset + object_size;
3517
3518 hdr = &object.hdr;
3519 off_min = object_offset + object_size;
3520 switch (hdr->type) {
3521 case BINDER_TYPE_BINDER:
3522 case BINDER_TYPE_WEAK_BINDER: {
3523 struct flat_binder_object *fp;
3524
3525 fp = to_flat_binder_object(hdr);
3526 ret = binder_translate_binder(fp, t, thread);
3527
3528 if (ret < 0 ||
3529 binder_alloc_copy_to_buffer(&target_proc->alloc,
3530 t->buffer,
3531 object_offset,
3532 fp, sizeof(*fp))) {
3533 binder_txn_error("%d:%d translate binder failed\n",
3534 thread->pid, proc->pid);
3535 return_error = BR_FAILED_REPLY;
3536 return_error_param = ret;
3537 return_error_line = __LINE__;
3538 goto err_translate_failed;
3539 }
3540 } break;
3541 case BINDER_TYPE_HANDLE:
3542 case BINDER_TYPE_WEAK_HANDLE: {
3543 struct flat_binder_object *fp;
3544
3545 fp = to_flat_binder_object(hdr);
3546 ret = binder_translate_handle(fp, t, thread);
3547 if (ret < 0 ||
3548 binder_alloc_copy_to_buffer(&target_proc->alloc,
3549 t->buffer,
3550 object_offset,
3551 fp, sizeof(*fp))) {
3552 binder_txn_error("%d:%d translate handle failed\n",
3553 thread->pid, proc->pid);
3554 return_error = BR_FAILED_REPLY;
3555 return_error_param = ret;
3556 return_error_line = __LINE__;
3557 goto err_translate_failed;
3558 }
3559 } break;
3560
3561 case BINDER_TYPE_FD: {
3562 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3563 binder_size_t fd_offset = object_offset +
3564 (uintptr_t)&fp->fd - (uintptr_t)fp;
3565 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3566 thread, in_reply_to);
3567
3568 fp->pad_binder = 0;
3569 if (ret < 0 ||
3570 binder_alloc_copy_to_buffer(&target_proc->alloc,
3571 t->buffer,
3572 object_offset,
3573 fp, sizeof(*fp))) {
3574 binder_txn_error("%d:%d translate fd failed\n",
3575 thread->pid, proc->pid);
3576 return_error = BR_FAILED_REPLY;
3577 return_error_param = ret;
3578 return_error_line = __LINE__;
3579 goto err_translate_failed;
3580 }
3581 } break;
3582 case BINDER_TYPE_FDA: {
3583 struct binder_object ptr_object;
3584 binder_size_t parent_offset;
3585 struct binder_object user_object;
3586 size_t user_parent_size;
3587 struct binder_fd_array_object *fda =
3588 to_binder_fd_array_object(hdr);
3589 size_t num_valid = (buffer_offset - off_start_offset) /
3590 sizeof(binder_size_t);
3591 struct binder_buffer_object *parent =
3592 binder_validate_ptr(target_proc, t->buffer,
3593 &ptr_object, fda->parent,
3594 off_start_offset,
3595 &parent_offset,
3596 num_valid);
3597 if (!parent) {
3598 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3599 proc->pid, thread->pid);
3600 return_error = BR_FAILED_REPLY;
3601 return_error_param = -EINVAL;
3602 return_error_line = __LINE__;
3603 goto err_bad_parent;
3604 }
3605 if (!binder_validate_fixup(target_proc, t->buffer,
3606 off_start_offset,
3607 parent_offset,
3608 fda->parent_offset,
3609 last_fixup_obj_off,
3610 last_fixup_min_off)) {
3611 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3612 proc->pid, thread->pid);
3613 return_error = BR_FAILED_REPLY;
3614 return_error_param = -EINVAL;
3615 return_error_line = __LINE__;
3616 goto err_bad_parent;
3617 }
3618 /*
3619 * We need to read the user version of the parent
3620 * object to get the original user offset
3621 */
3622 user_parent_size =
3623 binder_get_object(proc, user_buffer, t->buffer,
3624 parent_offset, &user_object);
3625 if (user_parent_size != sizeof(user_object.bbo)) {
3626 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3627 proc->pid, thread->pid,
3628 user_parent_size,
3629 sizeof(user_object.bbo));
3630 return_error = BR_FAILED_REPLY;
3631 return_error_param = -EINVAL;
3632 return_error_line = __LINE__;
3633 goto err_bad_parent;
3634 }
3635 ret = binder_translate_fd_array(&pf_head, fda,
3636 user_buffer, parent,
3637 &user_object.bbo, t,
3638 thread, in_reply_to);
3639 if (!ret)
3640 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3641 t->buffer,
3642 object_offset,
3643 fda, sizeof(*fda));
3644 if (ret) {
3645 binder_txn_error("%d:%d translate fd array failed\n",
3646 thread->pid, proc->pid);
3647 return_error = BR_FAILED_REPLY;
3648 return_error_param = ret > 0 ? -EINVAL : ret;
3649 return_error_line = __LINE__;
3650 goto err_translate_failed;
3651 }
3652 last_fixup_obj_off = parent_offset;
3653 last_fixup_min_off =
3654 fda->parent_offset + sizeof(u32) * fda->num_fds;
3655 } break;
3656 case BINDER_TYPE_PTR: {
3657 struct binder_buffer_object *bp =
3658 to_binder_buffer_object(hdr);
3659 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3660 size_t num_valid;
3661
3662 if (bp->length > buf_left) {
3663 binder_user_error("%d:%d got transaction with too large buffer\n",
3664 proc->pid, thread->pid);
3665 return_error = BR_FAILED_REPLY;
3666 return_error_param = -EINVAL;
3667 return_error_line = __LINE__;
3668 goto err_bad_offset;
3669 }
3670 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3671 (const void __user *)(uintptr_t)bp->buffer,
3672 bp->length);
3673 if (ret) {
3674 binder_txn_error("%d:%d deferred copy failed\n",
3675 thread->pid, proc->pid);
3676 return_error = BR_FAILED_REPLY;
3677 return_error_param = ret;
3678 return_error_line = __LINE__;
3679 goto err_translate_failed;
3680 }
3681 /* Fix up the buffer pointer to the target proc address space */
3682 bp->buffer = t->buffer->user_data + sg_buf_offset;
3683 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3684
3685 num_valid = (buffer_offset - off_start_offset) /
3686 sizeof(binder_size_t);
3687 ret = binder_fixup_parent(&pf_head, t,
3688 thread, bp,
3689 off_start_offset,
3690 num_valid,
3691 last_fixup_obj_off,
3692 last_fixup_min_off);
3693 if (ret < 0 ||
3694 binder_alloc_copy_to_buffer(&target_proc->alloc,
3695 t->buffer,
3696 object_offset,
3697 bp, sizeof(*bp))) {
3698 binder_txn_error("%d:%d failed to fixup parent\n",
3699 thread->pid, proc->pid);
3700 return_error = BR_FAILED_REPLY;
3701 return_error_param = ret;
3702 return_error_line = __LINE__;
3703 goto err_translate_failed;
3704 }
3705 last_fixup_obj_off = object_offset;
3706 last_fixup_min_off = 0;
3707 } break;
3708 default:
3709 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3710 proc->pid, thread->pid, hdr->type);
3711 return_error = BR_FAILED_REPLY;
3712 return_error_param = -EINVAL;
3713 return_error_line = __LINE__;
3714 goto err_bad_object_type;
3715 }
3716 }
3717 /* Done processing objects, copy the rest of the buffer */
3718 if (binder_alloc_copy_user_to_buffer(
3719 &target_proc->alloc,
3720 t->buffer, user_offset,
3721 user_buffer + user_offset,
3722 tr->data_size - user_offset)) {
3723 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3724 proc->pid, thread->pid);
3725 return_error = BR_FAILED_REPLY;
3726 return_error_param = -EFAULT;
3727 return_error_line = __LINE__;
3728 goto err_copy_data_failed;
3729 }
3730
3731 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3732 &sgc_head, &pf_head);
3733 if (ret) {
3734 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3735 proc->pid, thread->pid);
3736 return_error = BR_FAILED_REPLY;
3737 return_error_param = ret;
3738 return_error_line = __LINE__;
3739 goto err_copy_data_failed;
3740 }
3741 if (t->buffer->oneway_spam_suspect) {
3742 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3743 binder_netlink_report(proc, t, tr->data_size,
3744 BR_ONEWAY_SPAM_SUSPECT);
3745 } else {
3746 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3747 }
3748
3749 if (reply) {
3750 binder_enqueue_thread_work(thread, tcomplete);
3751 binder_inner_proc_lock(target_proc);
3752 if (target_thread->is_dead) {
3753 return_error = BR_DEAD_REPLY;
3754 binder_inner_proc_unlock(target_proc);
3755 goto err_dead_proc_or_thread;
3756 }
3757 BUG_ON(t->buffer->async_transaction != 0);
3758 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3759 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3760 target_proc->outstanding_txns++;
3761 binder_inner_proc_unlock(target_proc);
3762 wake_up_interruptible_sync(&target_thread->wait);
3763 binder_free_transaction(in_reply_to);
3764 } else if (!(t->flags & TF_ONE_WAY)) {
3765 BUG_ON(t->buffer->async_transaction != 0);
3766 binder_inner_proc_lock(proc);
3767 /*
3768 * Defer the TRANSACTION_COMPLETE, so we don't return to
3769 * userspace immediately; this lets the target process start
3770 * processing this transaction right away, reducing latency.
3771 * We then return the TRANSACTION_COMPLETE when the target
3772 * replies (or there is an error).
3773 */
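/*
 * Roughly, a synchronous caller then observes:
 *   BC_TRANSACTION            (target wakes and runs first)
 *   BR_TRANSACTION_COMPLETE   delivered together with
 *   BR_REPLY                  on a later read
 * instead of receiving BR_TRANSACTION_COMPLETE on its own
 * before the reply arrives.
 */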
3774 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3775 t->from_parent = thread->transaction_stack;
3776 thread->transaction_stack = t;
3777 binder_inner_proc_unlock(proc);
3778 return_error = binder_proc_transaction(t,
3779 target_proc, target_thread);
3780 if (return_error) {
3781 binder_inner_proc_lock(proc);
3782 binder_pop_transaction_ilocked(thread, t);
3783 binder_inner_proc_unlock(proc);
3784 goto err_dead_proc_or_thread;
3785 }
3786 } else {
3787 /*
3788 * Make a copy of the transaction. It is not safe to access 't'
3789 * once binder_proc_transaction() reports a pending frozen state:
3790 * the target could thaw and consume the transaction at any point.
3791 * Instead, use the safe 't_copy' for binder_netlink_report().
3792 */
3793 struct binder_transaction t_copy = *t;
3794
3795 BUG_ON(target_node == NULL);
3796 BUG_ON(t->buffer->async_transaction != 1);
3797 return_error = binder_proc_transaction(t, target_proc, NULL);
3798 /*
3799 * Let the caller know when async transaction reaches a frozen
3800 * process and is put in a pending queue, waiting for the target
3801 * process to be unfrozen.
3802 */
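/*
 * Note that BR_TRANSACTION_PENDING_FROZEN is informational
 * rather than an error: the transaction remains queued, and
 * the error checks below deliberately do not fail it.
 */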
3803 if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
3804 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3805 binder_netlink_report(proc, &t_copy, tr->data_size,
3806 return_error);
3807 }
3808 binder_enqueue_thread_work(thread, tcomplete);
3809 if (return_error &&
3810 return_error != BR_TRANSACTION_PENDING_FROZEN)
3811 goto err_dead_proc_or_thread;
3812 }
3813 if (target_thread)
3814 binder_thread_dec_tmpref(target_thread);
3815 binder_proc_dec_tmpref(target_proc);
3816 if (target_node)
3817 binder_dec_node_tmpref(target_node);
3818 /*
3819 * write barrier to synchronize with initialization
3820 * of log entry
3821 */
3822 smp_wmb();
3823 WRITE_ONCE(e->debug_id_done, t_debug_id);
3824 return;
3825
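/*
 * The error labels below are ordered so that each entry point
 * unwinds exactly the state established before the failure;
 * later labels release progressively less.
 */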
3826err_dead_proc_or_thread:
3827 binder_txn_error("%d:%d dead process or thread\n",
3828 thread->pid, proc->pid);
3829 return_error_line = __LINE__;
3830 binder_dequeue_work(proc, tcomplete);
3831err_translate_failed:
3832err_bad_object_type:
3833err_bad_offset:
3834err_bad_parent:
3835err_copy_data_failed:
3836 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3837 binder_free_txn_fixups(t);
3838 trace_binder_transaction_failed_buffer_release(t->buffer);
3839 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3840 buffer_offset, true);
3841 if (target_node)
3842 binder_dec_node_tmpref(target_node);
3843 target_node = NULL;
3844 t->buffer->transaction = NULL;
3845 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3846err_binder_alloc_buf_failed:
3847err_bad_extra_size:
3848 if (lsmctx.context)
3849 security_release_secctx(&lsmctx);
3850err_get_secctx_failed:
3851 kfree(tcomplete);
3852 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3853err_alloc_tcomplete_failed:
3854 if (trace_binder_txn_latency_free_enabled())
3855 binder_txn_latency_free(t);
3856err_bad_todo_list:
3857err_bad_call_stack:
3858err_empty_call_stack:
3859err_dead_binder:
3860err_invalid_target_handle:
3861 if (target_node) {
3862 binder_dec_node(target_node, 1, 0);
3863 binder_dec_node_tmpref(target_node);
3864 }
3865
3866 binder_netlink_report(proc, t, tr->data_size, return_error);
3867 kfree(t);
3868 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3869err_alloc_t_failed:
3870
3871 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3872 "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
3873 proc->pid, thread->pid, reply ? "reply" :
3874 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3875 target_proc ? target_proc->pid : 0,
3876 target_thread ? target_thread->pid : 0,
3877 t_debug_id, return_error, return_error_param,
3878 tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
3879 return_error_line);
3880
3881 if (target_thread)
3882 binder_thread_dec_tmpref(target_thread);
3883 if (target_proc)
3884 binder_proc_dec_tmpref(target_proc);
3885
3886 {
3887 struct binder_transaction_log_entry *fe;
3888
3889 e->return_error = return_error;
3890 e->return_error_param = return_error_param;
3891 e->return_error_line = return_error_line;
3892 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3893 *fe = *e;
3894 /*
3895 * write barrier to synchronize with initialization
3896 * of log entry
3897 */
3898 smp_wmb();
3899 WRITE_ONCE(e->debug_id_done, t_debug_id);
3900 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3901 }
3902
3903 BUG_ON(thread->return_error.cmd != BR_OK);
3904 if (in_reply_to) {
3905 binder_set_txn_from_error(in_reply_to, t_debug_id,
3906 return_error, return_error_param);
3907 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3908 binder_enqueue_thread_work(thread, &thread->return_error.work);
3909 binder_send_failed_reply(in_reply_to, return_error);
3910 } else {
3911 binder_inner_proc_lock(proc);
3912 binder_set_extended_error(&thread->ee, t_debug_id,
3913 return_error, return_error_param);
3914 binder_inner_proc_unlock(proc);
3915 thread->return_error.cmd = return_error;
3916 binder_enqueue_thread_work(thread, &thread->return_error.work);
3917 }
3918}
3919
3920static int
3921binder_request_freeze_notification(struct binder_proc *proc,
3922 struct binder_thread *thread,
3923 struct binder_handle_cookie *handle_cookie)
3924{
3925 struct binder_ref_freeze *freeze;
3926 struct binder_ref *ref;
3927
3928 freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
3929 if (!freeze)
3930 return -ENOMEM;
3931 binder_proc_lock(proc);
3932 ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3933 if (!ref) {
3934 binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
3935 proc->pid, thread->pid, handle_cookie->handle);
3936 binder_proc_unlock(proc);
3937 kfree(freeze);
3938 return -EINVAL;
3939 }
3940
3941 binder_node_lock(ref->node);
3942 if (ref->freeze) {
3943 binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
3944 proc->pid, thread->pid);
3945 binder_node_unlock(ref->node);
3946 binder_proc_unlock(proc);
3947 kfree(freeze);
3948 return -EINVAL;
3949 }
3950
3951 binder_stats_created(BINDER_STAT_FREEZE);
3952 INIT_LIST_HEAD(&freeze->work.entry);
3953 freeze->cookie = handle_cookie->cookie;
3954 freeze->work.type = BINDER_WORK_FROZEN_BINDER;
3955 ref->freeze = freeze;
3956
3957 if (ref->node->proc) {
3958 binder_inner_proc_lock(ref->node->proc);
3959 freeze->is_frozen = ref->node->proc->is_frozen;
3960 binder_inner_proc_unlock(ref->node->proc);
3961
3962 binder_inner_proc_lock(proc);
3963 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3964 binder_wakeup_proc_ilocked(proc);
3965 binder_inner_proc_unlock(proc);
3966 }
3967
3968 binder_node_unlock(ref->node);
3969 binder_proc_unlock(proc);
3970 return 0;
3971}
3972
3973static int
3974binder_clear_freeze_notification(struct binder_proc *proc,
3975 struct binder_thread *thread,
3976 struct binder_handle_cookie *handle_cookie)
3977{
3978 struct binder_ref_freeze *freeze;
3979 struct binder_ref *ref;
3980
3981 binder_proc_lock(proc);
3982 ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3983 if (!ref) {
3984 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
3985 proc->pid, thread->pid, handle_cookie->handle);
3986 binder_proc_unlock(proc);
3987 return -EINVAL;
3988 }
3989
3990 binder_node_lock(ref->node);
3991
3992 if (!ref->freeze) {
3993 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
3994 proc->pid, thread->pid);
3995 binder_node_unlock(ref->node);
3996 binder_proc_unlock(proc);
3997 return -EINVAL;
3998 }
3999 freeze = ref->freeze;
4000 binder_inner_proc_lock(proc);
4001 if (freeze->cookie != handle_cookie->cookie) {
4002 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
4003 proc->pid, thread->pid, (u64)freeze->cookie,
4004 (u64)handle_cookie->cookie);
4005 binder_inner_proc_unlock(proc);
4006 binder_node_unlock(ref->node);
4007 binder_proc_unlock(proc);
4008 return -EINVAL;
4009 }
4010 ref->freeze = NULL;
4011 /*
4012 * Take the existing freeze object and overwrite its work type. There are three cases here:
4013 * 1. No pending notification. In this case just add the work to the queue.
4014 * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
4015 * should resend with the new work type.
4016 * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
4017 * needs to be done here.
4018 */
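/*
 * Roughly: case 1 is the list_empty() branch below, case 2 the
 * freeze->sent branch (mark for resend), and case 3 the
 * remaining path, where the already-queued work is left alone.
 */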
4019 freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
4020 if (list_empty(&freeze->work.entry)) {
4021 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
4022 binder_wakeup_proc_ilocked(proc);
4023 } else if (freeze->sent) {
4024 freeze->resend = true;
4025 }
4026 binder_inner_proc_unlock(proc);
4027 binder_node_unlock(ref->node);
4028 binder_proc_unlock(proc);
4029 return 0;
4030}
4031
4032static int
4033binder_freeze_notification_done(struct binder_proc *proc,
4034 struct binder_thread *thread,
4035 binder_uintptr_t cookie)
4036{
4037 struct binder_ref_freeze *freeze = NULL;
4038 struct binder_work *w;
4039
4040 binder_inner_proc_lock(proc);
4041 list_for_each_entry(w, &proc->delivered_freeze, entry) {
4042 struct binder_ref_freeze *tmp_freeze =
4043 container_of(w, struct binder_ref_freeze, work);
4044
4045 if (tmp_freeze->cookie == cookie) {
4046 freeze = tmp_freeze;
4047 break;
4048 }
4049 }
4050 if (!freeze) {
4051 binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
4052 proc->pid, thread->pid, (u64)cookie);
4053 binder_inner_proc_unlock(proc);
4054 return -EINVAL;
4055 }
4056 binder_dequeue_work_ilocked(&freeze->work);
4057 freeze->sent = false;
4058 if (freeze->resend) {
4059 freeze->resend = false;
4060 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
4061 binder_wakeup_proc_ilocked(proc);
4062 }
4063 binder_inner_proc_unlock(proc);
4064 return 0;
4065}
4066
4067/**
4068 * binder_free_buf() - free the specified buffer
4069 * @proc: binder proc that owns buffer
4070 * @thread: binder thread performing the buffer release
4071 * @buffer: buffer to be freed
4072 * @is_failure: failed to send transaction
4073 *
4074 * If the buffer is for an async transaction, enqueue the next async
4075 * transaction from the node.
4076 *
4077 * Cleanup the buffer and free it.
4078 */
4079static void
4080binder_free_buf(struct binder_proc *proc,
4081 struct binder_thread *thread,
4082 struct binder_buffer *buffer, bool is_failure)
4083{
4084 binder_inner_proc_lock(proc);
4085 if (buffer->transaction) {
4086 buffer->transaction->buffer = NULL;
4087 buffer->transaction = NULL;
4088 }
4089 binder_inner_proc_unlock(proc);
4090 if (buffer->async_transaction && buffer->target_node) {
4091 struct binder_node *buf_node;
4092 struct binder_work *w;
4093
4094 buf_node = buffer->target_node;
4095 binder_node_inner_lock(buf_node);
4096 BUG_ON(!buf_node->has_async_transaction);
4097 BUG_ON(buf_node->proc != proc);
4098 w = binder_dequeue_work_head_ilocked(
4099 &buf_node->async_todo);
4100 if (!w) {
4101 buf_node->has_async_transaction = false;
4102 } else {
4103 binder_enqueue_work_ilocked(
4104 w, &proc->todo);
4105 binder_wakeup_proc_ilocked(proc);
4106 }
4107 binder_node_inner_unlock(buf_node);
4108 }
4109 trace_binder_transaction_buffer_release(buffer);
4110 binder_release_entire_buffer(proc, thread, buffer, is_failure);
4111 binder_alloc_free_buf(&proc->alloc, buffer);
4112}
4113
4114static int binder_thread_write(struct binder_proc *proc,
4115 struct binder_thread *thread,
4116 binder_uintptr_t binder_buffer, size_t size,
4117 binder_size_t *consumed)
4118{
4119 uint32_t cmd;
4120 struct binder_context *context = proc->context;
4121 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4122 void __user *ptr = buffer + *consumed;
4123 void __user *end = buffer + size;
4124
4125 while (ptr < end && thread->return_error.cmd == BR_OK) {
4126 int ret;
4127
4128 if (get_user(cmd, (uint32_t __user *)ptr))
4129 return -EFAULT;
4130 ptr += sizeof(uint32_t);
4131 trace_binder_command(cmd);
4132 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4133 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4134 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4135 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4136 }
4137 switch (cmd) {
4138 case BC_INCREFS:
4139 case BC_ACQUIRE:
4140 case BC_RELEASE:
4141 case BC_DECREFS: {
4142 uint32_t target;
4143 const char *debug_string;
4144 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4145 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4146 struct binder_ref_data rdata;
4147
4148 if (get_user(target, (uint32_t __user *)ptr))
4149 return -EFAULT;
4150
4151 ptr += sizeof(uint32_t);
4152 ret = -1;
4153 if (increment && !target) {
4154 struct binder_node *ctx_mgr_node;
4155
4156 mutex_lock(&context->context_mgr_node_lock);
4157 ctx_mgr_node = context->binder_context_mgr_node;
4158 if (ctx_mgr_node) {
4159 if (ctx_mgr_node->proc == proc) {
4160 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4161 proc->pid, thread->pid);
4162 mutex_unlock(&context->context_mgr_node_lock);
4163 return -EINVAL;
4164 }
4165 ret = binder_inc_ref_for_node(
4166 proc, ctx_mgr_node,
4167 strong, NULL, &rdata);
4168 }
4169 mutex_unlock(&context->context_mgr_node_lock);
4170 }
4171 if (ret)
4172 ret = binder_update_ref_for_handle(
4173 proc, target, increment, strong,
4174 &rdata);
4175 if (!ret && rdata.desc != target) {
4176 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4177 proc->pid, thread->pid,
4178 target, rdata.desc);
4179 }
4180 switch (cmd) {
4181 case BC_INCREFS:
4182 debug_string = "IncRefs";
4183 break;
4184 case BC_ACQUIRE:
4185 debug_string = "Acquire";
4186 break;
4187 case BC_RELEASE:
4188 debug_string = "Release";
4189 break;
4190 case BC_DECREFS:
4191 default:
4192 debug_string = "DecRefs";
4193 break;
4194 }
4195 if (ret) {
4196 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4197 proc->pid, thread->pid, debug_string,
4198 strong, target, ret);
4199 break;
4200 }
4201 binder_debug(BINDER_DEBUG_USER_REFS,
4202 "%d:%d %s ref %d desc %d s %d w %d\n",
4203 proc->pid, thread->pid, debug_string,
4204 rdata.debug_id, rdata.desc, rdata.strong,
4205 rdata.weak);
4206 break;
4207 }
4208 case BC_INCREFS_DONE:
4209 case BC_ACQUIRE_DONE: {
4210 binder_uintptr_t node_ptr;
4211 binder_uintptr_t cookie;
4212 struct binder_node *node;
4213 bool free_node;
4214
4215 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4216 return -EFAULT;
4217 ptr += sizeof(binder_uintptr_t);
4218 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4219 return -EFAULT;
4220 ptr += sizeof(binder_uintptr_t);
4221 node = binder_get_node(proc, node_ptr);
4222 if (node == NULL) {
4223 binder_user_error("%d:%d %s u%016llx no match\n",
4224 proc->pid, thread->pid,
4225 cmd == BC_INCREFS_DONE ?
4226 "BC_INCREFS_DONE" :
4227 "BC_ACQUIRE_DONE",
4228 (u64)node_ptr);
4229 break;
4230 }
4231 if (cookie != node->cookie) {
4232 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4233 proc->pid, thread->pid,
4234 cmd == BC_INCREFS_DONE ?
4235 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4236 (u64)node_ptr, node->debug_id,
4237 (u64)cookie, (u64)node->cookie);
4238 binder_put_node(node);
4239 break;
4240 }
4241 binder_node_inner_lock(node);
4242 if (cmd == BC_ACQUIRE_DONE) {
4243 if (node->pending_strong_ref == 0) {
4244 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4245 proc->pid, thread->pid,
4246 node->debug_id);
4247 binder_node_inner_unlock(node);
4248 binder_put_node(node);
4249 break;
4250 }
4251 node->pending_strong_ref = 0;
4252 } else {
4253 if (node->pending_weak_ref == 0) {
4254 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4255 proc->pid, thread->pid,
4256 node->debug_id);
4257 binder_node_inner_unlock(node);
4258 binder_put_node(node);
4259 break;
4260 }
4261 node->pending_weak_ref = 0;
4262 }
4263 free_node = binder_dec_node_nilocked(node,
4264 cmd == BC_ACQUIRE_DONE, 0);
4265 WARN_ON(free_node);
4266 binder_debug(BINDER_DEBUG_USER_REFS,
4267 "%d:%d %s node %d ls %d lw %d tr %d\n",
4268 proc->pid, thread->pid,
4269 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4270 node->debug_id, node->local_strong_refs,
4271 node->local_weak_refs, node->tmp_refs);
4272 binder_node_inner_unlock(node);
4273 binder_put_node(node);
4274 break;
4275 }
4276 case BC_ATTEMPT_ACQUIRE:
4277 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4278 return -EINVAL;
4279 case BC_ACQUIRE_RESULT:
4280 pr_err("BC_ACQUIRE_RESULT not supported\n");
4281 return -EINVAL;
4282
4283 case BC_FREE_BUFFER: {
4284 binder_uintptr_t data_ptr;
4285 struct binder_buffer *buffer;
4286
4287 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4288 return -EFAULT;
4289 ptr += sizeof(binder_uintptr_t);
4290
4291 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4292 data_ptr);
4293 if (IS_ERR_OR_NULL(buffer)) {
4294 if (PTR_ERR(buffer) == -EPERM) {
4295 binder_user_error(
4296 "%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
4297 proc->pid, thread->pid,
4298 (unsigned long)data_ptr - proc->alloc.vm_start);
4299 } else {
4300 binder_user_error(
4301 "%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
4302 proc->pid, thread->pid,
4303 (unsigned long)data_ptr - proc->alloc.vm_start);
4304 }
4305 break;
4306 }
4307 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4308 "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
4309 proc->pid, thread->pid,
4310 (unsigned long)data_ptr - proc->alloc.vm_start,
4311 buffer->debug_id,
4312 buffer->transaction ? "active" : "finished");
4313 binder_free_buf(proc, thread, buffer, false);
4314 break;
4315 }
4316
4317 case BC_TRANSACTION_SG:
4318 case BC_REPLY_SG: {
4319 struct binder_transaction_data_sg tr;
4320
4321 if (copy_from_user(&tr, ptr, sizeof(tr)))
4322 return -EFAULT;
4323 ptr += sizeof(tr);
4324 binder_transaction(proc, thread, &tr.transaction_data,
4325 cmd == BC_REPLY_SG, tr.buffers_size);
4326 break;
4327 }
4328 case BC_TRANSACTION:
4329 case BC_REPLY: {
4330 struct binder_transaction_data tr;
4331
4332 if (copy_from_user(&tr, ptr, sizeof(tr)))
4333 return -EFAULT;
4334 ptr += sizeof(tr);
4335 binder_transaction(proc, thread, &tr,
4336 cmd == BC_REPLY, 0);
4337 break;
4338 }
4339
4340 case BC_REGISTER_LOOPER:
4341 binder_debug(BINDER_DEBUG_THREADS,
4342 "%d:%d BC_REGISTER_LOOPER\n",
4343 proc->pid, thread->pid);
4344 binder_inner_proc_lock(proc);
4345 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4346 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4347 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4348 proc->pid, thread->pid);
4349 } else if (proc->requested_threads == 0) {
4350 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4351 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4352 proc->pid, thread->pid);
4353 } else {
4354 proc->requested_threads--;
4355 proc->requested_threads_started++;
4356 }
4357 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4358 binder_inner_proc_unlock(proc);
4359 break;
4360 case BC_ENTER_LOOPER:
4361 binder_debug(BINDER_DEBUG_THREADS,
4362 "%d:%d BC_ENTER_LOOPER\n",
4363 proc->pid, thread->pid);
4364 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4365 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4366 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4367 proc->pid, thread->pid);
4368 }
4369 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4370 break;
4371 case BC_EXIT_LOOPER:
4372 binder_debug(BINDER_DEBUG_THREADS,
4373 "%d:%d BC_EXIT_LOOPER\n",
4374 proc->pid, thread->pid);
4375 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4376 break;
4377
4378 case BC_REQUEST_DEATH_NOTIFICATION:
4379 case BC_CLEAR_DEATH_NOTIFICATION: {
4380 uint32_t target;
4381 binder_uintptr_t cookie;
4382 struct binder_ref *ref;
4383 struct binder_ref_death *death = NULL;
4384
4385 if (get_user(target, (uint32_t __user *)ptr))
4386 return -EFAULT;
4387 ptr += sizeof(uint32_t);
4388 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4389 return -EFAULT;
4390 ptr += sizeof(binder_uintptr_t);
4391 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4392 /*
4393 * Allocate memory for death notification
4394 * before taking lock
4395 */
4396 death = kzalloc(sizeof(*death), GFP_KERNEL);
4397 if (death == NULL) {
4398 WARN_ON(thread->return_error.cmd !=
4399 BR_OK);
4400 thread->return_error.cmd = BR_ERROR;
4401 binder_enqueue_thread_work(
4402 thread,
4403 &thread->return_error.work);
4404 binder_debug(
4405 BINDER_DEBUG_FAILED_TRANSACTION,
4406 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4407 proc->pid, thread->pid);
4408 break;
4409 }
4410 }
4411 binder_proc_lock(proc);
4412 ref = binder_get_ref_olocked(proc, target, false);
4413 if (ref == NULL) {
4414 binder_user_error("%d:%d %s invalid ref %d\n",
4415 proc->pid, thread->pid,
4416 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4417 "BC_REQUEST_DEATH_NOTIFICATION" :
4418 "BC_CLEAR_DEATH_NOTIFICATION",
4419 target);
4420 binder_proc_unlock(proc);
4421 kfree(death);
4422 break;
4423 }
4424
4425 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4426 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4427 proc->pid, thread->pid,
4428 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4429 "BC_REQUEST_DEATH_NOTIFICATION" :
4430 "BC_CLEAR_DEATH_NOTIFICATION",
4431 (u64)cookie, ref->data.debug_id,
4432 ref->data.desc, ref->data.strong,
4433 ref->data.weak, ref->node->debug_id);
4434
4435 binder_node_lock(ref->node);
4436 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4437 if (ref->death) {
4438 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4439 proc->pid, thread->pid);
4440 binder_node_unlock(ref->node);
4441 binder_proc_unlock(proc);
4442 kfree(death);
4443 break;
4444 }
4445 binder_stats_created(BINDER_STAT_DEATH);
4446 INIT_LIST_HEAD(&death->work.entry);
4447 death->cookie = cookie;
4448 ref->death = death;
4449 if (ref->node->proc == NULL) {
4450 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4451
4452 binder_inner_proc_lock(proc);
4453 binder_enqueue_work_ilocked(
4454 &ref->death->work, &proc->todo);
4455 binder_wakeup_proc_ilocked(proc);
4456 binder_inner_proc_unlock(proc);
4457 }
4458 } else {
4459 if (ref->death == NULL) {
4460 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4461 proc->pid, thread->pid);
4462 binder_node_unlock(ref->node);
4463 binder_proc_unlock(proc);
4464 break;
4465 }
4466 death = ref->death;
4467 if (death->cookie != cookie) {
4468 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4469 proc->pid, thread->pid,
4470 (u64)death->cookie,
4471 (u64)cookie);
4472 binder_node_unlock(ref->node);
4473 binder_proc_unlock(proc);
4474 break;
4475 }
4476 ref->death = NULL;
4477 binder_inner_proc_lock(proc);
4478 if (list_empty(&death->work.entry)) {
4479 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4480 if (thread->looper &
4481 (BINDER_LOOPER_STATE_REGISTERED |
4482 BINDER_LOOPER_STATE_ENTERED))
4483 binder_enqueue_thread_work_ilocked(
4484 thread,
4485 &death->work);
4486 else {
4487 binder_enqueue_work_ilocked(
4488 &death->work,
4489 &proc->todo);
4490 binder_wakeup_proc_ilocked(
4491 proc);
4492 }
4493 } else {
4494 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4495 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4496 }
4497 binder_inner_proc_unlock(proc);
4498 }
4499 binder_node_unlock(ref->node);
4500 binder_proc_unlock(proc);
4501 } break;
4502 case BC_DEAD_BINDER_DONE: {
4503 struct binder_work *w;
4504 binder_uintptr_t cookie;
4505 struct binder_ref_death *death = NULL;
4506
4507 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4508 return -EFAULT;
4509
4510 ptr += sizeof(cookie);
4511 binder_inner_proc_lock(proc);
4512 list_for_each_entry(w, &proc->delivered_death,
4513 entry) {
4514 struct binder_ref_death *tmp_death =
4515 container_of(w,
4516 struct binder_ref_death,
4517 work);
4518
4519 if (tmp_death->cookie == cookie) {
4520 death = tmp_death;
4521 break;
4522 }
4523 }
4524 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4525 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4526 proc->pid, thread->pid, (u64)cookie,
4527 death);
4528 if (death == NULL) {
4529 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4530 proc->pid, thread->pid, (u64)cookie);
4531 binder_inner_proc_unlock(proc);
4532 break;
4533 }
4534 binder_dequeue_work_ilocked(&death->work);
4535 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4536 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4537 if (thread->looper &
4538 (BINDER_LOOPER_STATE_REGISTERED |
4539 BINDER_LOOPER_STATE_ENTERED))
4540 binder_enqueue_thread_work_ilocked(
4541 thread, &death->work);
4542 else {
4543 binder_enqueue_work_ilocked(
4544 &death->work,
4545 &proc->todo);
4546 binder_wakeup_proc_ilocked(proc);
4547 }
4548 }
4549 binder_inner_proc_unlock(proc);
4550 } break;
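
/*
 * The death notification cases above mirror the freeze
 * lifecycle, roughly:
 *
 *   BC_REQUEST_DEATH_NOTIFICATION     register for a handle
 *   BR_DEAD_BINDER                    delivered when the node dies
 *   BC_DEAD_BINDER_DONE               ack from userspace
 *   BC_CLEAR_DEATH_NOTIFICATION       unregister
 *   BR_CLEAR_DEATH_NOTIFICATION_DONE  confirms removal
 */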
4551
4552 case BC_REQUEST_FREEZE_NOTIFICATION: {
4553 struct binder_handle_cookie handle_cookie;
4554 int error;
4555
4556 if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4557 return -EFAULT;
4558 ptr += sizeof(handle_cookie);
4559 error = binder_request_freeze_notification(proc, thread,
4560 &handle_cookie);
4561 if (error)
4562 return error;
4563 } break;
4564
4565 case BC_CLEAR_FREEZE_NOTIFICATION: {
4566 struct binder_handle_cookie handle_cookie;
4567 int error;
4568
4569 if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4570 return -EFAULT;
4571 ptr += sizeof(handle_cookie);
4572 error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
4573 if (error)
4574 return error;
4575 } break;
4576
4577 case BC_FREEZE_NOTIFICATION_DONE: {
4578 binder_uintptr_t cookie;
4579 int error;
4580
4581 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4582 return -EFAULT;
4583
4584 ptr += sizeof(cookie);
4585 error = binder_freeze_notification_done(proc, thread, cookie);
4586 if (error)
4587 return error;
4588 } break;
4589
4590 default:
4591 pr_err("%d:%d unknown command %u\n",
4592 proc->pid, thread->pid, cmd);
4593 return -EINVAL;
4594 }
4595 *consumed = ptr - buffer;
4596 }
4597 return 0;
4598}
4599
4600static void binder_stat_br(struct binder_proc *proc,
4601 struct binder_thread *thread, uint32_t cmd)
4602{
4603 trace_binder_return(cmd);
4604 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4605 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4606 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4607 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4608 }
4609}
4610
4611static int binder_put_node_cmd(struct binder_proc *proc,
4612 struct binder_thread *thread,
4613 void __user **ptrp,
4614 binder_uintptr_t node_ptr,
4615 binder_uintptr_t node_cookie,
4616 int node_debug_id,
4617 uint32_t cmd, const char *cmd_name)
4618{
4619 void __user *ptr = *ptrp;
4620
4621 if (put_user(cmd, (uint32_t __user *)ptr))
4622 return -EFAULT;
4623 ptr += sizeof(uint32_t);
4624
4625 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4626 return -EFAULT;
4627 ptr += sizeof(binder_uintptr_t);
4628
4629 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4630 return -EFAULT;
4631 ptr += sizeof(binder_uintptr_t);
4632
4633 binder_stat_br(proc, thread, cmd);
4634 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4635 proc->pid, thread->pid, cmd_name, node_debug_id,
4636 (u64)node_ptr, (u64)node_cookie);
4637
4638 *ptrp = ptr;
4639 return 0;
4640}
4641
4642static int binder_wait_for_work(struct binder_thread *thread,
4643 bool do_proc_work)
4644{
4645 DEFINE_WAIT(wait);
4646 struct binder_proc *proc = thread->proc;
4647 int ret = 0;
4648
4649 binder_inner_proc_lock(proc);
4650 for (;;) {
4651 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4652 if (binder_has_work_ilocked(thread, do_proc_work))
4653 break;
4654 if (do_proc_work)
4655 list_add(&thread->waiting_thread_node,
4656 &proc->waiting_threads);
4657 binder_inner_proc_unlock(proc);
4658 schedule();
4659 binder_inner_proc_lock(proc);
4660 list_del_init(&thread->waiting_thread_node);
4661 if (signal_pending(current)) {
4662 ret = -EINTR;
4663 break;
4664 }
4665 }
4666 finish_wait(&thread->wait, &wait);
4667 binder_inner_proc_unlock(proc);
4668
4669 return ret;
4670}
4671
4672/**
4673 * binder_apply_fd_fixups() - finish fd translation
4674 * @proc: binder_proc associated @t->buffer
4675 * @t: binder transaction with list of fd fixups
4676 *
4677 * Now that we are in the context of the transaction target
4678 * process, we can allocate and install fds. Process the
4679 * list of fds to translate and fixup the buffer with the
4680 * new fds first and only then install the files.
4681 *
4682 * If we fail to allocate an fd, skip the install and release
4683 * any fds that have already been allocated.
4684 *
4685 * Return: 0 on success, a negative errno code on failure.
4686 */
4687static int binder_apply_fd_fixups(struct binder_proc *proc,
4688 struct binder_transaction *t)
4689{
4690 struct binder_txn_fd_fixup *fixup, *tmp;
4691 int ret = 0;
4692
4693 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4694 int fd = get_unused_fd_flags(O_CLOEXEC);
4695
4696 if (fd < 0) {
4697 binder_debug(BINDER_DEBUG_TRANSACTION,
4698 "failed fd fixup txn %d fd %d\n",
4699 t->debug_id, fd);
4700 ret = -ENOMEM;
4701 goto err;
4702 }
4703 binder_debug(BINDER_DEBUG_TRANSACTION,
4704 "fd fixup txn %d fd %d\n",
4705 t->debug_id, fd);
4706 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4707 fixup->target_fd = fd;
4708 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4709 fixup->offset, &fd,
4710 sizeof(u32))) {
4711 ret = -EINVAL;
4712 goto err;
4713 }
4714 }
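/*
 * Every fd was reserved and the buffer patched; only now are
 * the files published. fd_install() cannot fail, so the
 * transaction can no longer be left half-applied.
 */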
4715 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4716 fd_install(fixup->target_fd, fixup->file);
4717 list_del(&fixup->fixup_entry);
4718 kfree(fixup);
4719 }
4720
4721 return ret;
4722
4723err:
4724 binder_free_txn_fixups(t);
4725 return ret;
4726}
4727
4728static int binder_thread_read(struct binder_proc *proc,
4729 struct binder_thread *thread,
4730 binder_uintptr_t binder_buffer, size_t size,
4731 binder_size_t *consumed, int non_block)
4732{
4733 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4734 void __user *ptr = buffer + *consumed;
4735 void __user *end = buffer + size;
4736
4737 int ret = 0;
4738 int wait_for_proc_work;
4739
4740 if (*consumed == 0) {
4741 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4742 return -EFAULT;
4743 ptr += sizeof(uint32_t);
4744 }
4745
4746retry:
4747 binder_inner_proc_lock(proc);
4748 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4749 binder_inner_proc_unlock(proc);
4750
4751 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4752
4753 trace_binder_wait_for_work(wait_for_proc_work,
4754 !!thread->transaction_stack,
4755 !binder_worklist_empty(proc, &thread->todo));
4756 if (wait_for_proc_work) {
4757 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4758 BINDER_LOOPER_STATE_ENTERED))) {
4759 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4760 proc->pid, thread->pid, thread->looper);
4761 wait_event_interruptible(binder_user_error_wait,
4762 binder_stop_on_user_error < 2);
4763 }
4764 binder_set_nice(proc->default_priority);
4765 }
4766
4767 if (non_block) {
4768 if (!binder_has_work(thread, wait_for_proc_work))
4769 ret = -EAGAIN;
4770 } else {
4771 ret = binder_wait_for_work(thread, wait_for_proc_work);
4772 }
4773
4774 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4775
4776 if (ret)
4777 return ret;
4778
4779 while (1) {
4780 uint32_t cmd;
4781 struct binder_transaction_data_secctx tr;
4782 struct binder_transaction_data *trd = &tr.transaction_data;
4783 struct binder_work *w = NULL;
4784 struct list_head *list = NULL;
4785 struct binder_transaction *t = NULL;
4786 struct binder_thread *t_from;
4787 size_t trsize = sizeof(*trd);
4788
4789 binder_inner_proc_lock(proc);
4790 if (!binder_worklist_empty_ilocked(&thread->todo))
4791 list = &thread->todo;
4792 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4793 wait_for_proc_work)
4794 list = &proc->todo;
4795 else {
4796 binder_inner_proc_unlock(proc);
4797
4798 /* no data added beyond the initial BR_NOOP (if any) */
4799 if (ptr - buffer == 4 && !thread->looper_need_return)
4800 goto retry;
4801 break;
4802 }
4803
4804 if (end - ptr < sizeof(tr) + 4) {
4805 binder_inner_proc_unlock(proc);
4806 break;
4807 }
4808 w = binder_dequeue_work_head_ilocked(list);
4809 if (binder_worklist_empty_ilocked(&thread->todo))
4810 thread->process_todo = false;
4811
4812 switch (w->type) {
4813 case BINDER_WORK_TRANSACTION: {
4814 binder_inner_proc_unlock(proc);
4815 t = container_of(w, struct binder_transaction, work);
4816 } break;
4817 case BINDER_WORK_RETURN_ERROR: {
4818 struct binder_error *e = container_of(
4819 w, struct binder_error, work);
4820
4821 WARN_ON(e->cmd == BR_OK);
4822 binder_inner_proc_unlock(proc);
4823 if (put_user(e->cmd, (uint32_t __user *)ptr))
4824 return -EFAULT;
4825 cmd = e->cmd;
4826 e->cmd = BR_OK;
4827 ptr += sizeof(uint32_t);
4828
4829 binder_stat_br(proc, thread, cmd);
4830 } break;
4831 case BINDER_WORK_TRANSACTION_COMPLETE:
4832 case BINDER_WORK_TRANSACTION_PENDING:
4833 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4834 if (proc->oneway_spam_detection_enabled &&
4835 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4836 cmd = BR_ONEWAY_SPAM_SUSPECT;
4837 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4838 cmd = BR_TRANSACTION_PENDING_FROZEN;
4839 else
4840 cmd = BR_TRANSACTION_COMPLETE;
4841 binder_inner_proc_unlock(proc);
4842 kfree(w);
4843 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4844 if (put_user(cmd, (uint32_t __user *)ptr))
4845 return -EFAULT;
4846 ptr += sizeof(uint32_t);
4847
4848 binder_stat_br(proc, thread, cmd);
4849 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4850 "%d:%d BR_TRANSACTION_COMPLETE\n",
4851 proc->pid, thread->pid);
4852 } break;
4853 case BINDER_WORK_NODE: {
4854 struct binder_node *node = container_of(w, struct binder_node, work);
4855 int strong, weak;
4856 binder_uintptr_t node_ptr = node->ptr;
4857 binder_uintptr_t node_cookie = node->cookie;
4858 int node_debug_id = node->debug_id;
4859 int has_weak_ref;
4860 int has_strong_ref;
4861 void __user *orig_ptr = ptr;
4862
4863 BUG_ON(proc != node->proc);
4864 strong = node->internal_strong_refs ||
4865 node->local_strong_refs;
4866 weak = !hlist_empty(&node->refs) ||
4867 node->local_weak_refs ||
4868 node->tmp_refs || strong;
4869 has_strong_ref = node->has_strong_ref;
4870 has_weak_ref = node->has_weak_ref;
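/*
 * Compare the desired ref state (strong/weak) against what has
 * already been reported to userspace; the BR_INCREFS/BR_ACQUIRE/
 * BR_RELEASE/BR_DECREFS commands emitted below carry only the
 * deltas.
 */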
4871
4872 if (weak && !has_weak_ref) {
4873 node->has_weak_ref = 1;
4874 node->pending_weak_ref = 1;
4875 node->local_weak_refs++;
4876 }
4877 if (strong && !has_strong_ref) {
4878 node->has_strong_ref = 1;
4879 node->pending_strong_ref = 1;
4880 node->local_strong_refs++;
4881 }
4882 if (!strong && has_strong_ref)
4883 node->has_strong_ref = 0;
4884 if (!weak && has_weak_ref)
4885 node->has_weak_ref = 0;
4886 if (!weak && !strong) {
4887 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4888 "%d:%d node %d u%016llx c%016llx deleted\n",
4889 proc->pid, thread->pid,
4890 node_debug_id,
4891 (u64)node_ptr,
4892 (u64)node_cookie);
4893 rb_erase(&node->rb_node, &proc->nodes);
4894 binder_inner_proc_unlock(proc);
4895 binder_node_lock(node);
4896 /*
4897 * Acquire the node lock before freeing the
4898 * node to serialize with other threads that
4899 * may have been holding the node lock while
4900 * decrementing this node (avoids race where
4901 * this thread frees while the other thread
4902 * is unlocking the node after the final
4903 * decrement)
4904 */
4905 binder_node_unlock(node);
4906 binder_free_node(node);
4907 } else
4908 binder_inner_proc_unlock(proc);
4909
4910 if (weak && !has_weak_ref)
4911 ret = binder_put_node_cmd(
4912 proc, thread, &ptr, node_ptr,
4913 node_cookie, node_debug_id,
4914 BR_INCREFS, "BR_INCREFS");
4915 if (!ret && strong && !has_strong_ref)
4916 ret = binder_put_node_cmd(
4917 proc, thread, &ptr, node_ptr,
4918 node_cookie, node_debug_id,
4919 BR_ACQUIRE, "BR_ACQUIRE");
4920 if (!ret && !strong && has_strong_ref)
4921 ret = binder_put_node_cmd(
4922 proc, thread, &ptr, node_ptr,
4923 node_cookie, node_debug_id,
4924 BR_RELEASE, "BR_RELEASE");
4925 if (!ret && !weak && has_weak_ref)
4926 ret = binder_put_node_cmd(
4927 proc, thread, &ptr, node_ptr,
4928 node_cookie, node_debug_id,
4929 BR_DECREFS, "BR_DECREFS");
4930 if (orig_ptr == ptr)
4931 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4932 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4933 proc->pid, thread->pid,
4934 node_debug_id,
4935 (u64)node_ptr,
4936 (u64)node_cookie);
4937 if (ret)
4938 return ret;
4939 } break;
4940 case BINDER_WORK_DEAD_BINDER:
4941 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4942 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4943 struct binder_ref_death *death;
4944 uint32_t cmd;
4945 binder_uintptr_t cookie;
4946
4947 death = container_of(w, struct binder_ref_death, work);
4948 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4949 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4950 else
4951 cmd = BR_DEAD_BINDER;
4952 cookie = death->cookie;
4953
4954 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4955 "%d:%d %s %016llx\n",
4956 proc->pid, thread->pid,
4957 cmd == BR_DEAD_BINDER ?
4958 "BR_DEAD_BINDER" :
4959 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4960 (u64)cookie);
4961 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4962 binder_inner_proc_unlock(proc);
4963 kfree(death);
4964 binder_stats_deleted(BINDER_STAT_DEATH);
4965 } else {
4966 binder_enqueue_work_ilocked(
4967 w, &proc->delivered_death);
4968 binder_inner_proc_unlock(proc);
4969 }
4970 if (put_user(cmd, (uint32_t __user *)ptr))
4971 return -EFAULT;
4972 ptr += sizeof(uint32_t);
4973 if (put_user(cookie,
4974 (binder_uintptr_t __user *)ptr))
4975 return -EFAULT;
4976 ptr += sizeof(binder_uintptr_t);
4977 binder_stat_br(proc, thread, cmd);
4978 if (cmd == BR_DEAD_BINDER)
4979 goto done; /* DEAD_BINDER notifications can cause transactions */
4980 } break;
4981
4982 case BINDER_WORK_FROZEN_BINDER: {
4983 struct binder_ref_freeze *freeze;
4984 struct binder_frozen_state_info info;
4985
4986 memset(&info, 0, sizeof(info));
4987 freeze = container_of(w, struct binder_ref_freeze, work);
4988 info.is_frozen = freeze->is_frozen;
4989 info.cookie = freeze->cookie;
4990 freeze->sent = true;
4991 binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
4992 binder_inner_proc_unlock(proc);
4993
4994 if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
4995 return -EFAULT;
4996 ptr += sizeof(uint32_t);
4997 if (copy_to_user(ptr, &info, sizeof(info)))
4998 return -EFAULT;
4999 ptr += sizeof(info);
5000 binder_stat_br(proc, thread, BR_FROZEN_BINDER);
5001 goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
5002 } break;
5003
5004 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
5005 struct binder_ref_freeze *freeze =
5006 container_of(w, struct binder_ref_freeze, work);
5007 binder_uintptr_t cookie = freeze->cookie;
5008
5009 binder_inner_proc_unlock(proc);
5010 kfree(freeze);
5011 binder_stats_deleted(BINDER_STAT_FREEZE);
5012 if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
5013 return -EFAULT;
5014 ptr += sizeof(uint32_t);
5015 if (put_user(cookie, (binder_uintptr_t __user *)ptr))
5016 return -EFAULT;
5017 ptr += sizeof(binder_uintptr_t);
5018 binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
5019 } break;
5020
5021 default:
5022 binder_inner_proc_unlock(proc);
5023 pr_err("%d:%d: bad work type %d\n",
5024 proc->pid, thread->pid, w->type);
5025 break;
5026 }
5027
5028 if (!t)
5029 continue;
5030
5031 BUG_ON(t->buffer == NULL);
5032 if (t->buffer->target_node) {
5033 struct binder_node *target_node = t->buffer->target_node;
5034
5035 trd->target.ptr = target_node->ptr;
5036 trd->cookie = target_node->cookie;
5037 t->saved_priority = task_nice(current);
5038 if (t->priority < target_node->min_priority &&
5039 !(t->flags & TF_ONE_WAY))
5040 binder_set_nice(t->priority);
5041 else if (!(t->flags & TF_ONE_WAY) ||
5042 t->saved_priority > target_node->min_priority)
5043 binder_set_nice(target_node->min_priority);
5044 cmd = BR_TRANSACTION;
5045 } else {
5046 trd->target.ptr = 0;
5047 trd->cookie = 0;
5048 cmd = BR_REPLY;
5049 }
5050 trd->code = t->code;
5051 trd->flags = t->flags;
5052 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
5053
5054 t_from = binder_get_txn_from(t);
5055 if (t_from) {
5056 struct task_struct *sender = t_from->proc->tsk;
5057
5058 trd->sender_pid =
5059 task_tgid_nr_ns(sender,
5060 task_active_pid_ns(current));
5061 } else {
5062 trd->sender_pid = 0;
5063 }
5064
5065 ret = binder_apply_fd_fixups(proc, t);
5066 if (ret) {
5067 struct binder_buffer *buffer = t->buffer;
5068 bool oneway = !!(t->flags & TF_ONE_WAY);
5069 int tid = t->debug_id;
5070
5071 if (t_from)
5072 binder_thread_dec_tmpref(t_from);
5073 buffer->transaction = NULL;
5074 binder_cleanup_transaction(t, "fd fixups failed",
5075 BR_FAILED_REPLY);
5076 binder_free_buf(proc, thread, buffer, true);
5077 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
5078 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
5079 proc->pid, thread->pid,
5080 oneway ? "async " :
5081 (cmd == BR_REPLY ? "reply " : ""),
5082 tid, BR_FAILED_REPLY, ret, __LINE__);
5083 if (cmd == BR_REPLY) {
5084 cmd = BR_FAILED_REPLY;
5085 if (put_user(cmd, (uint32_t __user *)ptr))
5086 return -EFAULT;
5087 ptr += sizeof(uint32_t);
5088 binder_stat_br(proc, thread, cmd);
5089 break;
5090 }
5091 continue;
5092 }
5093 trd->data_size = t->buffer->data_size;
5094 trd->offsets_size = t->buffer->offsets_size;
5095 trd->data.ptr.buffer = t->buffer->user_data;
5096 trd->data.ptr.offsets = trd->data.ptr.buffer +
5097 ALIGN(t->buffer->data_size,
5098 sizeof(void *));
5099
5100 tr.secctx = t->security_ctx;
5101 if (t->security_ctx) {
5102 cmd = BR_TRANSACTION_SEC_CTX;
5103 trsize = sizeof(tr);
5104 }
5105 if (put_user(cmd, (uint32_t __user *)ptr)) {
5106 if (t_from)
5107 binder_thread_dec_tmpref(t_from);
5108
5109 binder_cleanup_transaction(t, "put_user failed",
5110 BR_FAILED_REPLY);
5111
5112 return -EFAULT;
5113 }
5114 ptr += sizeof(uint32_t);
5115 if (copy_to_user(ptr, &tr, trsize)) {
5116 if (t_from)
5117 binder_thread_dec_tmpref(t_from);
5118
5119 binder_cleanup_transaction(t, "copy_to_user failed",
5120 BR_FAILED_REPLY);
5121
5122 return -EFAULT;
5123 }
5124 ptr += trsize;
5125
5126 trace_binder_transaction_received(t);
5127 binder_stat_br(proc, thread, cmd);
5128 binder_debug(BINDER_DEBUG_TRANSACTION,
5129 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
5130 proc->pid, thread->pid,
5131 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
5132 (cmd == BR_TRANSACTION_SEC_CTX) ?
5133 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
5134 t->debug_id, t_from ? t_from->proc->pid : 0,
5135 t_from ? t_from->pid : 0, cmd,
5136 t->buffer->data_size, t->buffer->offsets_size);
5137
5138 if (t_from)
5139 binder_thread_dec_tmpref(t_from);
5140 t->buffer->allow_user_free = 1;
5141 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
5142 binder_inner_proc_lock(thread->proc);
5143 t->to_parent = thread->transaction_stack;
5144 t->to_thread = thread;
5145 thread->transaction_stack = t;
5146 binder_inner_proc_unlock(thread->proc);
5147 } else {
5148 binder_free_transaction(t);
5149 }
5150 break;
5151 }
5152
5153done:
5154
5155 *consumed = ptr - buffer;
5156 binder_inner_proc_lock(proc);
5157 if (proc->requested_threads == 0 &&
5158 list_empty(&thread->proc->waiting_threads) &&
5159 proc->requested_threads_started < proc->max_threads &&
5160 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
5161 BINDER_LOOPER_STATE_ENTERED))
5162 /* the user-space code fails to spawn a new thread if we leave this out */) {
5163 proc->requested_threads++;
5164 binder_inner_proc_unlock(proc);
5165 binder_debug(BINDER_DEBUG_THREADS,
5166 "%d:%d BR_SPAWN_LOOPER\n",
5167 proc->pid, thread->pid);
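/*
 * Written at the start of the buffer, overwriting the BR_NOOP
 * placed there when *consumed was initially zero.
 */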
5168 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5169 return -EFAULT;
5170 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5171 } else
5172 binder_inner_proc_unlock(proc);
5173 return 0;
5174}
5175
5176static void binder_release_work(struct binder_proc *proc,
5177 struct list_head *list)
5178{
5179 struct binder_work *w;
5180 enum binder_work_type wtype;
5181
5182 while (1) {
5183 binder_inner_proc_lock(proc);
5184 w = binder_dequeue_work_head_ilocked(list);
5185 wtype = w ? w->type : 0;
5186 binder_inner_proc_unlock(proc);
5187 if (!w)
5188 return;
5189
5190 switch (wtype) {
5191 case BINDER_WORK_TRANSACTION: {
5192 struct binder_transaction *t;
5193
5194 t = container_of(w, struct binder_transaction, work);
5195
5196 binder_cleanup_transaction(t, "process died.",
5197 BR_DEAD_REPLY);
5198 } break;
5199 case BINDER_WORK_RETURN_ERROR: {
5200 struct binder_error *e = container_of(
5201 w, struct binder_error, work);
5202
5203 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5204 "undelivered TRANSACTION_ERROR: %u\n",
5205 e->cmd);
5206 } break;
5207 case BINDER_WORK_TRANSACTION_PENDING:
5208 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5209 case BINDER_WORK_TRANSACTION_COMPLETE: {
5210 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5211 "undelivered TRANSACTION_COMPLETE\n");
5212 kfree(w);
5213 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5214 } break;
5215 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5216 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5217 struct binder_ref_death *death;
5218
5219 death = container_of(w, struct binder_ref_death, work);
5220 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5221 "undelivered death notification, %016llx\n",
5222 (u64)death->cookie);
5223 kfree(death);
5224 binder_stats_deleted(BINDER_STAT_DEATH);
5225 } break;
5226 case BINDER_WORK_NODE:
5227 break;
5228 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
5229 struct binder_ref_freeze *freeze;
5230
5231 freeze = container_of(w, struct binder_ref_freeze, work);
5232 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5233 "undelivered freeze notification, %016llx\n",
5234 (u64)freeze->cookie);
5235 kfree(freeze);
5236 binder_stats_deleted(BINDER_STAT_FREEZE);
5237 } break;
5238 default:
5239 pr_err("unexpected work type, %d, not freed\n",
5240 wtype);
5241 break;
5242 }
5243 }
5245}
5246
5247static struct binder_thread *binder_get_thread_ilocked(
5248 struct binder_proc *proc, struct binder_thread *new_thread)
5249{
5250 struct binder_thread *thread = NULL;
5251 struct rb_node *parent = NULL;
5252 struct rb_node **p = &proc->threads.rb_node;
5253
5254 while (*p) {
5255 parent = *p;
5256 thread = rb_entry(parent, struct binder_thread, rb_node);
5257
5258 if (current->pid < thread->pid)
5259 p = &(*p)->rb_left;
5260 else if (current->pid > thread->pid)
5261 p = &(*p)->rb_right;
5262 else
5263 return thread;
5264 }
5265 if (!new_thread)
5266 return NULL;
5267 thread = new_thread;
5268 binder_stats_created(BINDER_STAT_THREAD);
5269 thread->proc = proc;
5270 thread->pid = current->pid;
5271 atomic_set(&thread->tmp_ref, 0);
5272 init_waitqueue_head(&thread->wait);
5273 INIT_LIST_HEAD(&thread->todo);
5274 rb_link_node(&thread->rb_node, parent, p);
5275 rb_insert_color(&thread->rb_node, &proc->threads);
5276 thread->looper_need_return = true;
5277 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5278 thread->return_error.cmd = BR_OK;
5279 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5280 thread->reply_error.cmd = BR_OK;
5281 thread->ee.command = BR_OK;
5282 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5283 return thread;
5284}
5285
5286static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5287{
5288 struct binder_thread *thread;
5289 struct binder_thread *new_thread;
5290
5291 binder_inner_proc_lock(proc);
5292 thread = binder_get_thread_ilocked(proc, NULL);
5293 binder_inner_proc_unlock(proc);
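/*
 * Not found: allocate outside the lock and retry the lookup.
 * If another thread raced us in, binder_get_thread_ilocked()
 * returns the existing entry and our allocation is freed.
 */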
5294 if (!thread) {
5295 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5296 if (new_thread == NULL)
5297 return NULL;
5298 binder_inner_proc_lock(proc);
5299 thread = binder_get_thread_ilocked(proc, new_thread);
5300 binder_inner_proc_unlock(proc);
5301 if (thread != new_thread)
5302 kfree(new_thread);
5303 }
5304 return thread;
5305}
5306
5307static void binder_free_proc(struct binder_proc *proc)
5308{
5309 struct binder_device *device;
5310
5311 BUG_ON(!list_empty(&proc->todo));
5312 BUG_ON(!list_empty(&proc->delivered_death));
5313 if (proc->outstanding_txns)
5314 pr_warn("%s: Unexpected outstanding_txns %d\n",
5315 __func__, proc->outstanding_txns);
5316 device = container_of(proc->context, struct binder_device, context);
5317 if (refcount_dec_and_test(&device->ref)) {
5318 binder_remove_device(device);
5319 kfree(proc->context->name);
5320 kfree(device);
5321 }
5322 binder_alloc_deferred_release(&proc->alloc);
5323 put_task_struct(proc->tsk);
5324 put_cred(proc->cred);
5325 binder_stats_deleted(BINDER_STAT_PROC);
5326 dbitmap_free(&proc->dmap);
5327 kfree(proc);
5328}
5329
5330static void binder_free_thread(struct binder_thread *thread)
5331{
5332 BUG_ON(!list_empty(&thread->todo));
5333 binder_stats_deleted(BINDER_STAT_THREAD);
5334 binder_proc_dec_tmpref(thread->proc);
5335 kfree(thread);
5336}
5337
5338static int binder_thread_release(struct binder_proc *proc,
5339 struct binder_thread *thread)
5340{
5341 struct binder_transaction *t;
5342 struct binder_transaction *send_reply = NULL;
5343 int active_transactions = 0;
5344 struct binder_transaction *last_t = NULL;
5345
5346 binder_inner_proc_lock(thread->proc);
5347 /*
5348 * take a ref on the proc so it survives
5349 * after we remove this thread from proc->threads.
5350	 * The corresponding decrement happens when we
5351	 * actually free the thread in binder_free_thread().
5352 */
5353 proc->tmp_ref++;
5354 /*
5355 * take a ref on this thread to ensure it
5356 * survives while we are releasing it
5357 */
5358 atomic_inc(&thread->tmp_ref);
5359 rb_erase(&thread->rb_node, &proc->threads);
5360 t = thread->transaction_stack;
5361 if (t) {
5362 spin_lock(&t->lock);
5363 if (t->to_thread == thread)
5364 send_reply = t;
5365 } else {
5366 __acquire(&t->lock);
5367 }
5368 thread->is_dead = true;
5369
5370 while (t) {
5371 last_t = t;
5372 active_transactions++;
5373 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5374 "release %d:%d transaction %d %s, still active\n",
5375 proc->pid, thread->pid,
5376 t->debug_id,
5377 (t->to_thread == thread) ? "in" : "out");
5378
5379 if (t->to_thread == thread) {
5380 thread->proc->outstanding_txns--;
5381 t->to_proc = NULL;
5382 t->to_thread = NULL;
5383 if (t->buffer) {
5384 t->buffer->transaction = NULL;
5385 t->buffer = NULL;
5386 }
5387 t = t->to_parent;
5388 } else if (t->from == thread) {
5389 t->from = NULL;
5390 t = t->from_parent;
5391 } else
5392 BUG();
5393 spin_unlock(&last_t->lock);
5394 if (t)
5395 spin_lock(&t->lock);
5396 else
5397 __acquire(&t->lock);
5398 }
5399 /* annotation for sparse, lock not acquired in last iteration above */
5400 __release(&t->lock);
5401
5402 /*
5403 * If this thread used poll, make sure we remove the waitqueue from any
5404 * poll data structures holding it.
5405 */
5406 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5407 wake_up_pollfree(&thread->wait);
5408
5409 binder_inner_proc_unlock(thread->proc);
5410
5411 /*
5412 * This is needed to avoid races between wake_up_pollfree() above and
5413 * someone else removing the last entry from the queue for other reasons
5414 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5415 * descriptor being closed). Such other users hold an RCU read lock, so
5416 * we can be sure they're done after we call synchronize_rcu().
5417 */
5418 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5419 synchronize_rcu();
5420
5421 if (send_reply)
5422 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5423 binder_release_work(proc, &thread->todo);
5424 binder_thread_dec_tmpref(thread);
5425 return active_transactions;
5426}
5427
5428static __poll_t binder_poll(struct file *filp,
5429 struct poll_table_struct *wait)
5430{
5431 struct binder_proc *proc = filp->private_data;
5432 struct binder_thread *thread = NULL;
5433 bool wait_for_proc_work;
5434
5435 thread = binder_get_thread(proc);
5436 if (!thread)
5437 return EPOLLERR;
5438
5439 binder_inner_proc_lock(thread->proc);
5440 thread->looper |= BINDER_LOOPER_STATE_POLL;
5441 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5442
5443 binder_inner_proc_unlock(thread->proc);
5444
5445 poll_wait(filp, &thread->wait, wait);
5446
5447 if (binder_has_work(thread, wait_for_proc_work))
5448 return EPOLLIN;
5449
5450 return 0;
5451}
5452
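/*
 * Illustrative only (not part of the driver): how a userspace looper
 * thread might wait for work through the poll hook above, assuming
 * binder_fd came from opening a binder device such as /dev/binder.
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		// work is pending; fetch it with BINDER_WRITE_READ
 *	}
 *
 * Since binder_poll() returns EPOLLERR when no binder_thread can be
 * allocated, a robust caller should also check POLLERR in revents.
 */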
5453static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5454 struct binder_thread *thread)
5455{
5456 int ret = 0;
5457 struct binder_proc *proc = filp->private_data;
5458 void __user *ubuf = (void __user *)arg;
5459 struct binder_write_read bwr;
5460
5461 if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
5462 return -EFAULT;
5463
5464 binder_debug(BINDER_DEBUG_READ_WRITE,
5465 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5466 proc->pid, thread->pid,
5467 (u64)bwr.write_size, (u64)bwr.write_buffer,
5468 (u64)bwr.read_size, (u64)bwr.read_buffer);
5469
5470 if (bwr.write_size > 0) {
5471 ret = binder_thread_write(proc, thread,
5472 bwr.write_buffer,
5473 bwr.write_size,
5474 &bwr.write_consumed);
5475 trace_binder_write_done(ret);
5476 if (ret < 0) {
5477 bwr.read_consumed = 0;
5478 goto out;
5479 }
5480 }
5481 if (bwr.read_size > 0) {
5482 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5483 bwr.read_size,
5484 &bwr.read_consumed,
5485 filp->f_flags & O_NONBLOCK);
5486 trace_binder_read_done(ret);
5487 binder_inner_proc_lock(proc);
5488 if (!binder_worklist_empty_ilocked(&proc->todo))
5489 binder_wakeup_proc_ilocked(proc);
5490 binder_inner_proc_unlock(proc);
5491 if (ret < 0)
5492 goto out;
5493 }
5494 binder_debug(BINDER_DEBUG_READ_WRITE,
5495 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5496 proc->pid, thread->pid,
5497 (u64)bwr.write_consumed, (u64)bwr.write_size,
5498 (u64)bwr.read_consumed, (u64)bwr.read_size);
5499out:
5500 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5501 ret = -EFAULT;
5502 return ret;
5503}
5504
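/*
 * Illustrative only: a minimal userspace BINDER_WRITE_READ call into
 * the handler above, registering the calling thread as a looper and
 * then blocking for incoming work. The read buffer size is arbitrary.
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	char rbuf[256];
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *		.write_size = sizeof(cmd),
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *		.read_size = sizeof(rbuf),
 *	};
 *
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		perror("BINDER_WRITE_READ");
 *
 * bwr.write_consumed and bwr.read_consumed report progress even on
 * failure, because the struct is copied back to userspace in all
 * cases at the out: label above.
 */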
5505static int binder_ioctl_set_ctx_mgr(struct file *filp,
5506 struct flat_binder_object *fbo)
5507{
5508 int ret = 0;
5509 struct binder_proc *proc = filp->private_data;
5510 struct binder_context *context = proc->context;
5511 struct binder_node *new_node;
5512 kuid_t curr_euid = current_euid();
5513
5514 guard(mutex)(&context->context_mgr_node_lock);
5515 if (context->binder_context_mgr_node) {
5516 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5517 return -EBUSY;
5518 }
5519 ret = security_binder_set_context_mgr(proc->cred);
5520 if (ret < 0)
5521 return ret;
5522 if (uid_valid(context->binder_context_mgr_uid)) {
5523 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5524 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5525 from_kuid(&init_user_ns, curr_euid),
5526 from_kuid(&init_user_ns,
5527 context->binder_context_mgr_uid));
5528 return -EPERM;
5529 }
5530 } else {
5531 context->binder_context_mgr_uid = curr_euid;
5532 }
5533 new_node = binder_new_node(proc, fbo);
5534 if (!new_node)
5535 return -ENOMEM;
5536 binder_node_lock(new_node);
5537 new_node->local_weak_refs++;
5538 new_node->local_strong_refs++;
5539 new_node->has_strong_ref = 1;
5540 new_node->has_weak_ref = 1;
5541 context->binder_context_mgr_node = new_node;
5542 binder_node_unlock(new_node);
5543 binder_put_node(new_node);
5544 return ret;
5545}
5546
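/*
 * Illustrative only: how a service manager typically claims the
 * context-manager role arbitrated by the function above. The plain
 * BINDER_SET_CONTEXT_MGR variant passes no flat_binder_object, so the
 * node is created with default flags; other processes then reach it
 * via handle 0.
 *
 *	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		perror("cannot become context manager");
 *
 * A second caller gets -EBUSY, and a caller whose euid does not match
 * a previously recorded binder_context_mgr_uid gets -EPERM.
 */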
5547static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5548 struct binder_node_info_for_ref *info)
5549{
5550 struct binder_node *node;
5551 struct binder_context *context = proc->context;
5552 __u32 handle = info->handle;
5553
5554 if (info->strong_count || info->weak_count || info->reserved1 ||
5555 info->reserved2 || info->reserved3) {
5556 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5557 proc->pid);
5558 return -EINVAL;
5559 }
5560
5561 /* This ioctl may only be used by the context manager */
5562 mutex_lock(&context->context_mgr_node_lock);
5563 if (!context->binder_context_mgr_node ||
5564 context->binder_context_mgr_node->proc != proc) {
5565 mutex_unlock(&context->context_mgr_node_lock);
5566 return -EPERM;
5567 }
5568 mutex_unlock(&context->context_mgr_node_lock);
5569
5570 node = binder_get_node_from_ref(proc, handle, true, NULL);
5571 if (!node)
5572 return -EINVAL;
5573
5574 info->strong_count = node->local_strong_refs +
5575 node->internal_strong_refs;
5576 info->weak_count = node->local_weak_refs;
5577
5578 binder_put_node(node);
5579
5580 return 0;
5581}
5582
5583static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5584 struct binder_node_debug_info *info)
5585{
5586 struct rb_node *n;
5587 binder_uintptr_t ptr = info->ptr;
5588
5589 memset(info, 0, sizeof(*info));
5590
5591 binder_inner_proc_lock(proc);
5592 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5593 struct binder_node *node = rb_entry(n, struct binder_node,
5594 rb_node);
5595 if (node->ptr > ptr) {
5596 info->ptr = node->ptr;
5597 info->cookie = node->cookie;
5598 info->has_strong_ref = node->has_strong_ref;
5599 info->has_weak_ref = node->has_weak_ref;
5600 break;
5601 }
5602 }
5603 binder_inner_proc_unlock(proc);
5604
5605 return 0;
5606}
5607
5608static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5609{
5610 struct rb_node *n;
5611 struct binder_thread *thread;
5612
5613 if (proc->outstanding_txns > 0)
5614 return true;
5615
5616 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5617 thread = rb_entry(n, struct binder_thread, rb_node);
5618 if (thread->transaction_stack)
5619 return true;
5620 }
5621 return false;
5622}
5623
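/*
 * binder_add_freeze_work() below walks proc->nodes while repeatedly
 * dropping proc->inner_lock, since node->lock and the other process's
 * inner lock must not be taken underneath it (see the lock-ordering
 * rules at the top of this file). Each iteration pins the current
 * node with a tmpref before unlocking; that pinned node is carried in
 * @prev so that rb_next() still sees a valid tree entry once the lock
 * is retaken, and its tmpref is dropped only after the walk has moved
 * on (or after the loop ends).
 */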
5624static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
5625{
5626 struct binder_node *prev = NULL;
5627 struct rb_node *n;
5628 struct binder_ref *ref;
5629
5630 binder_inner_proc_lock(proc);
5631 for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
5632 struct binder_node *node;
5633
5634 node = rb_entry(n, struct binder_node, rb_node);
5635 binder_inc_node_tmpref_ilocked(node);
5636 binder_inner_proc_unlock(proc);
5637 if (prev)
5638 binder_put_node(prev);
5639 binder_node_lock(node);
5640 hlist_for_each_entry(ref, &node->refs, node_entry) {
5641 /*
5642 * Need the node lock to synchronize
5643 * with new notification requests and the
5644 * inner lock to synchronize with queued
5645 * freeze notifications.
5646 */
5647 binder_inner_proc_lock(ref->proc);
5648 if (!ref->freeze) {
5649 binder_inner_proc_unlock(ref->proc);
5650 continue;
5651 }
5652 ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
5653 if (list_empty(&ref->freeze->work.entry)) {
5654 ref->freeze->is_frozen = is_frozen;
5655 binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
5656 binder_wakeup_proc_ilocked(ref->proc);
5657 } else {
5658 if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
5659 ref->freeze->resend = true;
5660 ref->freeze->is_frozen = is_frozen;
5661 }
5662 binder_inner_proc_unlock(ref->proc);
5663 }
5664 prev = node;
5665 binder_node_unlock(node);
5666 binder_inner_proc_lock(proc);
5667 if (proc->is_dead)
5668 break;
5669 }
5670 binder_inner_proc_unlock(proc);
5671 if (prev)
5672 binder_put_node(prev);
5673}
5674
5675static int binder_ioctl_freeze(struct binder_freeze_info *info,
5676 struct binder_proc *target_proc)
5677{
5678 int ret = 0;
5679
5680 if (!info->enable) {
5681 binder_inner_proc_lock(target_proc);
5682 target_proc->sync_recv = false;
5683 target_proc->async_recv = false;
5684 target_proc->is_frozen = false;
5685 binder_inner_proc_unlock(target_proc);
5686 binder_add_freeze_work(target_proc, false);
5687 return 0;
5688 }
5689
5690 /*
5691 * Freezing the target. Prevent new transactions by
5692 * setting frozen state. If timeout specified, wait
5693 * for transactions to drain.
5694 */
5695 binder_inner_proc_lock(target_proc);
5696 target_proc->sync_recv = false;
5697 target_proc->async_recv = false;
5698 target_proc->is_frozen = true;
5699 binder_inner_proc_unlock(target_proc);
5700
5701 if (info->timeout_ms > 0)
5702 ret = wait_event_interruptible_timeout(
5703 target_proc->freeze_wait,
5704 (!target_proc->outstanding_txns),
5705 msecs_to_jiffies(info->timeout_ms));
5706
5707 /* Check pending transactions that wait for reply */
5708 if (ret >= 0) {
5709 binder_inner_proc_lock(target_proc);
5710 if (binder_txns_pending_ilocked(target_proc))
5711 ret = -EAGAIN;
5712 binder_inner_proc_unlock(target_proc);
5713 }
5714
5715 if (ret < 0) {
5716 binder_inner_proc_lock(target_proc);
5717 target_proc->is_frozen = false;
5718 binder_inner_proc_unlock(target_proc);
5719 } else {
5720 binder_add_freeze_work(target_proc, true);
5721 }
5722
5723 return ret;
5724}
5725
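/*
 * Illustrative only: freezing a target process from userspace via the
 * BINDER_FREEZE ioctl serviced above. With timeout_ms == 0 the drain
 * wait is skipped entirely; either way the ioctl fails with EAGAIN if
 * transactions are still pending afterwards, and the frozen state is
 * rolled back on any error.
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,	// assumed known to the caller
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *
 *	if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN)
 *		; // still draining; retry later or unfreeze
 */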
5726static int binder_ioctl_get_freezer_info(
5727 struct binder_frozen_status_info *info)
5728{
5729 struct binder_proc *target_proc;
5730 bool found = false;
5731 __u32 txns_pending;
5732
5733 info->sync_recv = 0;
5734 info->async_recv = 0;
5735
5736 mutex_lock(&binder_procs_lock);
5737 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5738 if (target_proc->pid == info->pid) {
5739 found = true;
5740 binder_inner_proc_lock(target_proc);
5741 txns_pending = binder_txns_pending_ilocked(target_proc);
5742 info->sync_recv |= target_proc->sync_recv |
5743 (txns_pending << 1);
5744 info->async_recv |= target_proc->async_recv;
5745 binder_inner_proc_unlock(target_proc);
5746 }
5747 }
5748 mutex_unlock(&binder_procs_lock);
5749
5750 if (!found)
5751 return -EINVAL;
5752
5753 return 0;
5754}
5755
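/*
 * Illustrative only: decoding the status words assembled above. Bit 0
 * of sync_recv reports a sync transaction rejected since the freeze,
 * and bit 1 (txns_pending << 1) reports transactions still awaiting a
 * reply at query time; the bits are ORed across every context whose
 * binder_proc matches the pid.
 *
 *	struct binder_frozen_status_info info = { .pid = target_pid };
 *
 *	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info) == 0) {
 *		bool sync_rejected = info.sync_recv & 1;
 *		bool txns_pending  = info.sync_recv & 2;
 *		bool async_queued  = info.async_recv & 1;
 *	}
 */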
5756static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5757 void __user *ubuf)
5758{
5759 struct binder_extended_error ee;
5760
5761 binder_inner_proc_lock(thread->proc);
5762 ee = thread->ee;
5763 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5764 binder_inner_proc_unlock(thread->proc);
5765
5766 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5767 return -EFAULT;
5768
5769 return 0;
5770}
5771
5772static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5773{
5774 int ret;
5775 struct binder_proc *proc = filp->private_data;
5776 struct binder_thread *thread;
5777 void __user *ubuf = (void __user *)arg;
5778
5779 trace_binder_ioctl(cmd, arg);
5780
5781 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5782 if (ret)
5783 goto err_unlocked;
5784
5785 thread = binder_get_thread(proc);
5786 if (thread == NULL) {
5787 ret = -ENOMEM;
5788 goto err;
5789 }
5790
5791 switch (cmd) {
5792 case BINDER_WRITE_READ:
5793 ret = binder_ioctl_write_read(filp, arg, thread);
5794 if (ret)
5795 goto err;
5796 break;
5797 case BINDER_SET_MAX_THREADS: {
5798 u32 max_threads;
5799
5800 if (copy_from_user(&max_threads, ubuf,
5801 sizeof(max_threads))) {
5802 ret = -EINVAL;
5803 goto err;
5804 }
5805 binder_inner_proc_lock(proc);
5806 proc->max_threads = max_threads;
5807 binder_inner_proc_unlock(proc);
5808 break;
5809 }
5810 case BINDER_SET_CONTEXT_MGR_EXT: {
5811 struct flat_binder_object fbo;
5812
5813 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5814 ret = -EINVAL;
5815 goto err;
5816 }
5817 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5818 if (ret)
5819 goto err;
5820 break;
5821 }
5822 case BINDER_SET_CONTEXT_MGR:
5823 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5824 if (ret)
5825 goto err;
5826 break;
5827 case BINDER_THREAD_EXIT:
5828 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5829 proc->pid, thread->pid);
5830 binder_thread_release(proc, thread);
5831 thread = NULL;
5832 break;
5833 case BINDER_VERSION: {
5834 struct binder_version __user *ver = ubuf;
5835
5836 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5837 &ver->protocol_version)) {
5838 ret = -EINVAL;
5839 goto err;
5840 }
5841 break;
5842 }
5843 case BINDER_GET_NODE_INFO_FOR_REF: {
5844 struct binder_node_info_for_ref info;
5845
5846 if (copy_from_user(&info, ubuf, sizeof(info))) {
5847 ret = -EFAULT;
5848 goto err;
5849 }
5850
5851 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5852 if (ret < 0)
5853 goto err;
5854
5855 if (copy_to_user(ubuf, &info, sizeof(info))) {
5856 ret = -EFAULT;
5857 goto err;
5858 }
5859
5860 break;
5861 }
5862 case BINDER_GET_NODE_DEBUG_INFO: {
5863 struct binder_node_debug_info info;
5864
5865 if (copy_from_user(&info, ubuf, sizeof(info))) {
5866 ret = -EFAULT;
5867 goto err;
5868 }
5869
5870 ret = binder_ioctl_get_node_debug_info(proc, &info);
5871 if (ret < 0)
5872 goto err;
5873
5874 if (copy_to_user(ubuf, &info, sizeof(info))) {
5875 ret = -EFAULT;
5876 goto err;
5877 }
5878 break;
5879 }
5880 case BINDER_FREEZE: {
5881 struct binder_freeze_info info;
5882 struct binder_proc **target_procs = NULL, *target_proc;
5883 int target_procs_count = 0, i = 0;
5884
5885 ret = 0;
5886
5887 if (copy_from_user(&info, ubuf, sizeof(info))) {
5888 ret = -EFAULT;
5889 goto err;
5890 }
5891
5892 mutex_lock(&binder_procs_lock);
5893 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5894 if (target_proc->pid == info.pid)
5895 target_procs_count++;
5896 }
5897
5898 if (target_procs_count == 0) {
5899 mutex_unlock(&binder_procs_lock);
5900 ret = -EINVAL;
5901 goto err;
5902 }
5903
5904 target_procs = kcalloc(target_procs_count,
5905 sizeof(struct binder_proc *),
5906 GFP_KERNEL);
5907
5908 if (!target_procs) {
5909 mutex_unlock(&binder_procs_lock);
5910 ret = -ENOMEM;
5911 goto err;
5912 }
5913
5914 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5915 if (target_proc->pid != info.pid)
5916 continue;
5917
5918 binder_inner_proc_lock(target_proc);
5919 target_proc->tmp_ref++;
5920 binder_inner_proc_unlock(target_proc);
5921
5922 target_procs[i++] = target_proc;
5923 }
5924 mutex_unlock(&binder_procs_lock);
5925
5926 for (i = 0; i < target_procs_count; i++) {
5927 if (ret >= 0)
5928 ret = binder_ioctl_freeze(&info,
5929 target_procs[i]);
5930
5931 binder_proc_dec_tmpref(target_procs[i]);
5932 }
5933
5934 kfree(target_procs);
5935
5936 if (ret < 0)
5937 goto err;
5938 break;
5939 }
5940 case BINDER_GET_FROZEN_INFO: {
5941 struct binder_frozen_status_info info;
5942
5943 if (copy_from_user(&info, ubuf, sizeof(info))) {
5944 ret = -EFAULT;
5945 goto err;
5946 }
5947
5948 ret = binder_ioctl_get_freezer_info(&info);
5949 if (ret < 0)
5950 goto err;
5951
5952 if (copy_to_user(ubuf, &info, sizeof(info))) {
5953 ret = -EFAULT;
5954 goto err;
5955 }
5956 break;
5957 }
5958 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5959 uint32_t enable;
5960
5961 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5962 ret = -EFAULT;
5963 goto err;
5964 }
5965 binder_inner_proc_lock(proc);
5966 proc->oneway_spam_detection_enabled = (bool)enable;
5967 binder_inner_proc_unlock(proc);
5968 break;
5969 }
5970 case BINDER_GET_EXTENDED_ERROR:
5971 ret = binder_ioctl_get_extended_error(thread, ubuf);
5972 if (ret < 0)
5973 goto err;
5974 break;
5975 default:
5976 ret = -EINVAL;
5977 goto err;
5978 }
5979 ret = 0;
5980err:
5981 if (thread)
5982 thread->looper_need_return = false;
5983 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5984 if (ret && ret != -EINTR)
5985 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5986err_unlocked:
5987 trace_binder_ioctl_done(ret);
5988 return ret;
5989}
5990
5991static void binder_vma_open(struct vm_area_struct *vma)
5992{
5993 struct binder_proc *proc = vma->vm_private_data;
5994
5995 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5996 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5997 proc->pid, vma->vm_start, vma->vm_end,
5998 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5999 (unsigned long)pgprot_val(vma->vm_page_prot));
6000}
6001
6002static void binder_vma_close(struct vm_area_struct *vma)
6003{
6004 struct binder_proc *proc = vma->vm_private_data;
6005
6006 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6007 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
6008 proc->pid, vma->vm_start, vma->vm_end,
6009 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
6010 (unsigned long)pgprot_val(vma->vm_page_prot));
6011 binder_alloc_vma_close(&proc->alloc);
6012}
6013
6014VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf)
6015{
6016 return VM_FAULT_SIGBUS;
6017}
6018EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault);
6019
6020static const struct vm_operations_struct binder_vm_ops = {
6021 .open = binder_vma_open,
6022 .close = binder_vma_close,
6023 .fault = binder_vm_fault,
6024};
6025
6026static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
6027{
6028 struct binder_proc *proc = filp->private_data;
6029
6030 if (proc->tsk != current->group_leader)
6031 return -EINVAL;
6032
6033 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6034 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
6035 __func__, proc->pid, vma->vm_start, vma->vm_end,
6036 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
6037 (unsigned long)pgprot_val(vma->vm_page_prot));
6038
6039 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
6040 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
6041 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
6042 return -EPERM;
6043 }
6044 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
6045
6046 vma->vm_ops = &binder_vm_ops;
6047 vma->vm_private_data = proc;
6048
6049 return binder_alloc_mmap_handler(&proc->alloc, vma);
6050}
6051
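/*
 * Illustrative only: the userspace side of the mapping handled above.
 * FORBIDDEN_MMAP_FLAGS rejects VM_WRITE and vm_flags_mod() clears
 * VM_MAYWRITE, so the buffer area must be mapped read-only and cannot
 * later be mprotect()ed writable; the kernel fills it on the caller's
 * behalf during BINDER_WRITE_READ. The 1 MiB size is arbitrary.
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *	if (map == MAP_FAILED)
 *		perror("binder mmap");
 *
 * Note the mapping must come from the same thread group that opened
 * the fd, per the proc->tsk check above.
 */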
6052static int binder_open(struct inode *nodp, struct file *filp)
6053{
6054 struct binder_proc *proc, *itr;
6055 struct binder_device *binder_dev;
6056 struct binderfs_info *info;
6057 struct dentry *binder_binderfs_dir_entry_proc = NULL;
6058 bool existing_pid = false;
6059
6060 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
6061 current->group_leader->pid, current->pid);
6062
6063 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
6064 if (proc == NULL)
6065 return -ENOMEM;
6066
6067 dbitmap_init(&proc->dmap);
6068 spin_lock_init(&proc->inner_lock);
6069 spin_lock_init(&proc->outer_lock);
6070 get_task_struct(current->group_leader);
6071 proc->tsk = current->group_leader;
6072 proc->cred = get_cred(filp->f_cred);
6073 INIT_LIST_HEAD(&proc->todo);
6074 init_waitqueue_head(&proc->freeze_wait);
6075 proc->default_priority = task_nice(current);
6076 /* binderfs stashes devices in i_private */
6077 if (is_binderfs_device(nodp)) {
6078 binder_dev = nodp->i_private;
6079 info = nodp->i_sb->s_fs_info;
6080 binder_binderfs_dir_entry_proc = info->proc_log_dir;
6081 } else {
6082 binder_dev = container_of(filp->private_data,
6083 struct binder_device, miscdev);
6084 }
6085 refcount_inc(&binder_dev->ref);
6086 proc->context = &binder_dev->context;
6087 binder_alloc_init(&proc->alloc);
6088
6089 binder_stats_created(BINDER_STAT_PROC);
6090 proc->pid = current->group_leader->pid;
6091 INIT_LIST_HEAD(&proc->delivered_death);
6092 INIT_LIST_HEAD(&proc->delivered_freeze);
6093 INIT_LIST_HEAD(&proc->waiting_threads);
6094 filp->private_data = proc;
6095
6096 mutex_lock(&binder_procs_lock);
6097 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6098 if (itr->pid == proc->pid) {
6099 existing_pid = true;
6100 break;
6101 }
6102 }
6103 hlist_add_head(&proc->proc_node, &binder_procs);
6104 mutex_unlock(&binder_procs_lock);
6105
6106 if (binder_debugfs_dir_entry_proc && !existing_pid) {
6107 char strbuf[11];
6108
6109 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6110 /*
6111 * proc debug entries are shared between contexts.
6112	 * Only create for the first PID to avoid debugfs log spamming.
6113	 * The printing code will print all contexts for a given
6114	 * PID anyway, so this is not a problem.
6115 */
6116 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
6117 binder_debugfs_dir_entry_proc,
6118 (void *)(unsigned long)proc->pid,
6119 &proc_fops);
6120 }
6121
6122 if (binder_binderfs_dir_entry_proc && !existing_pid) {
6123 char strbuf[11];
6124 struct dentry *binderfs_entry;
6125
6126 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6127 /*
6128	 * Similar to debugfs, the process-specific log file is shared
6129	 * between contexts. Only create it for the first PID.
6130	 * This is fine because, as with debugfs, the log file will
6131	 * contain information on all contexts of a given PID.
6132 */
6133 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
6134 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
6135 if (!IS_ERR(binderfs_entry)) {
6136 proc->binderfs_entry = binderfs_entry;
6137 } else {
6138 int error;
6139
6140 error = PTR_ERR(binderfs_entry);
6141 pr_warn("Unable to create file %s in binderfs (error %d)\n",
6142 strbuf, error);
6143 }
6144 }
6145
6146 return 0;
6147}
6148
6149static int binder_flush(struct file *filp, fl_owner_t id)
6150{
6151 struct binder_proc *proc = filp->private_data;
6152
6153 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
6154
6155 return 0;
6156}
6157
6158static void binder_deferred_flush(struct binder_proc *proc)
6159{
6160 struct rb_node *n;
6161 int wake_count = 0;
6162
6163 binder_inner_proc_lock(proc);
6164 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6165 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6166
6167 thread->looper_need_return = true;
6168 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
6169 wake_up_interruptible(&thread->wait);
6170 wake_count++;
6171 }
6172 }
6173 binder_inner_proc_unlock(proc);
6174
6175 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6176 "binder_flush: %d woke %d threads\n", proc->pid,
6177 wake_count);
6178}
6179
6180static int binder_release(struct inode *nodp, struct file *filp)
6181{
6182 struct binder_proc *proc = filp->private_data;
6183
6184 debugfs_remove(proc->debugfs_entry);
6185
6186 if (proc->binderfs_entry) {
6187 simple_recursive_removal(proc->binderfs_entry, NULL);
6188 proc->binderfs_entry = NULL;
6189 }
6190
6191 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
6192
6193 return 0;
6194}
6195
6196static int binder_node_release(struct binder_node *node, int refs)
6197{
6198 struct binder_ref *ref;
6199 int death = 0;
6200 struct binder_proc *proc = node->proc;
6201
6202 binder_release_work(proc, &node->async_todo);
6203
6204 binder_node_lock(node);
6205 binder_inner_proc_lock(proc);
6206 binder_dequeue_work_ilocked(&node->work);
6207 /*
6208	 * The caller must have taken a temporary ref on the node.
6209 */
6210 BUG_ON(!node->tmp_refs);
6211 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6212 binder_inner_proc_unlock(proc);
6213 binder_node_unlock(node);
6214 binder_free_node(node);
6215
6216 return refs;
6217 }
6218
6219 node->proc = NULL;
6220 node->local_strong_refs = 0;
6221 node->local_weak_refs = 0;
6222 binder_inner_proc_unlock(proc);
6223
6224 spin_lock(&binder_dead_nodes_lock);
6225 hlist_add_head(&node->dead_node, &binder_dead_nodes);
6226 spin_unlock(&binder_dead_nodes_lock);
6227
6228 hlist_for_each_entry(ref, &node->refs, node_entry) {
6229 refs++;
6230 /*
6231 * Need the node lock to synchronize
6232 * with new notification requests and the
6233 * inner lock to synchronize with queued
6234 * death notifications.
6235 */
6236 binder_inner_proc_lock(ref->proc);
6237 if (!ref->death) {
6238 binder_inner_proc_unlock(ref->proc);
6239 continue;
6240 }
6241
6242 death++;
6243
6244 BUG_ON(!list_empty(&ref->death->work.entry));
6245 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6246 binder_enqueue_work_ilocked(&ref->death->work,
6247 &ref->proc->todo);
6248 binder_wakeup_proc_ilocked(ref->proc);
6249 binder_inner_proc_unlock(ref->proc);
6250 }
6251
6252 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6253 "node %d now dead, refs %d, death %d\n",
6254 node->debug_id, refs, death);
6255 binder_node_unlock(node);
6256 binder_put_node(node);
6257
6258 return refs;
6259}
6260
6261static void binder_deferred_release(struct binder_proc *proc)
6262{
6263 struct binder_context *context = proc->context;
6264 struct rb_node *n;
6265 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6266
6267 mutex_lock(&binder_procs_lock);
6268 hlist_del(&proc->proc_node);
6269 mutex_unlock(&binder_procs_lock);
6270
6271 mutex_lock(&context->context_mgr_node_lock);
6272 if (context->binder_context_mgr_node &&
6273 context->binder_context_mgr_node->proc == proc) {
6274 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6275 "%s: %d context_mgr_node gone\n",
6276 __func__, proc->pid);
6277 context->binder_context_mgr_node = NULL;
6278 }
6279 mutex_unlock(&context->context_mgr_node_lock);
6280 binder_inner_proc_lock(proc);
6281 /*
6282 * Make sure proc stays alive after we
6283 * remove all the threads
6284 */
6285 proc->tmp_ref++;
6286
6287 proc->is_dead = true;
6288 proc->is_frozen = false;
6289 proc->sync_recv = false;
6290 proc->async_recv = false;
6291 threads = 0;
6292 active_transactions = 0;
6293 while ((n = rb_first(&proc->threads))) {
6294 struct binder_thread *thread;
6295
6296 thread = rb_entry(n, struct binder_thread, rb_node);
6297 binder_inner_proc_unlock(proc);
6298 threads++;
6299 active_transactions += binder_thread_release(proc, thread);
6300 binder_inner_proc_lock(proc);
6301 }
6302
6303 nodes = 0;
6304 incoming_refs = 0;
6305 while ((n = rb_first(&proc->nodes))) {
6306 struct binder_node *node;
6307
6308 node = rb_entry(n, struct binder_node, rb_node);
6309 nodes++;
6310 /*
6311 * take a temporary ref on the node before
6312 * calling binder_node_release() which will either
6313 * kfree() the node or call binder_put_node()
6314 */
6315 binder_inc_node_tmpref_ilocked(node);
6316 rb_erase(&node->rb_node, &proc->nodes);
6317 binder_inner_proc_unlock(proc);
6318 incoming_refs = binder_node_release(node, incoming_refs);
6319 binder_inner_proc_lock(proc);
6320 }
6321 binder_inner_proc_unlock(proc);
6322
6323 outgoing_refs = 0;
6324 binder_proc_lock(proc);
6325 while ((n = rb_first(&proc->refs_by_desc))) {
6326 struct binder_ref *ref;
6327
6328 ref = rb_entry(n, struct binder_ref, rb_node_desc);
6329 outgoing_refs++;
6330 binder_cleanup_ref_olocked(ref);
6331 binder_proc_unlock(proc);
6332 binder_free_ref(ref);
6333 binder_proc_lock(proc);
6334 }
6335 binder_proc_unlock(proc);
6336
6337 binder_release_work(proc, &proc->todo);
6338 binder_release_work(proc, &proc->delivered_death);
6339 binder_release_work(proc, &proc->delivered_freeze);
6340
6341 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6342 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6343 __func__, proc->pid, threads, nodes, incoming_refs,
6344 outgoing_refs, active_transactions);
6345
6346 binder_proc_dec_tmpref(proc);
6347}
6348
6349static void binder_deferred_func(struct work_struct *work)
6350{
6351 struct binder_proc *proc;
6352
6353 int defer;
6354
6355 do {
6356 mutex_lock(&binder_deferred_lock);
6357 if (!hlist_empty(&binder_deferred_list)) {
6358 proc = hlist_entry(binder_deferred_list.first,
6359 struct binder_proc, deferred_work_node);
6360 hlist_del_init(&proc->deferred_work_node);
6361 defer = proc->deferred_work;
6362 proc->deferred_work = 0;
6363 } else {
6364 proc = NULL;
6365 defer = 0;
6366 }
6367 mutex_unlock(&binder_deferred_lock);
6368
6369 if (defer & BINDER_DEFERRED_FLUSH)
6370 binder_deferred_flush(proc);
6371
6372 if (defer & BINDER_DEFERRED_RELEASE)
6373 binder_deferred_release(proc); /* frees proc */
6374 } while (proc);
6375}
6376static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6377
6378static void
6379binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6380{
6381 guard(mutex)(&binder_deferred_lock);
6382 proc->deferred_work |= defer;
6383 if (hlist_unhashed(&proc->deferred_work_node)) {
6384 hlist_add_head(&proc->deferred_work_node,
6385 &binder_deferred_list);
6386 schedule_work(&binder_deferred_work);
6387 }
6388}
6389
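/*
 * Deferred work is coalesced: binder_defer_work() above ORs each new
 * request into proc->deferred_work and only queues the proc when it
 * is not already on binder_deferred_list, so a flush followed closely
 * by a release can be serviced in a single binder_deferred_func()
 * pass that sees both BINDER_DEFERRED_FLUSH and
 * BINDER_DEFERRED_RELEASE set.
 */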
6390static void print_binder_transaction_ilocked(struct seq_file *m,
6391 struct binder_proc *proc,
6392 const char *prefix,
6393 struct binder_transaction *t)
6394{
6395 struct binder_proc *to_proc;
6396 struct binder_buffer *buffer = t->buffer;
6397 ktime_t current_time = ktime_get();
6398
6399 spin_lock(&t->lock);
6400 to_proc = t->to_proc;
6401 seq_printf(m,
6402 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
6403 prefix, t->debug_id, t,
6404 t->from_pid,
6405 t->from_tid,
6406 to_proc ? to_proc->pid : 0,
6407 t->to_thread ? t->to_thread->pid : 0,
6408 t->code, t->flags, t->priority, t->is_async, t->is_reply,
6409 ktime_ms_delta(current_time, t->start_time));
6410 spin_unlock(&t->lock);
6411
6412 if (proc != to_proc) {
6413 /*
6414 * Can only safely deref buffer if we are holding the
6415 * correct proc inner lock for this node
6416 */
6417 seq_puts(m, "\n");
6418 return;
6419 }
6420
6421 if (buffer == NULL) {
6422 seq_puts(m, " buffer free\n");
6423 return;
6424 }
6425 if (buffer->target_node)
6426 seq_printf(m, " node %d", buffer->target_node->debug_id);
6427 seq_printf(m, " size %zd:%zd offset %lx\n",
6428 buffer->data_size, buffer->offsets_size,
6429 buffer->user_data - proc->alloc.vm_start);
6430}
6431
6432static void print_binder_work_ilocked(struct seq_file *m,
6433 struct binder_proc *proc,
6434 const char *prefix,
6435 const char *transaction_prefix,
6436 struct binder_work *w, bool hash_ptrs)
6437{
6438 struct binder_node *node;
6439 struct binder_transaction *t;
6440
6441 switch (w->type) {
6442 case BINDER_WORK_TRANSACTION:
6443 t = container_of(w, struct binder_transaction, work);
6444 print_binder_transaction_ilocked(
6445 m, proc, transaction_prefix, t);
6446 break;
6447 case BINDER_WORK_RETURN_ERROR: {
6448 struct binder_error *e = container_of(
6449 w, struct binder_error, work);
6450
6451 seq_printf(m, "%stransaction error: %u\n",
6452 prefix, e->cmd);
6453 } break;
6454 case BINDER_WORK_TRANSACTION_COMPLETE:
6455 seq_printf(m, "%stransaction complete\n", prefix);
6456 break;
6457 case BINDER_WORK_NODE:
6458 node = container_of(w, struct binder_node, work);
6459 if (hash_ptrs)
6460 seq_printf(m, "%snode work %d: u%p c%p\n",
6461 prefix, node->debug_id,
6462 (void *)(long)node->ptr,
6463 (void *)(long)node->cookie);
6464 else
6465 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6466 prefix, node->debug_id,
6467 (u64)node->ptr, (u64)node->cookie);
6468 break;
6469 case BINDER_WORK_DEAD_BINDER:
6470 seq_printf(m, "%shas dead binder\n", prefix);
6471 break;
6472 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6473 seq_printf(m, "%shas cleared dead binder\n", prefix);
6474 break;
6475 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6476 seq_printf(m, "%shas cleared death notification\n", prefix);
6477 break;
6478 case BINDER_WORK_FROZEN_BINDER:
6479 seq_printf(m, "%shas frozen binder\n", prefix);
6480 break;
6481 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
6482 seq_printf(m, "%shas cleared freeze notification\n", prefix);
6483 break;
6484 default:
6485 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6486 break;
6487 }
6488}
6489
6490static void print_binder_thread_ilocked(struct seq_file *m,
6491 struct binder_thread *thread,
6492 bool print_always, bool hash_ptrs)
6493{
6494 struct binder_transaction *t;
6495 struct binder_work *w;
6496 size_t start_pos = m->count;
6497 size_t header_pos;
6498
6499 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6500 thread->pid, thread->looper,
6501 thread->looper_need_return,
6502 atomic_read(&thread->tmp_ref));
6503 header_pos = m->count;
6504 t = thread->transaction_stack;
6505 while (t) {
6506 if (t->from == thread) {
6507 print_binder_transaction_ilocked(m, thread->proc,
6508 " outgoing transaction", t);
6509 t = t->from_parent;
6510 } else if (t->to_thread == thread) {
6511 print_binder_transaction_ilocked(m, thread->proc,
6512 " incoming transaction", t);
6513 t = t->to_parent;
6514 } else {
6515 print_binder_transaction_ilocked(m, thread->proc,
6516 " bad transaction", t);
6517 t = NULL;
6518 }
6519 }
6520 list_for_each_entry(w, &thread->todo, entry) {
6521 print_binder_work_ilocked(m, thread->proc, " ",
6522 " pending transaction",
6523 w, hash_ptrs);
6524 }
6525 if (!print_always && m->count == header_pos)
6526 m->count = start_pos;
6527}
6528
6529static void print_binder_node_nilocked(struct seq_file *m,
6530 struct binder_node *node,
6531 bool hash_ptrs)
6532{
6533 struct binder_ref *ref;
6534 struct binder_work *w;
6535 int count;
6536
6537 count = hlist_count_nodes(&node->refs);
6538
6539 if (hash_ptrs)
6540 seq_printf(m, " node %d: u%p c%p", node->debug_id,
6541 (void *)(long)node->ptr, (void *)(long)node->cookie);
6542 else
6543 seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id,
6544 (u64)node->ptr, (u64)node->cookie);
6545 seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6546 node->has_strong_ref, node->has_weak_ref,
6547 node->local_strong_refs, node->local_weak_refs,
6548 node->internal_strong_refs, count, node->tmp_refs);
6549 if (count) {
6550 seq_puts(m, " proc");
6551 hlist_for_each_entry(ref, &node->refs, node_entry)
6552 seq_printf(m, " %d", ref->proc->pid);
6553 }
6554 seq_puts(m, "\n");
6555 if (node->proc) {
6556 list_for_each_entry(w, &node->async_todo, entry)
6557 print_binder_work_ilocked(m, node->proc, " ",
6558 " pending async transaction",
6559 w, hash_ptrs);
6560 }
6561}
6562
6563static void print_binder_ref_olocked(struct seq_file *m,
6564 struct binder_ref *ref)
6565{
6566 binder_node_lock(ref->node);
6567 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6568 ref->data.debug_id, ref->data.desc,
6569 ref->node->proc ? "" : "dead ",
6570 ref->node->debug_id, ref->data.strong,
6571 ref->data.weak, ref->death);
6572 binder_node_unlock(ref->node);
6573}
6574
6575/**
6576 * print_next_binder_node_ilocked() - Print binder_node from a locked list
6577 * @m: struct seq_file for output via seq_printf()
6578 * @proc: struct binder_proc we hold the inner_proc_lock to (if any)
6579 * @node: struct binder_node to print fields of
6580 * @prev_node: struct binder_node we hold a temporary reference to (if any)
6581 * @hash_ptrs: whether to hash @node's binder_uintptr_t fields
6582 *
6583 * Helper function to handle synchronization around printing a struct
6584 * binder_node while iterating through @proc->nodes or the dead nodes list.
6585 * Caller must hold either @proc->inner_lock (for live nodes) or
6586 * binder_dead_nodes_lock. This lock will be released during the body of this
6587 * function, but it will be reacquired before returning to the caller.
6588 *
6589 * Return: pointer to the struct binder_node we hold a tmpref on
6590 */
6591static struct binder_node *
6592print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
6593 struct binder_node *node,
6594 struct binder_node *prev_node, bool hash_ptrs)
6595{
6596 /*
6597	 * Take a temporary reference on the node so that it isn't freed while
6598 * we print it.
6599 */
6600 binder_inc_node_tmpref_ilocked(node);
6601 /*
6602 * Live nodes need to drop the inner proc lock and dead nodes need to
6603 * drop the binder_dead_nodes_lock before trying to take the node lock.
6604 */
6605 if (proc)
6606 binder_inner_proc_unlock(proc);
6607 else
6608 spin_unlock(&binder_dead_nodes_lock);
6609 if (prev_node)
6610 binder_put_node(prev_node);
6611 binder_node_inner_lock(node);
6612 print_binder_node_nilocked(m, node, hash_ptrs);
6613 binder_node_inner_unlock(node);
6614 if (proc)
6615 binder_inner_proc_lock(proc);
6616 else
6617 spin_lock(&binder_dead_nodes_lock);
6618 return node;
6619}
6620
6621static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
6622 bool print_all, bool hash_ptrs)
6623{
6624 struct binder_work *w;
6625 struct rb_node *n;
6626 size_t start_pos = m->count;
6627 size_t header_pos;
6628 struct binder_node *last_node = NULL;
6629
6630 seq_printf(m, "proc %d\n", proc->pid);
6631 seq_printf(m, "context %s\n", proc->context->name);
6632 header_pos = m->count;
6633
6634 binder_inner_proc_lock(proc);
6635 for (n = rb_first(&proc->threads); n; n = rb_next(n))
6636 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6637 rb_node), print_all, hash_ptrs);
6638
6639 for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
6640 struct binder_node *node = rb_entry(n, struct binder_node,
6641 rb_node);
6642 if (!print_all && !node->has_async_transaction)
6643 continue;
6644
6645 last_node = print_next_binder_node_ilocked(m, proc, node,
6646 last_node,
6647 hash_ptrs);
6648 }
6649 binder_inner_proc_unlock(proc);
6650 if (last_node)
6651 binder_put_node(last_node);
6652
6653 if (print_all) {
6654 binder_proc_lock(proc);
6655 for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
6656 print_binder_ref_olocked(m, rb_entry(n,
6657 struct binder_ref,
6658 rb_node_desc));
6659 binder_proc_unlock(proc);
6660 }
6661 binder_alloc_print_allocated(m, &proc->alloc);
6662 binder_inner_proc_lock(proc);
6663 list_for_each_entry(w, &proc->todo, entry)
6664 print_binder_work_ilocked(m, proc, " ",
6665 " pending transaction", w,
6666 hash_ptrs);
6667 list_for_each_entry(w, &proc->delivered_death, entry) {
6668 seq_puts(m, " has delivered dead binder\n");
6669 break;
6670 }
6671 list_for_each_entry(w, &proc->delivered_freeze, entry) {
6672 seq_puts(m, " has delivered freeze binder\n");
6673 break;
6674 }
6675 binder_inner_proc_unlock(proc);
6676 if (!print_all && m->count == header_pos)
6677 m->count = start_pos;
6678}
6679
6680static const char * const binder_return_strings[] = {
6681 "BR_ERROR",
6682 "BR_OK",
6683 "BR_TRANSACTION",
6684 "BR_REPLY",
6685 "BR_ACQUIRE_RESULT",
6686 "BR_DEAD_REPLY",
6687 "BR_TRANSACTION_COMPLETE",
6688 "BR_INCREFS",
6689 "BR_ACQUIRE",
6690 "BR_RELEASE",
6691 "BR_DECREFS",
6692 "BR_ATTEMPT_ACQUIRE",
6693 "BR_NOOP",
6694 "BR_SPAWN_LOOPER",
6695 "BR_FINISHED",
6696 "BR_DEAD_BINDER",
6697 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6698 "BR_FAILED_REPLY",
6699 "BR_FROZEN_REPLY",
6700 "BR_ONEWAY_SPAM_SUSPECT",
6701 "BR_TRANSACTION_PENDING_FROZEN",
6702 "BR_FROZEN_BINDER",
6703 "BR_CLEAR_FREEZE_NOTIFICATION_DONE",
6704};
6705
6706static const char * const binder_command_strings[] = {
6707 "BC_TRANSACTION",
6708 "BC_REPLY",
6709 "BC_ACQUIRE_RESULT",
6710 "BC_FREE_BUFFER",
6711 "BC_INCREFS",
6712 "BC_ACQUIRE",
6713 "BC_RELEASE",
6714 "BC_DECREFS",
6715 "BC_INCREFS_DONE",
6716 "BC_ACQUIRE_DONE",
6717 "BC_ATTEMPT_ACQUIRE",
6718 "BC_REGISTER_LOOPER",
6719 "BC_ENTER_LOOPER",
6720 "BC_EXIT_LOOPER",
6721 "BC_REQUEST_DEATH_NOTIFICATION",
6722 "BC_CLEAR_DEATH_NOTIFICATION",
6723 "BC_DEAD_BINDER_DONE",
6724 "BC_TRANSACTION_SG",
6725 "BC_REPLY_SG",
6726 "BC_REQUEST_FREEZE_NOTIFICATION",
6727 "BC_CLEAR_FREEZE_NOTIFICATION",
6728 "BC_FREEZE_NOTIFICATION_DONE",
6729};
6730
6731static const char * const binder_objstat_strings[] = {
6732 "proc",
6733 "thread",
6734 "node",
6735 "ref",
6736 "death",
6737 "transaction",
6738 "transaction_complete",
6739 "freeze",
6740};
6741
6742static void print_binder_stats(struct seq_file *m, const char *prefix,
6743 struct binder_stats *stats)
6744{
6745 int i;
6746
6747 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6748 ARRAY_SIZE(binder_command_strings));
6749 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6750 int temp = atomic_read(&stats->bc[i]);
6751
6752 if (temp)
6753 seq_printf(m, "%s%s: %d\n", prefix,
6754 binder_command_strings[i], temp);
6755 }
6756
6757 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6758 ARRAY_SIZE(binder_return_strings));
6759 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6760 int temp = atomic_read(&stats->br[i]);
6761
6762 if (temp)
6763 seq_printf(m, "%s%s: %d\n", prefix,
6764 binder_return_strings[i], temp);
6765 }
6766
6767 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6768 ARRAY_SIZE(binder_objstat_strings));
6769 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6770 ARRAY_SIZE(stats->obj_deleted));
6771 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6772 int created = atomic_read(&stats->obj_created[i]);
6773 int deleted = atomic_read(&stats->obj_deleted[i]);
6774
6775 if (created || deleted)
6776 seq_printf(m, "%s%s: active %d total %d\n",
6777 prefix,
6778 binder_objstat_strings[i],
6779 created - deleted,
6780 created);
6781 }
6782}
6783
6784static void print_binder_proc_stats(struct seq_file *m,
6785 struct binder_proc *proc)
6786{
6787 struct binder_work *w;
6788 struct binder_thread *thread;
6789 struct rb_node *n;
6790 int count, strong, weak, ready_threads;
6791 size_t free_async_space =
6792 binder_alloc_get_free_async_space(&proc->alloc);
6793
6794 seq_printf(m, "proc %d\n", proc->pid);
6795 seq_printf(m, "context %s\n", proc->context->name);
6796 count = 0;
6797 ready_threads = 0;
6798 binder_inner_proc_lock(proc);
6799 for (n = rb_first(&proc->threads); n; n = rb_next(n))
6800 count++;
6801
6802 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6803 ready_threads++;
6804
6805 seq_printf(m, " threads: %d\n", count);
6806 seq_printf(m, " requested threads: %d+%d/%d\n"
6807 " ready threads %d\n"
6808 " free async space %zd\n", proc->requested_threads,
6809 proc->requested_threads_started, proc->max_threads,
6810 ready_threads,
6811 free_async_space);
6812 count = 0;
6813 for (n = rb_first(&proc->nodes); n; n = rb_next(n))
6814 count++;
6815 binder_inner_proc_unlock(proc);
6816 seq_printf(m, " nodes: %d\n", count);
6817 count = 0;
6818 strong = 0;
6819 weak = 0;
6820 binder_proc_lock(proc);
6821 for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
6822 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6823 rb_node_desc);
6824 count++;
6825 strong += ref->data.strong;
6826 weak += ref->data.weak;
6827 }
6828 binder_proc_unlock(proc);
6829 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6830
6831 count = binder_alloc_get_allocated_count(&proc->alloc);
6832 seq_printf(m, " buffers: %d\n", count);
6833
6834 binder_alloc_print_pages(m, &proc->alloc);
6835
6836 count = 0;
6837 binder_inner_proc_lock(proc);
6838 list_for_each_entry(w, &proc->todo, entry) {
6839 if (w->type == BINDER_WORK_TRANSACTION)
6840 count++;
6841 }
6842 binder_inner_proc_unlock(proc);
6843 seq_printf(m, " pending transactions: %d\n", count);
6844
6845 print_binder_stats(m, " ", &proc->stats);
6846}
6847
6848static void print_binder_state(struct seq_file *m, bool hash_ptrs)
6849{
6850 struct binder_proc *proc;
6851 struct binder_node *node;
6852 struct binder_node *last_node = NULL;
6853
6854 seq_puts(m, "binder state:\n");
6855
6856 spin_lock(&binder_dead_nodes_lock);
6857 if (!hlist_empty(&binder_dead_nodes))
6858 seq_puts(m, "dead nodes:\n");
6859 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
6860 last_node = print_next_binder_node_ilocked(m, NULL, node,
6861 last_node,
6862 hash_ptrs);
6863 spin_unlock(&binder_dead_nodes_lock);
6864 if (last_node)
6865 binder_put_node(last_node);
6866
6867 mutex_lock(&binder_procs_lock);
6868 hlist_for_each_entry(proc, &binder_procs, proc_node)
6869 print_binder_proc(m, proc, true, hash_ptrs);
6870 mutex_unlock(&binder_procs_lock);
6871}
6872
6873static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
6874{
6875 struct binder_proc *proc;
6876
6877 seq_puts(m, "binder transactions:\n");
6878 mutex_lock(&binder_procs_lock);
6879 hlist_for_each_entry(proc, &binder_procs, proc_node)
6880 print_binder_proc(m, proc, false, hash_ptrs);
6881 mutex_unlock(&binder_procs_lock);
6882}
6883
6884static int state_show(struct seq_file *m, void *unused)
6885{
6886 print_binder_state(m, false);
6887 return 0;
6888}
6889
6890static int state_hashed_show(struct seq_file *m, void *unused)
6891{
6892 print_binder_state(m, true);
6893 return 0;
6894}
6895
6896static int stats_show(struct seq_file *m, void *unused)
6897{
6898 struct binder_proc *proc;
6899
6900 seq_puts(m, "binder stats:\n");
6901
6902 print_binder_stats(m, "", &binder_stats);
6903
6904 mutex_lock(&binder_procs_lock);
6905 hlist_for_each_entry(proc, &binder_procs, proc_node)
6906 print_binder_proc_stats(m, proc);
6907 mutex_unlock(&binder_procs_lock);
6908
6909 return 0;
6910}
6911
6912static int transactions_show(struct seq_file *m, void *unused)
6913{
6914 print_binder_transactions(m, false);
6915 return 0;
6916}
6917
6918static int transactions_hashed_show(struct seq_file *m, void *unused)
6919{
6920 print_binder_transactions(m, true);
6921 return 0;
6922}
6923
6924static int proc_show(struct seq_file *m, void *unused)
6925{
6926 struct binder_proc *itr;
6927 int pid = (unsigned long)m->private;
6928
6929 guard(mutex)(&binder_procs_lock);
6930 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6931 if (itr->pid == pid) {
6932 seq_puts(m, "binder proc state:\n");
6933 print_binder_proc(m, itr, true, false);
6934 }
6935 }
6936
6937 return 0;
6938}
6939
6940static void print_binder_transaction_log_entry(struct seq_file *m,
6941 struct binder_transaction_log_entry *e)
6942{
6943 int debug_id = READ_ONCE(e->debug_id_done);
6944 /*
6945 * read barrier to guarantee debug_id_done read before
6946 * we print the log values
6947 */
6948 smp_rmb();
6949 seq_printf(m,
6950 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6951 e->debug_id, (e->call_type == 2) ? "reply" :
6952 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6953 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6954 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6955 e->return_error, e->return_error_param,
6956 e->return_error_line);
6957 /*
6958 * read-barrier to guarantee read of debug_id_done after
6959 * done printing the fields of the entry
6960 */
6961 smp_rmb();
6962 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6963 "\n" : " (incomplete)\n");
6964}
6965
6966static int transaction_log_show(struct seq_file *m, void *unused)
6967{
6968 struct binder_transaction_log *log = m->private;
6969 unsigned int log_cur = atomic_read(&log->cur);
6970 unsigned int count;
6971 unsigned int cur;
6972 int i;
6973
6974 count = log_cur + 1;
6975 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6976 0 : count % ARRAY_SIZE(log->entry);
6977 if (count > ARRAY_SIZE(log->entry) || log->full)
6978 count = ARRAY_SIZE(log->entry);
6979 for (i = 0; i < count; i++) {
6980 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6981
6982 print_binder_transaction_log_entry(m, &log->entry[index]);
6983 }
6984 return 0;
6985}
6986
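/*
 * A worked example of the ring-buffer arithmetic above, assuming
 * ARRAY_SIZE(log->entry) == 32: after 40 insertions log->cur is 39
 * and log->full is set, so count is clamped to 32 and cur = 40 % 32 =
 * 8, i.e. printing starts at the oldest surviving entry (index 8) and
 * wraps around through index 7. Before the first wrap (say log->cur
 * == 5, log->full false) cur stays 0 and only entries 0..5 print.
 */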
6987const struct file_operations binder_fops = {
6988 .owner = THIS_MODULE,
6989 .poll = binder_poll,
6990 .unlocked_ioctl = binder_ioctl,
6991 .compat_ioctl = compat_ptr_ioctl,
6992 .mmap = binder_mmap,
6993 .open = binder_open,
6994 .flush = binder_flush,
6995 .release = binder_release,
6996};
6997
6998DEFINE_SHOW_ATTRIBUTE(state);
6999DEFINE_SHOW_ATTRIBUTE(state_hashed);
7000DEFINE_SHOW_ATTRIBUTE(stats);
7001DEFINE_SHOW_ATTRIBUTE(transactions);
7002DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
7003DEFINE_SHOW_ATTRIBUTE(transaction_log);
7004
7005const struct binder_debugfs_entry binder_debugfs_entries[] = {
7006 {
7007 .name = "state",
7008 .mode = 0444,
7009 .fops = &state_fops,
7010 .data = NULL,
7011 },
7012 {
7013 .name = "state_hashed",
7014 .mode = 0444,
7015 .fops = &state_hashed_fops,
7016 .data = NULL,
7017 },
7018 {
7019 .name = "stats",
7020 .mode = 0444,
7021 .fops = &stats_fops,
7022 .data = NULL,
7023 },
7024 {
7025 .name = "transactions",
7026 .mode = 0444,
7027 .fops = &transactions_fops,
7028 .data = NULL,
7029 },
7030 {
7031 .name = "transactions_hashed",
7032 .mode = 0444,
7033 .fops = &transactions_hashed_fops,
7034 .data = NULL,
7035 },
7036 {
7037 .name = "transaction_log",
7038 .mode = 0444,
7039 .fops = &transaction_log_fops,
7040 .data = &binder_transaction_log,
7041 },
7042 {
7043 .name = "failed_transaction_log",
7044 .mode = 0444,
7045 .fops = &transaction_log_fops,
7046 .data = &binder_transaction_log_failed,
7047 },
7048 {} /* terminator */
7049};
7050
7051void binder_add_device(struct binder_device *device)
7052{
7053 guard(spinlock)(&binder_devices_lock);
7054 hlist_add_head(&device->hlist, &binder_devices);
7055}
7056
7057void binder_remove_device(struct binder_device *device)
7058{
7059 guard(spinlock)(&binder_devices_lock);
7060 hlist_del_init(&device->hlist);
7061}
7062
7063static int __init init_binder_device(const char *name)
7064{
7065 int ret;
7066 struct binder_device *binder_device;
7067
7068 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
7069 if (!binder_device)
7070 return -ENOMEM;
7071
7072 binder_device->miscdev.fops = &binder_fops;
7073 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
7074 binder_device->miscdev.name = name;
7075
7076 refcount_set(&binder_device->ref, 1);
7077 binder_device->context.binder_context_mgr_uid = INVALID_UID;
7078 binder_device->context.name = name;
7079 mutex_init(&binder_device->context.context_mgr_node_lock);
7080
7081 ret = misc_register(&binder_device->miscdev);
7082 if (ret < 0) {
7083 kfree(binder_device);
7084 return ret;
7085 }
7086
7087 binder_add_device(binder_device);
7088
7089 return ret;
7090}
7091
7092static int __init binder_init(void)
7093{
7094 int ret;
7095 char *device_name, *device_tmp;
7096 struct binder_device *device;
7097 struct hlist_node *tmp;
7098 char *device_names = NULL;
7099 const struct binder_debugfs_entry *db_entry;
7100
7101 ret = binder_alloc_shrinker_init();
7102 if (ret)
7103 return ret;
7104
7105 atomic_set(&binder_transaction_log.cur, ~0U);
7106 atomic_set(&binder_transaction_log_failed.cur, ~0U);
7107
7108 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
7109
7110 binder_for_each_debugfs_entry(db_entry)
7111 debugfs_create_file(db_entry->name,
7112 db_entry->mode,
7113 binder_debugfs_dir_entry_root,
7114 db_entry->data,
7115 db_entry->fops);
7116
7117 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
7118 binder_debugfs_dir_entry_root);
7119
7120 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
7121 strcmp(binder_devices_param, "") != 0) {
7122 /*
7123 * Copy the module_parameter string, because we don't want to
7124 * tokenize it in-place.
7125 */
7126 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
7127 if (!device_names) {
7128 ret = -ENOMEM;
7129 goto err_alloc_device_names_failed;
7130 }
7131
7132 device_tmp = device_names;
7133 while ((device_name = strsep(&device_tmp, ","))) {
7134 ret = init_binder_device(device_name);
7135 if (ret)
7136 goto err_init_binder_device_failed;
7137 }
7138 }
7139
7140 ret = genl_register_family(&binder_nl_family);
7141 if (ret)
7142 goto err_init_binder_device_failed;
7143
7144 ret = init_binderfs();
7145 if (ret)
7146 goto err_init_binderfs_failed;
7147
7148 return ret;
7149
7150err_init_binderfs_failed:
7151 genl_unregister_family(&binder_nl_family);
7152
7153err_init_binder_device_failed:
7154 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
7155 misc_deregister(&device->miscdev);
7156 binder_remove_device(device);
7157 kfree(device);
7158 }
7159
7160 kfree(device_names);
7161
7162err_alloc_device_names_failed:
7163 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
7164 binder_alloc_shrinker_exit();
7165
7166 return ret;
7167}
7168
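/*
 * The binder_devices_param string consumed by binder_init() above is
 * populated from the CONFIG_ANDROID_BINDER_DEVICES default or the
 * module parameter, so a comma-separated list on the kernel command
 * line, e.g. (the exact prefix depends on how the object is named, so
 * treat this spelling as an assumption):
 *
 *	binder.devices=binder,hwbinder,vndbinder
 *
 * creates one misc device per name. When CONFIG_ANDROID_BINDERFS is
 * enabled this path is skipped and devices are created through
 * binderfs instead.
 */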
7169device_initcall(binder_init);
7170
7171#define CREATE_TRACE_POINTS
7172#include "binder_trace.h"