Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0-only
2/* binder.c
3 *
4 * Android IPC Subsystem
5 *
6 * Copyright (C) 2007-2008 Google, Inc.
7 */
8
9/*
10 * Locking overview
11 *
12 * There are 3 main spinlocks which must be acquired in the
13 * order shown:
14 *
15 * 1) proc->outer_lock : protects binder_ref
16 * binder_proc_lock() and binder_proc_unlock() are
17 * used to acq/rel.
18 * 2) node->lock : protects most fields of binder_node.
19 * binder_node_lock() and binder_node_unlock() are
20 * used to acq/rel
21 * 3) proc->inner_lock : protects the thread and node lists
22 * (proc->threads, proc->waiting_threads, proc->nodes)
23 * and all todo lists associated with the binder_proc
24 * (proc->todo, thread->todo, proc->delivered_death and
25 * node->async_todo), as well as thread->transaction_stack
26 * binder_inner_proc_lock() and binder_inner_proc_unlock()
27 * are used to acq/rel
28 *
29 * Any lock under procA must never be nested under any lock at the same
30 * level or below on procB.
31 *
32 * Functions that require a lock held on entry indicate the required
33 * lock in the suffix of the function name:
34 *
35 * foo_olocked() : requires proc->outer_lock
36 * foo_nlocked() : requires node->lock
37 * foo_ilocked() : requires proc->inner_lock
38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39 * foo_nilocked(): requires node->lock and proc->inner_lock
40 * ...
41 */
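
/*
 * For illustration only: a caller that needs both the node lock and the
 * owning proc's inner lock nests them in the order described above, e.g.
 *
 *	binder_node_lock(node);
 *	if (node->proc)
 *		binder_inner_proc_lock(node->proc);
 *	...
 *	if (node->proc)
 *		binder_inner_proc_unlock(node->proc);
 *	binder_node_unlock(node);
 *
 * which is the pattern wrapped by binder_node_inner_lock() and
 * binder_node_inner_unlock() further down in this file.
 */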
42
43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
45#include <linux/fdtable.h>
46#include <linux/file.h>
47#include <linux/freezer.h>
48#include <linux/fs.h>
49#include <linux/list.h>
50#include <linux/miscdevice.h>
51#include <linux/module.h>
52#include <linux/mutex.h>
53#include <linux/nsproxy.h>
54#include <linux/poll.h>
55#include <linux/debugfs.h>
56#include <linux/rbtree.h>
57#include <linux/sched/signal.h>
58#include <linux/sched/mm.h>
59#include <linux/seq_file.h>
60#include <linux/string.h>
61#include <linux/uaccess.h>
62#include <linux/pid_namespace.h>
63#include <linux/security.h>
64#include <linux/spinlock.h>
65#include <linux/ratelimit.h>
66#include <linux/syscalls.h>
67#include <linux/task_work.h>
68#include <linux/sizes.h>
69
70#include <uapi/linux/android/binder.h>
71
72#include <asm/cacheflush.h>
73
74#include "binder_internal.h"
75#include "binder_trace.h"
76
77static HLIST_HEAD(binder_deferred_list);
78static DEFINE_MUTEX(binder_deferred_lock);
79
80static HLIST_HEAD(binder_devices);
81static HLIST_HEAD(binder_procs);
82static DEFINE_MUTEX(binder_procs_lock);
83
84static HLIST_HEAD(binder_dead_nodes);
85static DEFINE_SPINLOCK(binder_dead_nodes_lock);
86
87static struct dentry *binder_debugfs_dir_entry_root;
88static struct dentry *binder_debugfs_dir_entry_proc;
89static atomic_t binder_last_id;
90
91static int proc_show(struct seq_file *m, void *unused);
92DEFINE_SHOW_ATTRIBUTE(proc);
93
94#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
95
96enum {
97 BINDER_DEBUG_USER_ERROR = 1U << 0,
98 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
99 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
100 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
101 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
102 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
103 BINDER_DEBUG_READ_WRITE = 1U << 6,
104 BINDER_DEBUG_USER_REFS = 1U << 7,
105 BINDER_DEBUG_THREADS = 1U << 8,
106 BINDER_DEBUG_TRANSACTION = 1U << 9,
107 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
108 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
109 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
110 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
111 BINDER_DEBUG_SPINLOCKS = 1U << 14,
112};
113static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
114 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
115module_param_named(debug_mask, binder_debug_mask, uint, 0644);
116
117char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
118module_param_named(devices, binder_devices_param, charp, 0444);
119
120static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
121static int binder_stop_on_user_error;
122
123static int binder_set_stop_on_user_error(const char *val,
124 const struct kernel_param *kp)
125{
126 int ret;
127
128 ret = param_set_int(val, kp);
129 if (binder_stop_on_user_error < 2)
130 wake_up(&binder_user_error_wait);
131 return ret;
132}
133module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
134 param_get_int, &binder_stop_on_user_error, 0644);
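
/*
 * Usage sketch (not driver code): debug_mask is a 0644 module parameter,
 * so, assuming the usual module name "binder", the debug output above can
 * be raised at runtime from a root shell with
 *
 *	echo 0x3ff > /sys/module/binder/parameters/debug_mask
 *
 * which sets bits 0-9 (BINDER_DEBUG_USER_ERROR through
 * BINDER_DEBUG_TRANSACTION).
 */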
135
136#define binder_debug(mask, x...) \
137 do { \
138 if (binder_debug_mask & mask) \
139 pr_info_ratelimited(x); \
140 } while (0)
141
142#define binder_user_error(x...) \
143 do { \
144 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
145 pr_info_ratelimited(x); \
146 if (binder_stop_on_user_error) \
147 binder_stop_on_user_error = 2; \
148 } while (0)
149
150#define to_flat_binder_object(hdr) \
151 container_of(hdr, struct flat_binder_object, hdr)
152
153#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
154
155#define to_binder_buffer_object(hdr) \
156 container_of(hdr, struct binder_buffer_object, hdr)
157
158#define to_binder_fd_array_object(hdr) \
159 container_of(hdr, struct binder_fd_array_object, hdr)
160
161static struct binder_stats binder_stats;
162
163static inline void binder_stats_deleted(enum binder_stat_types type)
164{
165 atomic_inc(&binder_stats.obj_deleted[type]);
166}
167
168static inline void binder_stats_created(enum binder_stat_types type)
169{
170 atomic_inc(&binder_stats.obj_created[type]);
171}
172
173struct binder_transaction_log binder_transaction_log;
174struct binder_transaction_log binder_transaction_log_failed;
175
176static struct binder_transaction_log_entry *binder_transaction_log_add(
177 struct binder_transaction_log *log)
178{
179 struct binder_transaction_log_entry *e;
180 unsigned int cur = atomic_inc_return(&log->cur);
181
182 if (cur >= ARRAY_SIZE(log->entry))
183 log->full = true;
184 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
185 WRITE_ONCE(e->debug_id_done, 0);
186 /*
187 * write-barrier to synchronize access to e->debug_id_done.
188 * We make sure the initialized 0 value is seen before
189 * the other fields are zeroed by memset().
190 */
191 smp_wmb();
192 memset(e, 0, sizeof(*e));
193 return e;
194}
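
/*
 * Sketch of the matching read side (a consumer of this log is expected to
 * pair the smp_wmb() above with smp_rmb() before trusting an entry):
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *	smp_rmb();
 *	copy = *e;	// entry is complete only if 'done' matches its debug_id
 */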
195
196enum binder_deferred_state {
197 BINDER_DEFERRED_FLUSH = 0x01,
198 BINDER_DEFERRED_RELEASE = 0x02,
199};
200
201enum {
202 BINDER_LOOPER_STATE_REGISTERED = 0x01,
203 BINDER_LOOPER_STATE_ENTERED = 0x02,
204 BINDER_LOOPER_STATE_EXITED = 0x04,
205 BINDER_LOOPER_STATE_INVALID = 0x08,
206 BINDER_LOOPER_STATE_WAITING = 0x10,
207 BINDER_LOOPER_STATE_POLL = 0x20,
208};
209
210/**
211 * binder_proc_lock() - Acquire outer lock for given binder_proc
212 * @proc: struct binder_proc to acquire
213 *
214 * Acquires proc->outer_lock. Used to protect binder_ref
215 * structures associated with the given proc.
216 */
217#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
218static void
219_binder_proc_lock(struct binder_proc *proc, int line)
220 __acquires(&proc->outer_lock)
221{
222 binder_debug(BINDER_DEBUG_SPINLOCKS,
223 "%s: line=%d\n", __func__, line);
224 spin_lock(&proc->outer_lock);
225}
226
227/**
228 * binder_proc_unlock() - Release spinlock for given binder_proc
229 * @proc: struct binder_proc being released
230 *
231 * Release lock acquired via binder_proc_lock()
232 */
233#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
234static void
235_binder_proc_unlock(struct binder_proc *proc, int line)
236 __releases(&proc->outer_lock)
237{
238 binder_debug(BINDER_DEBUG_SPINLOCKS,
239 "%s: line=%d\n", __func__, line);
240 spin_unlock(&proc->outer_lock);
241}
242
243/**
244 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
245 * @proc: struct binder_proc to acquire
246 *
247 * Acquires proc->inner_lock. Used to protect todo lists
248 */
249#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
250static void
251_binder_inner_proc_lock(struct binder_proc *proc, int line)
252 __acquires(&proc->inner_lock)
253{
254 binder_debug(BINDER_DEBUG_SPINLOCKS,
255 "%s: line=%d\n", __func__, line);
256 spin_lock(&proc->inner_lock);
257}
258
259/**
260 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
261 * @proc: struct binder_proc being released
262 *
263 * Release lock acquired via binder_inner_proc_lock()
264 */
265#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
266static void
267_binder_inner_proc_unlock(struct binder_proc *proc, int line)
268 __releases(&proc->inner_lock)
269{
270 binder_debug(BINDER_DEBUG_SPINLOCKS,
271 "%s: line=%d\n", __func__, line);
272 spin_unlock(&proc->inner_lock);
273}
274
275/**
276 * binder_node_lock() - Acquire spinlock for given binder_node
277 * @node: struct binder_node to acquire
278 *
279 * Acquires node->lock. Used to protect binder_node fields
280 */
281#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
282static void
283_binder_node_lock(struct binder_node *node, int line)
284 __acquires(&node->lock)
285{
286 binder_debug(BINDER_DEBUG_SPINLOCKS,
287 "%s: line=%d\n", __func__, line);
288 spin_lock(&node->lock);
289}
290
291/**
292 * binder_node_unlock() - Release spinlock for given binder_node
293 * @node: struct binder_node being released
294 *
295 * Release lock acquired via binder_node_lock()
296 */
297#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
298static void
299_binder_node_unlock(struct binder_node *node, int line)
300 __releases(&node->lock)
301{
302 binder_debug(BINDER_DEBUG_SPINLOCKS,
303 "%s: line=%d\n", __func__, line);
304 spin_unlock(&node->lock);
305}
306
307/**
308 * binder_node_inner_lock() - Acquire node and inner locks
309 * @node: struct binder_node to acquire
310 *
311 * Acquires node->lock. If node->proc is non-NULL, also acquires
312 * proc->inner_lock. Used to protect binder_node fields
313 */
314#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
315static void
316_binder_node_inner_lock(struct binder_node *node, int line)
317 __acquires(&node->lock) __acquires(&node->proc->inner_lock)
318{
319 binder_debug(BINDER_DEBUG_SPINLOCKS,
320 "%s: line=%d\n", __func__, line);
321 spin_lock(&node->lock);
322 if (node->proc)
323 binder_inner_proc_lock(node->proc);
324 else
325 /* annotation for sparse */
326 __acquire(&node->proc->inner_lock);
327}
328
329/**
330 * binder_node_inner_unlock() - Release node and inner locks
331 * @node: struct binder_node being released
332 *
333 * Release locks acquired via binder_node_inner_lock()
334 */
335#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
336static void
337_binder_node_inner_unlock(struct binder_node *node, int line)
338 __releases(&node->lock) __releases(&node->proc->inner_lock)
339{
340 struct binder_proc *proc = node->proc;
341
342 binder_debug(BINDER_DEBUG_SPINLOCKS,
343 "%s: line=%d\n", __func__, line);
344 if (proc)
345 binder_inner_proc_unlock(proc);
346 else
347 /* annotation for sparse */
348 __release(&node->proc->inner_lock);
349 spin_unlock(&node->lock);
350}
351
352static bool binder_worklist_empty_ilocked(struct list_head *list)
353{
354 return list_empty(list);
355}
356
357/**
358 * binder_worklist_empty() - Check if no items on the work list
359 * @proc: binder_proc associated with list
360 * @list: list to check
361 *
362 * Return: true if there are no items on list, else false
363 */
364static bool binder_worklist_empty(struct binder_proc *proc,
365 struct list_head *list)
366{
367 bool ret;
368
369 binder_inner_proc_lock(proc);
370 ret = binder_worklist_empty_ilocked(list);
371 binder_inner_proc_unlock(proc);
372 return ret;
373}
374
375/**
376 * binder_enqueue_work_ilocked() - Add an item to the work list
377 * @work: struct binder_work to add to list
378 * @target_list: list to add work to
379 *
380 * Adds the work to the specified list. Asserts that work
381 * is not already on a list.
382 *
383 * Requires the proc->inner_lock to be held.
384 */
385static void
386binder_enqueue_work_ilocked(struct binder_work *work,
387 struct list_head *target_list)
388{
389 BUG_ON(target_list == NULL);
390 BUG_ON(work->entry.next && !list_empty(&work->entry));
391 list_add_tail(&work->entry, target_list);
392}
393
394/**
395 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
396 * @thread: thread to queue work to
397 * @work: struct binder_work to add to list
398 *
399 * Adds the work to the todo list of the thread. Doesn't set the process_todo
400 * flag, which means that (if it wasn't already set) the thread will go to
401 * sleep without handling this work when it calls read.
402 *
403 * Requires the proc->inner_lock to be held.
404 */
405static void
406binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
407 struct binder_work *work)
408{
409 WARN_ON(!list_empty(&thread->waiting_thread_node));
410 binder_enqueue_work_ilocked(work, &thread->todo);
411}
412
413/**
414 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
415 * @thread: thread to queue work to
416 * @work: struct binder_work to add to list
417 *
418 * Adds the work to the todo list of the thread, and enables processing
419 * of the todo queue.
420 *
421 * Requires the proc->inner_lock to be held.
422 */
423static void
424binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
425 struct binder_work *work)
426{
427 WARN_ON(!list_empty(&thread->waiting_thread_node));
428 binder_enqueue_work_ilocked(work, &thread->todo);
429 thread->process_todo = true;
430}
431
432/**
433 * binder_enqueue_thread_work() - Add an item to the thread work list
434 * @thread: thread to queue work to
435 * @work: struct binder_work to add to list
436 *
437 * Adds the work to the todo list of the thread, and enables processing
438 * of the todo queue.
439 */
440static void
441binder_enqueue_thread_work(struct binder_thread *thread,
442 struct binder_work *work)
443{
444 binder_inner_proc_lock(thread->proc);
445 binder_enqueue_thread_work_ilocked(thread, work);
446 binder_inner_proc_unlock(thread->proc);
447}
448
449static void
450binder_dequeue_work_ilocked(struct binder_work *work)
451{
452 list_del_init(&work->entry);
453}
454
455/**
456 * binder_dequeue_work() - Removes an item from the work list
457 * @proc: binder_proc associated with list
458 * @work: struct binder_work to remove from list
459 *
460 * Removes the specified work item from whatever list it is on.
461 * Can safely be called if work is not on any list.
462 */
463static void
464binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
465{
466 binder_inner_proc_lock(proc);
467 binder_dequeue_work_ilocked(work);
468 binder_inner_proc_unlock(proc);
469}
470
471static struct binder_work *binder_dequeue_work_head_ilocked(
472 struct list_head *list)
473{
474 struct binder_work *w;
475
476 w = list_first_entry_or_null(list, struct binder_work, entry);
477 if (w)
478 list_del_init(&w->entry);
479 return w;
480}
481
482static void
483binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
484static void binder_free_thread(struct binder_thread *thread);
485static void binder_free_proc(struct binder_proc *proc);
486static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
487
488static bool binder_has_work_ilocked(struct binder_thread *thread,
489 bool do_proc_work)
490{
491 return thread->process_todo ||
492 thread->looper_need_return ||
493 (do_proc_work &&
494 !binder_worklist_empty_ilocked(&thread->proc->todo));
495}
496
497static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
498{
499 bool has_work;
500
501 binder_inner_proc_lock(thread->proc);
502 has_work = binder_has_work_ilocked(thread, do_proc_work);
503 binder_inner_proc_unlock(thread->proc);
504
505 return has_work;
506}
507
508static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
509{
510 return !thread->transaction_stack &&
511 binder_worklist_empty_ilocked(&thread->todo) &&
512 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
513 BINDER_LOOPER_STATE_REGISTERED));
514}
515
516static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
517 bool sync)
518{
519 struct rb_node *n;
520 struct binder_thread *thread;
521
522 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
523 thread = rb_entry(n, struct binder_thread, rb_node);
524 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
525 binder_available_for_proc_work_ilocked(thread)) {
526 if (sync)
527 wake_up_interruptible_sync(&thread->wait);
528 else
529 wake_up_interruptible(&thread->wait);
530 }
531 }
532}
533
534/**
535 * binder_select_thread_ilocked() - selects a thread for doing proc work.
536 * @proc: process to select a thread from
537 *
538 * Note that calling this function moves the thread off the waiting_threads
539 * list, so it can only be woken up by the caller of this function, or a
540 * signal. Therefore, callers *should* always wake up the thread this function
541 * returns.
542 *
543 * Return: If there's a thread currently waiting for process work,
544 * returns that thread. Otherwise returns NULL.
545 */
546static struct binder_thread *
547binder_select_thread_ilocked(struct binder_proc *proc)
548{
549 struct binder_thread *thread;
550
551 assert_spin_locked(&proc->inner_lock);
552 thread = list_first_entry_or_null(&proc->waiting_threads,
553 struct binder_thread,
554 waiting_thread_node);
555
556 if (thread)
557 list_del_init(&thread->waiting_thread_node);
558
559 return thread;
560}
561
562/**
563 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
564 * @proc: process to wake up a thread in
565 * @thread: specific thread to wake-up (may be NULL)
566 * @sync: whether to do a synchronous wake-up
567 *
568 * This function wakes up a thread in the @proc process.
569 * The caller may provide a specific thread to wake-up in
570 * the @thread parameter. If @thread is NULL, this function
571 * will wake up threads that have called poll().
572 *
573 * Note that for this function to work as expected, callers
574 * should first call binder_select_thread() to find a thread
575 * to handle the work (if they don't have a thread already),
576 * and pass the result into the @thread parameter.
577 */
578static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
579 struct binder_thread *thread,
580 bool sync)
581{
582 assert_spin_locked(&proc->inner_lock);
583
584 if (thread) {
585 if (sync)
586 wake_up_interruptible_sync(&thread->wait);
587 else
588 wake_up_interruptible(&thread->wait);
589 return;
590 }
591
592 /* Didn't find a thread waiting for proc work; this can happen
593 * in two scenarios:
594 * 1. All threads are busy handling transactions
595 * In that case, one of those threads should call back into
596 * the kernel driver soon and pick up this work.
597 * 2. Threads are using the (e)poll interface, in which case
598 * they may be blocked on the waitqueue without having been
599 * added to waiting_threads. For this case, we just iterate
600 * over all threads not handling transaction work, and
601 * wake them all up. We wake all because we don't know whether
602 * a thread that called into (e)poll is handling non-binder
603 * work currently.
604 */
605 binder_wakeup_poll_threads_ilocked(proc, sync);
606}
607
608static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
609{
610 struct binder_thread *thread = binder_select_thread_ilocked(proc);
611
612 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
613}
614
615static void binder_set_nice(long nice)
616{
617 long min_nice;
618
619 if (can_nice(current, nice)) {
620 set_user_nice(current, nice);
621 return;
622 }
623 min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
624 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
625 "%d: nice value %ld not allowed use %ld instead\n",
626 current->pid, nice, min_nice);
627 set_user_nice(current, min_nice);
628 if (min_nice <= MAX_NICE)
629 return;
630 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
631}
632
633static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
634 binder_uintptr_t ptr)
635{
636 struct rb_node *n = proc->nodes.rb_node;
637 struct binder_node *node;
638
639 assert_spin_locked(&proc->inner_lock);
640
641 while (n) {
642 node = rb_entry(n, struct binder_node, rb_node);
643
644 if (ptr < node->ptr)
645 n = n->rb_left;
646 else if (ptr > node->ptr)
647 n = n->rb_right;
648 else {
649 /*
650 * take an implicit weak reference
651 * to ensure node stays alive until
652 * call to binder_put_node()
653 */
654 binder_inc_node_tmpref_ilocked(node);
655 return node;
656 }
657 }
658 return NULL;
659}
660
661static struct binder_node *binder_get_node(struct binder_proc *proc,
662 binder_uintptr_t ptr)
663{
664 struct binder_node *node;
665
666 binder_inner_proc_lock(proc);
667 node = binder_get_node_ilocked(proc, ptr);
668 binder_inner_proc_unlock(proc);
669 return node;
670}
671
672static struct binder_node *binder_init_node_ilocked(
673 struct binder_proc *proc,
674 struct binder_node *new_node,
675 struct flat_binder_object *fp)
676{
677 struct rb_node **p = &proc->nodes.rb_node;
678 struct rb_node *parent = NULL;
679 struct binder_node *node;
680 binder_uintptr_t ptr = fp ? fp->binder : 0;
681 binder_uintptr_t cookie = fp ? fp->cookie : 0;
682 __u32 flags = fp ? fp->flags : 0;
683
684 assert_spin_locked(&proc->inner_lock);
685
686 while (*p) {
687
688 parent = *p;
689 node = rb_entry(parent, struct binder_node, rb_node);
690
691 if (ptr < node->ptr)
692 p = &(*p)->rb_left;
693 else if (ptr > node->ptr)
694 p = &(*p)->rb_right;
695 else {
696 /*
697 * A matching node is already in
698 * the rb tree. Abandon the init
699 * and return it.
700 */
701 binder_inc_node_tmpref_ilocked(node);
702 return node;
703 }
704 }
705 node = new_node;
706 binder_stats_created(BINDER_STAT_NODE);
707 node->tmp_refs++;
708 rb_link_node(&node->rb_node, parent, p);
709 rb_insert_color(&node->rb_node, &proc->nodes);
710 node->debug_id = atomic_inc_return(&binder_last_id);
711 node->proc = proc;
712 node->ptr = ptr;
713 node->cookie = cookie;
714 node->work.type = BINDER_WORK_NODE;
715 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
716 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
717 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
718 spin_lock_init(&node->lock);
719 INIT_LIST_HEAD(&node->work.entry);
720 INIT_LIST_HEAD(&node->async_todo);
721 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
722 "%d:%d node %d u%016llx c%016llx created\n",
723 proc->pid, current->pid, node->debug_id,
724 (u64)node->ptr, (u64)node->cookie);
725
726 return node;
727}
728
729static struct binder_node *binder_new_node(struct binder_proc *proc,
730 struct flat_binder_object *fp)
731{
732 struct binder_node *node;
733 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
734
735 if (!new_node)
736 return NULL;
737 binder_inner_proc_lock(proc);
738 node = binder_init_node_ilocked(proc, new_node, fp);
739 binder_inner_proc_unlock(proc);
740 if (node != new_node)
741 /*
742 * The node was already added by another thread
743 */
744 kfree(new_node);
745
746 return node;
747}
748
749static void binder_free_node(struct binder_node *node)
750{
751 kfree(node);
752 binder_stats_deleted(BINDER_STAT_NODE);
753}
754
755static int binder_inc_node_nilocked(struct binder_node *node, int strong,
756 int internal,
757 struct list_head *target_list)
758{
759 struct binder_proc *proc = node->proc;
760
761 assert_spin_locked(&node->lock);
762 if (proc)
763 assert_spin_locked(&proc->inner_lock);
764 if (strong) {
765 if (internal) {
766 if (target_list == NULL &&
767 node->internal_strong_refs == 0 &&
768 !(node->proc &&
769 node == node->proc->context->binder_context_mgr_node &&
770 node->has_strong_ref)) {
771 pr_err("invalid inc strong node for %d\n",
772 node->debug_id);
773 return -EINVAL;
774 }
775 node->internal_strong_refs++;
776 } else
777 node->local_strong_refs++;
778 if (!node->has_strong_ref && target_list) {
779 struct binder_thread *thread = container_of(target_list,
780 struct binder_thread, todo);
781 binder_dequeue_work_ilocked(&node->work);
782 BUG_ON(&thread->todo != target_list);
783 binder_enqueue_deferred_thread_work_ilocked(thread,
784 &node->work);
785 }
786 } else {
787 if (!internal)
788 node->local_weak_refs++;
789 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
790 if (target_list == NULL) {
791 pr_err("invalid inc weak node for %d\n",
792 node->debug_id);
793 return -EINVAL;
794 }
795 /*
796 * See comment above
797 */
798 binder_enqueue_work_ilocked(&node->work, target_list);
799 }
800 }
801 return 0;
802}
803
804static int binder_inc_node(struct binder_node *node, int strong, int internal,
805 struct list_head *target_list)
806{
807 int ret;
808
809 binder_node_inner_lock(node);
810 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
811 binder_node_inner_unlock(node);
812
813 return ret;
814}
815
816static bool binder_dec_node_nilocked(struct binder_node *node,
817 int strong, int internal)
818{
819 struct binder_proc *proc = node->proc;
820
821 assert_spin_locked(&node->lock);
822 if (proc)
823 assert_spin_locked(&proc->inner_lock);
824 if (strong) {
825 if (internal)
826 node->internal_strong_refs--;
827 else
828 node->local_strong_refs--;
829 if (node->local_strong_refs || node->internal_strong_refs)
830 return false;
831 } else {
832 if (!internal)
833 node->local_weak_refs--;
834 if (node->local_weak_refs || node->tmp_refs ||
835 !hlist_empty(&node->refs))
836 return false;
837 }
838
839 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
840 if (list_empty(&node->work.entry)) {
841 binder_enqueue_work_ilocked(&node->work, &proc->todo);
842 binder_wakeup_proc_ilocked(proc);
843 }
844 } else {
845 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
846 !node->local_weak_refs && !node->tmp_refs) {
847 if (proc) {
848 binder_dequeue_work_ilocked(&node->work);
849 rb_erase(&node->rb_node, &proc->nodes);
850 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
851 "refless node %d deleted\n",
852 node->debug_id);
853 } else {
854 BUG_ON(!list_empty(&node->work.entry));
855 spin_lock(&binder_dead_nodes_lock);
856 /*
857 * tmp_refs could have changed so
858 * check it again
859 */
860 if (node->tmp_refs) {
861 spin_unlock(&binder_dead_nodes_lock);
862 return false;
863 }
864 hlist_del(&node->dead_node);
865 spin_unlock(&binder_dead_nodes_lock);
866 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
867 "dead node %d deleted\n",
868 node->debug_id);
869 }
870 return true;
871 }
872 }
873 return false;
874}
875
876static void binder_dec_node(struct binder_node *node, int strong, int internal)
877{
878 bool free_node;
879
880 binder_node_inner_lock(node);
881 free_node = binder_dec_node_nilocked(node, strong, internal);
882 binder_node_inner_unlock(node);
883 if (free_node)
884 binder_free_node(node);
885}
886
887static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
888{
889 /*
890 * No call to binder_inc_node() is needed since we
891 * don't need to inform userspace of any changes to
892 * tmp_refs
893 */
894 node->tmp_refs++;
895}
896
897/**
898 * binder_inc_node_tmpref() - take a temporary reference on node
899 * @node: node to reference
900 *
901 * Take reference on node to prevent the node from being freed
902 * while referenced only by a local variable. The inner lock is
903 * needed to serialize with the node work on the queue (which
904 * isn't needed after the node is dead). If the node is dead
905 * (node->proc is NULL), use binder_dead_nodes_lock to protect
906 * node->tmp_refs against dead-node-only cases where the node
907 * lock cannot be acquired (eg traversing the dead node list to
908 * print nodes)
909 */
910static void binder_inc_node_tmpref(struct binder_node *node)
911{
912 binder_node_lock(node);
913 if (node->proc)
914 binder_inner_proc_lock(node->proc);
915 else
916 spin_lock(&binder_dead_nodes_lock);
917 binder_inc_node_tmpref_ilocked(node);
918 if (node->proc)
919 binder_inner_proc_unlock(node->proc);
920 else
921 spin_unlock(&binder_dead_nodes_lock);
922 binder_node_unlock(node);
923}
924
925/**
926 * binder_dec_node_tmpref() - remove a temporary reference on node
927 * @node: node to reference
928 *
929 * Release temporary reference on node taken via binder_inc_node_tmpref()
930 */
931static void binder_dec_node_tmpref(struct binder_node *node)
932{
933 bool free_node;
934
935 binder_node_inner_lock(node);
936 if (!node->proc)
937 spin_lock(&binder_dead_nodes_lock);
938 else
939 __acquire(&binder_dead_nodes_lock);
940 node->tmp_refs--;
941 BUG_ON(node->tmp_refs < 0);
942 if (!node->proc)
943 spin_unlock(&binder_dead_nodes_lock);
944 else
945 __release(&binder_dead_nodes_lock);
946 /*
947 * Call binder_dec_node() to check if all refcounts are 0
948 * and cleanup is needed. Calling with strong=0 and internal=1
949 * causes no actual reference to be released in binder_dec_node().
950 * If that changes, a change is needed here too.
951 */
952 free_node = binder_dec_node_nilocked(node, 0, 1);
953 binder_node_inner_unlock(node);
954 if (free_node)
955 binder_free_node(node);
956}
957
958static void binder_put_node(struct binder_node *node)
959{
960 binder_dec_node_tmpref(node);
961}
962
963static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
964 u32 desc, bool need_strong_ref)
965{
966 struct rb_node *n = proc->refs_by_desc.rb_node;
967 struct binder_ref *ref;
968
969 while (n) {
970 ref = rb_entry(n, struct binder_ref, rb_node_desc);
971
972 if (desc < ref->data.desc) {
973 n = n->rb_left;
974 } else if (desc > ref->data.desc) {
975 n = n->rb_right;
976 } else if (need_strong_ref && !ref->data.strong) {
977 binder_user_error("tried to use weak ref as strong ref\n");
978 return NULL;
979 } else {
980 return ref;
981 }
982 }
983 return NULL;
984}
985
986/**
987 * binder_get_ref_for_node_olocked() - get the ref associated with given node
988 * @proc: binder_proc that owns the ref
989 * @node: binder_node of target
990 * @new_ref: newly allocated binder_ref to be initialized or %NULL
991 *
992 * Look up the ref for the given node and return it if it exists
993 *
994 * If it doesn't exist and the caller provides a newly allocated
995 * ref, initialize the fields of the newly allocated ref and insert
996 * into the given proc rb_trees and node refs list.
997 *
998 * Return: the ref for node. It is possible that another thread
999 * allocated/initialized the ref first in which case the
1000 * returned ref would be different than the passed-in
1001 * new_ref. new_ref must be kfree'd by the caller in
1002 * this case.
1003 */
1004static struct binder_ref *binder_get_ref_for_node_olocked(
1005 struct binder_proc *proc,
1006 struct binder_node *node,
1007 struct binder_ref *new_ref)
1008{
1009 struct binder_context *context = proc->context;
1010 struct rb_node **p = &proc->refs_by_node.rb_node;
1011 struct rb_node *parent = NULL;
1012 struct binder_ref *ref;
1013 struct rb_node *n;
1014
1015 while (*p) {
1016 parent = *p;
1017 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1018
1019 if (node < ref->node)
1020 p = &(*p)->rb_left;
1021 else if (node > ref->node)
1022 p = &(*p)->rb_right;
1023 else
1024 return ref;
1025 }
1026 if (!new_ref)
1027 return NULL;
1028
1029 binder_stats_created(BINDER_STAT_REF);
1030 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1031 new_ref->proc = proc;
1032 new_ref->node = node;
1033 rb_link_node(&new_ref->rb_node_node, parent, p);
1034 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1035
1036 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1037 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1038 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1039 if (ref->data.desc > new_ref->data.desc)
1040 break;
1041 new_ref->data.desc = ref->data.desc + 1;
1042 }
1043
1044 p = &proc->refs_by_desc.rb_node;
1045 while (*p) {
1046 parent = *p;
1047 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1048
1049 if (new_ref->data.desc < ref->data.desc)
1050 p = &(*p)->rb_left;
1051 else if (new_ref->data.desc > ref->data.desc)
1052 p = &(*p)->rb_right;
1053 else
1054 BUG();
1055 }
1056 rb_link_node(&new_ref->rb_node_desc, parent, p);
1057 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1058
1059 binder_node_lock(node);
1060 hlist_add_head(&new_ref->node_entry, &node->refs);
1061
1062 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1063 "%d new ref %d desc %d for node %d\n",
1064 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1065 node->debug_id);
1066 binder_node_unlock(node);
1067 return new_ref;
1068}
1069
1070static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1071{
1072 bool delete_node = false;
1073
1074 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1075 "%d delete ref %d desc %d for node %d\n",
1076 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1077 ref->node->debug_id);
1078
1079 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1080 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1081
1082 binder_node_inner_lock(ref->node);
1083 if (ref->data.strong)
1084 binder_dec_node_nilocked(ref->node, 1, 1);
1085
1086 hlist_del(&ref->node_entry);
1087 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1088 binder_node_inner_unlock(ref->node);
1089 /*
1090 * Clear ref->node unless we want the caller to free the node
1091 */
1092 if (!delete_node) {
1093 /*
1094 * The caller uses ref->node to determine
1095 * whether the node needs to be freed. Clear
1096 * it since the node is still alive.
1097 */
1098 ref->node = NULL;
1099 }
1100
1101 if (ref->death) {
1102 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1103 "%d delete ref %d desc %d has death notification\n",
1104 ref->proc->pid, ref->data.debug_id,
1105 ref->data.desc);
1106 binder_dequeue_work(ref->proc, &ref->death->work);
1107 binder_stats_deleted(BINDER_STAT_DEATH);
1108 }
1109 binder_stats_deleted(BINDER_STAT_REF);
1110}
1111
1112/**
1113 * binder_inc_ref_olocked() - increment the ref for given handle
1114 * @ref: ref to be incremented
1115 * @strong: if true, strong increment, else weak
1116 * @target_list: list to queue node work on
1117 *
1118 * Increment the ref. @ref->proc->outer_lock must be held on entry
1119 *
1120 * Return: 0, if successful, else errno
1121 */
1122static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1123 struct list_head *target_list)
1124{
1125 int ret;
1126
1127 if (strong) {
1128 if (ref->data.strong == 0) {
1129 ret = binder_inc_node(ref->node, 1, 1, target_list);
1130 if (ret)
1131 return ret;
1132 }
1133 ref->data.strong++;
1134 } else {
1135 if (ref->data.weak == 0) {
1136 ret = binder_inc_node(ref->node, 0, 1, target_list);
1137 if (ret)
1138 return ret;
1139 }
1140 ref->data.weak++;
1141 }
1142 return 0;
1143}
1144
1145/**
1146 * binder_dec_ref_olocked() - dec the ref for given handle
1147 * @ref: ref to be decremented
1148 * @strong: if true, strong decrement, else weak
1149 *
1150 * Decrement the ref. @ref->proc->outer_lock must be held on entry
1151 *
1152 * Return: true if ref is cleaned up and ready to be freed
1153 */
1154static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1155{
1156 if (strong) {
1157 if (ref->data.strong == 0) {
1158 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1159 ref->proc->pid, ref->data.debug_id,
1160 ref->data.desc, ref->data.strong,
1161 ref->data.weak);
1162 return false;
1163 }
1164 ref->data.strong--;
1165 if (ref->data.strong == 0)
1166 binder_dec_node(ref->node, strong, 1);
1167 } else {
1168 if (ref->data.weak == 0) {
1169 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1170 ref->proc->pid, ref->data.debug_id,
1171 ref->data.desc, ref->data.strong,
1172 ref->data.weak);
1173 return false;
1174 }
1175 ref->data.weak--;
1176 }
1177 if (ref->data.strong == 0 && ref->data.weak == 0) {
1178 binder_cleanup_ref_olocked(ref);
1179 return true;
1180 }
1181 return false;
1182}
1183
1184/**
1185 * binder_get_node_from_ref() - get the node from the given proc/desc
1186 * @proc: proc containing the ref
1187 * @desc: the handle associated with the ref
1188 * @need_strong_ref: if true, only return node if ref is strong
1189 * @rdata: the id/refcount data for the ref
1190 *
1191 * Given a proc and ref handle, return the associated binder_node
1192 *
1193 * Return: a binder_node or NULL if not found or not strong when strong required
1194 */
1195static struct binder_node *binder_get_node_from_ref(
1196 struct binder_proc *proc,
1197 u32 desc, bool need_strong_ref,
1198 struct binder_ref_data *rdata)
1199{
1200 struct binder_node *node;
1201 struct binder_ref *ref;
1202
1203 binder_proc_lock(proc);
1204 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1205 if (!ref)
1206 goto err_no_ref;
1207 node = ref->node;
1208 /*
1209 * Take an implicit reference on the node to ensure
1210 * it stays alive until the call to binder_put_node()
1211 */
1212 binder_inc_node_tmpref(node);
1213 if (rdata)
1214 *rdata = ref->data;
1215 binder_proc_unlock(proc);
1216
1217 return node;
1218
1219err_no_ref:
1220 binder_proc_unlock(proc);
1221 return NULL;
1222}
1223
1224/**
1225 * binder_free_ref() - free the binder_ref
1226 * @ref: ref to free
1227 *
1228 * Free the binder_ref. Free the binder_node indicated by ref->node
1229 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1230 */
1231static void binder_free_ref(struct binder_ref *ref)
1232{
1233 if (ref->node)
1234 binder_free_node(ref->node);
1235 kfree(ref->death);
1236 kfree(ref);
1237}
1238
1239/**
1240 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1241 * @proc: proc containing the ref
1242 * @desc: the handle associated with the ref
1243 * @increment: true=inc reference, false=dec reference
1244 * @strong: true=strong reference, false=weak reference
1245 * @rdata: the id/refcount data for the ref
1246 *
1247 * Given a proc and ref handle, increment or decrement the ref
1248 * according to "increment" arg.
1249 *
1250 * Return: 0 if successful, else errno
1251 */
1252static int binder_update_ref_for_handle(struct binder_proc *proc,
1253 uint32_t desc, bool increment, bool strong,
1254 struct binder_ref_data *rdata)
1255{
1256 int ret = 0;
1257 struct binder_ref *ref;
1258 bool delete_ref = false;
1259
1260 binder_proc_lock(proc);
1261 ref = binder_get_ref_olocked(proc, desc, strong);
1262 if (!ref) {
1263 ret = -EINVAL;
1264 goto err_no_ref;
1265 }
1266 if (increment)
1267 ret = binder_inc_ref_olocked(ref, strong, NULL);
1268 else
1269 delete_ref = binder_dec_ref_olocked(ref, strong);
1270
1271 if (rdata)
1272 *rdata = ref->data;
1273 binder_proc_unlock(proc);
1274
1275 if (delete_ref)
1276 binder_free_ref(ref);
1277 return ret;
1278
1279err_no_ref:
1280 binder_proc_unlock(proc);
1281 return ret;
1282}
1283
1284/**
1285 * binder_dec_ref_for_handle() - dec the ref for given handle
1286 * @proc: proc containing the ref
1287 * @desc: the handle associated with the ref
1288 * @strong: true=strong reference, false=weak reference
1289 * @rdata: the id/refcount data for the ref
1290 *
1291 * Just calls binder_update_ref_for_handle() to decrement the ref.
1292 *
1293 * Return: 0 if successful, else errno
1294 */
1295static int binder_dec_ref_for_handle(struct binder_proc *proc,
1296 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1297{
1298 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1299}
1300
1301
1302/**
1303 * binder_inc_ref_for_node() - increment the ref for given proc/node
1304 * @proc: proc containing the ref
1305 * @node: target node
1306 * @strong: true=strong reference, false=weak reference
1307 * @target_list: worklist to use if node is incremented
1308 * @rdata: the id/refcount data for the ref
1309 *
1310 * Given a proc and node, increment the ref. Create the ref if it
1311 * doesn't already exist
1312 *
1313 * Return: 0 if successful, else errno
1314 */
1315static int binder_inc_ref_for_node(struct binder_proc *proc,
1316 struct binder_node *node,
1317 bool strong,
1318 struct list_head *target_list,
1319 struct binder_ref_data *rdata)
1320{
1321 struct binder_ref *ref;
1322 struct binder_ref *new_ref = NULL;
1323 int ret = 0;
1324
1325 binder_proc_lock(proc);
1326 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1327 if (!ref) {
1328 binder_proc_unlock(proc);
1329 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1330 if (!new_ref)
1331 return -ENOMEM;
1332 binder_proc_lock(proc);
1333 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1334 }
1335 ret = binder_inc_ref_olocked(ref, strong, target_list);
1336 *rdata = ref->data;
1337 binder_proc_unlock(proc);
1338 if (new_ref && ref != new_ref)
1339 /*
1340 * Another thread created the ref first so
1341 * free the one we allocated
1342 */
1343 kfree(new_ref);
1344 return ret;
1345}
1346
1347static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1348 struct binder_transaction *t)
1349{
1350 BUG_ON(!target_thread);
1351 assert_spin_locked(&target_thread->proc->inner_lock);
1352 BUG_ON(target_thread->transaction_stack != t);
1353 BUG_ON(target_thread->transaction_stack->from != target_thread);
1354 target_thread->transaction_stack =
1355 target_thread->transaction_stack->from_parent;
1356 t->from = NULL;
1357}
1358
1359/**
1360 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1361 * @thread: thread to decrement
1362 *
1363 * A thread needs to be kept alive while being used to create or
1364 * handle a transaction. binder_get_txn_from() is used to safely
1365 * extract t->from from a binder_transaction and keep the thread
1366 * indicated by t->from from being freed. When done with that
1367 * binder_thread, this function is called to decrement the
1368 * tmp_ref and free if appropriate (thread has been released
1369 * and no transaction being processed by the driver)
1370 */
1371static void binder_thread_dec_tmpref(struct binder_thread *thread)
1372{
1373 /*
1374 * atomic is used to protect the counter value while
1375 * it cannot reach zero or thread->is_dead is false
1376 */
1377 binder_inner_proc_lock(thread->proc);
1378 atomic_dec(&thread->tmp_ref);
1379 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1380 binder_inner_proc_unlock(thread->proc);
1381 binder_free_thread(thread);
1382 return;
1383 }
1384 binder_inner_proc_unlock(thread->proc);
1385}
1386
1387/**
1388 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1389 * @proc: proc to decrement
1390 *
1391 * A binder_proc needs to be kept alive while being used to create or
1392 * handle a transaction. proc->tmp_ref is incremented when
1393 * creating a new transaction or the binder_proc is currently in-use
1394 * by threads that are being released. When done with the binder_proc,
1395 * this function is called to decrement the counter and free the
1396 * proc if appropriate (proc has been released, all threads have
1397 * been released and not currently in use to process a transaction).
1398 */
1399static void binder_proc_dec_tmpref(struct binder_proc *proc)
1400{
1401 binder_inner_proc_lock(proc);
1402 proc->tmp_ref--;
1403 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1404 !proc->tmp_ref) {
1405 binder_inner_proc_unlock(proc);
1406 binder_free_proc(proc);
1407 return;
1408 }
1409 binder_inner_proc_unlock(proc);
1410}
1411
1412/**
1413 * binder_get_txn_from() - safely extract the "from" thread in transaction
1414 * @t: binder transaction for t->from
1415 *
1416 * Atomically return the "from" thread and increment the tmp_ref
1417 * count for the thread to ensure it stays alive until
1418 * binder_thread_dec_tmpref() is called.
1419 *
1420 * Return: the value of t->from
1421 */
1422static struct binder_thread *binder_get_txn_from(
1423 struct binder_transaction *t)
1424{
1425 struct binder_thread *from;
1426
1427 spin_lock(&t->lock);
1428 from = t->from;
1429 if (from)
1430 atomic_inc(&from->tmp_ref);
1431 spin_unlock(&t->lock);
1432 return from;
1433}
1434
1435/**
1436 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1437 * @t: binder transaction for t->from
1438 *
1439 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1440 * to guarantee that the thread cannot be released while operating on it.
1441 * The caller must call binder_inner_proc_unlock() to release the inner lock
1442 * as well as call binder_thread_dec_tmpref() to release the reference.
1443 *
1444 * Return: the value of t->from
1445 */
1446static struct binder_thread *binder_get_txn_from_and_acq_inner(
1447 struct binder_transaction *t)
1448 __acquires(&t->from->proc->inner_lock)
1449{
1450 struct binder_thread *from;
1451
1452 from = binder_get_txn_from(t);
1453 if (!from) {
1454 __acquire(&from->proc->inner_lock);
1455 return NULL;
1456 }
1457 binder_inner_proc_lock(from->proc);
1458 if (t->from) {
1459 BUG_ON(from != t->from);
1460 return from;
1461 }
1462 binder_inner_proc_unlock(from->proc);
1463 __acquire(&from->proc->inner_lock);
1464 binder_thread_dec_tmpref(from);
1465 return NULL;
1466}
1467
1468/**
1469 * binder_free_txn_fixups() - free unprocessed fd fixups
1470 * @t: binder transaction whose fd fixups are to be freed
1471 *
1472 * If the transaction is being torn down prior to being
1473 * processed by the target process, free all of the
1474 * fd fixups and fput the file structs. It is safe to
1475 * call this function after the fixups have been
1476 * processed -- in that case, the list will be empty.
1477 */
1478static void binder_free_txn_fixups(struct binder_transaction *t)
1479{
1480 struct binder_txn_fd_fixup *fixup, *tmp;
1481
1482 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1483 fput(fixup->file);
1484 list_del(&fixup->fixup_entry);
1485 kfree(fixup);
1486 }
1487}
1488
1489static void binder_txn_latency_free(struct binder_transaction *t)
1490{
1491 int from_proc, from_thread, to_proc, to_thread;
1492
1493 spin_lock(&t->lock);
1494 from_proc = t->from ? t->from->proc->pid : 0;
1495 from_thread = t->from ? t->from->pid : 0;
1496 to_proc = t->to_proc ? t->to_proc->pid : 0;
1497 to_thread = t->to_thread ? t->to_thread->pid : 0;
1498 spin_unlock(&t->lock);
1499
1500 trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1501}
1502
1503static void binder_free_transaction(struct binder_transaction *t)
1504{
1505 struct binder_proc *target_proc = t->to_proc;
1506
1507 if (target_proc) {
1508 binder_inner_proc_lock(target_proc);
1509 target_proc->outstanding_txns--;
1510 if (target_proc->outstanding_txns < 0)
1511 pr_warn("%s: Unexpected outstanding_txns %d\n",
1512 __func__, target_proc->outstanding_txns);
1513 if (!target_proc->outstanding_txns && target_proc->is_frozen)
1514 wake_up_interruptible_all(&target_proc->freeze_wait);
1515 if (t->buffer)
1516 t->buffer->transaction = NULL;
1517 binder_inner_proc_unlock(target_proc);
1518 }
1519 if (trace_binder_txn_latency_free_enabled())
1520 binder_txn_latency_free(t);
1521 /*
1522 * If the transaction has no target_proc, then
1523 * t->buffer->transaction has already been cleared.
1524 */
1525 binder_free_txn_fixups(t);
1526 kfree(t);
1527 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1528}
1529
1530static void binder_send_failed_reply(struct binder_transaction *t,
1531 uint32_t error_code)
1532{
1533 struct binder_thread *target_thread;
1534 struct binder_transaction *next;
1535
1536 BUG_ON(t->flags & TF_ONE_WAY);
1537 while (1) {
1538 target_thread = binder_get_txn_from_and_acq_inner(t);
1539 if (target_thread) {
1540 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1541 "send failed reply for transaction %d to %d:%d\n",
1542 t->debug_id,
1543 target_thread->proc->pid,
1544 target_thread->pid);
1545
1546 binder_pop_transaction_ilocked(target_thread, t);
1547 if (target_thread->reply_error.cmd == BR_OK) {
1548 target_thread->reply_error.cmd = error_code;
1549 binder_enqueue_thread_work_ilocked(
1550 target_thread,
1551 &target_thread->reply_error.work);
1552 wake_up_interruptible(&target_thread->wait);
1553 } else {
1554 /*
1555 * Cannot get here for normal operation, but
1556 * we can if multiple synchronous transactions
1557 * are sent without blocking for responses.
1558 * Just ignore the 2nd error in this case.
1559 */
1560 pr_warn("Unexpected reply error: %u\n",
1561 target_thread->reply_error.cmd);
1562 }
1563 binder_inner_proc_unlock(target_thread->proc);
1564 binder_thread_dec_tmpref(target_thread);
1565 binder_free_transaction(t);
1566 return;
1567 }
1568 __release(&target_thread->proc->inner_lock);
1569 next = t->from_parent;
1570
1571 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1572 "send failed reply for transaction %d, target dead\n",
1573 t->debug_id);
1574
1575 binder_free_transaction(t);
1576 if (next == NULL) {
1577 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1578 "reply failed, no target thread at root\n");
1579 return;
1580 }
1581 t = next;
1582 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1583 "reply failed, no target thread -- retry %d\n",
1584 t->debug_id);
1585 }
1586}
1587
1588/**
1589 * binder_cleanup_transaction() - cleans up undelivered transaction
1590 * @t: transaction that needs to be cleaned up
1591 * @reason: reason the transaction wasn't delivered
1592 * @error_code: error to return to caller (if synchronous call)
1593 */
1594static void binder_cleanup_transaction(struct binder_transaction *t,
1595 const char *reason,
1596 uint32_t error_code)
1597{
1598 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1599 binder_send_failed_reply(t, error_code);
1600 } else {
1601 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1602 "undelivered transaction %d, %s\n",
1603 t->debug_id, reason);
1604 binder_free_transaction(t);
1605 }
1606}
1607
1608/**
1609 * binder_get_object() - gets object and checks for valid metadata
1610 * @proc: binder_proc owning the buffer
1611 * @buffer: binder_buffer that we're parsing.
1612 * @offset: offset in the @buffer at which to validate an object.
1613 * @object: struct binder_object to read into
1614 *
1615 * Return: If there's a valid metadata object at @offset in @buffer, the
1616 * size of that object. Otherwise, it returns zero. The object
1617 * is read into the struct binder_object pointed to by @object.
1618 */
1619static size_t binder_get_object(struct binder_proc *proc,
1620 struct binder_buffer *buffer,
1621 unsigned long offset,
1622 struct binder_object *object)
1623{
1624 size_t read_size;
1625 struct binder_object_header *hdr;
1626 size_t object_size = 0;
1627
1628 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1629 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1630 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1631 offset, read_size))
1632 return 0;
1633
1634 /* Ok, now see if we read a complete object. */
1635 hdr = &object->hdr;
1636 switch (hdr->type) {
1637 case BINDER_TYPE_BINDER:
1638 case BINDER_TYPE_WEAK_BINDER:
1639 case BINDER_TYPE_HANDLE:
1640 case BINDER_TYPE_WEAK_HANDLE:
1641 object_size = sizeof(struct flat_binder_object);
1642 break;
1643 case BINDER_TYPE_FD:
1644 object_size = sizeof(struct binder_fd_object);
1645 break;
1646 case BINDER_TYPE_PTR:
1647 object_size = sizeof(struct binder_buffer_object);
1648 break;
1649 case BINDER_TYPE_FDA:
1650 object_size = sizeof(struct binder_fd_array_object);
1651 break;
1652 default:
1653 return 0;
1654 }
1655 if (offset <= buffer->data_size - object_size &&
1656 buffer->data_size >= object_size)
1657 return object_size;
1658 else
1659 return 0;
1660}
1661
1662/**
1663 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1664 * @proc: binder_proc owning the buffer
1665 * @b: binder_buffer containing the object
1666 * @object: struct binder_object to read into
1667 * @index: index in offset array at which the binder_buffer_object is
1668 * located
1669 * @start_offset: points to the start of the offset array
1670 * @object_offsetp: offset of @object read from @b
1671 * @num_valid: the number of valid offsets in the offset array
1672 *
1673 * Return: If @index is within the valid range of the offset array
1674 * described by @start_offset and @num_valid, and if there's a valid
1675 * binder_buffer_object at the offset found in index @index
1676 * of the offset array, that object is returned. Otherwise,
1677 * %NULL is returned.
1678 * Note that the offset found in index @index itself is not
1679 * verified; this function assumes that @num_valid elements
1680 * from @start_offset were previously verified to have valid offsets.
1681 * If @object_offsetp is non-NULL, then the offset within
1682 * @b is written to it.
1683 */
1684static struct binder_buffer_object *binder_validate_ptr(
1685 struct binder_proc *proc,
1686 struct binder_buffer *b,
1687 struct binder_object *object,
1688 binder_size_t index,
1689 binder_size_t start_offset,
1690 binder_size_t *object_offsetp,
1691 binder_size_t num_valid)
1692{
1693 size_t object_size;
1694 binder_size_t object_offset;
1695 unsigned long buffer_offset;
1696
1697 if (index >= num_valid)
1698 return NULL;
1699
1700 buffer_offset = start_offset + sizeof(binder_size_t) * index;
1701 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1702 b, buffer_offset,
1703 sizeof(object_offset)))
1704 return NULL;
1705 object_size = binder_get_object(proc, b, object_offset, object);
1706 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1707 return NULL;
1708 if (object_offsetp)
1709 *object_offsetp = object_offset;
1710
1711 return &object->bbo;
1712}
1713
1714/**
1715 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1716 * @proc: binder_proc owning the buffer
1717 * @b: transaction buffer
1718 * @objects_start_offset: offset to start of objects buffer
1719 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
1720 * @fixup_offset: start offset in @buffer to fix up
1721 * @last_obj_offset: offset to last binder_buffer_object that we fixed
1722 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
1723 *
1724 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
1725 * allowed.
1726 *
1727 * For safety reasons, we only allow fixups inside a buffer to happen
1728 * at increasing offsets; additionally, we only allow fixup on the last
1729 * buffer object that was verified, or one of its parents.
1730 *
1731 * Example of what is allowed:
1732 *
1733 * A
1734 * B (parent = A, offset = 0)
1735 * C (parent = A, offset = 16)
1736 * D (parent = C, offset = 0)
1737 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1738 *
1739 * Examples of what is not allowed:
1740 *
1741 * Decreasing offsets within the same parent:
1742 * A
1743 * C (parent = A, offset = 16)
1744 * B (parent = A, offset = 0) // decreasing offset within A
1745 *
1746 * Referring to a parent that wasn't the last object or any of its parents:
1747 * A
1748 * B (parent = A, offset = 0)
1749 * C (parent = A, offset = 0)
1750 * C (parent = A, offset = 16)
1751 * D (parent = B, offset = 0) // B is not A or any of A's parents
1752 */
1753static bool binder_validate_fixup(struct binder_proc *proc,
1754 struct binder_buffer *b,
1755 binder_size_t objects_start_offset,
1756 binder_size_t buffer_obj_offset,
1757 binder_size_t fixup_offset,
1758 binder_size_t last_obj_offset,
1759 binder_size_t last_min_offset)
1760{
1761 if (!last_obj_offset) {
1762 /* No buffer object verified yet, so no fixup is allowed */
1763 return false;
1764 }
1765
1766 while (last_obj_offset != buffer_obj_offset) {
1767 unsigned long buffer_offset;
1768 struct binder_object last_object;
1769 struct binder_buffer_object *last_bbo;
1770 size_t object_size = binder_get_object(proc, b, last_obj_offset,
1771 &last_object);
1772 if (object_size != sizeof(*last_bbo))
1773 return false;
1774
1775 last_bbo = &last_object.bbo;
1776 /*
1777 * Safe to retrieve the parent of last_obj, since it
1778 * was already previously verified by the driver.
1779 */
1780 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1781 return false;
1782 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1783 buffer_offset = objects_start_offset +
1784 sizeof(binder_size_t) * last_bbo->parent;
1785 if (binder_alloc_copy_from_buffer(&proc->alloc,
1786 &last_obj_offset,
1787 b, buffer_offset,
1788 sizeof(last_obj_offset)))
1789 return false;
1790 }
1791 return (fixup_offset >= last_min_offset);
1792}
1793
1794/**
1795 * struct binder_task_work_cb - for deferred close
1796 *
1797 * @twork: callback_head for task work
1798 * @file: file to be released via fput()
1799 *
1800 * Structure to pass task work to be handled after
1801 * returning from binder_ioctl() via task_work_add().
1802 */
1803struct binder_task_work_cb {
1804 struct callback_head twork;
1805 struct file *file;
1806};
1807
1808/**
1809 * binder_do_fd_close() - close list of file descriptors
1810 * @twork: callback head for task work
1811 *
1812 * It is not safe to call ksys_close() during the binder_ioctl()
1813 * function if there is a chance that binder's own file descriptor
1814 * might be closed. This is to meet the requirements for using
1815 * fdget() (see comments for __fget_light()). Therefore use
1816 * task_work_add() to schedule the close operation once we have
1817 * returned from binder_ioctl(). This function is a callback
1818 * for that mechanism and performs the final fput() on the
1819 * file whose descriptor was closed.
1820 */
1821static void binder_do_fd_close(struct callback_head *twork)
1822{
1823 struct binder_task_work_cb *twcb = container_of(twork,
1824 struct binder_task_work_cb, twork);
1825
1826 fput(twcb->file);
1827 kfree(twcb);
1828}
1829
1830/**
1831 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1832 * @fd: file-descriptor to close
1833 *
1834 * See comments in binder_do_fd_close(). This function is used to schedule
1835 * a file-descriptor to be closed after returning from binder_ioctl().
1836 */
1837static void binder_deferred_fd_close(int fd)
1838{
1839 struct binder_task_work_cb *twcb;
1840
1841 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1842 if (!twcb)
1843 return;
1844 init_task_work(&twcb->twork, binder_do_fd_close);
1845 close_fd_get_file(fd, &twcb->file);
1846 if (twcb->file) {
1847 filp_close(twcb->file, current->files);
1848 task_work_add(current, &twcb->twork, TWA_RESUME);
1849 } else {
1850 kfree(twcb);
1851 }
1852}
1853
1854static void binder_transaction_buffer_release(struct binder_proc *proc,
1855 struct binder_thread *thread,
1856 struct binder_buffer *buffer,
1857 binder_size_t failed_at,
1858 bool is_failure)
1859{
1860 int debug_id = buffer->debug_id;
1861 binder_size_t off_start_offset, buffer_offset, off_end_offset;
1862
1863 binder_debug(BINDER_DEBUG_TRANSACTION,
1864 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1865 proc->pid, buffer->debug_id,
1866 buffer->data_size, buffer->offsets_size,
1867 (unsigned long long)failed_at);
1868
1869 if (buffer->target_node)
1870 binder_dec_node(buffer->target_node, 1, 0);
1871
1872 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1873 off_end_offset = is_failure && failed_at ? failed_at :
1874 off_start_offset + buffer->offsets_size;
1875 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1876 buffer_offset += sizeof(binder_size_t)) {
1877 struct binder_object_header *hdr;
1878 size_t object_size = 0;
1879 struct binder_object object;
1880 binder_size_t object_offset;
1881
1882 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1883 buffer, buffer_offset,
1884 sizeof(object_offset)))
1885 object_size = binder_get_object(proc, buffer,
1886 object_offset, &object);
1887 if (object_size == 0) {
1888 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1889 debug_id, (u64)object_offset, buffer->data_size);
1890 continue;
1891 }
1892 hdr = &object.hdr;
1893 switch (hdr->type) {
1894 case BINDER_TYPE_BINDER:
1895 case BINDER_TYPE_WEAK_BINDER: {
1896 struct flat_binder_object *fp;
1897 struct binder_node *node;
1898
1899 fp = to_flat_binder_object(hdr);
1900 node = binder_get_node(proc, fp->binder);
1901 if (node == NULL) {
1902 pr_err("transaction release %d bad node %016llx\n",
1903 debug_id, (u64)fp->binder);
1904 break;
1905 }
1906 binder_debug(BINDER_DEBUG_TRANSACTION,
1907 " node %d u%016llx\n",
1908 node->debug_id, (u64)node->ptr);
1909 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1910 0);
1911 binder_put_node(node);
1912 } break;
1913 case BINDER_TYPE_HANDLE:
1914 case BINDER_TYPE_WEAK_HANDLE: {
1915 struct flat_binder_object *fp;
1916 struct binder_ref_data rdata;
1917 int ret;
1918
1919 fp = to_flat_binder_object(hdr);
1920 ret = binder_dec_ref_for_handle(proc, fp->handle,
1921 hdr->type == BINDER_TYPE_HANDLE, &rdata);
1922
1923 if (ret) {
1924 pr_err("transaction release %d bad handle %d, ret = %d\n",
1925 debug_id, fp->handle, ret);
1926 break;
1927 }
1928 binder_debug(BINDER_DEBUG_TRANSACTION,
1929 " ref %d desc %d\n",
1930 rdata.debug_id, rdata.desc);
1931 } break;
1932
1933 case BINDER_TYPE_FD: {
1934 /*
1935 * No need to close the file here since user-space
1936 * closes it for successfully delivered
1937 * transactions. For transactions that weren't
1938 * delivered, the new fd was never allocated so
1939 * there is no need to close and the fput on the
1940 * file is done when the transaction is torn
1941 * down.
1942 */
1943 } break;
1944 case BINDER_TYPE_PTR:
1945 /*
1946 * Nothing to do here, this will get cleaned up when the
1947 * transaction buffer gets freed
1948 */
1949 break;
1950 case BINDER_TYPE_FDA: {
1951 struct binder_fd_array_object *fda;
1952 struct binder_buffer_object *parent;
1953 struct binder_object ptr_object;
1954 binder_size_t fda_offset;
1955 size_t fd_index;
1956 binder_size_t fd_buf_size;
1957 binder_size_t num_valid;
1958
1959 if (is_failure) {
1960 /*
1961 * The fd fixups have not been applied so no
1962 * fds need to be closed.
1963 */
1964 continue;
1965 }
1966
1967 num_valid = (buffer_offset - off_start_offset) /
1968 sizeof(binder_size_t);
1969 fda = to_binder_fd_array_object(hdr);
1970 parent = binder_validate_ptr(proc, buffer, &ptr_object,
1971 fda->parent,
1972 off_start_offset,
1973 NULL,
1974 num_valid);
1975 if (!parent) {
1976 pr_err("transaction release %d bad parent offset\n",
1977 debug_id);
1978 continue;
1979 }
1980 fd_buf_size = sizeof(u32) * fda->num_fds;
1981 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1982 pr_err("transaction release %d invalid number of fds (%lld)\n",
1983 debug_id, (u64)fda->num_fds);
1984 continue;
1985 }
1986 if (fd_buf_size > parent->length ||
1987 fda->parent_offset > parent->length - fd_buf_size) {
1988 /* No space for all file descriptors here. */
1989 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1990 debug_id, (u64)fda->num_fds);
1991 continue;
1992 }
1993 /*
1994 * the source data for binder_buffer_object is visible
1995 * to user-space and the @buffer element is the user
1996 * pointer to the buffer_object containing the fd_array.
1997 * Convert the address to an offset relative to
1998 * the base of the transaction buffer.
1999 */
2000 fda_offset =
2001 (parent->buffer - (uintptr_t)buffer->user_data) +
2002 fda->parent_offset;
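			/*
			 * Worked example with hypothetical numbers: if
			 * buffer->user_data is 0x7f0000000000, parent->buffer
			 * is 0x7f0000000080 and fda->parent_offset is 0x10,
			 * then fda_offset is 0x80 + 0x10 = 0x90, i.e. the fd
			 * array starts 0x90 bytes into the transaction buffer.
			 */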
2003 for (fd_index = 0; fd_index < fda->num_fds;
2004 fd_index++) {
2005 u32 fd;
2006 int err;
2007 binder_size_t offset = fda_offset +
2008 fd_index * sizeof(fd);
2009
2010 err = binder_alloc_copy_from_buffer(
2011 &proc->alloc, &fd, buffer,
2012 offset, sizeof(fd));
2013 WARN_ON(err);
2014 if (!err) {
2015 binder_deferred_fd_close(fd);
2016 /*
2017 * Need to make sure the thread goes
2018 * back to userspace to complete the
2019 * deferred close
2020 */
2021 if (thread)
2022 thread->looper_need_return = true;
2023 }
2024 }
2025 } break;
2026 default:
2027 pr_err("transaction release %d bad object type %x\n",
2028 debug_id, hdr->type);
2029 break;
2030 }
2031 }
2032}
2033
2034static int binder_translate_binder(struct flat_binder_object *fp,
2035 struct binder_transaction *t,
2036 struct binder_thread *thread)
2037{
2038 struct binder_node *node;
2039 struct binder_proc *proc = thread->proc;
2040 struct binder_proc *target_proc = t->to_proc;
2041 struct binder_ref_data rdata;
2042 int ret = 0;
2043
2044 node = binder_get_node(proc, fp->binder);
2045 if (!node) {
2046 node = binder_new_node(proc, fp);
2047 if (!node)
2048 return -ENOMEM;
2049 }
2050 if (fp->cookie != node->cookie) {
2051 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2052 proc->pid, thread->pid, (u64)fp->binder,
2053 node->debug_id, (u64)fp->cookie,
2054 (u64)node->cookie);
2055 ret = -EINVAL;
2056 goto done;
2057 }
2058 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2059 ret = -EPERM;
2060 goto done;
2061 }
2062
2063 ret = binder_inc_ref_for_node(target_proc, node,
2064 fp->hdr.type == BINDER_TYPE_BINDER,
2065 &thread->todo, &rdata);
2066 if (ret)
2067 goto done;
2068
2069 if (fp->hdr.type == BINDER_TYPE_BINDER)
2070 fp->hdr.type = BINDER_TYPE_HANDLE;
2071 else
2072 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2073 fp->binder = 0;
2074 fp->handle = rdata.desc;
2075 fp->cookie = 0;
2076
2077 trace_binder_transaction_node_to_ref(t, node, &rdata);
2078 binder_debug(BINDER_DEBUG_TRANSACTION,
2079 " node %d u%016llx -> ref %d desc %d\n",
2080 node->debug_id, (u64)node->ptr,
2081 rdata.debug_id, rdata.desc);
2082done:
2083 binder_put_node(node);
2084 return ret;
2085}
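/*
 * Illustrative sketch (not part of the driver): what the translation above
 * means for the two processes involved. The sender embeds a local object;
 * the receiver sees a handle. my_object and my_cookie are placeholders.
 *
 *	// sender places this in the transaction payload:
 *	struct flat_binder_object obj = {
 *		.hdr.type = BINDER_TYPE_BINDER,
 *		.binder = (binder_uintptr_t)my_object,	// local pointer
 *		.cookie = (binder_uintptr_t)my_cookie,
 *	};
 *
 *	// after binder_translate_binder() the receiver reads:
 *	//	obj.hdr.type == BINDER_TYPE_HANDLE
 *	//	obj.handle   == descriptor in the receiver's ref table
 *	//	obj.binder   == 0, obj.cookie == 0
 */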
2086
2087static int binder_translate_handle(struct flat_binder_object *fp,
2088 struct binder_transaction *t,
2089 struct binder_thread *thread)
2090{
2091 struct binder_proc *proc = thread->proc;
2092 struct binder_proc *target_proc = t->to_proc;
2093 struct binder_node *node;
2094 struct binder_ref_data src_rdata;
2095 int ret = 0;
2096
2097 node = binder_get_node_from_ref(proc, fp->handle,
2098 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2099 if (!node) {
2100 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2101 proc->pid, thread->pid, fp->handle);
2102 return -EINVAL;
2103 }
2104 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2105 ret = -EPERM;
2106 goto done;
2107 }
2108
2109 binder_node_lock(node);
2110 if (node->proc == target_proc) {
2111 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2112 fp->hdr.type = BINDER_TYPE_BINDER;
2113 else
2114 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2115 fp->binder = node->ptr;
2116 fp->cookie = node->cookie;
2117 if (node->proc)
2118 binder_inner_proc_lock(node->proc);
2119 else
2120 __acquire(&node->proc->inner_lock);
2121 binder_inc_node_nilocked(node,
2122 fp->hdr.type == BINDER_TYPE_BINDER,
2123 0, NULL);
2124 if (node->proc)
2125 binder_inner_proc_unlock(node->proc);
2126 else
2127 __release(&node->proc->inner_lock);
2128 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2129 binder_debug(BINDER_DEBUG_TRANSACTION,
2130 " ref %d desc %d -> node %d u%016llx\n",
2131 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2132 (u64)node->ptr);
2133 binder_node_unlock(node);
2134 } else {
2135 struct binder_ref_data dest_rdata;
2136
2137 binder_node_unlock(node);
2138 ret = binder_inc_ref_for_node(target_proc, node,
2139 fp->hdr.type == BINDER_TYPE_HANDLE,
2140 NULL, &dest_rdata);
2141 if (ret)
2142 goto done;
2143
2144 fp->binder = 0;
2145 fp->handle = dest_rdata.desc;
2146 fp->cookie = 0;
2147 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2148 &dest_rdata);
2149 binder_debug(BINDER_DEBUG_TRANSACTION,
2150 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2151 src_rdata.debug_id, src_rdata.desc,
2152 dest_rdata.debug_id, dest_rdata.desc,
2153 node->debug_id);
2154 }
2155done:
2156 binder_put_node(node);
2157 return ret;
2158}
2159
2160static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2161 struct binder_transaction *t,
2162 struct binder_thread *thread,
2163 struct binder_transaction *in_reply_to)
2164{
2165 struct binder_proc *proc = thread->proc;
2166 struct binder_proc *target_proc = t->to_proc;
2167 struct binder_txn_fd_fixup *fixup;
2168 struct file *file;
2169 int ret = 0;
2170 bool target_allows_fd;
2171
2172 if (in_reply_to)
2173 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2174 else
2175 target_allows_fd = t->buffer->target_node->accept_fds;
2176 if (!target_allows_fd) {
2177 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2178 proc->pid, thread->pid,
2179 in_reply_to ? "reply" : "transaction",
2180 fd);
2181 ret = -EPERM;
2182 goto err_fd_not_accepted;
2183 }
2184
2185 file = fget(fd);
2186 if (!file) {
2187 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2188 proc->pid, thread->pid, fd);
2189 ret = -EBADF;
2190 goto err_fget;
2191 }
2192 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2193 if (ret < 0) {
2194 ret = -EPERM;
2195 goto err_security;
2196 }
2197
2198 /*
2199 * Add fixup record for this transaction. The allocation
2200 * of the fd in the target needs to be done from a
2201 * target thread.
2202 */
2203 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2204 if (!fixup) {
2205 ret = -ENOMEM;
2206 goto err_alloc;
2207 }
2208 fixup->file = file;
2209 fixup->offset = fd_offset;
2210 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2211 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2212
2213 return ret;
2214
2215err_alloc:
2216err_security:
2217 fput(file);
2218err_fget:
2219err_fd_not_accepted:
2220 return ret;
2221}
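/*
 * Illustrative sketch (not part of the driver): a sender passing one file
 * descriptor in the payload. pipefd[0] is a placeholder for any open fd in
 * the sender; the corresponding fd in the target is only installed later,
 * from a target thread, via the fixup recorded above.
 *
 *	struct binder_fd_object fdo = {
 *		.hdr.type = BINDER_TYPE_FD,
 *		.fd = pipefd[0],
 *	};
 */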
2222
2223static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2224 struct binder_buffer_object *parent,
2225 struct binder_transaction *t,
2226 struct binder_thread *thread,
2227 struct binder_transaction *in_reply_to)
2228{
2229 binder_size_t fdi, fd_buf_size;
2230 binder_size_t fda_offset;
2231 struct binder_proc *proc = thread->proc;
2232 struct binder_proc *target_proc = t->to_proc;
2233
2234 fd_buf_size = sizeof(u32) * fda->num_fds;
2235 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2236 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2237 proc->pid, thread->pid, (u64)fda->num_fds);
2238 return -EINVAL;
2239 }
2240 if (fd_buf_size > parent->length ||
2241 fda->parent_offset > parent->length - fd_buf_size) {
2242 /* No space for all file descriptors here. */
2243 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2244 proc->pid, thread->pid, (u64)fda->num_fds);
2245 return -EINVAL;
2246 }
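	/*
	 * Example of the bound just checked, with hypothetical numbers:
	 * for parent->length == 64 and fda->num_fds == 4, fd_buf_size is
	 * 16 bytes, so fda->parent_offset may be at most 64 - 16 = 48 for
	 * the whole fd array to fit inside the parent buffer.
	 */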
2247 /*
2248 * the source data for binder_buffer_object is visible
2249 * to user-space and the @buffer element is the user
2250 * pointer to the buffer_object containing the fd_array.
2251 * Convert the address to an offset relative to
2252 * the base of the transaction buffer.
2253 */
2254 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2255 fda->parent_offset;
2256 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2257 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2258 proc->pid, thread->pid);
2259 return -EINVAL;
2260 }
2261 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2262 u32 fd;
2263 int ret;
2264 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2265
2266 ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2267 &fd, t->buffer,
2268 offset, sizeof(fd));
2269 if (!ret)
2270 ret = binder_translate_fd(fd, offset, t, thread,
2271 in_reply_to);
2272 if (ret < 0)
2273 return ret;
2274 }
2275 return 0;
2276}
2277
2278static int binder_fixup_parent(struct binder_transaction *t,
2279 struct binder_thread *thread,
2280 struct binder_buffer_object *bp,
2281 binder_size_t off_start_offset,
2282 binder_size_t num_valid,
2283 binder_size_t last_fixup_obj_off,
2284 binder_size_t last_fixup_min_off)
2285{
2286 struct binder_buffer_object *parent;
2287 struct binder_buffer *b = t->buffer;
2288 struct binder_proc *proc = thread->proc;
2289 struct binder_proc *target_proc = t->to_proc;
2290 struct binder_object object;
2291 binder_size_t buffer_offset;
2292 binder_size_t parent_offset;
2293
2294 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2295 return 0;
2296
2297 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2298 off_start_offset, &parent_offset,
2299 num_valid);
2300 if (!parent) {
2301 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2302 proc->pid, thread->pid);
2303 return -EINVAL;
2304 }
2305
2306 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2307 parent_offset, bp->parent_offset,
2308 last_fixup_obj_off,
2309 last_fixup_min_off)) {
2310 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2311 proc->pid, thread->pid);
2312 return -EINVAL;
2313 }
2314
2315 if (parent->length < sizeof(binder_uintptr_t) ||
2316 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2317 /* No space for a pointer here! */
2318 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2319 proc->pid, thread->pid);
2320 return -EINVAL;
2321 }
2322 buffer_offset = bp->parent_offset +
2323 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2324 if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2325 &bp->buffer, sizeof(bp->buffer))) {
2326 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2327 proc->pid, thread->pid);
2328 return -EINVAL;
2329 }
2330
2331 return 0;
2332}
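/*
 * Worked example with hypothetical numbers: suppose the parent object was
 * copied to t->buffer->user_data + 0x40 and bp->parent_offset is 0x08.
 * Then buffer_offset is 0x48, and the child's (already fixed up) bp->buffer
 * value is written there, so the receiver finds a valid pointer to the
 * child buffer at offset 8 inside the parent buffer.
 */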
2333
2334/**
2335 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2336 * @t: transaction to send
2337 * @proc: process to send the transaction to
2338 * @thread: thread in @proc to send the transaction to (may be NULL)
2339 *
2340 * This function queues a transaction to the specified process. It will try
2341 * to find a thread in the target process to handle the transaction and
2342 * wake it up. If no thread is found, the work is queued to the proc
2343 * waitqueue.
2344 *
2345 * If the @thread parameter is not NULL, the transaction is always queued
2346 * to the waitlist of that specific thread.
2347 *
2348 * Return: 0 if the transaction was successfully queued
2349 * BR_DEAD_REPLY if the target process or thread is dead
2350 * BR_FROZEN_REPLY if the target process or thread is frozen
2351 */
2352static int binder_proc_transaction(struct binder_transaction *t,
2353 struct binder_proc *proc,
2354 struct binder_thread *thread)
2355{
2356 struct binder_node *node = t->buffer->target_node;
2357 bool oneway = !!(t->flags & TF_ONE_WAY);
2358 bool pending_async = false;
2359
2360 BUG_ON(!node);
2361 binder_node_lock(node);
2362 if (oneway) {
2363 BUG_ON(thread);
2364 if (node->has_async_transaction)
2365 pending_async = true;
2366 else
2367 node->has_async_transaction = true;
2368 }
2369
2370 binder_inner_proc_lock(proc);
2371 if (proc->is_frozen) {
2372 proc->sync_recv |= !oneway;
2373 proc->async_recv |= oneway;
2374 }
2375
2376 if ((proc->is_frozen && !oneway) || proc->is_dead ||
2377 (thread && thread->is_dead)) {
2378 binder_inner_proc_unlock(proc);
2379 binder_node_unlock(node);
2380 return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2381 }
2382
2383 if (!thread && !pending_async)
2384 thread = binder_select_thread_ilocked(proc);
2385
2386 if (thread)
2387 binder_enqueue_thread_work_ilocked(thread, &t->work);
2388 else if (!pending_async)
2389 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2390 else
2391 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2392
2393 if (!pending_async)
2394 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2395
2396 proc->outstanding_txns++;
2397 binder_inner_proc_unlock(proc);
2398 binder_node_unlock(node);
2399
2400 return 0;
2401}
2402
2403/**
2404 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2405 * @node: struct binder_node for which to get refs
2406 * @procp: returns @node->proc if valid
2407 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2408 *
2409 * User-space normally keeps the node alive when creating a transaction
2410 * since it has a reference to the target. The local strong ref keeps it
2411 * alive if the sending process dies before the target process processes
2412 * the transaction. If the source process is malicious or has a reference
2413 * counting bug, relying on the local strong ref can fail.
2414 *
2415 * Since user-space can cause the local strong ref to go away, we also take
2416 * a tmpref on the node to ensure it survives while we are constructing
2417 * the transaction. We also need a tmpref on the proc while we are
2418 * constructing the transaction, so we take that here as well.
2419 *
2420 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2421 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2422 * target proc has died, @error is set to BR_DEAD_REPLY.
2423 */
2424static struct binder_node *binder_get_node_refs_for_txn(
2425 struct binder_node *node,
2426 struct binder_proc **procp,
2427 uint32_t *error)
2428{
2429 struct binder_node *target_node = NULL;
2430
2431 binder_node_inner_lock(node);
2432 if (node->proc) {
2433 target_node = node;
2434 binder_inc_node_nilocked(node, 1, 0, NULL);
2435 binder_inc_node_tmpref_ilocked(node);
2436 node->proc->tmp_ref++;
2437 *procp = node->proc;
2438 } else
2439 *error = BR_DEAD_REPLY;
2440 binder_node_inner_unlock(node);
2441
2442 return target_node;
2443}
2444
2445static void binder_transaction(struct binder_proc *proc,
2446 struct binder_thread *thread,
2447 struct binder_transaction_data *tr, int reply,
2448 binder_size_t extra_buffers_size)
2449{
2450 int ret;
2451 struct binder_transaction *t;
2452 struct binder_work *w;
2453 struct binder_work *tcomplete;
2454 binder_size_t buffer_offset = 0;
2455 binder_size_t off_start_offset, off_end_offset;
2456 binder_size_t off_min;
2457 binder_size_t sg_buf_offset, sg_buf_end_offset;
2458 struct binder_proc *target_proc = NULL;
2459 struct binder_thread *target_thread = NULL;
2460 struct binder_node *target_node = NULL;
2461 struct binder_transaction *in_reply_to = NULL;
2462 struct binder_transaction_log_entry *e;
2463 uint32_t return_error = 0;
2464 uint32_t return_error_param = 0;
2465 uint32_t return_error_line = 0;
2466 binder_size_t last_fixup_obj_off = 0;
2467 binder_size_t last_fixup_min_off = 0;
2468 struct binder_context *context = proc->context;
2469 int t_debug_id = atomic_inc_return(&binder_last_id);
2470 char *secctx = NULL;
2471 u32 secctx_sz = 0;
2472
2473 e = binder_transaction_log_add(&binder_transaction_log);
2474 e->debug_id = t_debug_id;
2475 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2476 e->from_proc = proc->pid;
2477 e->from_thread = thread->pid;
2478 e->target_handle = tr->target.handle;
2479 e->data_size = tr->data_size;
2480 e->offsets_size = tr->offsets_size;
2481 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2482
2483 if (reply) {
2484 binder_inner_proc_lock(proc);
2485 in_reply_to = thread->transaction_stack;
2486 if (in_reply_to == NULL) {
2487 binder_inner_proc_unlock(proc);
2488 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2489 proc->pid, thread->pid);
2490 return_error = BR_FAILED_REPLY;
2491 return_error_param = -EPROTO;
2492 return_error_line = __LINE__;
2493 goto err_empty_call_stack;
2494 }
2495 if (in_reply_to->to_thread != thread) {
2496 spin_lock(&in_reply_to->lock);
2497 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2498 proc->pid, thread->pid, in_reply_to->debug_id,
2499 in_reply_to->to_proc ?
2500 in_reply_to->to_proc->pid : 0,
2501 in_reply_to->to_thread ?
2502 in_reply_to->to_thread->pid : 0);
2503 spin_unlock(&in_reply_to->lock);
2504 binder_inner_proc_unlock(proc);
2505 return_error = BR_FAILED_REPLY;
2506 return_error_param = -EPROTO;
2507 return_error_line = __LINE__;
2508 in_reply_to = NULL;
2509 goto err_bad_call_stack;
2510 }
2511 thread->transaction_stack = in_reply_to->to_parent;
2512 binder_inner_proc_unlock(proc);
2513 binder_set_nice(in_reply_to->saved_priority);
2514 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2515 if (target_thread == NULL) {
2516 /* annotation for sparse */
2517 __release(&target_thread->proc->inner_lock);
2518 return_error = BR_DEAD_REPLY;
2519 return_error_line = __LINE__;
2520 goto err_dead_binder;
2521 }
2522 if (target_thread->transaction_stack != in_reply_to) {
2523 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2524 proc->pid, thread->pid,
2525 target_thread->transaction_stack ?
2526 target_thread->transaction_stack->debug_id : 0,
2527 in_reply_to->debug_id);
2528 binder_inner_proc_unlock(target_thread->proc);
2529 return_error = BR_FAILED_REPLY;
2530 return_error_param = -EPROTO;
2531 return_error_line = __LINE__;
2532 in_reply_to = NULL;
2533 target_thread = NULL;
2534 goto err_dead_binder;
2535 }
2536 target_proc = target_thread->proc;
2537 target_proc->tmp_ref++;
2538 binder_inner_proc_unlock(target_thread->proc);
2539 } else {
2540 if (tr->target.handle) {
2541 struct binder_ref *ref;
2542
2543 /*
2544 * There must already be a strong ref
2545 * on this node. If so, do a strong
2546 * increment on the node to ensure it
2547 * stays alive until the transaction is
2548 * done.
2549 */
2550 binder_proc_lock(proc);
2551 ref = binder_get_ref_olocked(proc, tr->target.handle,
2552 true);
2553 if (ref) {
2554 target_node = binder_get_node_refs_for_txn(
2555 ref->node, &target_proc,
2556 &return_error);
2557 } else {
2558 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
2559 proc->pid, thread->pid, tr->target.handle);
2560 return_error = BR_FAILED_REPLY;
2561 }
2562 binder_proc_unlock(proc);
2563 } else {
2564 mutex_lock(&context->context_mgr_node_lock);
2565 target_node = context->binder_context_mgr_node;
2566 if (target_node)
2567 target_node = binder_get_node_refs_for_txn(
2568 target_node, &target_proc,
2569 &return_error);
2570 else
2571 return_error = BR_DEAD_REPLY;
2572 mutex_unlock(&context->context_mgr_node_lock);
2573 if (target_node && target_proc->pid == proc->pid) {
2574 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2575 proc->pid, thread->pid);
2576 return_error = BR_FAILED_REPLY;
2577 return_error_param = -EINVAL;
2578 return_error_line = __LINE__;
2579 goto err_invalid_target_handle;
2580 }
2581 }
2582 if (!target_node) {
2583 /*
2584 * return_error is set above
2585 */
2586 return_error_param = -EINVAL;
2587 return_error_line = __LINE__;
2588 goto err_dead_binder;
2589 }
2590 e->to_node = target_node->debug_id;
2591 if (WARN_ON(proc == target_proc)) {
2592 return_error = BR_FAILED_REPLY;
2593 return_error_param = -EINVAL;
2594 return_error_line = __LINE__;
2595 goto err_invalid_target_handle;
2596 }
2597 if (security_binder_transaction(proc->cred,
2598 target_proc->cred) < 0) {
2599 return_error = BR_FAILED_REPLY;
2600 return_error_param = -EPERM;
2601 return_error_line = __LINE__;
2602 goto err_invalid_target_handle;
2603 }
2604 binder_inner_proc_lock(proc);
2605
2606 w = list_first_entry_or_null(&thread->todo,
2607 struct binder_work, entry);
2608 if (!(tr->flags & TF_ONE_WAY) && w &&
2609 w->type == BINDER_WORK_TRANSACTION) {
2610 /*
2611 * Do not allow new outgoing transaction from a
2612 * thread that has a transaction at the head of
2613 * its todo list. Only need to check the head
2614 * because binder_select_thread_ilocked picks a
2615 * thread from proc->waiting_threads to enqueue
2616 * the transaction, and nothing is queued to the
2617 * todo list while the thread is on waiting_threads.
2618 */
2619 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2620 proc->pid, thread->pid);
2621 binder_inner_proc_unlock(proc);
2622 return_error = BR_FAILED_REPLY;
2623 return_error_param = -EPROTO;
2624 return_error_line = __LINE__;
2625 goto err_bad_todo_list;
2626 }
2627
2628 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2629 struct binder_transaction *tmp;
2630
2631 tmp = thread->transaction_stack;
2632 if (tmp->to_thread != thread) {
2633 spin_lock(&tmp->lock);
2634 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2635 proc->pid, thread->pid, tmp->debug_id,
2636 tmp->to_proc ? tmp->to_proc->pid : 0,
2637 tmp->to_thread ?
2638 tmp->to_thread->pid : 0);
2639 spin_unlock(&tmp->lock);
2640 binder_inner_proc_unlock(proc);
2641 return_error = BR_FAILED_REPLY;
2642 return_error_param = -EPROTO;
2643 return_error_line = __LINE__;
2644 goto err_bad_call_stack;
2645 }
2646 while (tmp) {
2647 struct binder_thread *from;
2648
2649 spin_lock(&tmp->lock);
2650 from = tmp->from;
2651 if (from && from->proc == target_proc) {
2652 atomic_inc(&from->tmp_ref);
2653 target_thread = from;
2654 spin_unlock(&tmp->lock);
2655 break;
2656 }
2657 spin_unlock(&tmp->lock);
2658 tmp = tmp->from_parent;
2659 }
2660 }
2661 binder_inner_proc_unlock(proc);
2662 }
2663 if (target_thread)
2664 e->to_thread = target_thread->pid;
2665 e->to_proc = target_proc->pid;
2666
2667 /* TODO: reuse incoming transaction for reply */
2668 t = kzalloc(sizeof(*t), GFP_KERNEL);
2669 if (t == NULL) {
2670 return_error = BR_FAILED_REPLY;
2671 return_error_param = -ENOMEM;
2672 return_error_line = __LINE__;
2673 goto err_alloc_t_failed;
2674 }
2675 INIT_LIST_HEAD(&t->fd_fixups);
2676 binder_stats_created(BINDER_STAT_TRANSACTION);
2677 spin_lock_init(&t->lock);
2678
2679 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2680 if (tcomplete == NULL) {
2681 return_error = BR_FAILED_REPLY;
2682 return_error_param = -ENOMEM;
2683 return_error_line = __LINE__;
2684 goto err_alloc_tcomplete_failed;
2685 }
2686 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2687
2688 t->debug_id = t_debug_id;
2689
2690 if (reply)
2691 binder_debug(BINDER_DEBUG_TRANSACTION,
2692 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2693 proc->pid, thread->pid, t->debug_id,
2694 target_proc->pid, target_thread->pid,
2695 (u64)tr->data.ptr.buffer,
2696 (u64)tr->data.ptr.offsets,
2697 (u64)tr->data_size, (u64)tr->offsets_size,
2698 (u64)extra_buffers_size);
2699 else
2700 binder_debug(BINDER_DEBUG_TRANSACTION,
2701 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2702 proc->pid, thread->pid, t->debug_id,
2703 target_proc->pid, target_node->debug_id,
2704 (u64)tr->data.ptr.buffer,
2705 (u64)tr->data.ptr.offsets,
2706 (u64)tr->data_size, (u64)tr->offsets_size,
2707 (u64)extra_buffers_size);
2708
2709 if (!reply && !(tr->flags & TF_ONE_WAY))
2710 t->from = thread;
2711 else
2712 t->from = NULL;
2713 t->sender_euid = task_euid(proc->tsk);
2714 t->to_proc = target_proc;
2715 t->to_thread = target_thread;
2716 t->code = tr->code;
2717 t->flags = tr->flags;
2718 t->priority = task_nice(current);
2719
2720 if (target_node && target_node->txn_security_ctx) {
2721 u32 secid;
2722 size_t added_size;
2723
2724 security_cred_getsecid(proc->cred, &secid);
2725 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
2726 if (ret) {
2727 return_error = BR_FAILED_REPLY;
2728 return_error_param = ret;
2729 return_error_line = __LINE__;
2730 goto err_get_secctx_failed;
2731 }
2732 added_size = ALIGN(secctx_sz, sizeof(u64));
2733 extra_buffers_size += added_size;
2734 if (extra_buffers_size < added_size) {
2735 /* integer overflow of extra_buffers_size */
2736 return_error = BR_FAILED_REPLY;
2737 return_error_param = -EINVAL;
2738 return_error_line = __LINE__;
2739 goto err_bad_extra_size;
2740 }
2741 }
2742
2743 trace_binder_transaction(reply, t, target_node);
2744
2745 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2746 tr->offsets_size, extra_buffers_size,
2747 !reply && (t->flags & TF_ONE_WAY), current->tgid);
2748 if (IS_ERR(t->buffer)) {
2749 /*
2750 * -ESRCH indicates VMA cleared. The target is dying.
2751 */
2752 return_error_param = PTR_ERR(t->buffer);
2753 return_error = return_error_param == -ESRCH ?
2754 BR_DEAD_REPLY : BR_FAILED_REPLY;
2755 return_error_line = __LINE__;
2756 t->buffer = NULL;
2757 goto err_binder_alloc_buf_failed;
2758 }
2759 if (secctx) {
2760 int err;
2761 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
2762 ALIGN(tr->offsets_size, sizeof(void *)) +
2763 ALIGN(extra_buffers_size, sizeof(void *)) -
2764 ALIGN(secctx_sz, sizeof(u64));
2765
2766 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
2767 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
2768 t->buffer, buf_offset,
2769 secctx, secctx_sz);
2770 if (err) {
2771 t->security_ctx = 0;
2772 WARN_ON(1);
2773 }
2774 security_release_secctx(secctx, secctx_sz);
2775 secctx = NULL;
2776 }
2777 t->buffer->debug_id = t->debug_id;
2778 t->buffer->transaction = t;
2779 t->buffer->target_node = target_node;
2780 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
2781 trace_binder_transaction_alloc_buf(t->buffer);
2782
2783 if (binder_alloc_copy_user_to_buffer(
2784 &target_proc->alloc,
2785 t->buffer, 0,
2786 (const void __user *)
2787 (uintptr_t)tr->data.ptr.buffer,
2788 tr->data_size)) {
2789 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2790 proc->pid, thread->pid);
2791 return_error = BR_FAILED_REPLY;
2792 return_error_param = -EFAULT;
2793 return_error_line = __LINE__;
2794 goto err_copy_data_failed;
2795 }
2796 if (binder_alloc_copy_user_to_buffer(
2797 &target_proc->alloc,
2798 t->buffer,
2799 ALIGN(tr->data_size, sizeof(void *)),
2800 (const void __user *)
2801 (uintptr_t)tr->data.ptr.offsets,
2802 tr->offsets_size)) {
2803 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2804 proc->pid, thread->pid);
2805 return_error = BR_FAILED_REPLY;
2806 return_error_param = -EFAULT;
2807 return_error_line = __LINE__;
2808 goto err_copy_data_failed;
2809 }
2810 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2811 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2812 proc->pid, thread->pid, (u64)tr->offsets_size);
2813 return_error = BR_FAILED_REPLY;
2814 return_error_param = -EINVAL;
2815 return_error_line = __LINE__;
2816 goto err_bad_offset;
2817 }
2818 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2819 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2820 proc->pid, thread->pid,
2821 (u64)extra_buffers_size);
2822 return_error = BR_FAILED_REPLY;
2823 return_error_param = -EINVAL;
2824 return_error_line = __LINE__;
2825 goto err_bad_offset;
2826 }
2827 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
2828 buffer_offset = off_start_offset;
2829 off_end_offset = off_start_offset + tr->offsets_size;
2830 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
2831 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
2832 ALIGN(secctx_sz, sizeof(u64));
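	/*
	 * Resulting layout of t->buffer, shown with hypothetical sizes
	 * (data_size = 0x100, offsets_size = 0x20, sg buffers totalling
	 * 0x80 bytes, secctx_sz = 0x20):
	 *
	 *	0x000	transaction data (copied from tr->data.ptr.buffer)
	 *	0x100	offsets array (copied from tr->data.ptr.offsets)
	 *	0x120	scatter-gather buffers (filled in the loop below)
	 *	0x1a0	security context (already copied above)
	 */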
2833 off_min = 0;
2834 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2835 buffer_offset += sizeof(binder_size_t)) {
2836 struct binder_object_header *hdr;
2837 size_t object_size;
2838 struct binder_object object;
2839 binder_size_t object_offset;
2840
2841 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
2842 &object_offset,
2843 t->buffer,
2844 buffer_offset,
2845 sizeof(object_offset))) {
2846 return_error = BR_FAILED_REPLY;
2847 return_error_param = -EINVAL;
2848 return_error_line = __LINE__;
2849 goto err_bad_offset;
2850 }
2851 object_size = binder_get_object(target_proc, t->buffer,
2852 object_offset, &object);
2853 if (object_size == 0 || object_offset < off_min) {
2854 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2855 proc->pid, thread->pid,
2856 (u64)object_offset,
2857 (u64)off_min,
2858 (u64)t->buffer->data_size);
2859 return_error = BR_FAILED_REPLY;
2860 return_error_param = -EINVAL;
2861 return_error_line = __LINE__;
2862 goto err_bad_offset;
2863 }
2864
2865 hdr = &object.hdr;
2866 off_min = object_offset + object_size;
2867 switch (hdr->type) {
2868 case BINDER_TYPE_BINDER:
2869 case BINDER_TYPE_WEAK_BINDER: {
2870 struct flat_binder_object *fp;
2871
2872 fp = to_flat_binder_object(hdr);
2873 ret = binder_translate_binder(fp, t, thread);
2874
2875 if (ret < 0 ||
2876 binder_alloc_copy_to_buffer(&target_proc->alloc,
2877 t->buffer,
2878 object_offset,
2879 fp, sizeof(*fp))) {
2880 return_error = BR_FAILED_REPLY;
2881 return_error_param = ret;
2882 return_error_line = __LINE__;
2883 goto err_translate_failed;
2884 }
2885 } break;
2886 case BINDER_TYPE_HANDLE:
2887 case BINDER_TYPE_WEAK_HANDLE: {
2888 struct flat_binder_object *fp;
2889
2890 fp = to_flat_binder_object(hdr);
2891 ret = binder_translate_handle(fp, t, thread);
2892 if (ret < 0 ||
2893 binder_alloc_copy_to_buffer(&target_proc->alloc,
2894 t->buffer,
2895 object_offset,
2896 fp, sizeof(*fp))) {
2897 return_error = BR_FAILED_REPLY;
2898 return_error_param = ret;
2899 return_error_line = __LINE__;
2900 goto err_translate_failed;
2901 }
2902 } break;
2903
2904 case BINDER_TYPE_FD: {
2905 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2906 binder_size_t fd_offset = object_offset +
2907 (uintptr_t)&fp->fd - (uintptr_t)fp;
2908 int ret = binder_translate_fd(fp->fd, fd_offset, t,
2909 thread, in_reply_to);
2910
2911 fp->pad_binder = 0;
2912 if (ret < 0 ||
2913 binder_alloc_copy_to_buffer(&target_proc->alloc,
2914 t->buffer,
2915 object_offset,
2916 fp, sizeof(*fp))) {
2917 return_error = BR_FAILED_REPLY;
2918 return_error_param = ret;
2919 return_error_line = __LINE__;
2920 goto err_translate_failed;
2921 }
2922 } break;
2923 case BINDER_TYPE_FDA: {
2924 struct binder_object ptr_object;
2925 binder_size_t parent_offset;
2926 struct binder_fd_array_object *fda =
2927 to_binder_fd_array_object(hdr);
2928 size_t num_valid = (buffer_offset - off_start_offset) /
2929 sizeof(binder_size_t);
2930 struct binder_buffer_object *parent =
2931 binder_validate_ptr(target_proc, t->buffer,
2932 &ptr_object, fda->parent,
2933 off_start_offset,
2934 &parent_offset,
2935 num_valid);
2936 if (!parent) {
2937 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2938 proc->pid, thread->pid);
2939 return_error = BR_FAILED_REPLY;
2940 return_error_param = -EINVAL;
2941 return_error_line = __LINE__;
2942 goto err_bad_parent;
2943 }
2944 if (!binder_validate_fixup(target_proc, t->buffer,
2945 off_start_offset,
2946 parent_offset,
2947 fda->parent_offset,
2948 last_fixup_obj_off,
2949 last_fixup_min_off)) {
2950 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2951 proc->pid, thread->pid);
2952 return_error = BR_FAILED_REPLY;
2953 return_error_param = -EINVAL;
2954 return_error_line = __LINE__;
2955 goto err_bad_parent;
2956 }
2957 ret = binder_translate_fd_array(fda, parent, t, thread,
2958 in_reply_to);
2959 if (ret < 0) {
2960 return_error = BR_FAILED_REPLY;
2961 return_error_param = ret;
2962 return_error_line = __LINE__;
2963 goto err_translate_failed;
2964 }
2965 last_fixup_obj_off = parent_offset;
2966 last_fixup_min_off =
2967 fda->parent_offset + sizeof(u32) * fda->num_fds;
2968 } break;
2969 case BINDER_TYPE_PTR: {
2970 struct binder_buffer_object *bp =
2971 to_binder_buffer_object(hdr);
2972 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
2973 size_t num_valid;
2974
2975 if (bp->length > buf_left) {
2976 binder_user_error("%d:%d got transaction with too large buffer\n",
2977 proc->pid, thread->pid);
2978 return_error = BR_FAILED_REPLY;
2979 return_error_param = -EINVAL;
2980 return_error_line = __LINE__;
2981 goto err_bad_offset;
2982 }
2983 if (binder_alloc_copy_user_to_buffer(
2984 &target_proc->alloc,
2985 t->buffer,
2986 sg_buf_offset,
2987 (const void __user *)
2988 (uintptr_t)bp->buffer,
2989 bp->length)) {
2990 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2991 proc->pid, thread->pid);
2992 return_error_param = -EFAULT;
2993 return_error = BR_FAILED_REPLY;
2994 return_error_line = __LINE__;
2995 goto err_copy_data_failed;
2996 }
2997 /* Fixup buffer pointer to target proc address space */
2998 bp->buffer = (uintptr_t)
2999 t->buffer->user_data + sg_buf_offset;
3000 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3001
3002 num_valid = (buffer_offset - off_start_offset) /
3003 sizeof(binder_size_t);
3004 ret = binder_fixup_parent(t, thread, bp,
3005 off_start_offset,
3006 num_valid,
3007 last_fixup_obj_off,
3008 last_fixup_min_off);
3009 if (ret < 0 ||
3010 binder_alloc_copy_to_buffer(&target_proc->alloc,
3011 t->buffer,
3012 object_offset,
3013 bp, sizeof(*bp))) {
3014 return_error = BR_FAILED_REPLY;
3015 return_error_param = ret;
3016 return_error_line = __LINE__;
3017 goto err_translate_failed;
3018 }
3019 last_fixup_obj_off = object_offset;
3020 last_fixup_min_off = 0;
3021 } break;
3022 default:
3023 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3024 proc->pid, thread->pid, hdr->type);
3025 return_error = BR_FAILED_REPLY;
3026 return_error_param = -EINVAL;
3027 return_error_line = __LINE__;
3028 goto err_bad_object_type;
3029 }
3030 }
3031 if (t->buffer->oneway_spam_suspect)
3032 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3033 else
3034 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3035 t->work.type = BINDER_WORK_TRANSACTION;
3036
3037 if (reply) {
3038 binder_enqueue_thread_work(thread, tcomplete);
3039 binder_inner_proc_lock(target_proc);
3040 if (target_thread->is_dead) {
3041 return_error = BR_DEAD_REPLY;
3042 binder_inner_proc_unlock(target_proc);
3043 goto err_dead_proc_or_thread;
3044 }
3045 BUG_ON(t->buffer->async_transaction != 0);
3046 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3047 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3048 target_proc->outstanding_txns++;
3049 binder_inner_proc_unlock(target_proc);
3050 wake_up_interruptible_sync(&target_thread->wait);
3051 binder_free_transaction(in_reply_to);
3052 } else if (!(t->flags & TF_ONE_WAY)) {
3053 BUG_ON(t->buffer->async_transaction != 0);
3054 binder_inner_proc_lock(proc);
3055 /*
3056 * Defer the TRANSACTION_COMPLETE, so we don't return to
3057 * userspace immediately; this allows the target process to
3058 * immediately start processing this transaction, reducing
3059 * latency. We will then return the TRANSACTION_COMPLETE when
3060 * the target replies (or there is an error).
3061 */
3062 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3063 t->need_reply = 1;
3064 t->from_parent = thread->transaction_stack;
3065 thread->transaction_stack = t;
3066 binder_inner_proc_unlock(proc);
3067 return_error = binder_proc_transaction(t,
3068 target_proc, target_thread);
3069 if (return_error) {
3070 binder_inner_proc_lock(proc);
3071 binder_pop_transaction_ilocked(thread, t);
3072 binder_inner_proc_unlock(proc);
3073 goto err_dead_proc_or_thread;
3074 }
3075 } else {
3076 BUG_ON(target_node == NULL);
3077 BUG_ON(t->buffer->async_transaction != 1);
3078 binder_enqueue_thread_work(thread, tcomplete);
3079 return_error = binder_proc_transaction(t, target_proc, NULL);
3080 if (return_error)
3081 goto err_dead_proc_or_thread;
3082 }
3083 if (target_thread)
3084 binder_thread_dec_tmpref(target_thread);
3085 binder_proc_dec_tmpref(target_proc);
3086 if (target_node)
3087 binder_dec_node_tmpref(target_node);
3088 /*
3089 * write barrier to synchronize with initialization
3090 * of log entry
3091 */
3092 smp_wmb();
3093 WRITE_ONCE(e->debug_id_done, t_debug_id);
3094 return;
3095
3096err_dead_proc_or_thread:
3097 return_error_line = __LINE__;
3098 binder_dequeue_work(proc, tcomplete);
3099err_translate_failed:
3100err_bad_object_type:
3101err_bad_offset:
3102err_bad_parent:
3103err_copy_data_failed:
3104 binder_free_txn_fixups(t);
3105 trace_binder_transaction_failed_buffer_release(t->buffer);
3106 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3107 buffer_offset, true);
3108 if (target_node)
3109 binder_dec_node_tmpref(target_node);
3110 target_node = NULL;
3111 t->buffer->transaction = NULL;
3112 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3113err_binder_alloc_buf_failed:
3114err_bad_extra_size:
3115 if (secctx)
3116 security_release_secctx(secctx, secctx_sz);
3117err_get_secctx_failed:
3118 kfree(tcomplete);
3119 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3120err_alloc_tcomplete_failed:
3121 if (trace_binder_txn_latency_free_enabled())
3122 binder_txn_latency_free(t);
3123 kfree(t);
3124 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3125err_alloc_t_failed:
3126err_bad_todo_list:
3127err_bad_call_stack:
3128err_empty_call_stack:
3129err_dead_binder:
3130err_invalid_target_handle:
3131 if (target_thread)
3132 binder_thread_dec_tmpref(target_thread);
3133 if (target_proc)
3134 binder_proc_dec_tmpref(target_proc);
3135 if (target_node) {
3136 binder_dec_node(target_node, 1, 0);
3137 binder_dec_node_tmpref(target_node);
3138 }
3139
3140 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3141 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3142 proc->pid, thread->pid, return_error, return_error_param,
3143 (u64)tr->data_size, (u64)tr->offsets_size,
3144 return_error_line);
3145
3146 {
3147 struct binder_transaction_log_entry *fe;
3148
3149 e->return_error = return_error;
3150 e->return_error_param = return_error_param;
3151 e->return_error_line = return_error_line;
3152 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3153 *fe = *e;
3154 /*
3155 * write barrier to synchronize with initialization
3156 * of log entry
3157 */
3158 smp_wmb();
3159 WRITE_ONCE(e->debug_id_done, t_debug_id);
3160 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3161 }
3162
3163 BUG_ON(thread->return_error.cmd != BR_OK);
3164 if (in_reply_to) {
3165 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3166 binder_enqueue_thread_work(thread, &thread->return_error.work);
3167 binder_send_failed_reply(in_reply_to, return_error);
3168 } else {
3169 thread->return_error.cmd = return_error;
3170 binder_enqueue_thread_work(thread, &thread->return_error.work);
3171 }
3172}
3173
3174/**
3175 * binder_free_buf() - free the specified buffer
3176 * @proc: binder proc that owns buffer
 * @thread: thread performing the buffer release
3177 * @buffer: buffer to be freed
3178 * @is_failure: true if the transaction failed to send
3179 *
3180 * If the buffer is for an async transaction, enqueue the next async
3181 * transaction from the node.
3182 *
3183 * Cleanup buffer and free it.
3184 */
3185static void
3186binder_free_buf(struct binder_proc *proc,
3187 struct binder_thread *thread,
3188 struct binder_buffer *buffer, bool is_failure)
3189{
3190 binder_inner_proc_lock(proc);
3191 if (buffer->transaction) {
3192 buffer->transaction->buffer = NULL;
3193 buffer->transaction = NULL;
3194 }
3195 binder_inner_proc_unlock(proc);
3196 if (buffer->async_transaction && buffer->target_node) {
3197 struct binder_node *buf_node;
3198 struct binder_work *w;
3199
3200 buf_node = buffer->target_node;
3201 binder_node_inner_lock(buf_node);
3202 BUG_ON(!buf_node->has_async_transaction);
3203 BUG_ON(buf_node->proc != proc);
3204 w = binder_dequeue_work_head_ilocked(
3205 &buf_node->async_todo);
3206 if (!w) {
3207 buf_node->has_async_transaction = false;
3208 } else {
3209 binder_enqueue_work_ilocked(
3210 w, &proc->todo);
3211 binder_wakeup_proc_ilocked(proc);
3212 }
3213 binder_node_inner_unlock(buf_node);
3214 }
3215 trace_binder_transaction_buffer_release(buffer);
3216 binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3217 binder_alloc_free_buf(&proc->alloc, buffer);
3218}
3219
3220static int binder_thread_write(struct binder_proc *proc,
3221 struct binder_thread *thread,
3222 binder_uintptr_t binder_buffer, size_t size,
3223 binder_size_t *consumed)
3224{
3225 uint32_t cmd;
3226 struct binder_context *context = proc->context;
3227 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3228 void __user *ptr = buffer + *consumed;
3229 void __user *end = buffer + size;
3230
3231 while (ptr < end && thread->return_error.cmd == BR_OK) {
3232 int ret;
3233
3234 if (get_user(cmd, (uint32_t __user *)ptr))
3235 return -EFAULT;
3236 ptr += sizeof(uint32_t);
3237 trace_binder_command(cmd);
3238 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3239 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3240 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3241 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3242 }
3243 switch (cmd) {
3244 case BC_INCREFS:
3245 case BC_ACQUIRE:
3246 case BC_RELEASE:
3247 case BC_DECREFS: {
3248 uint32_t target;
3249 const char *debug_string;
3250 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3251 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3252 struct binder_ref_data rdata;
3253
3254 if (get_user(target, (uint32_t __user *)ptr))
3255 return -EFAULT;
3256
3257 ptr += sizeof(uint32_t);
3258 ret = -1;
3259 if (increment && !target) {
3260 struct binder_node *ctx_mgr_node;
3261
3262 mutex_lock(&context->context_mgr_node_lock);
3263 ctx_mgr_node = context->binder_context_mgr_node;
3264 if (ctx_mgr_node) {
3265 if (ctx_mgr_node->proc == proc) {
3266 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3267 proc->pid, thread->pid);
3268 mutex_unlock(&context->context_mgr_node_lock);
3269 return -EINVAL;
3270 }
3271 ret = binder_inc_ref_for_node(
3272 proc, ctx_mgr_node,
3273 strong, NULL, &rdata);
3274 }
3275 mutex_unlock(&context->context_mgr_node_lock);
3276 }
3277 if (ret)
3278 ret = binder_update_ref_for_handle(
3279 proc, target, increment, strong,
3280 &rdata);
3281 if (!ret && rdata.desc != target) {
3282 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3283 proc->pid, thread->pid,
3284 target, rdata.desc);
3285 }
3286 switch (cmd) {
3287 case BC_INCREFS:
3288 debug_string = "IncRefs";
3289 break;
3290 case BC_ACQUIRE:
3291 debug_string = "Acquire";
3292 break;
3293 case BC_RELEASE:
3294 debug_string = "Release";
3295 break;
3296 case BC_DECREFS:
3297 default:
3298 debug_string = "DecRefs";
3299 break;
3300 }
3301 if (ret) {
3302 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3303 proc->pid, thread->pid, debug_string,
3304 strong, target, ret);
3305 break;
3306 }
3307 binder_debug(BINDER_DEBUG_USER_REFS,
3308 "%d:%d %s ref %d desc %d s %d w %d\n",
3309 proc->pid, thread->pid, debug_string,
3310 rdata.debug_id, rdata.desc, rdata.strong,
3311 rdata.weak);
3312 break;
3313 }
3314 case BC_INCREFS_DONE:
3315 case BC_ACQUIRE_DONE: {
3316 binder_uintptr_t node_ptr;
3317 binder_uintptr_t cookie;
3318 struct binder_node *node;
3319 bool free_node;
3320
3321 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3322 return -EFAULT;
3323 ptr += sizeof(binder_uintptr_t);
3324 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3325 return -EFAULT;
3326 ptr += sizeof(binder_uintptr_t);
3327 node = binder_get_node(proc, node_ptr);
3328 if (node == NULL) {
3329 binder_user_error("%d:%d %s u%016llx no match\n",
3330 proc->pid, thread->pid,
3331 cmd == BC_INCREFS_DONE ?
3332 "BC_INCREFS_DONE" :
3333 "BC_ACQUIRE_DONE",
3334 (u64)node_ptr);
3335 break;
3336 }
3337 if (cookie != node->cookie) {
3338 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3339 proc->pid, thread->pid,
3340 cmd == BC_INCREFS_DONE ?
3341 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3342 (u64)node_ptr, node->debug_id,
3343 (u64)cookie, (u64)node->cookie);
3344 binder_put_node(node);
3345 break;
3346 }
3347 binder_node_inner_lock(node);
3348 if (cmd == BC_ACQUIRE_DONE) {
3349 if (node->pending_strong_ref == 0) {
3350 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3351 proc->pid, thread->pid,
3352 node->debug_id);
3353 binder_node_inner_unlock(node);
3354 binder_put_node(node);
3355 break;
3356 }
3357 node->pending_strong_ref = 0;
3358 } else {
3359 if (node->pending_weak_ref == 0) {
3360 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3361 proc->pid, thread->pid,
3362 node->debug_id);
3363 binder_node_inner_unlock(node);
3364 binder_put_node(node);
3365 break;
3366 }
3367 node->pending_weak_ref = 0;
3368 }
3369 free_node = binder_dec_node_nilocked(node,
3370 cmd == BC_ACQUIRE_DONE, 0);
3371 WARN_ON(free_node);
3372 binder_debug(BINDER_DEBUG_USER_REFS,
3373 "%d:%d %s node %d ls %d lw %d tr %d\n",
3374 proc->pid, thread->pid,
3375 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3376 node->debug_id, node->local_strong_refs,
3377 node->local_weak_refs, node->tmp_refs);
3378 binder_node_inner_unlock(node);
3379 binder_put_node(node);
3380 break;
3381 }
3382 case BC_ATTEMPT_ACQUIRE:
3383 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3384 return -EINVAL;
3385 case BC_ACQUIRE_RESULT:
3386 pr_err("BC_ACQUIRE_RESULT not supported\n");
3387 return -EINVAL;
3388
3389 case BC_FREE_BUFFER: {
3390 binder_uintptr_t data_ptr;
3391 struct binder_buffer *buffer;
3392
3393 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3394 return -EFAULT;
3395 ptr += sizeof(binder_uintptr_t);
3396
3397 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3398 data_ptr);
3399 if (IS_ERR_OR_NULL(buffer)) {
3400 if (PTR_ERR(buffer) == -EPERM) {
3401 binder_user_error(
3402 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3403 proc->pid, thread->pid,
3404 (u64)data_ptr);
3405 } else {
3406 binder_user_error(
3407 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3408 proc->pid, thread->pid,
3409 (u64)data_ptr);
3410 }
3411 break;
3412 }
3413 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3414 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3415 proc->pid, thread->pid, (u64)data_ptr,
3416 buffer->debug_id,
3417 buffer->transaction ? "active" : "finished");
3418 binder_free_buf(proc, thread, buffer, false);
3419 break;
3420 }
3421
3422 case BC_TRANSACTION_SG:
3423 case BC_REPLY_SG: {
3424 struct binder_transaction_data_sg tr;
3425
3426 if (copy_from_user(&tr, ptr, sizeof(tr)))
3427 return -EFAULT;
3428 ptr += sizeof(tr);
3429 binder_transaction(proc, thread, &tr.transaction_data,
3430 cmd == BC_REPLY_SG, tr.buffers_size);
3431 break;
3432 }
3433 case BC_TRANSACTION:
3434 case BC_REPLY: {
3435 struct binder_transaction_data tr;
3436
3437 if (copy_from_user(&tr, ptr, sizeof(tr)))
3438 return -EFAULT;
3439 ptr += sizeof(tr);
3440 binder_transaction(proc, thread, &tr,
3441 cmd == BC_REPLY, 0);
3442 break;
3443 }
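		/*
		 * Illustrative userspace sketch (not part of the driver) of
		 * how the two commands above reach this code. Error handling
		 * is omitted, binder_fd is assumed to be an open /dev/binder
		 * descriptor, and the method code is hypothetical:
		 *
		 *	struct {
		 *		uint32_t cmd;
		 *		struct binder_transaction_data tr;
		 *	} __attribute__((packed)) writebuf = {
		 *		.cmd = BC_TRANSACTION,
		 *		.tr = {
		 *			.target.handle = 0,	// context manager
		 *			.code = 1,		// hypothetical
		 *			.flags = TF_ACCEPT_FDS,
		 *		},
		 *	};
		 *	struct binder_write_read bwr = {
		 *		.write_size = sizeof(writebuf),
		 *		.write_buffer = (binder_uintptr_t)&writebuf,
		 *	};
		 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
		 *
		 * binder_thread_write() then copies the transaction data and
		 * calls binder_transaction(), exactly as in the case above.
		 */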
3444
3445 case BC_REGISTER_LOOPER:
3446 binder_debug(BINDER_DEBUG_THREADS,
3447 "%d:%d BC_REGISTER_LOOPER\n",
3448 proc->pid, thread->pid);
3449 binder_inner_proc_lock(proc);
3450 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3451 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3452 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3453 proc->pid, thread->pid);
3454 } else if (proc->requested_threads == 0) {
3455 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3456 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3457 proc->pid, thread->pid);
3458 } else {
3459 proc->requested_threads--;
3460 proc->requested_threads_started++;
3461 }
3462 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3463 binder_inner_proc_unlock(proc);
3464 break;
3465 case BC_ENTER_LOOPER:
3466 binder_debug(BINDER_DEBUG_THREADS,
3467 "%d:%d BC_ENTER_LOOPER\n",
3468 proc->pid, thread->pid);
3469 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3470 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3471 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3472 proc->pid, thread->pid);
3473 }
3474 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3475 break;
3476 case BC_EXIT_LOOPER:
3477 binder_debug(BINDER_DEBUG_THREADS,
3478 "%d:%d BC_EXIT_LOOPER\n",
3479 proc->pid, thread->pid);
3480 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3481 break;
3482
3483 case BC_REQUEST_DEATH_NOTIFICATION:
3484 case BC_CLEAR_DEATH_NOTIFICATION: {
3485 uint32_t target;
3486 binder_uintptr_t cookie;
3487 struct binder_ref *ref;
3488 struct binder_ref_death *death = NULL;
3489
3490 if (get_user(target, (uint32_t __user *)ptr))
3491 return -EFAULT;
3492 ptr += sizeof(uint32_t);
3493 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3494 return -EFAULT;
3495 ptr += sizeof(binder_uintptr_t);
3496 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3497 /*
3498 * Allocate memory for death notification
3499 * before taking lock
3500 */
3501 death = kzalloc(sizeof(*death), GFP_KERNEL);
3502 if (death == NULL) {
3503 WARN_ON(thread->return_error.cmd !=
3504 BR_OK);
3505 thread->return_error.cmd = BR_ERROR;
3506 binder_enqueue_thread_work(
3507 thread,
3508 &thread->return_error.work);
3509 binder_debug(
3510 BINDER_DEBUG_FAILED_TRANSACTION,
3511 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3512 proc->pid, thread->pid);
3513 break;
3514 }
3515 }
3516 binder_proc_lock(proc);
3517 ref = binder_get_ref_olocked(proc, target, false);
3518 if (ref == NULL) {
3519 binder_user_error("%d:%d %s invalid ref %d\n",
3520 proc->pid, thread->pid,
3521 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3522 "BC_REQUEST_DEATH_NOTIFICATION" :
3523 "BC_CLEAR_DEATH_NOTIFICATION",
3524 target);
3525 binder_proc_unlock(proc);
3526 kfree(death);
3527 break;
3528 }
3529
3530 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3531 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3532 proc->pid, thread->pid,
3533 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3534 "BC_REQUEST_DEATH_NOTIFICATION" :
3535 "BC_CLEAR_DEATH_NOTIFICATION",
3536 (u64)cookie, ref->data.debug_id,
3537 ref->data.desc, ref->data.strong,
3538 ref->data.weak, ref->node->debug_id);
3539
3540 binder_node_lock(ref->node);
3541 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3542 if (ref->death) {
3543 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3544 proc->pid, thread->pid);
3545 binder_node_unlock(ref->node);
3546 binder_proc_unlock(proc);
3547 kfree(death);
3548 break;
3549 }
3550 binder_stats_created(BINDER_STAT_DEATH);
3551 INIT_LIST_HEAD(&death->work.entry);
3552 death->cookie = cookie;
3553 ref->death = death;
3554 if (ref->node->proc == NULL) {
3555 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3556
3557 binder_inner_proc_lock(proc);
3558 binder_enqueue_work_ilocked(
3559 &ref->death->work, &proc->todo);
3560 binder_wakeup_proc_ilocked(proc);
3561 binder_inner_proc_unlock(proc);
3562 }
3563 } else {
3564 if (ref->death == NULL) {
3565 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3566 proc->pid, thread->pid);
3567 binder_node_unlock(ref->node);
3568 binder_proc_unlock(proc);
3569 break;
3570 }
3571 death = ref->death;
3572 if (death->cookie != cookie) {
3573 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3574 proc->pid, thread->pid,
3575 (u64)death->cookie,
3576 (u64)cookie);
3577 binder_node_unlock(ref->node);
3578 binder_proc_unlock(proc);
3579 break;
3580 }
3581 ref->death = NULL;
3582 binder_inner_proc_lock(proc);
3583 if (list_empty(&death->work.entry)) {
3584 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3585 if (thread->looper &
3586 (BINDER_LOOPER_STATE_REGISTERED |
3587 BINDER_LOOPER_STATE_ENTERED))
3588 binder_enqueue_thread_work_ilocked(
3589 thread,
3590 &death->work);
3591 else {
3592 binder_enqueue_work_ilocked(
3593 &death->work,
3594 &proc->todo);
3595 binder_wakeup_proc_ilocked(
3596 proc);
3597 }
3598 } else {
3599 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3600 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3601 }
3602 binder_inner_proc_unlock(proc);
3603 }
3604 binder_node_unlock(ref->node);
3605 binder_proc_unlock(proc);
3606 } break;
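		/*
		 * BC_DEAD_BINDER_DONE completes the death-notification
		 * handshake: after BR_DEAD_BINDER is delivered for a request
		 * armed above, the work item is parked on
		 * proc->delivered_death until userspace acknowledges it here
		 * with the matching cookie.
		 */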
3607 case BC_DEAD_BINDER_DONE: {
3608 struct binder_work *w;
3609 binder_uintptr_t cookie;
3610 struct binder_ref_death *death = NULL;
3611
3612 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3613 return -EFAULT;
3614
3615 ptr += sizeof(cookie);
3616 binder_inner_proc_lock(proc);
3617 list_for_each_entry(w, &proc->delivered_death,
3618 entry) {
3619 struct binder_ref_death *tmp_death =
3620 container_of(w,
3621 struct binder_ref_death,
3622 work);
3623
3624 if (tmp_death->cookie == cookie) {
3625 death = tmp_death;
3626 break;
3627 }
3628 }
3629 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3630 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3631 proc->pid, thread->pid, (u64)cookie,
3632 death);
3633 if (death == NULL) {
3634 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3635 proc->pid, thread->pid, (u64)cookie);
3636 binder_inner_proc_unlock(proc);
3637 break;
3638 }
3639 binder_dequeue_work_ilocked(&death->work);
3640 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3641 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3642 if (thread->looper &
3643 (BINDER_LOOPER_STATE_REGISTERED |
3644 BINDER_LOOPER_STATE_ENTERED))
3645 binder_enqueue_thread_work_ilocked(
3646 thread, &death->work);
3647 else {
3648 binder_enqueue_work_ilocked(
3649 &death->work,
3650 &proc->todo);
3651 binder_wakeup_proc_ilocked(proc);
3652 }
3653 }
3654 binder_inner_proc_unlock(proc);
3655 } break;
3656
3657 default:
3658 pr_err("%d:%d unknown command %d\n",
3659 proc->pid, thread->pid, cmd);
3660 return -EINVAL;
3661 }
3662 *consumed = ptr - buffer;
3663 }
3664 return 0;
3665}
3666
3667static void binder_stat_br(struct binder_proc *proc,
3668 struct binder_thread *thread, uint32_t cmd)
3669{
3670 trace_binder_return(cmd);
3671 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3672 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3673 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3674 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3675 }
3676}
3677
3678static int binder_put_node_cmd(struct binder_proc *proc,
3679 struct binder_thread *thread,
3680 void __user **ptrp,
3681 binder_uintptr_t node_ptr,
3682 binder_uintptr_t node_cookie,
3683 int node_debug_id,
3684 uint32_t cmd, const char *cmd_name)
3685{
3686 void __user *ptr = *ptrp;
3687
3688 if (put_user(cmd, (uint32_t __user *)ptr))
3689 return -EFAULT;
3690 ptr += sizeof(uint32_t);
3691
3692 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3693 return -EFAULT;
3694 ptr += sizeof(binder_uintptr_t);
3695
3696 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3697 return -EFAULT;
3698 ptr += sizeof(binder_uintptr_t);
3699
3700 binder_stat_br(proc, thread, cmd);
3701 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3702 proc->pid, thread->pid, cmd_name, node_debug_id,
3703 (u64)node_ptr, (u64)node_cookie);
3704
3705 *ptrp = ptr;
3706 return 0;
3707}
3708
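/*
 * Sleep until this thread has work to do.  Threads that may handle
 * process-wide work park themselves on proc->waiting_threads while
 * waiting, which is how binder_wakeup_proc_ilocked() finds a thread to
 * wake when new work is queued on proc->todo.
 */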
3709static int binder_wait_for_work(struct binder_thread *thread,
3710 bool do_proc_work)
3711{
3712 DEFINE_WAIT(wait);
3713 struct binder_proc *proc = thread->proc;
3714 int ret = 0;
3715
3716 freezer_do_not_count();
3717 binder_inner_proc_lock(proc);
3718 for (;;) {
3719 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3720 if (binder_has_work_ilocked(thread, do_proc_work))
3721 break;
3722 if (do_proc_work)
3723 list_add(&thread->waiting_thread_node,
3724 &proc->waiting_threads);
3725 binder_inner_proc_unlock(proc);
3726 schedule();
3727 binder_inner_proc_lock(proc);
3728 list_del_init(&thread->waiting_thread_node);
3729 if (signal_pending(current)) {
3730 ret = -EINTR;
3731 break;
3732 }
3733 }
3734 finish_wait(&thread->wait, &wait);
3735 binder_inner_proc_unlock(proc);
3736 freezer_count();
3737
3738 return ret;
3739}
3740
3741/**
3742 * binder_apply_fd_fixups() - finish fd translation
3743 * @proc: binder_proc associated with @t->buffer
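3744 *       (illustrative note: the fd fixups were queued by the sender;
3745 *        they are resolved here because only the target process can
3746 *        allocate fd numbers in its own file table)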
3744 * @t: binder transaction with list of fd fixups
3745 *
3746 * Now that we are in the context of the transaction target
3747 * process, we can allocate and install fds. Process the
3748 * list of fds to translate and fixup the buffer with the
3749 * new fds.
3750 *
3751 * If we fail to allocate an fd, then free the resources by
3752 * fput'ing files that have not been processed and ksys_close'ing
3753 * any fds that have already been allocated.
3754 */
3755static int binder_apply_fd_fixups(struct binder_proc *proc,
3756 struct binder_transaction *t)
3757{
3758 struct binder_txn_fd_fixup *fixup, *tmp;
3759 int ret = 0;
3760
3761 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3762 int fd = get_unused_fd_flags(O_CLOEXEC);
3763
3764 if (fd < 0) {
3765 binder_debug(BINDER_DEBUG_TRANSACTION,
3766 "failed fd fixup txn %d fd %d\n",
3767 t->debug_id, fd);
3768 ret = -ENOMEM;
3769 break;
3770 }
3771 binder_debug(BINDER_DEBUG_TRANSACTION,
3772 "fd fixup txn %d fd %d\n",
3773 t->debug_id, fd);
3774 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3775 fd_install(fd, fixup->file);
3776 fixup->file = NULL;
3777 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
3778 fixup->offset, &fd,
3779 sizeof(u32))) {
3780 ret = -EINVAL;
3781 break;
3782 }
3783 }
3784 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3785 if (fixup->file) {
3786 fput(fixup->file);
3787 } else if (ret) {
3788 u32 fd;
3789 int err;
3790
3791 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
3792 t->buffer,
3793 fixup->offset,
3794 sizeof(fd));
3795 WARN_ON(err);
3796 if (!err)
3797 binder_deferred_fd_close(fd);
3798 }
3799 list_del(&fixup->fixup_entry);
3800 kfree(fixup);
3801 }
3802
3803 return ret;
3804}
3805
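/*
 * Fill the userspace read buffer with a stream of BR_* return commands.
 * When nothing has been consumed yet a BR_NOOP is written first, so the
 * buffer always begins with a well-formed command.  A minimal sketch of
 * the userspace side, for illustration only (buf, consumed and
 * payload_size() are placeholders for the binder_write_read read buffer,
 * its read_consumed count and per-command payload length):
 *
 *	void *p = buf, *end = buf + consumed;
 *	while (p < end) {
 *		uint32_t cmd = *(uint32_t *)p;
 *		p += sizeof(uint32_t);
 *		p += payload_size(cmd);	// e.g. BR_TRANSACTION carries a
 *					// struct binder_transaction_data
 *	}
 */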
3806static int binder_thread_read(struct binder_proc *proc,
3807 struct binder_thread *thread,
3808 binder_uintptr_t binder_buffer, size_t size,
3809 binder_size_t *consumed, int non_block)
3810{
3811 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3812 void __user *ptr = buffer + *consumed;
3813 void __user *end = buffer + size;
3814
3815 int ret = 0;
3816 int wait_for_proc_work;
3817
3818 if (*consumed == 0) {
3819 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3820 return -EFAULT;
3821 ptr += sizeof(uint32_t);
3822 }
3823
3824retry:
3825 binder_inner_proc_lock(proc);
3826 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3827 binder_inner_proc_unlock(proc);
3828
3829 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3830
3831 trace_binder_wait_for_work(wait_for_proc_work,
3832 !!thread->transaction_stack,
3833 !binder_worklist_empty(proc, &thread->todo));
3834 if (wait_for_proc_work) {
3835 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3836 BINDER_LOOPER_STATE_ENTERED))) {
3837 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3838 proc->pid, thread->pid, thread->looper);
3839 wait_event_interruptible(binder_user_error_wait,
3840 binder_stop_on_user_error < 2);
3841 }
3842 binder_set_nice(proc->default_priority);
3843 }
3844
3845 if (non_block) {
3846 if (!binder_has_work(thread, wait_for_proc_work))
3847 ret = -EAGAIN;
3848 } else {
3849 ret = binder_wait_for_work(thread, wait_for_proc_work);
3850 }
3851
3852 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3853
3854 if (ret)
3855 return ret;
3856
3857 while (1) {
3858 uint32_t cmd;
3859 struct binder_transaction_data_secctx tr;
3860 struct binder_transaction_data *trd = &tr.transaction_data;
3861 struct binder_work *w = NULL;
3862 struct list_head *list = NULL;
3863 struct binder_transaction *t = NULL;
3864 struct binder_thread *t_from;
3865 size_t trsize = sizeof(*trd);
3866
3867 binder_inner_proc_lock(proc);
3868 if (!binder_worklist_empty_ilocked(&thread->todo))
3869 list = &thread->todo;
3870 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3871 wait_for_proc_work)
3872 list = &proc->todo;
3873 else {
3874 binder_inner_proc_unlock(proc);
3875
3876 /* no data added */
3877 if (ptr - buffer == 4 && !thread->looper_need_return)
3878 goto retry;
3879 break;
3880 }
3881
3882 if (end - ptr < sizeof(tr) + 4) {
3883 binder_inner_proc_unlock(proc);
3884 break;
3885 }
3886 w = binder_dequeue_work_head_ilocked(list);
3887 if (binder_worklist_empty_ilocked(&thread->todo))
3888 thread->process_todo = false;
3889
3890 switch (w->type) {
3891 case BINDER_WORK_TRANSACTION: {
3892 binder_inner_proc_unlock(proc);
3893 t = container_of(w, struct binder_transaction, work);
3894 } break;
3895 case BINDER_WORK_RETURN_ERROR: {
3896 struct binder_error *e = container_of(
3897 w, struct binder_error, work);
3898
3899 WARN_ON(e->cmd == BR_OK);
3900 binder_inner_proc_unlock(proc);
3901 if (put_user(e->cmd, (uint32_t __user *)ptr))
3902 return -EFAULT;
3903 cmd = e->cmd;
3904 e->cmd = BR_OK;
3905 ptr += sizeof(uint32_t);
3906
3907 binder_stat_br(proc, thread, cmd);
3908 } break;
3909 case BINDER_WORK_TRANSACTION_COMPLETE:
3910 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
3911 if (proc->oneway_spam_detection_enabled &&
3912 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
3913 cmd = BR_ONEWAY_SPAM_SUSPECT;
3914 else
3915 cmd = BR_TRANSACTION_COMPLETE;
3916 binder_inner_proc_unlock(proc);
3917 kfree(w);
3918 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3919 if (put_user(cmd, (uint32_t __user *)ptr))
3920 return -EFAULT;
3921 ptr += sizeof(uint32_t);
3922
3923 binder_stat_br(proc, thread, cmd);
3924 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3925 "%d:%d BR_TRANSACTION_COMPLETE\n",
3926 proc->pid, thread->pid);
3927 } break;
3928 case BINDER_WORK_NODE: {
3929 struct binder_node *node = container_of(w, struct binder_node, work);
3930 int strong, weak;
3931 binder_uintptr_t node_ptr = node->ptr;
3932 binder_uintptr_t node_cookie = node->cookie;
3933 int node_debug_id = node->debug_id;
3934 int has_weak_ref;
3935 int has_strong_ref;
3936 void __user *orig_ptr = ptr;
3937
3938 BUG_ON(proc != node->proc);
3939 strong = node->internal_strong_refs ||
3940 node->local_strong_refs;
3941 weak = !hlist_empty(&node->refs) ||
3942 node->local_weak_refs ||
3943 node->tmp_refs || strong;
3944 has_strong_ref = node->has_strong_ref;
3945 has_weak_ref = node->has_weak_ref;
3946
3947 if (weak && !has_weak_ref) {
3948 node->has_weak_ref = 1;
3949 node->pending_weak_ref = 1;
3950 node->local_weak_refs++;
3951 }
3952 if (strong && !has_strong_ref) {
3953 node->has_strong_ref = 1;
3954 node->pending_strong_ref = 1;
3955 node->local_strong_refs++;
3956 }
3957 if (!strong && has_strong_ref)
3958 node->has_strong_ref = 0;
3959 if (!weak && has_weak_ref)
3960 node->has_weak_ref = 0;
3961 if (!weak && !strong) {
3962 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3963 "%d:%d node %d u%016llx c%016llx deleted\n",
3964 proc->pid, thread->pid,
3965 node_debug_id,
3966 (u64)node_ptr,
3967 (u64)node_cookie);
3968 rb_erase(&node->rb_node, &proc->nodes);
3969 binder_inner_proc_unlock(proc);
3970 binder_node_lock(node);
3971 /*
3972 * Acquire the node lock before freeing the
3973 * node to serialize with other threads that
3974 * may have been holding the node lock while
3975 * decrementing this node (avoids race where
3976 * this thread frees while the other thread
3977 * is unlocking the node after the final
3978 * decrement)
3979 */
3980 binder_node_unlock(node);
3981 binder_free_node(node);
3982 } else
3983 binder_inner_proc_unlock(proc);
3984
3985 if (weak && !has_weak_ref)
3986 ret = binder_put_node_cmd(
3987 proc, thread, &ptr, node_ptr,
3988 node_cookie, node_debug_id,
3989 BR_INCREFS, "BR_INCREFS");
3990 if (!ret && strong && !has_strong_ref)
3991 ret = binder_put_node_cmd(
3992 proc, thread, &ptr, node_ptr,
3993 node_cookie, node_debug_id,
3994 BR_ACQUIRE, "BR_ACQUIRE");
3995 if (!ret && !strong && has_strong_ref)
3996 ret = binder_put_node_cmd(
3997 proc, thread, &ptr, node_ptr,
3998 node_cookie, node_debug_id,
3999 BR_RELEASE, "BR_RELEASE");
4000 if (!ret && !weak && has_weak_ref)
4001 ret = binder_put_node_cmd(
4002 proc, thread, &ptr, node_ptr,
4003 node_cookie, node_debug_id,
4004 BR_DECREFS, "BR_DECREFS");
4005 if (orig_ptr == ptr)
4006 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4007 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4008 proc->pid, thread->pid,
4009 node_debug_id,
4010 (u64)node_ptr,
4011 (u64)node_cookie);
4012 if (ret)
4013 return ret;
4014 } break;
4015 case BINDER_WORK_DEAD_BINDER:
4016 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4017 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4018 struct binder_ref_death *death;
4019 uint32_t cmd;
4020 binder_uintptr_t cookie;
4021
4022 death = container_of(w, struct binder_ref_death, work);
4023 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4024 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4025 else
4026 cmd = BR_DEAD_BINDER;
4027 cookie = death->cookie;
4028
4029 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4030 "%d:%d %s %016llx\n",
4031 proc->pid, thread->pid,
4032 cmd == BR_DEAD_BINDER ?
4033 "BR_DEAD_BINDER" :
4034 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4035 (u64)cookie);
4036 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4037 binder_inner_proc_unlock(proc);
4038 kfree(death);
4039 binder_stats_deleted(BINDER_STAT_DEATH);
4040 } else {
4041 binder_enqueue_work_ilocked(
4042 w, &proc->delivered_death);
4043 binder_inner_proc_unlock(proc);
4044 }
4045 if (put_user(cmd, (uint32_t __user *)ptr))
4046 return -EFAULT;
4047 ptr += sizeof(uint32_t);
4048 if (put_user(cookie,
4049 (binder_uintptr_t __user *)ptr))
4050 return -EFAULT;
4051 ptr += sizeof(binder_uintptr_t);
4052 binder_stat_br(proc, thread, cmd);
4053 if (cmd == BR_DEAD_BINDER)
4054 goto done; /* DEAD_BINDER notifications can cause transactions */
4055 } break;
4056 default:
4057 binder_inner_proc_unlock(proc);
4058 pr_err("%d:%d: bad work type %d\n",
4059 proc->pid, thread->pid, w->type);
4060 break;
4061 }
4062
4063 if (!t)
4064 continue;
4065
4066 BUG_ON(t->buffer == NULL);
4067 if (t->buffer->target_node) {
4068 struct binder_node *target_node = t->buffer->target_node;
4069
4070 trd->target.ptr = target_node->ptr;
4071 trd->cookie = target_node->cookie;
4072 t->saved_priority = task_nice(current);
4073 if (t->priority < target_node->min_priority &&
4074 !(t->flags & TF_ONE_WAY))
4075 binder_set_nice(t->priority);
4076 else if (!(t->flags & TF_ONE_WAY) ||
4077 t->saved_priority > target_node->min_priority)
4078 binder_set_nice(target_node->min_priority);
4079 cmd = BR_TRANSACTION;
4080 } else {
4081 trd->target.ptr = 0;
4082 trd->cookie = 0;
4083 cmd = BR_REPLY;
4084 }
4085 trd->code = t->code;
4086 trd->flags = t->flags;
4087 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4088
4089 t_from = binder_get_txn_from(t);
4090 if (t_from) {
4091 struct task_struct *sender = t_from->proc->tsk;
4092
4093 trd->sender_pid =
4094 task_tgid_nr_ns(sender,
4095 task_active_pid_ns(current));
4096 } else {
4097 trd->sender_pid = 0;
4098 }
4099
4100 ret = binder_apply_fd_fixups(proc, t);
4101 if (ret) {
4102 struct binder_buffer *buffer = t->buffer;
4103 bool oneway = !!(t->flags & TF_ONE_WAY);
4104 int tid = t->debug_id;
4105
4106 if (t_from)
4107 binder_thread_dec_tmpref(t_from);
4108 buffer->transaction = NULL;
4109 binder_cleanup_transaction(t, "fd fixups failed",
4110 BR_FAILED_REPLY);
4111 binder_free_buf(proc, thread, buffer, true);
4112 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4113 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4114 proc->pid, thread->pid,
4115 oneway ? "async " :
4116 (cmd == BR_REPLY ? "reply " : ""),
4117 tid, BR_FAILED_REPLY, ret, __LINE__);
4118 if (cmd == BR_REPLY) {
4119 cmd = BR_FAILED_REPLY;
4120 if (put_user(cmd, (uint32_t __user *)ptr))
4121 return -EFAULT;
4122 ptr += sizeof(uint32_t);
4123 binder_stat_br(proc, thread, cmd);
4124 break;
4125 }
4126 continue;
4127 }
4128 trd->data_size = t->buffer->data_size;
4129 trd->offsets_size = t->buffer->offsets_size;
4130 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4131 trd->data.ptr.offsets = trd->data.ptr.buffer +
4132 ALIGN(t->buffer->data_size,
4133 sizeof(void *));
4134
4135 tr.secctx = t->security_ctx;
4136 if (t->security_ctx) {
4137 cmd = BR_TRANSACTION_SEC_CTX;
4138 trsize = sizeof(tr);
4139 }
4140 if (put_user(cmd, (uint32_t __user *)ptr)) {
4141 if (t_from)
4142 binder_thread_dec_tmpref(t_from);
4143
4144 binder_cleanup_transaction(t, "put_user failed",
4145 BR_FAILED_REPLY);
4146
4147 return -EFAULT;
4148 }
4149 ptr += sizeof(uint32_t);
4150 if (copy_to_user(ptr, &tr, trsize)) {
4151 if (t_from)
4152 binder_thread_dec_tmpref(t_from);
4153
4154 binder_cleanup_transaction(t, "copy_to_user failed",
4155 BR_FAILED_REPLY);
4156
4157 return -EFAULT;
4158 }
4159 ptr += trsize;
4160
4161 trace_binder_transaction_received(t);
4162 binder_stat_br(proc, thread, cmd);
4163 binder_debug(BINDER_DEBUG_TRANSACTION,
4164 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4165 proc->pid, thread->pid,
4166 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4167 (cmd == BR_TRANSACTION_SEC_CTX) ?
4168 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4169 t->debug_id, t_from ? t_from->proc->pid : 0,
4170 t_from ? t_from->pid : 0, cmd,
4171 t->buffer->data_size, t->buffer->offsets_size,
4172 (u64)trd->data.ptr.buffer,
4173 (u64)trd->data.ptr.offsets);
4174
4175 if (t_from)
4176 binder_thread_dec_tmpref(t_from);
4177 t->buffer->allow_user_free = 1;
4178 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4179 binder_inner_proc_lock(thread->proc);
4180 t->to_parent = thread->transaction_stack;
4181 t->to_thread = thread;
4182 thread->transaction_stack = t;
4183 binder_inner_proc_unlock(thread->proc);
4184 } else {
4185 binder_free_transaction(t);
4186 }
4187 break;
4188 }
4189
4190done:
4191
4192 *consumed = ptr - buffer;
4193 binder_inner_proc_lock(proc);
4194 if (proc->requested_threads == 0 &&
4195 list_empty(&thread->proc->waiting_threads) &&
4196 proc->requested_threads_started < proc->max_threads &&
4197 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4198	     BINDER_LOOPER_STATE_ENTERED))
4199	    /* the user-space code fails to spawn a new thread if we leave this out */) {
4200 proc->requested_threads++;
4201 binder_inner_proc_unlock(proc);
4202 binder_debug(BINDER_DEBUG_THREADS,
4203 "%d:%d BR_SPAWN_LOOPER\n",
4204 proc->pid, thread->pid);
4205 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4206 return -EFAULT;
4207 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4208 } else
4209 binder_inner_proc_unlock(proc);
4210 return 0;
4211}
4212
4213static void binder_release_work(struct binder_proc *proc,
4214 struct list_head *list)
4215{
4216 struct binder_work *w;
4217 enum binder_work_type wtype;
4218
4219 while (1) {
4220 binder_inner_proc_lock(proc);
4221 w = binder_dequeue_work_head_ilocked(list);
4222 wtype = w ? w->type : 0;
4223 binder_inner_proc_unlock(proc);
4224 if (!w)
4225 return;
4226
4227 switch (wtype) {
4228 case BINDER_WORK_TRANSACTION: {
4229 struct binder_transaction *t;
4230
4231 t = container_of(w, struct binder_transaction, work);
4232
4233 binder_cleanup_transaction(t, "process died.",
4234 BR_DEAD_REPLY);
4235 } break;
4236 case BINDER_WORK_RETURN_ERROR: {
4237 struct binder_error *e = container_of(
4238 w, struct binder_error, work);
4239
4240 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4241 "undelivered TRANSACTION_ERROR: %u\n",
4242 e->cmd);
4243 } break;
4244 case BINDER_WORK_TRANSACTION_COMPLETE: {
4245 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4246 "undelivered TRANSACTION_COMPLETE\n");
4247 kfree(w);
4248 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4249 } break;
4250 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4251 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4252 struct binder_ref_death *death;
4253
4254 death = container_of(w, struct binder_ref_death, work);
4255 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4256 "undelivered death notification, %016llx\n",
4257 (u64)death->cookie);
4258 kfree(death);
4259 binder_stats_deleted(BINDER_STAT_DEATH);
4260 } break;
4261 case BINDER_WORK_NODE:
4262 break;
4263 default:
4264 pr_err("unexpected work type, %d, not freed\n",
4265 wtype);
4266 break;
4267 }
4268 }
4269
4270}
4271
4272static struct binder_thread *binder_get_thread_ilocked(
4273 struct binder_proc *proc, struct binder_thread *new_thread)
4274{
4275 struct binder_thread *thread = NULL;
4276 struct rb_node *parent = NULL;
4277 struct rb_node **p = &proc->threads.rb_node;
4278
4279 while (*p) {
4280 parent = *p;
4281 thread = rb_entry(parent, struct binder_thread, rb_node);
4282
4283 if (current->pid < thread->pid)
4284 p = &(*p)->rb_left;
4285 else if (current->pid > thread->pid)
4286 p = &(*p)->rb_right;
4287 else
4288 return thread;
4289 }
4290 if (!new_thread)
4291 return NULL;
4292 thread = new_thread;
4293 binder_stats_created(BINDER_STAT_THREAD);
4294 thread->proc = proc;
4295 thread->pid = current->pid;
4296 atomic_set(&thread->tmp_ref, 0);
4297 init_waitqueue_head(&thread->wait);
4298 INIT_LIST_HEAD(&thread->todo);
4299 rb_link_node(&thread->rb_node, parent, p);
4300 rb_insert_color(&thread->rb_node, &proc->threads);
4301 thread->looper_need_return = true;
4302 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4303 thread->return_error.cmd = BR_OK;
4304 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4305 thread->reply_error.cmd = BR_OK;
4306 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4307 return thread;
4308}
4309
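/*
 * Look up the binder_thread for the calling task, creating it on first
 * use.  The lookup is first tried without an allocation; if the thread
 * is missing, a new one is allocated outside the lock and the lookup is
 * retried, freeing the allocation if it turns out not to be needed.
 */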
4310static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4311{
4312 struct binder_thread *thread;
4313 struct binder_thread *new_thread;
4314
4315 binder_inner_proc_lock(proc);
4316 thread = binder_get_thread_ilocked(proc, NULL);
4317 binder_inner_proc_unlock(proc);
4318 if (!thread) {
4319 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4320 if (new_thread == NULL)
4321 return NULL;
4322 binder_inner_proc_lock(proc);
4323 thread = binder_get_thread_ilocked(proc, new_thread);
4324 binder_inner_proc_unlock(proc);
4325 if (thread != new_thread)
4326 kfree(new_thread);
4327 }
4328 return thread;
4329}
4330
4331static void binder_free_proc(struct binder_proc *proc)
4332{
4333 struct binder_device *device;
4334
4335 BUG_ON(!list_empty(&proc->todo));
4336 BUG_ON(!list_empty(&proc->delivered_death));
4337 if (proc->outstanding_txns)
4338 pr_warn("%s: Unexpected outstanding_txns %d\n",
4339 __func__, proc->outstanding_txns);
4340 device = container_of(proc->context, struct binder_device, context);
4341 if (refcount_dec_and_test(&device->ref)) {
4342 kfree(proc->context->name);
4343 kfree(device);
4344 }
4345 binder_alloc_deferred_release(&proc->alloc);
4346 put_task_struct(proc->tsk);
4347 put_cred(proc->cred);
4348 binder_stats_deleted(BINDER_STAT_PROC);
4349 kfree(proc);
4350}
4351
4352static void binder_free_thread(struct binder_thread *thread)
4353{
4354 BUG_ON(!list_empty(&thread->todo));
4355 binder_stats_deleted(BINDER_STAT_THREAD);
4356 binder_proc_dec_tmpref(thread->proc);
4357 kfree(thread);
4358}
4359
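/*
 * Tear down a binder thread: remove it from proc->threads, unwind any
 * transactions still on its stack, fail an undelivered reply with
 * BR_DEAD_REPLY, and release whatever work is still queued on its todo
 * list.  Returns the number of transactions that were still active.
 */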
4360static int binder_thread_release(struct binder_proc *proc,
4361 struct binder_thread *thread)
4362{
4363 struct binder_transaction *t;
4364 struct binder_transaction *send_reply = NULL;
4365 int active_transactions = 0;
4366 struct binder_transaction *last_t = NULL;
4367
4368 binder_inner_proc_lock(thread->proc);
4369 /*
4370 * take a ref on the proc so it survives
4371 * after we remove this thread from proc->threads.
4372	 * The corresponding decrement happens when we
4373	 * actually free the thread in binder_free_thread().
4374 */
4375 proc->tmp_ref++;
4376 /*
4377 * take a ref on this thread to ensure it
4378 * survives while we are releasing it
4379 */
4380 atomic_inc(&thread->tmp_ref);
4381 rb_erase(&thread->rb_node, &proc->threads);
4382 t = thread->transaction_stack;
4383 if (t) {
4384 spin_lock(&t->lock);
4385 if (t->to_thread == thread)
4386 send_reply = t;
4387 } else {
4388 __acquire(&t->lock);
4389 }
4390 thread->is_dead = true;
4391
4392 while (t) {
4393 last_t = t;
4394 active_transactions++;
4395 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4396 "release %d:%d transaction %d %s, still active\n",
4397 proc->pid, thread->pid,
4398 t->debug_id,
4399 (t->to_thread == thread) ? "in" : "out");
4400
4401 if (t->to_thread == thread) {
4402 thread->proc->outstanding_txns--;
4403 t->to_proc = NULL;
4404 t->to_thread = NULL;
4405 if (t->buffer) {
4406 t->buffer->transaction = NULL;
4407 t->buffer = NULL;
4408 }
4409 t = t->to_parent;
4410 } else if (t->from == thread) {
4411 t->from = NULL;
4412 t = t->from_parent;
4413 } else
4414 BUG();
4415 spin_unlock(&last_t->lock);
4416 if (t)
4417 spin_lock(&t->lock);
4418 else
4419 __acquire(&t->lock);
4420 }
4421 /* annotation for sparse, lock not acquired in last iteration above */
4422 __release(&t->lock);
4423
4424 /*
4425 * If this thread used poll, make sure we remove the waitqueue from any
4426 * poll data structures holding it.
4427 */
4428 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4429 wake_up_pollfree(&thread->wait);
4430
4431 binder_inner_proc_unlock(thread->proc);
4432
4433 /*
4434 * This is needed to avoid races between wake_up_pollfree() above and
4435 * someone else removing the last entry from the queue for other reasons
4436 * (e.g. ep_remove_wait_queue() being called due to an epoll file
4437 * descriptor being closed). Such other users hold an RCU read lock, so
4438 * we can be sure they're done after we call synchronize_rcu().
4439 */
4440 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4441 synchronize_rcu();
4442
4443 if (send_reply)
4444 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4445 binder_release_work(proc, &thread->todo);
4446 binder_thread_dec_tmpref(thread);
4447 return active_transactions;
4448}
4449
4450static __poll_t binder_poll(struct file *filp,
4451 struct poll_table_struct *wait)
4452{
4453 struct binder_proc *proc = filp->private_data;
4454 struct binder_thread *thread = NULL;
4455 bool wait_for_proc_work;
4456
4457 thread = binder_get_thread(proc);
4458 if (!thread)
4459 return POLLERR;
4460
4461 binder_inner_proc_lock(thread->proc);
4462 thread->looper |= BINDER_LOOPER_STATE_POLL;
4463 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4464
4465 binder_inner_proc_unlock(thread->proc);
4466
4467 poll_wait(filp, &thread->wait, wait);
4468
4469 if (binder_has_work(thread, wait_for_proc_work))
4470 return EPOLLIN;
4471
4472 return 0;
4473}
4474
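/*
 * Handle BINDER_WRITE_READ: process the write buffer through
 * binder_thread_write(), then (if requested) block in binder_thread_read()
 * and report progress back through the consumed fields.  Illustrative
 * userspace sketch, not part of the driver (binder_fd, the buffers and
 * parse() are placeholders):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)out_buf,
 *		.write_size   = out_len,
 *		.read_buffer  = (binder_uintptr_t)in_buf,
 *		.read_size    = sizeof(in_buf),
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) == 0)
 *		parse(in_buf, bwr.read_consumed);	// returned BR_* stream
 */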
4475static int binder_ioctl_write_read(struct file *filp,
4476 unsigned int cmd, unsigned long arg,
4477 struct binder_thread *thread)
4478{
4479 int ret = 0;
4480 struct binder_proc *proc = filp->private_data;
4481 unsigned int size = _IOC_SIZE(cmd);
4482 void __user *ubuf = (void __user *)arg;
4483 struct binder_write_read bwr;
4484
4485 if (size != sizeof(struct binder_write_read)) {
4486 ret = -EINVAL;
4487 goto out;
4488 }
4489 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4490 ret = -EFAULT;
4491 goto out;
4492 }
4493 binder_debug(BINDER_DEBUG_READ_WRITE,
4494 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4495 proc->pid, thread->pid,
4496 (u64)bwr.write_size, (u64)bwr.write_buffer,
4497 (u64)bwr.read_size, (u64)bwr.read_buffer);
4498
4499 if (bwr.write_size > 0) {
4500 ret = binder_thread_write(proc, thread,
4501 bwr.write_buffer,
4502 bwr.write_size,
4503 &bwr.write_consumed);
4504 trace_binder_write_done(ret);
4505 if (ret < 0) {
4506 bwr.read_consumed = 0;
4507 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4508 ret = -EFAULT;
4509 goto out;
4510 }
4511 }
4512 if (bwr.read_size > 0) {
4513 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4514 bwr.read_size,
4515 &bwr.read_consumed,
4516 filp->f_flags & O_NONBLOCK);
4517 trace_binder_read_done(ret);
4518 binder_inner_proc_lock(proc);
4519 if (!binder_worklist_empty_ilocked(&proc->todo))
4520 binder_wakeup_proc_ilocked(proc);
4521 binder_inner_proc_unlock(proc);
4522 if (ret < 0) {
4523 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4524 ret = -EFAULT;
4525 goto out;
4526 }
4527 }
4528 binder_debug(BINDER_DEBUG_READ_WRITE,
4529 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4530 proc->pid, thread->pid,
4531 (u64)bwr.write_consumed, (u64)bwr.write_size,
4532 (u64)bwr.read_consumed, (u64)bwr.read_size);
4533 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4534 ret = -EFAULT;
4535 goto out;
4536 }
4537out:
4538 return ret;
4539}
4540
4541static int binder_ioctl_set_ctx_mgr(struct file *filp,
4542 struct flat_binder_object *fbo)
4543{
4544 int ret = 0;
4545 struct binder_proc *proc = filp->private_data;
4546 struct binder_context *context = proc->context;
4547 struct binder_node *new_node;
4548 kuid_t curr_euid = current_euid();
4549
4550 mutex_lock(&context->context_mgr_node_lock);
4551 if (context->binder_context_mgr_node) {
4552 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4553 ret = -EBUSY;
4554 goto out;
4555 }
4556 ret = security_binder_set_context_mgr(proc->cred);
4557 if (ret < 0)
4558 goto out;
4559 if (uid_valid(context->binder_context_mgr_uid)) {
4560 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4561 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4562 from_kuid(&init_user_ns, curr_euid),
4563 from_kuid(&init_user_ns,
4564 context->binder_context_mgr_uid));
4565 ret = -EPERM;
4566 goto out;
4567 }
4568 } else {
4569 context->binder_context_mgr_uid = curr_euid;
4570 }
4571 new_node = binder_new_node(proc, fbo);
4572 if (!new_node) {
4573 ret = -ENOMEM;
4574 goto out;
4575 }
4576 binder_node_lock(new_node);
4577 new_node->local_weak_refs++;
4578 new_node->local_strong_refs++;
4579 new_node->has_strong_ref = 1;
4580 new_node->has_weak_ref = 1;
4581 context->binder_context_mgr_node = new_node;
4582 binder_node_unlock(new_node);
4583 binder_put_node(new_node);
4584out:
4585 mutex_unlock(&context->context_mgr_node_lock);
4586 return ret;
4587}
4588
4589static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4590 struct binder_node_info_for_ref *info)
4591{
4592 struct binder_node *node;
4593 struct binder_context *context = proc->context;
4594 __u32 handle = info->handle;
4595
4596 if (info->strong_count || info->weak_count || info->reserved1 ||
4597 info->reserved2 || info->reserved3) {
4598 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4599 proc->pid);
4600 return -EINVAL;
4601 }
4602
4603 /* This ioctl may only be used by the context manager */
4604 mutex_lock(&context->context_mgr_node_lock);
4605 if (!context->binder_context_mgr_node ||
4606 context->binder_context_mgr_node->proc != proc) {
4607 mutex_unlock(&context->context_mgr_node_lock);
4608 return -EPERM;
4609 }
4610 mutex_unlock(&context->context_mgr_node_lock);
4611
4612 node = binder_get_node_from_ref(proc, handle, true, NULL);
4613 if (!node)
4614 return -EINVAL;
4615
4616 info->strong_count = node->local_strong_refs +
4617 node->internal_strong_refs;
4618 info->weak_count = node->local_weak_refs;
4619
4620 binder_put_node(node);
4621
4622 return 0;
4623}
4624
4625static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4626 struct binder_node_debug_info *info)
4627{
4628 struct rb_node *n;
4629 binder_uintptr_t ptr = info->ptr;
4630
4631 memset(info, 0, sizeof(*info));
4632
4633 binder_inner_proc_lock(proc);
4634 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4635 struct binder_node *node = rb_entry(n, struct binder_node,
4636 rb_node);
4637 if (node->ptr > ptr) {
4638 info->ptr = node->ptr;
4639 info->cookie = node->cookie;
4640 info->has_strong_ref = node->has_strong_ref;
4641 info->has_weak_ref = node->has_weak_ref;
4642 break;
4643 }
4644 }
4645 binder_inner_proc_unlock(proc);
4646
4647 return 0;
4648}
4649
4650static bool binder_txns_pending_ilocked(struct binder_proc *proc)
4651{
4652 struct rb_node *n;
4653 struct binder_thread *thread;
4654
4655 if (proc->outstanding_txns > 0)
4656 return true;
4657
4658 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
4659 thread = rb_entry(n, struct binder_thread, rb_node);
4660 if (thread->transaction_stack)
4661 return true;
4662 }
4663 return false;
4664}
4665
4666static int binder_ioctl_freeze(struct binder_freeze_info *info,
4667 struct binder_proc *target_proc)
4668{
4669 int ret = 0;
4670
4671 if (!info->enable) {
4672 binder_inner_proc_lock(target_proc);
4673 target_proc->sync_recv = false;
4674 target_proc->async_recv = false;
4675 target_proc->is_frozen = false;
4676 binder_inner_proc_unlock(target_proc);
4677 return 0;
4678 }
4679
4680 /*
4681 * Freezing the target. Prevent new transactions by
4682 * setting frozen state. If timeout specified, wait
4683 * for transactions to drain.
4684 */
4685 binder_inner_proc_lock(target_proc);
4686 target_proc->sync_recv = false;
4687 target_proc->async_recv = false;
4688 target_proc->is_frozen = true;
4689 binder_inner_proc_unlock(target_proc);
4690
4691 if (info->timeout_ms > 0)
4692 ret = wait_event_interruptible_timeout(
4693 target_proc->freeze_wait,
4694 (!target_proc->outstanding_txns),
4695 msecs_to_jiffies(info->timeout_ms));
4696
4697 /* Check pending transactions that wait for reply */
4698 if (ret >= 0) {
4699 binder_inner_proc_lock(target_proc);
4700 if (binder_txns_pending_ilocked(target_proc))
4701 ret = -EAGAIN;
4702 binder_inner_proc_unlock(target_proc);
4703 }
4704
4705 if (ret < 0) {
4706 binder_inner_proc_lock(target_proc);
4707 target_proc->is_frozen = false;
4708 binder_inner_proc_unlock(target_proc);
4709 }
4710
4711 return ret;
4712}
4713
4714static int binder_ioctl_get_freezer_info(
4715 struct binder_frozen_status_info *info)
4716{
4717 struct binder_proc *target_proc;
4718 bool found = false;
4719 __u32 txns_pending;
4720
4721 info->sync_recv = 0;
4722 info->async_recv = 0;
4723
4724 mutex_lock(&binder_procs_lock);
4725 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4726 if (target_proc->pid == info->pid) {
4727 found = true;
4728 binder_inner_proc_lock(target_proc);
4729 txns_pending = binder_txns_pending_ilocked(target_proc);
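			/*
			 * Encoding note: bit 0 of the reported sync_recv
			 * means a sync transaction was received while frozen,
			 * bit 1 means transactions were still pending at
			 * query time.
			 */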
4730 info->sync_recv |= target_proc->sync_recv |
4731 (txns_pending << 1);
4732 info->async_recv |= target_proc->async_recv;
4733 binder_inner_proc_unlock(target_proc);
4734 }
4735 }
4736 mutex_unlock(&binder_procs_lock);
4737
4738 if (!found)
4739 return -EINVAL;
4740
4741 return 0;
4742}
4743
4744static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4745{
4746 int ret;
4747 struct binder_proc *proc = filp->private_data;
4748 struct binder_thread *thread;
4749 unsigned int size = _IOC_SIZE(cmd);
4750 void __user *ubuf = (void __user *)arg;
4751
4752 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4753 proc->pid, current->pid, cmd, arg);*/
4754
4755 binder_selftest_alloc(&proc->alloc);
4756
4757 trace_binder_ioctl(cmd, arg);
4758
4759 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4760 if (ret)
4761 goto err_unlocked;
4762
4763 thread = binder_get_thread(proc);
4764 if (thread == NULL) {
4765 ret = -ENOMEM;
4766 goto err;
4767 }
4768
4769 switch (cmd) {
4770 case BINDER_WRITE_READ:
4771 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4772 if (ret)
4773 goto err;
4774 break;
4775 case BINDER_SET_MAX_THREADS: {
4776 int max_threads;
4777
4778 if (copy_from_user(&max_threads, ubuf,
4779 sizeof(max_threads))) {
4780 ret = -EINVAL;
4781 goto err;
4782 }
4783 binder_inner_proc_lock(proc);
4784 proc->max_threads = max_threads;
4785 binder_inner_proc_unlock(proc);
4786 break;
4787 }
4788 case BINDER_SET_CONTEXT_MGR_EXT: {
4789 struct flat_binder_object fbo;
4790
4791 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
4792 ret = -EINVAL;
4793 goto err;
4794 }
4795 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
4796 if (ret)
4797 goto err;
4798 break;
4799 }
4800 case BINDER_SET_CONTEXT_MGR:
4801 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
4802 if (ret)
4803 goto err;
4804 break;
4805 case BINDER_THREAD_EXIT:
4806 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4807 proc->pid, thread->pid);
4808 binder_thread_release(proc, thread);
4809 thread = NULL;
4810 break;
4811 case BINDER_VERSION: {
4812 struct binder_version __user *ver = ubuf;
4813
4814 if (size != sizeof(struct binder_version)) {
4815 ret = -EINVAL;
4816 goto err;
4817 }
4818 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4819 &ver->protocol_version)) {
4820 ret = -EINVAL;
4821 goto err;
4822 }
4823 break;
4824 }
4825 case BINDER_GET_NODE_INFO_FOR_REF: {
4826 struct binder_node_info_for_ref info;
4827
4828 if (copy_from_user(&info, ubuf, sizeof(info))) {
4829 ret = -EFAULT;
4830 goto err;
4831 }
4832
4833 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4834 if (ret < 0)
4835 goto err;
4836
4837 if (copy_to_user(ubuf, &info, sizeof(info))) {
4838 ret = -EFAULT;
4839 goto err;
4840 }
4841
4842 break;
4843 }
4844 case BINDER_GET_NODE_DEBUG_INFO: {
4845 struct binder_node_debug_info info;
4846
4847 if (copy_from_user(&info, ubuf, sizeof(info))) {
4848 ret = -EFAULT;
4849 goto err;
4850 }
4851
4852 ret = binder_ioctl_get_node_debug_info(proc, &info);
4853 if (ret < 0)
4854 goto err;
4855
4856 if (copy_to_user(ubuf, &info, sizeof(info))) {
4857 ret = -EFAULT;
4858 goto err;
4859 }
4860 break;
4861 }
4862 case BINDER_FREEZE: {
4863 struct binder_freeze_info info;
4864 struct binder_proc **target_procs = NULL, *target_proc;
4865 int target_procs_count = 0, i = 0;
4866
4867 ret = 0;
4868
4869 if (copy_from_user(&info, ubuf, sizeof(info))) {
4870 ret = -EFAULT;
4871 goto err;
4872 }
4873
4874 mutex_lock(&binder_procs_lock);
4875 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4876 if (target_proc->pid == info.pid)
4877 target_procs_count++;
4878 }
4879
4880 if (target_procs_count == 0) {
4881 mutex_unlock(&binder_procs_lock);
4882 ret = -EINVAL;
4883 goto err;
4884 }
4885
4886 target_procs = kcalloc(target_procs_count,
4887 sizeof(struct binder_proc *),
4888 GFP_KERNEL);
4889
4890 if (!target_procs) {
4891 mutex_unlock(&binder_procs_lock);
4892 ret = -ENOMEM;
4893 goto err;
4894 }
4895
4896 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4897 if (target_proc->pid != info.pid)
4898 continue;
4899
4900 binder_inner_proc_lock(target_proc);
4901 target_proc->tmp_ref++;
4902 binder_inner_proc_unlock(target_proc);
4903
4904 target_procs[i++] = target_proc;
4905 }
4906 mutex_unlock(&binder_procs_lock);
4907
4908 for (i = 0; i < target_procs_count; i++) {
4909 if (ret >= 0)
4910 ret = binder_ioctl_freeze(&info,
4911 target_procs[i]);
4912
4913 binder_proc_dec_tmpref(target_procs[i]);
4914 }
4915
4916 kfree(target_procs);
4917
4918 if (ret < 0)
4919 goto err;
4920 break;
4921 }
4922 case BINDER_GET_FROZEN_INFO: {
4923 struct binder_frozen_status_info info;
4924
4925 if (copy_from_user(&info, ubuf, sizeof(info))) {
4926 ret = -EFAULT;
4927 goto err;
4928 }
4929
4930 ret = binder_ioctl_get_freezer_info(&info);
4931 if (ret < 0)
4932 goto err;
4933
4934 if (copy_to_user(ubuf, &info, sizeof(info))) {
4935 ret = -EFAULT;
4936 goto err;
4937 }
4938 break;
4939 }
4940 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
4941 uint32_t enable;
4942
4943 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
4944 ret = -EFAULT;
4945 goto err;
4946 }
4947 binder_inner_proc_lock(proc);
4948 proc->oneway_spam_detection_enabled = (bool)enable;
4949 binder_inner_proc_unlock(proc);
4950 break;
4951 }
4952 default:
4953 ret = -EINVAL;
4954 goto err;
4955 }
4956 ret = 0;
4957err:
4958 if (thread)
4959 thread->looper_need_return = false;
4960 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4961 if (ret && ret != -EINTR)
4962 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4963err_unlocked:
4964 trace_binder_ioctl_done(ret);
4965 return ret;
4966}
4967
4968static void binder_vma_open(struct vm_area_struct *vma)
4969{
4970 struct binder_proc *proc = vma->vm_private_data;
4971
4972 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4973 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4974 proc->pid, vma->vm_start, vma->vm_end,
4975 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4976 (unsigned long)pgprot_val(vma->vm_page_prot));
4977}
4978
4979static void binder_vma_close(struct vm_area_struct *vma)
4980{
4981 struct binder_proc *proc = vma->vm_private_data;
4982
4983 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4984 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4985 proc->pid, vma->vm_start, vma->vm_end,
4986 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4987 (unsigned long)pgprot_val(vma->vm_page_prot));
4988 binder_alloc_vma_close(&proc->alloc);
4989}
4990
4991static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
4992{
4993 return VM_FAULT_SIGBUS;
4994}
4995
4996static const struct vm_operations_struct binder_vm_ops = {
4997 .open = binder_vma_open,
4998 .close = binder_vma_close,
4999 .fault = binder_vm_fault,
5000};
5001
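/*
 * The binder buffer mapping is read-only for userspace: VM_WRITE is
 * rejected (FORBIDDEN_MMAP_FLAGS) and VM_MAYWRITE is cleared, since the
 * kernel copies transaction data into the buffer and userspace only
 * reads it there.
 */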
5002static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5003{
5004 struct binder_proc *proc = filp->private_data;
5005
5006 if (proc->tsk != current->group_leader)
5007 return -EINVAL;
5008
5009 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5010 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5011 __func__, proc->pid, vma->vm_start, vma->vm_end,
5012 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5013 (unsigned long)pgprot_val(vma->vm_page_prot));
5014
5015 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5016 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5017 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5018 return -EPERM;
5019 }
5020 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5021 vma->vm_flags &= ~VM_MAYWRITE;
5022
5023 vma->vm_ops = &binder_vm_ops;
5024 vma->vm_private_data = proc;
5025
5026 return binder_alloc_mmap_handler(&proc->alloc, vma);
5027}
5028
5029static int binder_open(struct inode *nodp, struct file *filp)
5030{
5031 struct binder_proc *proc, *itr;
5032 struct binder_device *binder_dev;
5033 struct binderfs_info *info;
5034 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5035 bool existing_pid = false;
5036
5037 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5038 current->group_leader->pid, current->pid);
5039
5040 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5041 if (proc == NULL)
5042 return -ENOMEM;
5043 spin_lock_init(&proc->inner_lock);
5044 spin_lock_init(&proc->outer_lock);
5045 get_task_struct(current->group_leader);
5046 proc->tsk = current->group_leader;
5047 proc->cred = get_cred(filp->f_cred);
5048 INIT_LIST_HEAD(&proc->todo);
5049 init_waitqueue_head(&proc->freeze_wait);
5050 proc->default_priority = task_nice(current);
5051 /* binderfs stashes devices in i_private */
5052 if (is_binderfs_device(nodp)) {
5053 binder_dev = nodp->i_private;
5054 info = nodp->i_sb->s_fs_info;
5055 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5056 } else {
5057 binder_dev = container_of(filp->private_data,
5058 struct binder_device, miscdev);
5059 }
5060 refcount_inc(&binder_dev->ref);
5061 proc->context = &binder_dev->context;
5062 binder_alloc_init(&proc->alloc);
5063
5064 binder_stats_created(BINDER_STAT_PROC);
5065 proc->pid = current->group_leader->pid;
5066 INIT_LIST_HEAD(&proc->delivered_death);
5067 INIT_LIST_HEAD(&proc->waiting_threads);
5068 filp->private_data = proc;
5069
5070 mutex_lock(&binder_procs_lock);
5071 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5072 if (itr->pid == proc->pid) {
5073 existing_pid = true;
5074 break;
5075 }
5076 }
5077 hlist_add_head(&proc->proc_node, &binder_procs);
5078 mutex_unlock(&binder_procs_lock);
5079
5080 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5081 char strbuf[11];
5082
5083 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5084 /*
5085 * proc debug entries are shared between contexts.
5086		 * Only create for the first PID to avoid debugfs log spamming.
5087 * The printing code will anyway print all contexts for a given
5088 * PID so this is not a problem.
5089 */
5090 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5091 binder_debugfs_dir_entry_proc,
5092 (void *)(unsigned long)proc->pid,
5093 &proc_fops);
5094 }
5095
5096 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5097 char strbuf[11];
5098 struct dentry *binderfs_entry;
5099
5100 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5101 /*
5102 * Similar to debugfs, the process specific log file is shared
5103 * between contexts. Only create for the first PID.
5104 * This is ok since same as debugfs, the log file will contain
5105 * information on all contexts of a given PID.
5106 */
5107 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5108 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5109 if (!IS_ERR(binderfs_entry)) {
5110 proc->binderfs_entry = binderfs_entry;
5111 } else {
5112 int error;
5113
5114 error = PTR_ERR(binderfs_entry);
5115 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5116 strbuf, error);
5117 }
5118 }
5119
5120 return 0;
5121}
5122
5123static int binder_flush(struct file *filp, fl_owner_t id)
5124{
5125 struct binder_proc *proc = filp->private_data;
5126
5127 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5128
5129 return 0;
5130}
5131
5132static void binder_deferred_flush(struct binder_proc *proc)
5133{
5134 struct rb_node *n;
5135 int wake_count = 0;
5136
5137 binder_inner_proc_lock(proc);
5138 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5139 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5140
5141 thread->looper_need_return = true;
5142 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5143 wake_up_interruptible(&thread->wait);
5144 wake_count++;
5145 }
5146 }
5147 binder_inner_proc_unlock(proc);
5148
5149 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5150 "binder_flush: %d woke %d threads\n", proc->pid,
5151 wake_count);
5152}
5153
5154static int binder_release(struct inode *nodp, struct file *filp)
5155{
5156 struct binder_proc *proc = filp->private_data;
5157
5158 debugfs_remove(proc->debugfs_entry);
5159
5160 if (proc->binderfs_entry) {
5161 binderfs_remove_file(proc->binderfs_entry);
5162 proc->binderfs_entry = NULL;
5163 }
5164
5165 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5166
5167 return 0;
5168}
5169
5170static int binder_node_release(struct binder_node *node, int refs)
5171{
5172 struct binder_ref *ref;
5173 int death = 0;
5174 struct binder_proc *proc = node->proc;
5175
5176 binder_release_work(proc, &node->async_todo);
5177
5178 binder_node_lock(node);
5179 binder_inner_proc_lock(proc);
5180 binder_dequeue_work_ilocked(&node->work);
5181 /*
5182	 * The caller must have taken a temporary ref on the node.
5183 */
5184 BUG_ON(!node->tmp_refs);
5185 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5186 binder_inner_proc_unlock(proc);
5187 binder_node_unlock(node);
5188 binder_free_node(node);
5189
5190 return refs;
5191 }
5192
5193 node->proc = NULL;
5194 node->local_strong_refs = 0;
5195 node->local_weak_refs = 0;
5196 binder_inner_proc_unlock(proc);
5197
5198 spin_lock(&binder_dead_nodes_lock);
5199 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5200 spin_unlock(&binder_dead_nodes_lock);
5201
5202 hlist_for_each_entry(ref, &node->refs, node_entry) {
5203 refs++;
5204 /*
5205 * Need the node lock to synchronize
5206 * with new notification requests and the
5207 * inner lock to synchronize with queued
5208 * death notifications.
5209 */
5210 binder_inner_proc_lock(ref->proc);
5211 if (!ref->death) {
5212 binder_inner_proc_unlock(ref->proc);
5213 continue;
5214 }
5215
5216 death++;
5217
5218 BUG_ON(!list_empty(&ref->death->work.entry));
5219 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5220 binder_enqueue_work_ilocked(&ref->death->work,
5221 &ref->proc->todo);
5222 binder_wakeup_proc_ilocked(ref->proc);
5223 binder_inner_proc_unlock(ref->proc);
5224 }
5225
5226 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5227 "node %d now dead, refs %d, death %d\n",
5228 node->debug_id, refs, death);
5229 binder_node_unlock(node);
5230 binder_put_node(node);
5231
5232 return refs;
5233}
5234
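/*
 * Deferred proc teardown (runs from the binder_deferred_work workqueue):
 * unlink the proc, drop the context-manager node if it belonged to this
 * proc, release every thread, node and ref, flush remaining work, and
 * finally drop the temporary ref, which frees the proc once all other
 * references are gone.
 */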
5235static void binder_deferred_release(struct binder_proc *proc)
5236{
5237 struct binder_context *context = proc->context;
5238 struct rb_node *n;
5239 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5240
5241 mutex_lock(&binder_procs_lock);
5242 hlist_del(&proc->proc_node);
5243 mutex_unlock(&binder_procs_lock);
5244
5245 mutex_lock(&context->context_mgr_node_lock);
5246 if (context->binder_context_mgr_node &&
5247 context->binder_context_mgr_node->proc == proc) {
5248 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5249 "%s: %d context_mgr_node gone\n",
5250 __func__, proc->pid);
5251 context->binder_context_mgr_node = NULL;
5252 }
5253 mutex_unlock(&context->context_mgr_node_lock);
5254 binder_inner_proc_lock(proc);
5255 /*
5256 * Make sure proc stays alive after we
5257 * remove all the threads
5258 */
5259 proc->tmp_ref++;
5260
5261 proc->is_dead = true;
5262 proc->is_frozen = false;
5263 proc->sync_recv = false;
5264 proc->async_recv = false;
5265 threads = 0;
5266 active_transactions = 0;
5267 while ((n = rb_first(&proc->threads))) {
5268 struct binder_thread *thread;
5269
5270 thread = rb_entry(n, struct binder_thread, rb_node);
5271 binder_inner_proc_unlock(proc);
5272 threads++;
5273 active_transactions += binder_thread_release(proc, thread);
5274 binder_inner_proc_lock(proc);
5275 }
5276
5277 nodes = 0;
5278 incoming_refs = 0;
5279 while ((n = rb_first(&proc->nodes))) {
5280 struct binder_node *node;
5281
5282 node = rb_entry(n, struct binder_node, rb_node);
5283 nodes++;
5284 /*
5285 * take a temporary ref on the node before
5286 * calling binder_node_release() which will either
5287 * kfree() the node or call binder_put_node()
5288 */
5289 binder_inc_node_tmpref_ilocked(node);
5290 rb_erase(&node->rb_node, &proc->nodes);
5291 binder_inner_proc_unlock(proc);
5292 incoming_refs = binder_node_release(node, incoming_refs);
5293 binder_inner_proc_lock(proc);
5294 }
5295 binder_inner_proc_unlock(proc);
5296
5297 outgoing_refs = 0;
5298 binder_proc_lock(proc);
5299 while ((n = rb_first(&proc->refs_by_desc))) {
5300 struct binder_ref *ref;
5301
5302 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5303 outgoing_refs++;
5304 binder_cleanup_ref_olocked(ref);
5305 binder_proc_unlock(proc);
5306 binder_free_ref(ref);
5307 binder_proc_lock(proc);
5308 }
5309 binder_proc_unlock(proc);
5310
5311 binder_release_work(proc, &proc->todo);
5312 binder_release_work(proc, &proc->delivered_death);
5313
5314 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5315 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5316 __func__, proc->pid, threads, nodes, incoming_refs,
5317 outgoing_refs, active_transactions);
5318
5319 binder_proc_dec_tmpref(proc);
5320}
5321
5322static void binder_deferred_func(struct work_struct *work)
5323{
5324 struct binder_proc *proc;
5325
5326 int defer;
5327
5328 do {
5329 mutex_lock(&binder_deferred_lock);
5330 if (!hlist_empty(&binder_deferred_list)) {
5331 proc = hlist_entry(binder_deferred_list.first,
5332 struct binder_proc, deferred_work_node);
5333 hlist_del_init(&proc->deferred_work_node);
5334 defer = proc->deferred_work;
5335 proc->deferred_work = 0;
5336 } else {
5337 proc = NULL;
5338 defer = 0;
5339 }
5340 mutex_unlock(&binder_deferred_lock);
5341
5342 if (defer & BINDER_DEFERRED_FLUSH)
5343 binder_deferred_flush(proc);
5344
5345 if (defer & BINDER_DEFERRED_RELEASE)
5346 binder_deferred_release(proc); /* frees proc */
5347 } while (proc);
5348}
5349static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5350
5351static void
5352binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5353{
5354 mutex_lock(&binder_deferred_lock);
5355 proc->deferred_work |= defer;
5356 if (hlist_unhashed(&proc->deferred_work_node)) {
5357 hlist_add_head(&proc->deferred_work_node,
5358 &binder_deferred_list);
5359 schedule_work(&binder_deferred_work);
5360 }
5361 mutex_unlock(&binder_deferred_lock);
5362}
5363
5364static void print_binder_transaction_ilocked(struct seq_file *m,
5365 struct binder_proc *proc,
5366 const char *prefix,
5367 struct binder_transaction *t)
5368{
5369 struct binder_proc *to_proc;
5370 struct binder_buffer *buffer = t->buffer;
5371
5372 spin_lock(&t->lock);
5373 to_proc = t->to_proc;
5374 seq_printf(m,
5375 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5376 prefix, t->debug_id, t,
5377 t->from ? t->from->proc->pid : 0,
5378 t->from ? t->from->pid : 0,
5379 to_proc ? to_proc->pid : 0,
5380 t->to_thread ? t->to_thread->pid : 0,
5381 t->code, t->flags, t->priority, t->need_reply);
5382 spin_unlock(&t->lock);
5383
5384 if (proc != to_proc) {
5385 /*
5386 * Can only safely deref buffer if we are holding the
5387 * correct proc inner lock for this node
5388 */
5389 seq_puts(m, "\n");
5390 return;
5391 }
5392
5393 if (buffer == NULL) {
5394 seq_puts(m, " buffer free\n");
5395 return;
5396 }
5397 if (buffer->target_node)
5398 seq_printf(m, " node %d", buffer->target_node->debug_id);
5399 seq_printf(m, " size %zd:%zd data %pK\n",
5400 buffer->data_size, buffer->offsets_size,
5401 buffer->user_data);
5402}
5403
5404static void print_binder_work_ilocked(struct seq_file *m,
5405 struct binder_proc *proc,
5406 const char *prefix,
5407 const char *transaction_prefix,
5408 struct binder_work *w)
5409{
5410 struct binder_node *node;
5411 struct binder_transaction *t;
5412
5413 switch (w->type) {
5414 case BINDER_WORK_TRANSACTION:
5415 t = container_of(w, struct binder_transaction, work);
5416 print_binder_transaction_ilocked(
5417 m, proc, transaction_prefix, t);
5418 break;
5419 case BINDER_WORK_RETURN_ERROR: {
5420 struct binder_error *e = container_of(
5421 w, struct binder_error, work);
5422
5423 seq_printf(m, "%stransaction error: %u\n",
5424 prefix, e->cmd);
5425 } break;
5426 case BINDER_WORK_TRANSACTION_COMPLETE:
5427 seq_printf(m, "%stransaction complete\n", prefix);
5428 break;
5429 case BINDER_WORK_NODE:
5430 node = container_of(w, struct binder_node, work);
5431 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5432 prefix, node->debug_id,
5433 (u64)node->ptr, (u64)node->cookie);
5434 break;
5435 case BINDER_WORK_DEAD_BINDER:
5436 seq_printf(m, "%shas dead binder\n", prefix);
5437 break;
5438 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5439 seq_printf(m, "%shas cleared dead binder\n", prefix);
5440 break;
5441 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5442 seq_printf(m, "%shas cleared death notification\n", prefix);
5443 break;
5444 default:
5445 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5446 break;
5447 }
5448}
5449
5450static void print_binder_thread_ilocked(struct seq_file *m,
5451 struct binder_thread *thread,
5452 int print_always)
5453{
5454 struct binder_transaction *t;
5455 struct binder_work *w;
5456 size_t start_pos = m->count;
5457 size_t header_pos;
5458
5459 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5460 thread->pid, thread->looper,
5461 thread->looper_need_return,
5462 atomic_read(&thread->tmp_ref));
5463 header_pos = m->count;
5464 t = thread->transaction_stack;
5465 while (t) {
5466 if (t->from == thread) {
5467 print_binder_transaction_ilocked(m, thread->proc,
5468 " outgoing transaction", t);
5469 t = t->from_parent;
5470 } else if (t->to_thread == thread) {
5471 print_binder_transaction_ilocked(m, thread->proc,
5472 " incoming transaction", t);
5473 t = t->to_parent;
5474 } else {
5475 print_binder_transaction_ilocked(m, thread->proc,
5476 " bad transaction", t);
5477 t = NULL;
5478 }
5479 }
5480 list_for_each_entry(w, &thread->todo, entry) {
5481 print_binder_work_ilocked(m, thread->proc, " ",
5482 " pending transaction", w);
5483 }
5484 if (!print_always && m->count == header_pos)
5485 m->count = start_pos;
5486}
5487
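/*
 * Dump one node: its reference counts, the pids of all processes that hold
 * a ref on it and, while the node is still attached to a proc, any pending
 * async work. Callers take the node lock, plus the owning proc's inner
 * lock when node->proc is still valid.
 */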
5488static void print_binder_node_nilocked(struct seq_file *m,
5489 struct binder_node *node)
5490{
5491 struct binder_ref *ref;
5492 struct binder_work *w;
5493 int count;
5494
5495 count = 0;
5496 hlist_for_each_entry(ref, &node->refs, node_entry)
5497 count++;
5498
5499 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5500 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5501 node->has_strong_ref, node->has_weak_ref,
5502 node->local_strong_refs, node->local_weak_refs,
5503 node->internal_strong_refs, count, node->tmp_refs);
5504 if (count) {
5505 seq_puts(m, " proc");
5506 hlist_for_each_entry(ref, &node->refs, node_entry)
5507 seq_printf(m, " %d", ref->proc->pid);
5508 }
5509 seq_puts(m, "\n");
5510 if (node->proc) {
5511 list_for_each_entry(w, &node->async_todo, entry)
5512 print_binder_work_ilocked(m, node->proc, " ",
5513 " pending async transaction", w);
5514 }
5515}
5516
5517static void print_binder_ref_olocked(struct seq_file *m,
5518 struct binder_ref *ref)
5519{
5520 binder_node_lock(ref->node);
5521 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5522 ref->data.debug_id, ref->data.desc,
5523 ref->node->proc ? "" : "dead ",
5524 ref->node->debug_id, ref->data.strong,
5525 ref->data.weak, ref->death);
5526 binder_node_unlock(ref->node);
5527}
5528
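/*
 * Dump one process: threads, nodes, refs, allocated buffers and pending
 * work. The node lock cannot be taken while holding the proc's inner lock,
 * so each node is pinned with a temporary reference, the inner lock is
 * dropped around the node dump, and the previous node's reference is
 * released once the next one has been pinned (or after the loop). With
 * print_all == 0 only nodes that have async transactions are shown, and an
 * otherwise empty dump is rewound.
 */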
5529static void print_binder_proc(struct seq_file *m,
5530 struct binder_proc *proc, int print_all)
5531{
5532 struct binder_work *w;
5533 struct rb_node *n;
5534 size_t start_pos = m->count;
5535 size_t header_pos;
5536 struct binder_node *last_node = NULL;
5537
5538 seq_printf(m, "proc %d\n", proc->pid);
5539 seq_printf(m, "context %s\n", proc->context->name);
5540 header_pos = m->count;
5541
5542 binder_inner_proc_lock(proc);
5543 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5544 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5545 rb_node), print_all);
5546
5547 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5548 struct binder_node *node = rb_entry(n, struct binder_node,
5549 rb_node);
5550 if (!print_all && !node->has_async_transaction)
5551 continue;
5552
5553 /*
5554 * take a temporary reference on the node so it
5555 * survives and isn't removed from the tree
5556 * while we print it.
5557 */
5558 binder_inc_node_tmpref_ilocked(node);
5559 /* Need to drop inner lock to take node lock */
5560 binder_inner_proc_unlock(proc);
5561 if (last_node)
5562 binder_put_node(last_node);
5563 binder_node_inner_lock(node);
5564 print_binder_node_nilocked(m, node);
5565 binder_node_inner_unlock(node);
5566 last_node = node;
5567 binder_inner_proc_lock(proc);
5568 }
5569 binder_inner_proc_unlock(proc);
5570 if (last_node)
5571 binder_put_node(last_node);
5572
5573 if (print_all) {
5574 binder_proc_lock(proc);
5575 for (n = rb_first(&proc->refs_by_desc);
5576 n != NULL;
5577 n = rb_next(n))
5578 print_binder_ref_olocked(m, rb_entry(n,
5579 struct binder_ref,
5580 rb_node_desc));
5581 binder_proc_unlock(proc);
5582 }
5583 binder_alloc_print_allocated(m, &proc->alloc);
5584 binder_inner_proc_lock(proc);
5585 list_for_each_entry(w, &proc->todo, entry)
5586 print_binder_work_ilocked(m, proc, " ",
5587 " pending transaction", w);
5588 list_for_each_entry(w, &proc->delivered_death, entry) {
5589 seq_puts(m, " has delivered dead binder\n");
5590 break;
5591 }
5592 binder_inner_proc_unlock(proc);
5593 if (!print_all && m->count == header_pos)
5594 m->count = start_pos;
5595}
5596
5597static const char * const binder_return_strings[] = {
5598 "BR_ERROR",
5599 "BR_OK",
5600 "BR_TRANSACTION",
5601 "BR_REPLY",
5602 "BR_ACQUIRE_RESULT",
5603 "BR_DEAD_REPLY",
5604 "BR_TRANSACTION_COMPLETE",
5605 "BR_INCREFS",
5606 "BR_ACQUIRE",
5607 "BR_RELEASE",
5608 "BR_DECREFS",
5609 "BR_ATTEMPT_ACQUIRE",
5610 "BR_NOOP",
5611 "BR_SPAWN_LOOPER",
5612 "BR_FINISHED",
5613 "BR_DEAD_BINDER",
5614 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5615 "BR_FAILED_REPLY",
5616 "BR_FROZEN_REPLY",
5617 "BR_ONEWAY_SPAM_SUSPECT",
5618};
5619
5620static const char * const binder_command_strings[] = {
5621 "BC_TRANSACTION",
5622 "BC_REPLY",
5623 "BC_ACQUIRE_RESULT",
5624 "BC_FREE_BUFFER",
5625 "BC_INCREFS",
5626 "BC_ACQUIRE",
5627 "BC_RELEASE",
5628 "BC_DECREFS",
5629 "BC_INCREFS_DONE",
5630 "BC_ACQUIRE_DONE",
5631 "BC_ATTEMPT_ACQUIRE",
5632 "BC_REGISTER_LOOPER",
5633 "BC_ENTER_LOOPER",
5634 "BC_EXIT_LOOPER",
5635 "BC_REQUEST_DEATH_NOTIFICATION",
5636 "BC_CLEAR_DEATH_NOTIFICATION",
5637 "BC_DEAD_BINDER_DONE",
5638 "BC_TRANSACTION_SG",
5639 "BC_REPLY_SG",
5640};
5641
5642static const char * const binder_objstat_strings[] = {
5643 "proc",
5644 "thread",
5645 "node",
5646 "ref",
5647 "death",
5648 "transaction",
5649 "transaction_complete"
5650};
5651
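/*
 * Print the BC_*/BR_* command counters and the object create/delete
 * counters. The BUILD_BUG_ON()s keep the string tables above in sync with
 * the counter arrays in struct binder_stats; only non-zero counters are
 * printed, and objects are reported as "active" (created - deleted) and
 * "total" (created).
 */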
5652static void print_binder_stats(struct seq_file *m, const char *prefix,
5653 struct binder_stats *stats)
5654{
5655 int i;
5656
5657 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5658 ARRAY_SIZE(binder_command_strings));
5659 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5660 int temp = atomic_read(&stats->bc[i]);
5661
5662 if (temp)
5663 seq_printf(m, "%s%s: %d\n", prefix,
5664 binder_command_strings[i], temp);
5665 }
5666
5667 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5668 ARRAY_SIZE(binder_return_strings));
5669 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5670 int temp = atomic_read(&stats->br[i]);
5671
5672 if (temp)
5673 seq_printf(m, "%s%s: %d\n", prefix,
5674 binder_return_strings[i], temp);
5675 }
5676
5677 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5678 ARRAY_SIZE(binder_objstat_strings));
5679 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5680 ARRAY_SIZE(stats->obj_deleted));
5681 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5682 int created = atomic_read(&stats->obj_created[i]);
5683 int deleted = atomic_read(&stats->obj_deleted[i]);
5684
5685 if (created || deleted)
5686 seq_printf(m, "%s%s: active %d total %d\n",
5687 prefix,
5688 binder_objstat_strings[i],
5689 created - deleted,
5690 created);
5691 }
5692}
5693
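/*
 * Per-process summary used by the debugfs "stats" file: thread and node
 * counts, ready/requested thread numbers, free async space, ref counts,
 * allocated buffers, pending transactions on proc->todo and finally the
 * per-proc command/return/object counters.
 */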
5694static void print_binder_proc_stats(struct seq_file *m,
5695 struct binder_proc *proc)
5696{
5697 struct binder_work *w;
5698 struct binder_thread *thread;
5699 struct rb_node *n;
5700 int count, strong, weak, ready_threads;
5701 size_t free_async_space =
5702 binder_alloc_get_free_async_space(&proc->alloc);
5703
5704 seq_printf(m, "proc %d\n", proc->pid);
5705 seq_printf(m, "context %s\n", proc->context->name);
5706 count = 0;
5707 ready_threads = 0;
5708 binder_inner_proc_lock(proc);
5709 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5710 count++;
5711
5712 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5713 ready_threads++;
5714
5715 seq_printf(m, " threads: %d\n", count);
5716 seq_printf(m, " requested threads: %d+%d/%d\n"
5717 " ready threads %d\n"
5718 " free async space %zd\n", proc->requested_threads,
5719 proc->requested_threads_started, proc->max_threads,
5720 ready_threads,
5721 free_async_space);
5722 count = 0;
5723 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5724 count++;
5725 binder_inner_proc_unlock(proc);
5726 seq_printf(m, " nodes: %d\n", count);
5727 count = 0;
5728 strong = 0;
5729 weak = 0;
5730 binder_proc_lock(proc);
5731 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5732 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5733 rb_node_desc);
5734 count++;
5735 strong += ref->data.strong;
5736 weak += ref->data.weak;
5737 }
5738 binder_proc_unlock(proc);
5739 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5740
5741 count = binder_alloc_get_allocated_count(&proc->alloc);
5742 seq_printf(m, " buffers: %d\n", count);
5743
5744 binder_alloc_print_pages(m, &proc->alloc);
5745
5746 count = 0;
5747 binder_inner_proc_lock(proc);
5748 list_for_each_entry(w, &proc->todo, entry) {
5749 if (w->type == BINDER_WORK_TRANSACTION)
5750 count++;
5751 }
5752 binder_inner_proc_unlock(proc);
5753 seq_printf(m, " pending transactions: %d\n", count);
5754
5755 print_binder_stats(m, " ", &proc->stats);
5756}
5757
5758
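/*
 * Backs the debugfs "state" file (debugfs is typically mounted at
 * /sys/kernel/debug, so binder/state underneath it): dead nodes are
 * printed first, pinned via tmp_refs while the dead-nodes lock is dropped,
 * followed by a full dump of every binder_proc.
 */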
5759int binder_state_show(struct seq_file *m, void *unused)
5760{
5761 struct binder_proc *proc;
5762 struct binder_node *node;
5763 struct binder_node *last_node = NULL;
5764
5765 seq_puts(m, "binder state:\n");
5766
5767 spin_lock(&binder_dead_nodes_lock);
5768 if (!hlist_empty(&binder_dead_nodes))
5769 seq_puts(m, "dead nodes:\n");
5770 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5771 /*
5772 * take a temporary reference on the node so it
5773 * survives and isn't removed from the list
5774 * while we print it.
5775 */
5776 node->tmp_refs++;
5777 spin_unlock(&binder_dead_nodes_lock);
5778 if (last_node)
5779 binder_put_node(last_node);
5780 binder_node_lock(node);
5781 print_binder_node_nilocked(m, node);
5782 binder_node_unlock(node);
5783 last_node = node;
5784 spin_lock(&binder_dead_nodes_lock);
5785 }
5786 spin_unlock(&binder_dead_nodes_lock);
5787 if (last_node)
5788 binder_put_node(last_node);
5789
5790 mutex_lock(&binder_procs_lock);
5791 hlist_for_each_entry(proc, &binder_procs, proc_node)
5792 print_binder_proc(m, proc, 1);
5793 mutex_unlock(&binder_procs_lock);
5794
5795 return 0;
5796}
5797
5798int binder_stats_show(struct seq_file *m, void *unused)
5799{
5800 struct binder_proc *proc;
5801
5802 seq_puts(m, "binder stats:\n");
5803
5804 print_binder_stats(m, "", &binder_stats);
5805
5806 mutex_lock(&binder_procs_lock);
5807 hlist_for_each_entry(proc, &binder_procs, proc_node)
5808 print_binder_proc_stats(m, proc);
5809 mutex_unlock(&binder_procs_lock);
5810
5811 return 0;
5812}
5813
5814int binder_transactions_show(struct seq_file *m, void *unused)
5815{
5816 struct binder_proc *proc;
5817
5818 seq_puts(m, "binder transactions:\n");
5819 mutex_lock(&binder_procs_lock);
5820 hlist_for_each_entry(proc, &binder_procs, proc_node)
5821 print_binder_proc(m, proc, 0);
5822 mutex_unlock(&binder_procs_lock);
5823
5824 return 0;
5825}
5826
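/*
 * Backs the per-pid files under the debugfs "proc" directory; m->private
 * carries the pid. A pid may own several binder_proc instances (one per
 * open of a binder device), so every match is printed.
 */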
5827static int proc_show(struct seq_file *m, void *unused)
5828{
5829 struct binder_proc *itr;
5830 int pid = (unsigned long)m->private;
5831
5832 mutex_lock(&binder_procs_lock);
5833 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5834 if (itr->pid == pid) {
5835 seq_puts(m, "binder proc state:\n");
5836 print_binder_proc(m, itr, 1);
5837 }
5838 }
5839 mutex_unlock(&binder_procs_lock);
5840
5841 return 0;
5842}
5843
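/*
 * Log entries are read without holding any lock. debug_id_done is sampled
 * before and after the fields, with read barriers in between; if the two
 * samples differ (or the first is zero) a writer was racing with us and
 * the line is flagged "(incomplete)".
 */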
5844static void print_binder_transaction_log_entry(struct seq_file *m,
5845 struct binder_transaction_log_entry *e)
5846{
5847 int debug_id = READ_ONCE(e->debug_id_done);
5848 /*
5849	 * read barrier to guarantee that debug_id_done is read before
5850	 * we print the log values
5851 */
5852 smp_rmb();
5853 seq_printf(m,
5854 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5855 e->debug_id, (e->call_type == 2) ? "reply" :
5856 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5857 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5858 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5859 e->return_error, e->return_error_param,
5860 e->return_error_line);
5861 /*
5862	 * read barrier to guarantee that debug_id_done is re-read only
5863	 * after the fields of the entry have been printed
5864 */
5865 smp_rmb();
5866 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5867 "\n" : " (incomplete)\n");
5868}
5869
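/*
 * Print the transaction log ring buffer oldest-first. log->cur is the
 * index of the most recently claimed slot; once the log has wrapped
 * (log->full) all ARRAY_SIZE(log->entry) slots are printed starting just
 * after the newest one. E.g. assuming 32 slots and cur == 40 with the log
 * full: count becomes 32 and printing starts at index 41 % 32 == 9, the
 * oldest surviving entry.
 */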
5870int binder_transaction_log_show(struct seq_file *m, void *unused)
5871{
5872 struct binder_transaction_log *log = m->private;
5873 unsigned int log_cur = atomic_read(&log->cur);
5874 unsigned int count;
5875 unsigned int cur;
5876 int i;
5877
5878 count = log_cur + 1;
5879 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5880 0 : count % ARRAY_SIZE(log->entry);
5881 if (count > ARRAY_SIZE(log->entry) || log->full)
5882 count = ARRAY_SIZE(log->entry);
5883 for (i = 0; i < count; i++) {
5884 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5885
5886 print_binder_transaction_log_entry(m, &log->entry[index]);
5887 }
5888 return 0;
5889}
5890
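/*
 * File operations for the binder misc devices registered below. There is
 * no read/write; userspace drives the protocol through binder_ioctl() and
 * the buffer space mapped by binder_mmap().
 */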
5891const struct file_operations binder_fops = {
5892 .owner = THIS_MODULE,
5893 .poll = binder_poll,
5894 .unlocked_ioctl = binder_ioctl,
5895 .compat_ioctl = compat_ptr_ioctl,
5896 .mmap = binder_mmap,
5897 .open = binder_open,
5898 .flush = binder_flush,
5899 .release = binder_release,
5900};
5901
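/*
 * Allocate and register one binder misc device (dynamic minor) and add it
 * to the global binder_devices list. Each device carries its own context
 * with an independent context manager.
 */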
5902static int __init init_binder_device(const char *name)
5903{
5904 int ret;
5905 struct binder_device *binder_device;
5906
5907 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5908 if (!binder_device)
5909 return -ENOMEM;
5910
5911 binder_device->miscdev.fops = &binder_fops;
5912 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5913 binder_device->miscdev.name = name;
5914
5915 refcount_set(&binder_device->ref, 1);
5916 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5917 binder_device->context.name = name;
5918 mutex_init(&binder_device->context.context_mgr_node_lock);
5919
5920 ret = misc_register(&binder_device->miscdev);
5921 if (ret < 0) {
5922 kfree(binder_device);
5923 return ret;
5924 }
5925
5926 hlist_add_head(&binder_device->hlist, &binder_devices);
5927
5928 return ret;
5929}
5930
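/*
 * Module init: set up the allocator shrinker, the transaction logs and the
 * debugfs hierarchy, then create the devices named in binder_devices_param
 * (a comma-separated list, skipped when binderfs is enabled) and finally
 * initialise binderfs itself. Upstream this list typically comes from a
 * "binder.devices=binder,hwbinder,vndbinder" style kernel parameter; on
 * failure everything registered so far is torn down again.
 */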
5931static int __init binder_init(void)
5932{
5933 int ret;
5934 char *device_name, *device_tmp;
5935 struct binder_device *device;
5936 struct hlist_node *tmp;
5937 char *device_names = NULL;
5938
5939 ret = binder_alloc_shrinker_init();
5940 if (ret)
5941 return ret;
5942
5943 atomic_set(&binder_transaction_log.cur, ~0U);
5944 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5945
5946 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5947 if (binder_debugfs_dir_entry_root)
5948 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5949 binder_debugfs_dir_entry_root);
5950
5951 if (binder_debugfs_dir_entry_root) {
5952 debugfs_create_file("state",
5953 0444,
5954 binder_debugfs_dir_entry_root,
5955 NULL,
5956 &binder_state_fops);
5957 debugfs_create_file("stats",
5958 0444,
5959 binder_debugfs_dir_entry_root,
5960 NULL,
5961 &binder_stats_fops);
5962 debugfs_create_file("transactions",
5963 0444,
5964 binder_debugfs_dir_entry_root,
5965 NULL,
5966 &binder_transactions_fops);
5967 debugfs_create_file("transaction_log",
5968 0444,
5969 binder_debugfs_dir_entry_root,
5970 &binder_transaction_log,
5971 &binder_transaction_log_fops);
5972 debugfs_create_file("failed_transaction_log",
5973 0444,
5974 binder_debugfs_dir_entry_root,
5975 &binder_transaction_log_failed,
5976 &binder_transaction_log_fops);
5977 }
5978
5979 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
5980 strcmp(binder_devices_param, "") != 0) {
5981 /*
5982		 * Copy the module parameter string, because we don't want to
5983 * tokenize it in-place.
5984 */
5985 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
5986 if (!device_names) {
5987 ret = -ENOMEM;
5988 goto err_alloc_device_names_failed;
5989 }
5990
5991 device_tmp = device_names;
5992 while ((device_name = strsep(&device_tmp, ","))) {
5993 ret = init_binder_device(device_name);
5994 if (ret)
5995 goto err_init_binder_device_failed;
5996 }
5997 }
5998
5999 ret = init_binderfs();
6000 if (ret)
6001 goto err_init_binder_device_failed;
6002
6003 return ret;
6004
6005err_init_binder_device_failed:
6006 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6007 misc_deregister(&device->miscdev);
6008 hlist_del(&device->hlist);
6009 kfree(device);
6010 }
6011
6012 kfree(device_names);
6013
6014err_alloc_device_names_failed:
6015 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6016
6017 return ret;
6018}
6019
6020device_initcall(binder_init);
6021
6022#define CREATE_TRACE_POINTS
6023#include "binder_trace.h"
6024
6025MODULE_LICENSE("GPL v2");