Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * they need in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
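
/*
 * Editorial sketch (not part of the driver): the helpers defined below
 * are meant to be nested strictly in the order documented above, e.g.:
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	...critical section...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 *
 * Taking them in any other order between two procs risks ABBA deadlock.
 */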

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}
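
/*
 * Usage note (editorial): callers pass one BINDER_DEBUG_* bit and a
 * printk-style format, e.g.:
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", current->pid);
 *
 * Output is rate-limited, so a noisy debug mask cannot flood the log.
 */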

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
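
/*
 * Illustrative note (editorial): atomic_inc_return() hands each caller a
 * unique slot in the 32-entry ring, e.g. cur == 33 maps to
 * entry[33 % 32] == entry[1], overwriting the oldest record once
 * log->full has been set.
 */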

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc being released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc being released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node being released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node being released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
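
/*
 * Editorial contrast of the two enqueue flavors (illustrative only):
 *
 *	// node work: queue it, but let the thread keep sleeping
 *	binder_enqueue_deferred_thread_work_ilocked(thread, &node->work);
 *
 *	// transaction work: queue it and mark the todo list for processing
 *	binder_enqueue_thread_work_ilocked(thread, &t->work);
 *
 * Only the second sets thread->process_todo, so only it guarantees the
 * work is handled on the thread's next read.
 */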

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
	struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 * returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc: process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync: whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
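
/*
 * Editorial note: callers holding the inner lock pair the two helpers
 * the way binder_wakeup_proc_ilocked() does above:
 *
 *	thread = binder_select_thread_ilocked(proc);
 *	...queue the work...
 *	binder_wakeup_thread_ilocked(proc, thread, sync);
 *
 * Selecting first removes the thread from waiting_threads, so a
 * concurrent waker cannot pick the same thread for different work.
 */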

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
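
/*
 * Illustrative usage (editorial): lookups return the node with a
 * temporary reference held, which the caller must drop when done:
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		...use node...
 *		binder_put_node(node);
 *	}
 */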

static struct binder_node *binder_init_node_ilocked(
	struct binder_proc *proc,
	struct binder_node *new_node,
	struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
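
/*
 * Design note (editorial): binder_new_node() allocates before taking the
 * inner lock because kzalloc(GFP_KERNEL) may sleep, which is not allowed
 * under a spinlock. If another thread raced in and inserted a node for
 * the same ptr first, the loser simply frees its unused allocation.
 */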

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
					struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc: binder_proc that owns the ref
 * @node: binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 * allocated/initialized the ref first in which case the
 * returned ref would be different than the passed-in
 * new_ref. new_ref must be kfree'd by the caller in
 * this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
	struct binder_proc *proc,
	struct binder_node *node,
	struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
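
/*
 * Illustrative walkthrough (editorial): if a proc already holds
 * descriptors {0, 1, 2, 5}, the scan above stops at 5 and leaves
 * new_ref->data.desc == 3, the lowest unused descriptor. The
 * rb_first()/rb_next() walk is O(n); later lookups by desc through
 * refs_by_desc stay O(log n).
 */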

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref: ref to be incremented
 * @strong: if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref: ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
	struct binder_proc *proc,
	u32 desc, bool need_strong_ref,
	struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
					uint32_t desc, bool increment,
					bool strong,
					struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
				     uint32_t desc, bool strong,
				     struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc: proc containing the ref
 * @node: target node
 * @strong: true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic_dec() protects the counter itself; the inner lock is
	 * needed so the is_dead check and the final reference drop
	 * cannot race with the thread being released.
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t: binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
	struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
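
/*
 * Illustrative usage (editorial): the tmp_ref taken here must be dropped
 * once the caller is done with the thread:
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		...use from...
 *		binder_thread_dec_tmpref(from);
 *	}
 */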

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t: binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
	struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t: binder transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t: transaction that needs to be cleaned up
 * @reason: reason the transaction wasn't delivered
 * @error_code: error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc: binder_proc owning the buffer
 * @u: sender's user pointer to base of buffer
 * @buffer: binder_buffer that we're parsing.
 * @offset: offset in the @buffer at which to validate an object.
 * @object: struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return: If there's a valid metadata object at @offset, the
 * size of that object. Otherwise, it returns zero. The object
 * is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr))
		return 0;
	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
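
/*
 * Worked example (editorial, 64-bit sizes assumed): for a 64-byte buffer
 * and a flat_binder_object (object_size == 24), offset 40 passes the
 * final check (40 <= 64 - 24) while offset 48 fails. The second
 * comparison guards the unsigned subtraction when
 * data_size < object_size.
 */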

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc: binder_proc owning the buffer
 * @b: binder_buffer containing the object
 * @object: struct binder_object to read into
 * @index: index in offset array at which the binder_buffer_object is
 *         located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid: the number of valid offsets in the offset array
 *
 * Return: If @index is within the valid range of the offset array
 *         described by @start and @num_valid, and if there's a valid
 *         binder_buffer_object at the offset found in index @index
 *         of the offset array, that object is returned. Otherwise,
 *         %NULL is returned.
 *         Note that the offset found in index @index itself is not
 *         verified; this function assumes that @num_valid elements
 *         from @start were previously verified to have valid offsets.
 *         If @object_offsetp is non-NULL, then the offset within
 *         @b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
	struct binder_proc *proc,
	struct binder_buffer *b,
	struct binder_object *object,
	binder_size_t index,
	binder_size_t start_offset,
	binder_size_t *object_offsetp,
	binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc: binder_proc owning the buffer
 * @b: transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
 * @fixup_offset: start offset in @buffer to fix up
 * @last_obj_offset: offset to last binder_buffer_object that we fixed
 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
 *
 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
 * allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
1826 if (!last_obj_offset) {
1827 /* No object to fix up in */
1828 return false;
1829 }
1830
1831 while (last_obj_offset != buffer_obj_offset) {
1832 unsigned long buffer_offset;
1833 struct binder_object last_object;
1834 struct binder_buffer_object *last_bbo;
1835 size_t object_size = binder_get_object(proc, NULL, b,
1836 last_obj_offset,
1837 &last_object);
1838 if (object_size != sizeof(*last_bbo))
1839 return false;
1840
1841 last_bbo = &last_object.bbo;
1842 /*
1843 * Safe to retrieve the parent of last_obj, since it
1844 * was previously verified by the driver.
1845 */
1846 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1847 return false;
1848 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1849 buffer_offset = objects_start_offset +
1850 sizeof(binder_size_t) * last_bbo->parent;
1851 if (binder_alloc_copy_from_buffer(&proc->alloc,
1852 &last_obj_offset,
1853 b, buffer_offset,
1854 sizeof(last_obj_offset)))
1855 return false;
1856 }
1857 return (fixup_offset >= last_min_offset);
1858}
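
/*
 * Sketch of a typical call, mirroring how binder_fixup_parent() uses
 * this check further below (the names are its local variables):
 *
 *	if (!binder_validate_fixup(target_proc, b, off_start_offset,
 *				   parent_offset, bp->parent_offset,
 *				   last_fixup_obj_off,
 *				   last_fixup_min_off))
 *		return -EINVAL;	// out-of-order or cross-object fixup
 *
 * On success the caller records which object it just fixed up and the
 * minimum offset the next fixup into that object may use.
 */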
1859
1860/**
1861 * struct binder_task_work_cb - for deferred close
1862 *
1863 * @twork: callback_head for task work
1864 * @file: file to close
1865 *
1866 * Structure to pass task work to be handled after
1867 * returning from binder_ioctl() via task_work_add().
1868 */
1869struct binder_task_work_cb {
1870 struct callback_head twork;
1871 struct file *file;
1872};
1873
1874/**
1875 * binder_do_fd_close() - release a file whose close was deferred
1876 * @twork: callback head for task work
1877 *
1878 * It is not safe to call ksys_close() during the binder_ioctl()
1879 * function if there is a chance that binder's own file descriptor
1880 * might be closed. This is to meet the requirements for using
1881 * fdget() (see comments for __fget_light()). Therefore use
1882 * task_work_add() to schedule the close operation once we have
1883 * returned from binder_ioctl(). This function is the callback
1884 * for that mechanism and drops the final reference to the
1885 * file pinned by binder_deferred_fd_close().
1886 */
1887static void binder_do_fd_close(struct callback_head *twork)
1888{
1889 struct binder_task_work_cb *twcb = container_of(twork,
1890 struct binder_task_work_cb, twork);
1891
1892 fput(twcb->file);
1893 kfree(twcb);
1894}
1895
1896/**
1897 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1898 * @fd: file-descriptor to close
1899 *
1900 * See comments in binder_do_fd_close(). This function is used to schedule
1901 * a file-descriptor to be closed after returning from binder_ioctl().
1902 */
1903static void binder_deferred_fd_close(int fd)
1904{
1905 struct binder_task_work_cb *twcb;
1906
1907 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1908 if (!twcb)
1909 return;
1910 init_task_work(&twcb->twork, binder_do_fd_close);
1911 twcb->file = close_fd_get_file(fd);
1912 if (twcb->file) {
1913 // pin it until binder_do_fd_close(); see comments there
1914 get_file(twcb->file);
1915 filp_close(twcb->file, current->files);
1916 task_work_add(current, &twcb->twork, TWA_RESUME);
1917 } else {
1918 kfree(twcb);
1919 }
1920}
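
/*
 * Illustrative timeline, with a hypothetical descriptor number: a
 * transaction teardown that must close fd 42 proceeds as
 *
 *	binder_deferred_fd_close(42);
 *		close_fd_get_file(42);		// detach fd from the table
 *		get_file(file);			// keep the struct file pinned
 *		filp_close(file, files);	// flush, drop the fd's reference
 *		task_work_add(...);		// queue binder_do_fd_close()
 *	// ... binder_ioctl() returns to userspace ...
 *	binder_do_fd_close(&twcb->twork);
 *		fput(twcb->file);		// final reference dropped here
 *		kfree(twcb);
 */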
1921
1922static void binder_transaction_buffer_release(struct binder_proc *proc,
1923 struct binder_thread *thread,
1924 struct binder_buffer *buffer,
1925 binder_size_t failed_at,
1926 bool is_failure)
1927{
1928 int debug_id = buffer->debug_id;
1929 binder_size_t off_start_offset, buffer_offset, off_end_offset;
1930
1931 binder_debug(BINDER_DEBUG_TRANSACTION,
1932 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1933 proc->pid, buffer->debug_id,
1934 buffer->data_size, buffer->offsets_size,
1935 (unsigned long long)failed_at);
1936
1937 if (buffer->target_node)
1938 binder_dec_node(buffer->target_node, 1, 0);
1939
1940 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1941 off_end_offset = is_failure && failed_at ? failed_at :
1942 off_start_offset + buffer->offsets_size;
1943 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1944 buffer_offset += sizeof(binder_size_t)) {
1945 struct binder_object_header *hdr;
1946 size_t object_size = 0;
1947 struct binder_object object;
1948 binder_size_t object_offset;
1949
1950 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1951 buffer, buffer_offset,
1952 sizeof(object_offset)))
1953 object_size = binder_get_object(proc, NULL, buffer,
1954 object_offset, &object);
1955 if (object_size == 0) {
1956 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1957 debug_id, (u64)object_offset, buffer->data_size);
1958 continue;
1959 }
1960 hdr = &object.hdr;
1961 switch (hdr->type) {
1962 case BINDER_TYPE_BINDER:
1963 case BINDER_TYPE_WEAK_BINDER: {
1964 struct flat_binder_object *fp;
1965 struct binder_node *node;
1966
1967 fp = to_flat_binder_object(hdr);
1968 node = binder_get_node(proc, fp->binder);
1969 if (node == NULL) {
1970 pr_err("transaction release %d bad node %016llx\n",
1971 debug_id, (u64)fp->binder);
1972 break;
1973 }
1974 binder_debug(BINDER_DEBUG_TRANSACTION,
1975 " node %d u%016llx\n",
1976 node->debug_id, (u64)node->ptr);
1977 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1978 0);
1979 binder_put_node(node);
1980 } break;
1981 case BINDER_TYPE_HANDLE:
1982 case BINDER_TYPE_WEAK_HANDLE: {
1983 struct flat_binder_object *fp;
1984 struct binder_ref_data rdata;
1985 int ret;
1986
1987 fp = to_flat_binder_object(hdr);
1988 ret = binder_dec_ref_for_handle(proc, fp->handle,
1989 hdr->type == BINDER_TYPE_HANDLE, &rdata);
1990
1991 if (ret) {
1992 pr_err("transaction release %d bad handle %d, ret = %d\n",
1993 debug_id, fp->handle, ret);
1994 break;
1995 }
1996 binder_debug(BINDER_DEBUG_TRANSACTION,
1997 " ref %d desc %d\n",
1998 rdata.debug_id, rdata.desc);
1999 } break;
2000
2001 case BINDER_TYPE_FD: {
2002 /*
2003 * No need to close the file here since user-space
2004 * closes it for successfully delivered
2005 * transactions. For transactions that weren't
2006 * delivered, the new fd was never allocated, so
2007 * there is nothing to close; the fput on the
2008 * file happens when the transaction is torn
2009 * down.
2010 */
2011 } break;
2012 case BINDER_TYPE_PTR:
2013 /*
2014 * Nothing to do here, this will get cleaned up when the
2015 * transaction buffer gets freed
2016 */
2017 break;
2018 case BINDER_TYPE_FDA: {
2019 struct binder_fd_array_object *fda;
2020 struct binder_buffer_object *parent;
2021 struct binder_object ptr_object;
2022 binder_size_t fda_offset;
2023 size_t fd_index;
2024 binder_size_t fd_buf_size;
2025 binder_size_t num_valid;
2026
2027 if (is_failure) {
2028 /*
2029 * The fd fixups have not been applied so no
2030 * fds need to be closed.
2031 */
2032 continue;
2033 }
2034
2035 num_valid = (buffer_offset - off_start_offset) /
2036 sizeof(binder_size_t);
2037 fda = to_binder_fd_array_object(hdr);
2038 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2039 fda->parent,
2040 off_start_offset,
2041 NULL,
2042 num_valid);
2043 if (!parent) {
2044 pr_err("transaction release %d bad parent offset\n",
2045 debug_id);
2046 continue;
2047 }
2048 fd_buf_size = sizeof(u32) * fda->num_fds;
2049 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2050 pr_err("transaction release %d invalid number of fds (%lld)\n",
2051 debug_id, (u64)fda->num_fds);
2052 continue;
2053 }
2054 if (fd_buf_size > parent->length ||
2055 fda->parent_offset > parent->length - fd_buf_size) {
2056 /* No space for all file descriptors here. */
2057 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2058 debug_id, (u64)fda->num_fds);
2059 continue;
2060 }
2061 /*
2062 * The source data for a binder_buffer_object is visible
2063 * to user-space, and its buffer field holds the user
2064 * pointer to the parent buffer containing the fd array.
2065 * Convert that address to an offset relative to
2066 * the base of the transaction buffer.
2067 */
2068 fda_offset =
2069 (parent->buffer - (uintptr_t)buffer->user_data) +
2070 fda->parent_offset;
2071 for (fd_index = 0; fd_index < fda->num_fds;
2072 fd_index++) {
2073 u32 fd;
2074 int err;
2075 binder_size_t offset = fda_offset +
2076 fd_index * sizeof(fd);
2077
2078 err = binder_alloc_copy_from_buffer(
2079 &proc->alloc, &fd, buffer,
2080 offset, sizeof(fd));
2081 WARN_ON(err);
2082 if (!err) {
2083 binder_deferred_fd_close(fd);
2084 /*
2085 * Need to make sure the thread goes
2086 * back to userspace to complete the
2087 * deferred close
2088 */
2089 if (thread)
2090 thread->looper_need_return = true;
2091 }
2092 }
2093 } break;
2094 default:
2095 pr_err("transaction release %d bad object type %x\n",
2096 debug_id, hdr->type);
2097 break;
2098 }
2099 }
2100}
2101
2102static int binder_translate_binder(struct flat_binder_object *fp,
2103 struct binder_transaction *t,
2104 struct binder_thread *thread)
2105{
2106 struct binder_node *node;
2107 struct binder_proc *proc = thread->proc;
2108 struct binder_proc *target_proc = t->to_proc;
2109 struct binder_ref_data rdata;
2110 int ret = 0;
2111
2112 node = binder_get_node(proc, fp->binder);
2113 if (!node) {
2114 node = binder_new_node(proc, fp);
2115 if (!node)
2116 return -ENOMEM;
2117 }
2118 if (fp->cookie != node->cookie) {
2119 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2120 proc->pid, thread->pid, (u64)fp->binder,
2121 node->debug_id, (u64)fp->cookie,
2122 (u64)node->cookie);
2123 ret = -EINVAL;
2124 goto done;
2125 }
2126 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2127 ret = -EPERM;
2128 goto done;
2129 }
2130
2131 ret = binder_inc_ref_for_node(target_proc, node,
2132 fp->hdr.type == BINDER_TYPE_BINDER,
2133 &thread->todo, &rdata);
2134 if (ret)
2135 goto done;
2136
2137 if (fp->hdr.type == BINDER_TYPE_BINDER)
2138 fp->hdr.type = BINDER_TYPE_HANDLE;
2139 else
2140 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2141 fp->binder = 0;
2142 fp->handle = rdata.desc;
2143 fp->cookie = 0;
2144
2145 trace_binder_transaction_node_to_ref(t, node, &rdata);
2146 binder_debug(BINDER_DEBUG_TRANSACTION,
2147 " node %d u%016llx -> ref %d desc %d\n",
2148 node->debug_id, (u64)node->ptr,
2149 rdata.debug_id, rdata.desc);
2150done:
2151 binder_put_node(node);
2152 return ret;
2153}
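
/*
 * Illustrative before/after with hypothetical values: the sender passes
 * a local strong binder,
 *
 *	fp->hdr.type == BINDER_TYPE_BINDER
 *	fp->binder   == 0x7f00dead0000	// sender's node address
 *	fp->cookie   == 0x7f00beef0000
 *
 * and on success the object written into the target's buffer becomes
 *
 *	fp->hdr.type == BINDER_TYPE_HANDLE
 *	fp->handle   == rdata.desc	// e.g. 5, a ref in target_proc
 *	fp->binder   == 0
 *	fp->cookie   == 0
 *
 * so the receiver only ever sees a handle it owns, never the sender's
 * raw pointers.
 */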
2154
2155static int binder_translate_handle(struct flat_binder_object *fp,
2156 struct binder_transaction *t,
2157 struct binder_thread *thread)
2158{
2159 struct binder_proc *proc = thread->proc;
2160 struct binder_proc *target_proc = t->to_proc;
2161 struct binder_node *node;
2162 struct binder_ref_data src_rdata;
2163 int ret = 0;
2164
2165 node = binder_get_node_from_ref(proc, fp->handle,
2166 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2167 if (!node) {
2168 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2169 proc->pid, thread->pid, fp->handle);
2170 return -EINVAL;
2171 }
2172 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2173 ret = -EPERM;
2174 goto done;
2175 }
2176
2177 binder_node_lock(node);
2178 if (node->proc == target_proc) {
2179 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2180 fp->hdr.type = BINDER_TYPE_BINDER;
2181 else
2182 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2183 fp->binder = node->ptr;
2184 fp->cookie = node->cookie;
2185 if (node->proc)
2186 binder_inner_proc_lock(node->proc);
2187 else
2188 __acquire(&node->proc->inner_lock);
2189 binder_inc_node_nilocked(node,
2190 fp->hdr.type == BINDER_TYPE_BINDER,
2191 0, NULL);
2192 if (node->proc)
2193 binder_inner_proc_unlock(node->proc);
2194 else
2195 __release(&node->proc->inner_lock);
2196 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2197 binder_debug(BINDER_DEBUG_TRANSACTION,
2198 " ref %d desc %d -> node %d u%016llx\n",
2199 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2200 (u64)node->ptr);
2201 binder_node_unlock(node);
2202 } else {
2203 struct binder_ref_data dest_rdata;
2204
2205 binder_node_unlock(node);
2206 ret = binder_inc_ref_for_node(target_proc, node,
2207 fp->hdr.type == BINDER_TYPE_HANDLE,
2208 NULL, &dest_rdata);
2209 if (ret)
2210 goto done;
2211
2212 fp->binder = 0;
2213 fp->handle = dest_rdata.desc;
2214 fp->cookie = 0;
2215 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2216 &dest_rdata);
2217 binder_debug(BINDER_DEBUG_TRANSACTION,
2218 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2219 src_rdata.debug_id, src_rdata.desc,
2220 dest_rdata.debug_id, dest_rdata.desc,
2221 node->debug_id);
2222 }
2223done:
2224 binder_put_node(node);
2225 return ret;
2226}
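
/*
 * Sketch of the two outcomes above, values hypothetical: if the node
 * behind the sender's handle lives in target_proc itself, the object
 * collapses back into a local binder,
 *
 *	fp->hdr.type == BINDER_TYPE_BINDER
 *	fp->binder   == node->ptr
 *	fp->cookie   == node->cookie
 *
 * otherwise a ref is taken in target_proc and the object remains a
 * handle, renumbered into the target's descriptor space:
 *
 *	fp->handle   == dest_rdata.desc	// e.g. 7 in target_proc
 *	fp->binder   == 0
 *	fp->cookie   == 0
 */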
2227
2228static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2229 struct binder_transaction *t,
2230 struct binder_thread *thread,
2231 struct binder_transaction *in_reply_to)
2232{
2233 struct binder_proc *proc = thread->proc;
2234 struct binder_proc *target_proc = t->to_proc;
2235 struct binder_txn_fd_fixup *fixup;
2236 struct file *file;
2237 int ret = 0;
2238 bool target_allows_fd;
2239
2240 if (in_reply_to)
2241 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2242 else
2243 target_allows_fd = t->buffer->target_node->accept_fds;
2244 if (!target_allows_fd) {
2245 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2246 proc->pid, thread->pid,
2247 in_reply_to ? "reply" : "transaction",
2248 fd);
2249 ret = -EPERM;
2250 goto err_fd_not_accepted;
2251 }
2252
2253 file = fget(fd);
2254 if (!file) {
2255 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2256 proc->pid, thread->pid, fd);
2257 ret = -EBADF;
2258 goto err_fget;
2259 }
2260 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2261 if (ret < 0) {
2262 ret = -EPERM;
2263 goto err_security;
2264 }
2265
2266 /*
2267 * Add fixup record for this transaction. The allocation
2268 * of the fd in the target needs to be done from a
2269 * target thread.
2270 */
2271 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2272 if (!fixup) {
2273 ret = -ENOMEM;
2274 goto err_alloc;
2275 }
2276 fixup->file = file;
2277 fixup->offset = fd_offset;
2278 fixup->target_fd = -1;
2279 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2280 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2281
2282 return ret;
2283
2284err_alloc:
2285err_security:
2286 fput(file);
2287err_fget:
2288err_fd_not_accepted:
2289 return ret;
2290}
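
/*
 * Note on the deferral above: no fd is allocated in the target here,
 * since that must happen from a thread of the target process. What is
 * queued on t->fd_fixups is a record of the form
 *
 *	fixup->file      = file;	// pinned sender-side struct file
 *	fixup->offset    = fd_offset;	// where the fd sits in t->buffer
 *	fixup->target_fd = -1;		// filled in later, in the target
 *
 * which is consumed when a target thread picks up the transaction.
 */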
2291
2292/**
2293 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2294 * @offset: offset in target buffer to fix up
2295 * @skip_size: bytes to skip in copy (fixup will be written later)
2296 * @fixup_data: data to write at fixup offset
2297 * @node: list node
2298 *
2299 * This is used for the pointer fixup list (pf) which is created and consumed
2300 * during binder_transaction() and is only accessed locally. No
2301 * locking is necessary.
2302 *
2303 * The list is ordered by @offset.
2304 */
2305struct binder_ptr_fixup {
2306 binder_size_t offset;
2307 size_t skip_size;
2308 binder_uintptr_t fixup_data;
2309 struct list_head node;
2310};
2311
2312/**
2313 * struct binder_sg_copy - scatter-gather data to be copied
2314 * @offset: offset in target buffer
2315 * @sender_uaddr: user address in source buffer
2316 * @length: bytes to copy
2317 * @node: list node
2318 *
2319 * This is used for the sg copy list (sgc) which is created and consumed
2320 * during binder_transaction() and is only accessed locally. No
2321 * locking is necessary.
2322 *
2323 * The list is ordered by @offset.
2324 */
2325struct binder_sg_copy {
2326 binder_size_t offset;
2327 const void __user *sender_uaddr;
2328 size_t length;
2329 struct list_head node;
2330};
2331
2332/**
2333 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2334 * @alloc: binder_alloc associated with @buffer
2335 * @buffer: binder buffer in target process
2336 * @sgc_head: list_head of scatter-gather copy list
2337 * @pf_head: list_head of pointer fixup list
2338 *
2339 * Processes all elements of @sgc_head, applying fixups from @pf_head
2340 * and copying the scatter-gather data from the source process' user
2341 * buffer to the target's buffer. It is expected that list creation
2342 * and processing all occur during binder_transaction(), so these lists
2343 * are only accessed in local context.
2344 *
2345 * Return: 0=success, else -errno
2346 */
2347static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2348 struct binder_buffer *buffer,
2349 struct list_head *sgc_head,
2350 struct list_head *pf_head)
2351{
2352 int ret = 0;
2353 struct binder_sg_copy *sgc, *tmpsgc;
2354 struct binder_ptr_fixup *tmppf;
2355 struct binder_ptr_fixup *pf =
2356 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2357 node);
2358
2359 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2360 size_t bytes_copied = 0;
2361
2362 while (bytes_copied < sgc->length) {
2363 size_t copy_size;
2364 size_t bytes_left = sgc->length - bytes_copied;
2365 size_t offset = sgc->offset + bytes_copied;
2366
2367 /*
2368 * We copy up to the fixup (pointed to by pf)
2369 */
2370 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2371 : bytes_left;
2372 if (!ret && copy_size)
2373 ret = binder_alloc_copy_user_to_buffer(
2374 alloc, buffer,
2375 offset,
2376 sgc->sender_uaddr + bytes_copied,
2377 copy_size);
2378 bytes_copied += copy_size;
2379 if (copy_size != bytes_left) {
2380 BUG_ON(!pf);
2381 /* we stopped at a fixup offset */
2382 if (pf->skip_size) {
2383 /*
2384 * we are just skipping. This is for
2385 * BINDER_TYPE_FDA where the translated
2386 * fds will be fixed up when we get
2387 * to target context.
2388 */
2389 bytes_copied += pf->skip_size;
2390 } else {
2391 /* apply the fixup indicated by pf */
2392 if (!ret)
2393 ret = binder_alloc_copy_to_buffer(
2394 alloc, buffer,
2395 pf->offset,
2396 &pf->fixup_data,
2397 sizeof(pf->fixup_data));
2398 bytes_copied += sizeof(pf->fixup_data);
2399 }
2400 list_del(&pf->node);
2401 kfree(pf);
2402 pf = list_first_entry_or_null(pf_head,
2403 struct binder_ptr_fixup, node);
2404 }
2405 }
2406 list_del(&sgc->node);
2407 kfree(sgc);
2408 }
2409 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2410 BUG_ON(pf->skip_size == 0);
2411 list_del(&pf->node);
2412 kfree(pf);
2413 }
2414 BUG_ON(!list_empty(sgc_head));
2415
2416 return ret > 0 ? -EINVAL : ret;
2417}
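
/*
 * Worked example with hypothetical numbers: one sg entry covering
 * bytes [0, 64) of the target buffer, plus one pointer fixup at offset
 * 16 with skip_size == 0 (i.e. fixup_data is written in place). On a
 * 64-bit kernel (sizeof(binder_uintptr_t) == 8) the loop performs:
 *
 *	copy [0, 16) from the sender	// stop at pf->offset
 *	write pf->fixup_data at 16	// 8 bytes, the translated pointer
 *	copy [24, 64) from the sender	// resume past the fixup
 *
 * For a BINDER_TYPE_FDA fixup (skip_size > 0) the middle step instead
 * leaves a hole of skip_size bytes; the translated fds land there
 * later, in the target's context.
 */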
2418
2419/**
2420 * binder_cleanup_deferred_txn_lists() - free specified lists
2421 * @sgc_head: list_head of scatter-gather copy list
2422 * @pf_head: list_head of pointer fixup list
2423 *
2424 * Called to clean up @sgc_head and @pf_head if there is an
2425 * error.
2426 */
2427static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2428 struct list_head *pf_head)
2429{
2430 struct binder_sg_copy *sgc, *tmpsgc;
2431 struct binder_ptr_fixup *pf, *tmppf;
2432
2433 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2434 list_del(&sgc->node);
2435 kfree(sgc);
2436 }
2437 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2438 list_del(&pf->node);
2439 kfree(pf);
2440 }
2441}
2442
2443/**
2444 * binder_defer_copy() - queue a scatter-gather buffer for copy
2445 * @sgc_head: list_head of scatter-gather copy list
2446 * @offset: binder buffer offset in target process
2447 * @sender_uaddr: user address in source process
2448 * @length: bytes to copy
2449 *
2450 * Specify a scatter-gather block to be copied. The actual copy must
2451 * be deferred until all the needed fixups are identified and queued.
2452 * Then the copy and fixups are done together so untranslated values
2453 * from the source are never visible in the target buffer.
2454 *
2455 * We are guaranteed that repeated calls to this function will have
2456 * monotonically increasing @offset values so the list will naturally
2457 * be ordered.
2458 *
2459 * Return: 0=success, else -errno
2460 */
2461static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2462 const void __user *sender_uaddr, size_t length)
2463{
2464 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2465
2466 if (!bc)
2467 return -ENOMEM;
2468
2469 bc->offset = offset;
2470 bc->sender_uaddr = sender_uaddr;
2471 bc->length = length;
2472 INIT_LIST_HEAD(&bc->node);
2473
2474 /*
2475 * We are guaranteed that the deferred copies are in-order
2476 * so just add to the tail.
2477 */
2478 list_add_tail(&bc->node, sgc_head);
2479
2480 return 0;
2481}
2482
2483/**
2484 * binder_add_fixup() - queue a fixup to be applied to sg copy
2485 * @pf_head: list_head of binder ptr fixup list
2486 * @offset: binder buffer offset in target process
2487 * @fixup: value to be written at @offset (used when @skip_size is 0)
2488 * @skip_size: bytes to skip when copying (fixup will be applied later)
2489 *
2490 * Add the specified fixup to a list ordered by @offset. When copying
2491 * the scatter-gather buffers, the fixup will be copied instead of
2492 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2493 * will be applied later (in target process context), so we just skip
2494 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2495 * value in @fixup.
2496 *
2497 * This function is called *mostly* in @offset order, but there are
2498 * exceptions. Since out-of-order inserts are relatively uncommon,
2499 * we insert the new element by searching backward from the tail of
2500 * the list.
2501 *
2502 * Return: 0=success, else -errno
2503 */
2504static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2505 binder_uintptr_t fixup, size_t skip_size)
2506{
2507 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2508 struct binder_ptr_fixup *tmppf;
2509
2510 if (!pf)
2511 return -ENOMEM;
2512
2513 pf->offset = offset;
2514 pf->fixup_data = fixup;
2515 pf->skip_size = skip_size;
2516 INIT_LIST_HEAD(&pf->node);
2517
2518 /* Fixups are *mostly* added in-order, but there are some
2519 * exceptions. Look backwards through list for insertion point.
2520 */
2521 list_for_each_entry_reverse(tmppf, pf_head, node) {
2522 if (tmppf->offset < pf->offset) {
2523 list_add(&pf->node, &tmppf->node);
2524 return 0;
2525 }
2526 }
2527 /*
2528 * if we get here, then the new offset is the lowest so
2529 * insert at the head
2530 */
2531 list_add(&pf->node, pf_head);
2532 return 0;
2533}
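
/*
 * Example of the backward scan, offsets hypothetical: with fixups
 * already queued at offsets 8, 32 and 64, adding offset 40 walks from
 * the tail, passes 64, stops at 32 (the first entry below 40) and
 * links the new node after it, keeping the list sorted:
 *
 *	8 -> 32 -> 40 -> 64
 *
 * An offset smaller than every queued entry falls through the loop and
 * is inserted at the head instead.
 */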
2534
2535static int binder_translate_fd_array(struct list_head *pf_head,
2536 struct binder_fd_array_object *fda,
2537 const void __user *sender_ubuffer,
2538 struct binder_buffer_object *parent,
2539 struct binder_buffer_object *sender_uparent,
2540 struct binder_transaction *t,
2541 struct binder_thread *thread,
2542 struct binder_transaction *in_reply_to)
2543{
2544 binder_size_t fdi, fd_buf_size;
2545 binder_size_t fda_offset;
2546 const void __user *sender_ufda_base;
2547 struct binder_proc *proc = thread->proc;
2548 int ret;
2549
2550 if (fda->num_fds == 0)
2551 return 0;
2552
2553 fd_buf_size = sizeof(u32) * fda->num_fds;
2554 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2555 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2556 proc->pid, thread->pid, (u64)fda->num_fds);
2557 return -EINVAL;
2558 }
2559 if (fd_buf_size > parent->length ||
2560 fda->parent_offset > parent->length - fd_buf_size) {
2561 /* No space for all file descriptors here. */
2562 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2563 proc->pid, thread->pid, (u64)fda->num_fds);
2564 return -EINVAL;
2565 }
2566 /*
2567 * The source data for a binder_buffer_object is visible
2568 * to user-space, and its buffer field holds the user
2569 * pointer to the parent buffer containing the fd array.
2570 * Convert that address to an offset relative to
2571 * the base of the transaction buffer.
2572 */
2573 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2574 fda->parent_offset;
2575 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2576 fda->parent_offset;
2577
2578 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2579 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2580 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2581 proc->pid, thread->pid);
2582 return -EINVAL;
2583 }
2584 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2585 if (ret)
2586 return ret;
2587
2588 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2589 u32 fd;
2590 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2591 binder_size_t sender_uoffset = fdi * sizeof(fd);
2592
2593 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2594 if (!ret)
2595 ret = binder_translate_fd(fd, offset, t, thread,
2596 in_reply_to);
2597 if (ret)
2598 return ret > 0 ? -EINVAL : ret;
2599 }
2600 return 0;
2601}
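
/*
 * Numeric sketch of the address-to-offset conversion above, values
 * hypothetical: with the target buffer mapped at user_data == 0x7000,
 * a parent whose ->buffer was already fixed up to 0x7040, and
 * fda->parent_offset == 8, the fd array begins at
 *
 *	fda_offset = (0x7040 - 0x7000) + 8 = 0x48
 *
 * into t->buffer, while sender_ufda_base points at the same array in
 * the sender's user memory so each fd can be read and translated.
 */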
2602
2603static int binder_fixup_parent(struct list_head *pf_head,
2604 struct binder_transaction *t,
2605 struct binder_thread *thread,
2606 struct binder_buffer_object *bp,
2607 binder_size_t off_start_offset,
2608 binder_size_t num_valid,
2609 binder_size_t last_fixup_obj_off,
2610 binder_size_t last_fixup_min_off)
2611{
2612 struct binder_buffer_object *parent;
2613 struct binder_buffer *b = t->buffer;
2614 struct binder_proc *proc = thread->proc;
2615 struct binder_proc *target_proc = t->to_proc;
2616 struct binder_object object;
2617 binder_size_t buffer_offset;
2618 binder_size_t parent_offset;
2619
2620 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2621 return 0;
2622
2623 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2624 off_start_offset, &parent_offset,
2625 num_valid);
2626 if (!parent) {
2627 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2628 proc->pid, thread->pid);
2629 return -EINVAL;
2630 }
2631
2632 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2633 parent_offset, bp->parent_offset,
2634 last_fixup_obj_off,
2635 last_fixup_min_off)) {
2636 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2637 proc->pid, thread->pid);
2638 return -EINVAL;
2639 }
2640
2641 if (parent->length < sizeof(binder_uintptr_t) ||
2642 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2643 /* No space for a pointer here! */
2644 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2645 proc->pid, thread->pid);
2646 return -EINVAL;
2647 }
2648 buffer_offset = bp->parent_offset +
2649 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2650 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2651}
2652
2653/**
2654 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2655 * @t1: the pending async txn in the frozen process
2656 * @t2: the new async txn to supersede the outdated pending one
2657 *
2658 * Return: true if t2 can supersede t1
2659 * false if t2 cannot supersede t1
2660 */
2661static bool binder_can_update_transaction(struct binder_transaction *t1,
2662 struct binder_transaction *t2)
2663{
2664 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2665 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2666 return false;
2667 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2668 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2669 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2670 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2671 return true;
2672 return false;
2673}
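
/*
 * Illustrative pair, values hypothetical: two pending one-way
 * transactions compare as updatable when
 *
 *	t1->flags == t2->flags == (TF_ONE_WAY | TF_UPDATE_TXN)
 *	t1->code  == t2->code			// e.g. 42
 *	same to_proc->tsk, same buffer->pid,
 *	same target_node ptr and cookie
 *
 * in which case the newer t2 may supersede the still-queued t1 in a
 * frozen process. Any mismatch keeps both transactions queued.
 */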
2674
2675/**
2676 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2677 * @t: new async transaction
2678 * @target_list: list to find outdated transaction
2679 *
2680 * Return: the outdated transaction if found
2681 * NULL if no outdated transaction can be found
2682 *
2683 * Requires the proc->inner_lock to be held.
2684 */
2685static struct binder_transaction *
2686binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2687 struct list_head *target_list)
2688{
2689 struct binder_work *w;
2690
2691 list_for_each_entry(w, target_list, entry) {
2692 struct binder_transaction *t_queued;
2693
2694 if (w->type != BINDER_WORK_TRANSACTION)
2695 continue;
2696 t_queued = container_of(w, struct binder_transaction, work);
2697 if (binder_can_update_transaction(t_queued, t))
2698 return t_queued;
2699 }
2700 return NULL;
2701}
2702
2703/**
2704 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2705 * @t: transaction to send
2706 * @proc: process to send the transaction to
2707 * @thread: thread in @proc to send the transaction to (may be NULL)
2708 *
2709 * This function queues a transaction to the specified process. It will try
2710 * to find a thread in the target process to handle the transaction and
2711 * wake it up. If no thread is found, the work is queued to the proc
2712 * todo list.
2713 *
2714 * If the @thread parameter is not NULL, the transaction is always queued
2715 * to the waitlist of that specific thread.
2716 *
2717 * Return: 0 if the transaction was successfully queued
2718 * BR_DEAD_REPLY if the target process or thread is dead
2719 * BR_FROZEN_REPLY if the target process or thread is frozen
2720 */
2721static int binder_proc_transaction(struct binder_transaction *t,
2722 struct binder_proc *proc,
2723 struct binder_thread *thread)
2724{
2725 struct binder_node *node = t->buffer->target_node;
2726 bool oneway = !!(t->flags & TF_ONE_WAY);
2727 bool pending_async = false;
2728 struct binder_transaction *t_outdated = NULL;
2729
2730 BUG_ON(!node);
2731 binder_node_lock(node);
2732 if (oneway) {
2733 BUG_ON(thread);
2734 if (node->has_async_transaction)
2735 pending_async = true;
2736 else
2737 node->has_async_transaction = true;
2738 }
2739
2740 binder_inner_proc_lock(proc);
2741 if (proc->is_frozen) {
2742 proc->sync_recv |= !oneway;
2743 proc->async_recv |= oneway;
2744 }
2745
2746 if ((proc->is_frozen && !oneway) || proc->is_dead ||
2747 (thread && thread->is_dead)) {
2748 binder_inner_proc_unlock(proc);
2749 binder_node_unlock(node);
2750 return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2751 }
2752
2753 if (!thread && !pending_async)
2754 thread = binder_select_thread_ilocked(proc);
2755
2756 if (thread) {
2757 binder_enqueue_thread_work_ilocked(thread, &t->work);
2758 } else if (!pending_async) {
2759 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2760 } else {
2761 if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) {
2762 t_outdated = binder_find_outdated_transaction_ilocked(t,
2763 &node->async_todo);
2764 if (t_outdated) {
2765 binder_debug(BINDER_DEBUG_TRANSACTION,
2766 "txn %d supersedes %d\n",
2767 t->debug_id, t_outdated->debug_id);
2768 list_del_init(&t_outdated->work.entry);
2769 proc->outstanding_txns--;
2770 }
2771 }
2772 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2773 }
2774
2775 if (!pending_async)
2776 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2777
2778 proc->outstanding_txns++;
2779 binder_inner_proc_unlock(proc);
2780 binder_node_unlock(node);
2781
2782 /*
2783 * To reduce potential contention, free the outdated transaction and
2784 * buffer after releasing the locks.
2785 */
2786 if (t_outdated) {
2787 struct binder_buffer *buffer = t_outdated->buffer;
2788
2789 t_outdated->buffer = NULL;
2790 buffer->transaction = NULL;
2791 trace_binder_transaction_update_buffer_release(buffer);
2792 binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
2793 binder_alloc_free_buf(&proc->alloc, buffer);
2794 kfree(t_outdated);
2795 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2796 }
2797
2798 return 0;
2799}
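
/*
 * The queueing decision above, in brief (a summary of the code, not
 * extra policy):
 *
 *	sync txn, live thread found	-> thread->todo, wake that thread
 *	sync txn, no thread available	-> proc->todo, wake a waiter
 *	oneway, node currently idle	-> as above, but thread was NULL
 *	oneway, async already pending	-> node->async_todo, no wakeup
 *	oneway + TF_UPDATE_TXN, frozen	-> may first supersede an older
 *					   queued transaction
 *	sync to frozen, or dead target	-> BR_FROZEN_REPLY / BR_DEAD_REPLY
 */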
2800
2801/**
2802 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2803 * @node: struct binder_node for which to get refs
2804 * @procp: returns @node->proc if valid
2805 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2806 *
2807 * User-space normally keeps the node alive when creating a transaction
2808 * since it has a reference to the target. The local strong ref keeps it
2809 * alive if the sending process dies before the target process processes
2810 * the transaction. If the source process is malicious or has a reference
2811 * counting bug, relying on the local strong ref can fail.
2812 *
2813 * Since user-space can cause the local strong ref to go away, we also take
2814 * a tmpref on the node to ensure it survives while we are constructing
2815 * the transaction. We also need a tmpref on the proc while we are
2816 * constructing the transaction, so we take that here as well.
2817 *
2818 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2819 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2820 * target proc has died, @error is set to BR_DEAD_REPLY.
2821 */
2822static struct binder_node *binder_get_node_refs_for_txn(
2823 struct binder_node *node,
2824 struct binder_proc **procp,
2825 uint32_t *error)
2826{
2827 struct binder_node *target_node = NULL;
2828
2829 binder_node_inner_lock(node);
2830 if (node->proc) {
2831 target_node = node;
2832 binder_inc_node_nilocked(node, 1, 0, NULL);
2833 binder_inc_node_tmpref_ilocked(node);
2834 node->proc->tmp_ref++;
2835 *procp = node->proc;
2836 } else
2837 *error = BR_DEAD_REPLY;
2838 binder_node_inner_unlock(node);
2839
2840 return target_node;
2841}
2842
2843static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2844 uint32_t command, int32_t param)
2845{
2846 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2847
2848 if (!from) {
2849 /* annotation for sparse */
2850 __release(&from->proc->inner_lock);
2851 return;
2852 }
2853
2854 /* don't override existing errors */
2855 if (from->ee.command == BR_OK)
2856 binder_set_extended_error(&from->ee, id, command, param);
2857 binder_inner_proc_unlock(from->proc);
2858 binder_thread_dec_tmpref(from);
2859}
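
/*
 * For orientation before the long function below, a minimal
 * sender-side sketch (userspace view, hypothetical values) of what
 * arrives here as @tr via BC_TRANSACTION:
 *
 *	struct binder_transaction_data tr = {
 *		.target.handle	  = 1,		// ref held by the sender
 *		.code		  = 42,		// method to invoke
 *		.flags		  = 0,		// synchronous call
 *		.data_size	  = 128,
 *		.offsets_size	  = sizeof(binder_size_t),
 *		.data.ptr.buffer  = (binder_uintptr_t)buf,  // payload
 *		.data.ptr.offsets = (binder_uintptr_t)offs, // object offsets
 *	};
 *
 * Replies take the same path with @reply == 1 and are routed via the
 * sender's transaction_stack rather than a handle.
 */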
2860
2861static void binder_transaction(struct binder_proc *proc,
2862 struct binder_thread *thread,
2863 struct binder_transaction_data *tr, int reply,
2864 binder_size_t extra_buffers_size)
2865{
2866 int ret;
2867 struct binder_transaction *t;
2868 struct binder_work *w;
2869 struct binder_work *tcomplete;
2870 binder_size_t buffer_offset = 0;
2871 binder_size_t off_start_offset, off_end_offset;
2872 binder_size_t off_min;
2873 binder_size_t sg_buf_offset, sg_buf_end_offset;
2874 binder_size_t user_offset = 0;
2875 struct binder_proc *target_proc = NULL;
2876 struct binder_thread *target_thread = NULL;
2877 struct binder_node *target_node = NULL;
2878 struct binder_transaction *in_reply_to = NULL;
2879 struct binder_transaction_log_entry *e;
2880 uint32_t return_error = 0;
2881 uint32_t return_error_param = 0;
2882 uint32_t return_error_line = 0;
2883 binder_size_t last_fixup_obj_off = 0;
2884 binder_size_t last_fixup_min_off = 0;
2885 struct binder_context *context = proc->context;
2886 int t_debug_id = atomic_inc_return(&binder_last_id);
2887 char *secctx = NULL;
2888 u32 secctx_sz = 0;
2889 struct list_head sgc_head;
2890 struct list_head pf_head;
2891 const void __user *user_buffer = (const void __user *)
2892 (uintptr_t)tr->data.ptr.buffer;
2893 INIT_LIST_HEAD(&sgc_head);
2894 INIT_LIST_HEAD(&pf_head);
2895
2896 e = binder_transaction_log_add(&binder_transaction_log);
2897 e->debug_id = t_debug_id;
2898 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2899 e->from_proc = proc->pid;
2900 e->from_thread = thread->pid;
2901 e->target_handle = tr->target.handle;
2902 e->data_size = tr->data_size;
2903 e->offsets_size = tr->offsets_size;
2904 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2905
2906 binder_inner_proc_lock(proc);
2907 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
2908 binder_inner_proc_unlock(proc);
2909
2910 if (reply) {
2911 binder_inner_proc_lock(proc);
2912 in_reply_to = thread->transaction_stack;
2913 if (in_reply_to == NULL) {
2914 binder_inner_proc_unlock(proc);
2915 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2916 proc->pid, thread->pid);
2917 return_error = BR_FAILED_REPLY;
2918 return_error_param = -EPROTO;
2919 return_error_line = __LINE__;
2920 goto err_empty_call_stack;
2921 }
2922 if (in_reply_to->to_thread != thread) {
2923 spin_lock(&in_reply_to->lock);
2924 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2925 proc->pid, thread->pid, in_reply_to->debug_id,
2926 in_reply_to->to_proc ?
2927 in_reply_to->to_proc->pid : 0,
2928 in_reply_to->to_thread ?
2929 in_reply_to->to_thread->pid : 0);
2930 spin_unlock(&in_reply_to->lock);
2931 binder_inner_proc_unlock(proc);
2932 return_error = BR_FAILED_REPLY;
2933 return_error_param = -EPROTO;
2934 return_error_line = __LINE__;
2935 in_reply_to = NULL;
2936 goto err_bad_call_stack;
2937 }
2938 thread->transaction_stack = in_reply_to->to_parent;
2939 binder_inner_proc_unlock(proc);
2940 binder_set_nice(in_reply_to->saved_priority);
2941 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2942 if (target_thread == NULL) {
2943 /* annotation for sparse */
2944 __release(&target_thread->proc->inner_lock);
2945 binder_txn_error("%d:%d reply target not found\n",
2946 thread->pid, proc->pid);
2947 return_error = BR_DEAD_REPLY;
2948 return_error_line = __LINE__;
2949 goto err_dead_binder;
2950 }
2951 if (target_thread->transaction_stack != in_reply_to) {
2952 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2953 proc->pid, thread->pid,
2954 target_thread->transaction_stack ?
2955 target_thread->transaction_stack->debug_id : 0,
2956 in_reply_to->debug_id);
2957 binder_inner_proc_unlock(target_thread->proc);
2958 return_error = BR_FAILED_REPLY;
2959 return_error_param = -EPROTO;
2960 return_error_line = __LINE__;
2961 in_reply_to = NULL;
2962 target_thread = NULL;
2963 goto err_dead_binder;
2964 }
2965 target_proc = target_thread->proc;
2966 target_proc->tmp_ref++;
2967 binder_inner_proc_unlock(target_thread->proc);
2968 } else {
2969 if (tr->target.handle) {
2970 struct binder_ref *ref;
2971
2972 /*
2973 * There must already be a strong ref
2974 * on this node. If so, do a strong
2975 * increment on the node to ensure it
2976 * stays alive until the transaction is
2977 * done.
2978 */
2979 binder_proc_lock(proc);
2980 ref = binder_get_ref_olocked(proc, tr->target.handle,
2981 true);
2982 if (ref) {
2983 target_node = binder_get_node_refs_for_txn(
2984 ref->node, &target_proc,
2985 &return_error);
2986 } else {
2987 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
2988 proc->pid, thread->pid, tr->target.handle);
2989 return_error = BR_FAILED_REPLY;
2990 }
2991 binder_proc_unlock(proc);
2992 } else {
2993 mutex_lock(&context->context_mgr_node_lock);
2994 target_node = context->binder_context_mgr_node;
2995 if (target_node)
2996 target_node = binder_get_node_refs_for_txn(
2997 target_node, &target_proc,
2998 &return_error);
2999 else
3000 return_error = BR_DEAD_REPLY;
3001 mutex_unlock(&context->context_mgr_node_lock);
3002 if (target_node && target_proc->pid == proc->pid) {
3003 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3004 proc->pid, thread->pid);
3005 return_error = BR_FAILED_REPLY;
3006 return_error_param = -EINVAL;
3007 return_error_line = __LINE__;
3008 goto err_invalid_target_handle;
3009 }
3010 }
3011 if (!target_node) {
3012 binder_txn_error("%d:%d cannot find target node\n",
3013 thread->pid, proc->pid);
3014 /*
3015 * return_error is set above
3016 */
3017 return_error_param = -EINVAL;
3018 return_error_line = __LINE__;
3019 goto err_dead_binder;
3020 }
3021 e->to_node = target_node->debug_id;
3022 if (WARN_ON(proc == target_proc)) {
3023 binder_txn_error("%d:%d self transactions not allowed\n",
3024 thread->pid, proc->pid);
3025 return_error = BR_FAILED_REPLY;
3026 return_error_param = -EINVAL;
3027 return_error_line = __LINE__;
3028 goto err_invalid_target_handle;
3029 }
3030 if (security_binder_transaction(proc->cred,
3031 target_proc->cred) < 0) {
3032 binder_txn_error("%d:%d transaction credentials failed\n",
3033 thread->pid, proc->pid);
3034 return_error = BR_FAILED_REPLY;
3035 return_error_param = -EPERM;
3036 return_error_line = __LINE__;
3037 goto err_invalid_target_handle;
3038 }
3039 binder_inner_proc_lock(proc);
3040
3041 w = list_first_entry_or_null(&thread->todo,
3042 struct binder_work, entry);
3043 if (!(tr->flags & TF_ONE_WAY) && w &&
3044 w->type == BINDER_WORK_TRANSACTION) {
3045 /*
3046 * Do not allow new outgoing transaction from a
3047 * thread that has a transaction at the head of
3048 * its todo list. Only need to check the head
3049 * because binder_select_thread_ilocked picks a
3050 * thread from proc->waiting_threads to enqueue
3051 * the transaction, and nothing is queued to the
3052 * todo list while the thread is on waiting_threads.
3053 */
3054 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3055 proc->pid, thread->pid);
3056 binder_inner_proc_unlock(proc);
3057 return_error = BR_FAILED_REPLY;
3058 return_error_param = -EPROTO;
3059 return_error_line = __LINE__;
3060 goto err_bad_todo_list;
3061 }
3062
3063 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3064 struct binder_transaction *tmp;
3065
3066 tmp = thread->transaction_stack;
3067 if (tmp->to_thread != thread) {
3068 spin_lock(&tmp->lock);
3069 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3070 proc->pid, thread->pid, tmp->debug_id,
3071 tmp->to_proc ? tmp->to_proc->pid : 0,
3072 tmp->to_thread ?
3073 tmp->to_thread->pid : 0);
3074 spin_unlock(&tmp->lock);
3075 binder_inner_proc_unlock(proc);
3076 return_error = BR_FAILED_REPLY;
3077 return_error_param = -EPROTO;
3078 return_error_line = __LINE__;
3079 goto err_bad_call_stack;
3080 }
3081 while (tmp) {
3082 struct binder_thread *from;
3083
3084 spin_lock(&tmp->lock);
3085 from = tmp->from;
3086 if (from && from->proc == target_proc) {
3087 atomic_inc(&from->tmp_ref);
3088 target_thread = from;
3089 spin_unlock(&tmp->lock);
3090 break;
3091 }
3092 spin_unlock(&tmp->lock);
3093 tmp = tmp->from_parent;
3094 }
3095 }
3096 binder_inner_proc_unlock(proc);
3097 }
3098 if (target_thread)
3099 e->to_thread = target_thread->pid;
3100 e->to_proc = target_proc->pid;
3101
3102 /* TODO: reuse incoming transaction for reply */
3103 t = kzalloc(sizeof(*t), GFP_KERNEL);
3104 if (t == NULL) {
3105 binder_txn_error("%d:%d cannot allocate transaction\n",
3106 thread->pid, proc->pid);
3107 return_error = BR_FAILED_REPLY;
3108 return_error_param = -ENOMEM;
3109 return_error_line = __LINE__;
3110 goto err_alloc_t_failed;
3111 }
3112 INIT_LIST_HEAD(&t->fd_fixups);
3113 binder_stats_created(BINDER_STAT_TRANSACTION);
3114 spin_lock_init(&t->lock);
3115
3116 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3117 if (tcomplete == NULL) {
3118 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3119 thread->pid, proc->pid);
3120 return_error = BR_FAILED_REPLY;
3121 return_error_param = -ENOMEM;
3122 return_error_line = __LINE__;
3123 goto err_alloc_tcomplete_failed;
3124 }
3125 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3126
3127 t->debug_id = t_debug_id;
3128
3129 if (reply)
3130 binder_debug(BINDER_DEBUG_TRANSACTION,
3131 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3132 proc->pid, thread->pid, t->debug_id,
3133 target_proc->pid, target_thread->pid,
3134 (u64)tr->data.ptr.buffer,
3135 (u64)tr->data.ptr.offsets,
3136 (u64)tr->data_size, (u64)tr->offsets_size,
3137 (u64)extra_buffers_size);
3138 else
3139 binder_debug(BINDER_DEBUG_TRANSACTION,
3140 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3141 proc->pid, thread->pid, t->debug_id,
3142 target_proc->pid, target_node->debug_id,
3143 (u64)tr->data.ptr.buffer,
3144 (u64)tr->data.ptr.offsets,
3145 (u64)tr->data_size, (u64)tr->offsets_size,
3146 (u64)extra_buffers_size);
3147
3148 if (!reply && !(tr->flags & TF_ONE_WAY))
3149 t->from = thread;
3150 else
3151 t->from = NULL;
3152 t->sender_euid = task_euid(proc->tsk);
3153 t->to_proc = target_proc;
3154 t->to_thread = target_thread;
3155 t->code = tr->code;
3156 t->flags = tr->flags;
3157 t->priority = task_nice(current);
3158
3159 if (target_node && target_node->txn_security_ctx) {
3160 u32 secid;
3161 size_t added_size;
3162
3163 security_cred_getsecid(proc->cred, &secid);
3164 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3165 if (ret) {
3166 binder_txn_error("%d:%d failed to get security context\n",
3167 thread->pid, proc->pid);
3168 return_error = BR_FAILED_REPLY;
3169 return_error_param = ret;
3170 return_error_line = __LINE__;
3171 goto err_get_secctx_failed;
3172 }
3173 added_size = ALIGN(secctx_sz, sizeof(u64));
3174 extra_buffers_size += added_size;
3175 if (extra_buffers_size < added_size) {
3176 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3177 thread->pid, proc->pid);
3178 return_error = BR_FAILED_REPLY;
3179 return_error_param = -EINVAL;
3180 return_error_line = __LINE__;
3181 goto err_bad_extra_size;
3182 }
3183 }
3184
3185 trace_binder_transaction(reply, t, target_node);
3186
3187 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3188 tr->offsets_size, extra_buffers_size,
3189 !reply && (t->flags & TF_ONE_WAY), current->tgid);
3190 if (IS_ERR(t->buffer)) {
3191 char *s;
3192
3193 ret = PTR_ERR(t->buffer);
3194 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3195 : (ret == -ENOSPC) ? ": no space left"
3196 : (ret == -ENOMEM) ? ": memory allocation failed"
3197 : "";
3198 binder_txn_error("cannot allocate buffer%s", s);
3199
3200 return_error_param = PTR_ERR(t->buffer);
3201 return_error = return_error_param == -ESRCH ?
3202 BR_DEAD_REPLY : BR_FAILED_REPLY;
3203 return_error_line = __LINE__;
3204 t->buffer = NULL;
3205 goto err_binder_alloc_buf_failed;
3206 }
3207 if (secctx) {
3208 int err;
3209 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3210 ALIGN(tr->offsets_size, sizeof(void *)) +
3211 ALIGN(extra_buffers_size, sizeof(void *)) -
3212 ALIGN(secctx_sz, sizeof(u64));
3213
3214 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3215 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3216 t->buffer, buf_offset,
3217 secctx, secctx_sz);
3218 if (err) {
3219 t->security_ctx = 0;
3220 WARN_ON(1);
3221 }
3222 security_release_secctx(secctx, secctx_sz);
3223 secctx = NULL;
3224 }
3225 t->buffer->debug_id = t->debug_id;
3226 t->buffer->transaction = t;
3227 t->buffer->target_node = target_node;
3228 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3229 trace_binder_transaction_alloc_buf(t->buffer);
3230
3231 if (binder_alloc_copy_user_to_buffer(
3232 &target_proc->alloc,
3233 t->buffer,
3234 ALIGN(tr->data_size, sizeof(void *)),
3235 (const void __user *)
3236 (uintptr_t)tr->data.ptr.offsets,
3237 tr->offsets_size)) {
3238 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3239 proc->pid, thread->pid);
3240 return_error = BR_FAILED_REPLY;
3241 return_error_param = -EFAULT;
3242 return_error_line = __LINE__;
3243 goto err_copy_data_failed;
3244 }
3245 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3246 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3247 proc->pid, thread->pid, (u64)tr->offsets_size);
3248 return_error = BR_FAILED_REPLY;
3249 return_error_param = -EINVAL;
3250 return_error_line = __LINE__;
3251 goto err_bad_offset;
3252 }
3253 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3254 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3255 proc->pid, thread->pid,
3256 (u64)extra_buffers_size);
3257 return_error = BR_FAILED_REPLY;
3258 return_error_param = -EINVAL;
3259 return_error_line = __LINE__;
3260 goto err_bad_offset;
3261 }
3262 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3263 buffer_offset = off_start_offset;
3264 off_end_offset = off_start_offset + tr->offsets_size;
3265 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3266 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3267 ALIGN(secctx_sz, sizeof(u64));
3268 off_min = 0;
3269 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3270 buffer_offset += sizeof(binder_size_t)) {
3271 struct binder_object_header *hdr;
3272 size_t object_size;
3273 struct binder_object object;
3274 binder_size_t object_offset;
3275 binder_size_t copy_size;
3276
3277 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3278 &object_offset,
3279 t->buffer,
3280 buffer_offset,
3281 sizeof(object_offset))) {
3282 binder_txn_error("%d:%d copy offset from buffer failed\n",
3283 thread->pid, proc->pid);
3284 return_error = BR_FAILED_REPLY;
3285 return_error_param = -EINVAL;
3286 return_error_line = __LINE__;
3287 goto err_bad_offset;
3288 }
3289
3290 /*
3291 * Copy the source user buffer up to the next object
3292 * that will be processed.
3293 */
3294 copy_size = object_offset - user_offset;
3295 if (copy_size && (user_offset > object_offset ||
3296 binder_alloc_copy_user_to_buffer(
3297 &target_proc->alloc,
3298 t->buffer, user_offset,
3299 user_buffer + user_offset,
3300 copy_size))) {
3301 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3302 proc->pid, thread->pid);
3303 return_error = BR_FAILED_REPLY;
3304 return_error_param = -EFAULT;
3305 return_error_line = __LINE__;
3306 goto err_copy_data_failed;
3307 }
3308 object_size = binder_get_object(target_proc, user_buffer,
3309 t->buffer, object_offset, &object);
3310 if (object_size == 0 || object_offset < off_min) {
3311 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3312 proc->pid, thread->pid,
3313 (u64)object_offset,
3314 (u64)off_min,
3315 (u64)t->buffer->data_size);
3316 return_error = BR_FAILED_REPLY;
3317 return_error_param = -EINVAL;
3318 return_error_line = __LINE__;
3319 goto err_bad_offset;
3320 }
3321 /*
3322 * Set offset to the next buffer fragment to be
3323 * copied
3324 */
3325 user_offset = object_offset + object_size;
3326
3327 hdr = &object.hdr;
3328 off_min = object_offset + object_size;
3329 switch (hdr->type) {
3330 case BINDER_TYPE_BINDER:
3331 case BINDER_TYPE_WEAK_BINDER: {
3332 struct flat_binder_object *fp;
3333
3334 fp = to_flat_binder_object(hdr);
3335 ret = binder_translate_binder(fp, t, thread);
3336
3337 if (ret < 0 ||
3338 binder_alloc_copy_to_buffer(&target_proc->alloc,
3339 t->buffer,
3340 object_offset,
3341 fp, sizeof(*fp))) {
3342 binder_txn_error("%d:%d translate binder failed\n",
3343 thread->pid, proc->pid);
3344 return_error = BR_FAILED_REPLY;
3345 return_error_param = ret;
3346 return_error_line = __LINE__;
3347 goto err_translate_failed;
3348 }
3349 } break;
3350 case BINDER_TYPE_HANDLE:
3351 case BINDER_TYPE_WEAK_HANDLE: {
3352 struct flat_binder_object *fp;
3353
3354 fp = to_flat_binder_object(hdr);
3355 ret = binder_translate_handle(fp, t, thread);
3356 if (ret < 0 ||
3357 binder_alloc_copy_to_buffer(&target_proc->alloc,
3358 t->buffer,
3359 object_offset,
3360 fp, sizeof(*fp))) {
3361 binder_txn_error("%d:%d translate handle failed\n",
3362 thread->pid, proc->pid);
3363 return_error = BR_FAILED_REPLY;
3364 return_error_param = ret;
3365 return_error_line = __LINE__;
3366 goto err_translate_failed;
3367 }
3368 } break;
3369
3370 case BINDER_TYPE_FD: {
3371 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3372 binder_size_t fd_offset = object_offset +
3373 (uintptr_t)&fp->fd - (uintptr_t)fp;
3374 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3375 thread, in_reply_to);
3376
3377 fp->pad_binder = 0;
3378 if (ret < 0 ||
3379 binder_alloc_copy_to_buffer(&target_proc->alloc,
3380 t->buffer,
3381 object_offset,
3382 fp, sizeof(*fp))) {
3383 binder_txn_error("%d:%d translate fd failed\n",
3384 thread->pid, proc->pid);
3385 return_error = BR_FAILED_REPLY;
3386 return_error_param = ret;
3387 return_error_line = __LINE__;
3388 goto err_translate_failed;
3389 }
3390 } break;
3391 case BINDER_TYPE_FDA: {
3392 struct binder_object ptr_object;
3393 binder_size_t parent_offset;
3394 struct binder_object user_object;
3395 size_t user_parent_size;
3396 struct binder_fd_array_object *fda =
3397 to_binder_fd_array_object(hdr);
3398 size_t num_valid = (buffer_offset - off_start_offset) /
3399 sizeof(binder_size_t);
3400 struct binder_buffer_object *parent =
3401 binder_validate_ptr(target_proc, t->buffer,
3402 &ptr_object, fda->parent,
3403 off_start_offset,
3404 &parent_offset,
3405 num_valid);
3406 if (!parent) {
3407 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3408 proc->pid, thread->pid);
3409 return_error = BR_FAILED_REPLY;
3410 return_error_param = -EINVAL;
3411 return_error_line = __LINE__;
3412 goto err_bad_parent;
3413 }
3414 if (!binder_validate_fixup(target_proc, t->buffer,
3415 off_start_offset,
3416 parent_offset,
3417 fda->parent_offset,
3418 last_fixup_obj_off,
3419 last_fixup_min_off)) {
3420 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3421 proc->pid, thread->pid);
3422 return_error = BR_FAILED_REPLY;
3423 return_error_param = -EINVAL;
3424 return_error_line = __LINE__;
3425 goto err_bad_parent;
3426 }
3427 /*
3428 * We need to read the user version of the parent
3429 * object to get the original user offset
3430 */
3431 user_parent_size =
3432 binder_get_object(proc, user_buffer, t->buffer,
3433 parent_offset, &user_object);
3434 if (user_parent_size != sizeof(user_object.bbo)) {
3435 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3436 proc->pid, thread->pid,
3437 user_parent_size,
3438 sizeof(user_object.bbo));
3439 return_error = BR_FAILED_REPLY;
3440 return_error_param = -EINVAL;
3441 return_error_line = __LINE__;
3442 goto err_bad_parent;
3443 }
3444 ret = binder_translate_fd_array(&pf_head, fda,
3445 user_buffer, parent,
3446 &user_object.bbo, t,
3447 thread, in_reply_to);
3448 if (!ret)
3449 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3450 t->buffer,
3451 object_offset,
3452 fda, sizeof(*fda));
3453 if (ret) {
3454 binder_txn_error("%d:%d translate fd array failed\n",
3455 thread->pid, proc->pid);
3456 return_error = BR_FAILED_REPLY;
3457 return_error_param = ret > 0 ? -EINVAL : ret;
3458 return_error_line = __LINE__;
3459 goto err_translate_failed;
3460 }
3461 last_fixup_obj_off = parent_offset;
3462 last_fixup_min_off =
3463 fda->parent_offset + sizeof(u32) * fda->num_fds;
3464 } break;
3465 case BINDER_TYPE_PTR: {
3466 struct binder_buffer_object *bp =
3467 to_binder_buffer_object(hdr);
3468 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3469 size_t num_valid;
3470
3471 if (bp->length > buf_left) {
3472 binder_user_error("%d:%d got transaction with too large buffer\n",
3473 proc->pid, thread->pid);
3474 return_error = BR_FAILED_REPLY;
3475 return_error_param = -EINVAL;
3476 return_error_line = __LINE__;
3477 goto err_bad_offset;
3478 }
3479 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3480 (const void __user *)(uintptr_t)bp->buffer,
3481 bp->length);
3482 if (ret) {
3483 binder_txn_error("%d:%d deferred copy failed\n",
3484 thread->pid, proc->pid);
3485 return_error = BR_FAILED_REPLY;
3486 return_error_param = ret;
3487 return_error_line = __LINE__;
3488 goto err_translate_failed;
3489 }
3490 /* Fixup buffer pointer to target proc address space */
3491 bp->buffer = (uintptr_t)
3492 t->buffer->user_data + sg_buf_offset;
3493 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3494
3495 num_valid = (buffer_offset - off_start_offset) /
3496 sizeof(binder_size_t);
3497 ret = binder_fixup_parent(&pf_head, t,
3498 thread, bp,
3499 off_start_offset,
3500 num_valid,
3501 last_fixup_obj_off,
3502 last_fixup_min_off);
3503 if (ret < 0 ||
3504 binder_alloc_copy_to_buffer(&target_proc->alloc,
3505 t->buffer,
3506 object_offset,
3507 bp, sizeof(*bp))) {
3508 binder_txn_error("%d:%d failed to fixup parent\n",
3509 thread->pid, proc->pid);
3510 return_error = BR_FAILED_REPLY;
3511 return_error_param = ret;
3512 return_error_line = __LINE__;
3513 goto err_translate_failed;
3514 }
3515 last_fixup_obj_off = object_offset;
3516 last_fixup_min_off = 0;
3517 } break;
3518 default:
3519 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3520 proc->pid, thread->pid, hdr->type);
3521 return_error = BR_FAILED_REPLY;
3522 return_error_param = -EINVAL;
3523 return_error_line = __LINE__;
3524 goto err_bad_object_type;
3525 }
3526 }
3527 /* Done processing objects, copy the rest of the buffer */
3528 if (binder_alloc_copy_user_to_buffer(
3529 &target_proc->alloc,
3530 t->buffer, user_offset,
3531 user_buffer + user_offset,
3532 tr->data_size - user_offset)) {
3533 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3534 proc->pid, thread->pid);
3535 return_error = BR_FAILED_REPLY;
3536 return_error_param = -EFAULT;
3537 return_error_line = __LINE__;
3538 goto err_copy_data_failed;
3539 }
3540
3541 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3542 &sgc_head, &pf_head);
3543 if (ret) {
3544 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3545 proc->pid, thread->pid);
3546 return_error = BR_FAILED_REPLY;
3547 return_error_param = ret;
3548 return_error_line = __LINE__;
3549 goto err_copy_data_failed;
3550 }
3551 if (t->buffer->oneway_spam_suspect)
3552 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3553 else
3554 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3555 t->work.type = BINDER_WORK_TRANSACTION;
3556
3557 if (reply) {
3558 binder_enqueue_thread_work(thread, tcomplete);
3559 binder_inner_proc_lock(target_proc);
3560 if (target_thread->is_dead) {
3561 return_error = BR_DEAD_REPLY;
3562 binder_inner_proc_unlock(target_proc);
3563 goto err_dead_proc_or_thread;
3564 }
3565 BUG_ON(t->buffer->async_transaction != 0);
3566 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3567 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3568 target_proc->outstanding_txns++;
3569 binder_inner_proc_unlock(target_proc);
3570 wake_up_interruptible_sync(&target_thread->wait);
3571 binder_free_transaction(in_reply_to);
3572 } else if (!(t->flags & TF_ONE_WAY)) {
3573 BUG_ON(t->buffer->async_transaction != 0);
3574 binder_inner_proc_lock(proc);
3575 /*
3576 * Defer the TRANSACTION_COMPLETE, so we don't return to
3577 * userspace immediately; this allows the target process to
3578 * immediately start processing this transaction, reducing
3579 * latency. We will then return the TRANSACTION_COMPLETE when
3580 * the target replies (or there is an error).
3581 */
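		/*
		 * A rough sketch of the resulting ordering on the sender
		 * side (illustrative, not an ABI statement): after writing
		 * BC_TRANSACTION, the caller's next read typically blocks
		 * until the reply arrives and then returns
		 * BR_TRANSACTION_COMPLETE followed by BR_REPLY together,
		 * rather than returning BR_TRANSACTION_COMPLETE on its own
		 * in the very next BINDER_WRITE_READ.
		 */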
3582 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3583 t->need_reply = 1;
3584 t->from_parent = thread->transaction_stack;
3585 thread->transaction_stack = t;
3586 binder_inner_proc_unlock(proc);
3587 return_error = binder_proc_transaction(t,
3588 target_proc, target_thread);
3589 if (return_error) {
3590 binder_inner_proc_lock(proc);
3591 binder_pop_transaction_ilocked(thread, t);
3592 binder_inner_proc_unlock(proc);
3593 goto err_dead_proc_or_thread;
3594 }
3595 } else {
3596 BUG_ON(target_node == NULL);
3597 BUG_ON(t->buffer->async_transaction != 1);
3598 binder_enqueue_thread_work(thread, tcomplete);
3599 return_error = binder_proc_transaction(t, target_proc, NULL);
3600 if (return_error)
3601 goto err_dead_proc_or_thread;
3602 }
3603 if (target_thread)
3604 binder_thread_dec_tmpref(target_thread);
3605 binder_proc_dec_tmpref(target_proc);
3606 if (target_node)
3607 binder_dec_node_tmpref(target_node);
3608 /*
3609 * write barrier to synchronize with initialization
3610 * of log entry
3611 */
3612 smp_wmb();
3613 WRITE_ONCE(e->debug_id_done, t_debug_id);
3614 return;
3615
3616err_dead_proc_or_thread:
3617 binder_txn_error("%d:%d dead process or thread\n",
3618 thread->pid, proc->pid);
3619 return_error_line = __LINE__;
3620 binder_dequeue_work(proc, tcomplete);
3621err_translate_failed:
3622err_bad_object_type:
3623err_bad_offset:
3624err_bad_parent:
3625err_copy_data_failed:
3626 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3627 binder_free_txn_fixups(t);
3628 trace_binder_transaction_failed_buffer_release(t->buffer);
3629 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3630 buffer_offset, true);
3631 if (target_node)
3632 binder_dec_node_tmpref(target_node);
3633 target_node = NULL;
3634 t->buffer->transaction = NULL;
3635 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3636err_binder_alloc_buf_failed:
3637err_bad_extra_size:
3638 if (secctx)
3639 security_release_secctx(secctx, secctx_sz);
3640err_get_secctx_failed:
3641 kfree(tcomplete);
3642 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3643err_alloc_tcomplete_failed:
3644 if (trace_binder_txn_latency_free_enabled())
3645 binder_txn_latency_free(t);
3646 kfree(t);
3647 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3648err_alloc_t_failed:
3649err_bad_todo_list:
3650err_bad_call_stack:
3651err_empty_call_stack:
3652err_dead_binder:
3653err_invalid_target_handle:
3654 if (target_node) {
3655 binder_dec_node(target_node, 1, 0);
3656 binder_dec_node_tmpref(target_node);
3657 }
3658
3659 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3660 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3661 proc->pid, thread->pid, reply ? "reply" :
3662 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3663 target_proc ? target_proc->pid : 0,
3664 target_thread ? target_thread->pid : 0,
3665 t_debug_id, return_error, return_error_param,
3666 (u64)tr->data_size, (u64)tr->offsets_size,
3667 return_error_line);
3668
3669 if (target_thread)
3670 binder_thread_dec_tmpref(target_thread);
3671 if (target_proc)
3672 binder_proc_dec_tmpref(target_proc);
3673
3674 {
3675 struct binder_transaction_log_entry *fe;
3676
3677 e->return_error = return_error;
3678 e->return_error_param = return_error_param;
3679 e->return_error_line = return_error_line;
3680 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3681 *fe = *e;
3682 /*
3683 * write barrier to synchronize with initialization
3684 * of log entry
3685 */
3686 smp_wmb();
3687 WRITE_ONCE(e->debug_id_done, t_debug_id);
3688 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3689 }
3690
3691 BUG_ON(thread->return_error.cmd != BR_OK);
3692 if (in_reply_to) {
3693 binder_set_txn_from_error(in_reply_to, t_debug_id,
3694 return_error, return_error_param);
3695 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3696 binder_enqueue_thread_work(thread, &thread->return_error.work);
3697 binder_send_failed_reply(in_reply_to, return_error);
3698 } else {
3699 binder_inner_proc_lock(proc);
3700 binder_set_extended_error(&thread->ee, t_debug_id,
3701 return_error, return_error_param);
3702 binder_inner_proc_unlock(proc);
3703 thread->return_error.cmd = return_error;
3704 binder_enqueue_thread_work(thread, &thread->return_error.work);
3705 }
3706}
3707
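/*
 * A descriptive note on the error labels above: they are ordered so that
 * a goto releases exactly the state acquired so far. For example,
 * err_translate_failed and its neighbors free the partially built buffer,
 * deferred-copy lists and fd fixups, while later labels such as
 * err_invalid_target_handle only drop the node/proc/thread temporary
 * references. All paths fall through to logging the failure and queueing
 * a return error (or failed reply) for the calling thread.
 */
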
3708/**
3709 * binder_free_buf() - free the specified buffer
3710 * @proc:        binder proc that owns buffer
 * @thread:      binder thread performing the buffer release
3711 * @buffer:      buffer to be freed
3712 * @is_failure:  true if the buffer is being freed after a failed send
3713 *
3714 * If the buffer is for an async transaction, enqueue the next async
3715 * transaction from the node.
3716 *
3717 * Clean up the buffer and free it.
3718 */
3719static void
3720binder_free_buf(struct binder_proc *proc,
3721 struct binder_thread *thread,
3722 struct binder_buffer *buffer, bool is_failure)
3723{
3724 binder_inner_proc_lock(proc);
3725 if (buffer->transaction) {
3726 buffer->transaction->buffer = NULL;
3727 buffer->transaction = NULL;
3728 }
3729 binder_inner_proc_unlock(proc);
3730 if (buffer->async_transaction && buffer->target_node) {
3731 struct binder_node *buf_node;
3732 struct binder_work *w;
3733
3734 buf_node = buffer->target_node;
3735 binder_node_inner_lock(buf_node);
3736 BUG_ON(!buf_node->has_async_transaction);
3737 BUG_ON(buf_node->proc != proc);
3738 w = binder_dequeue_work_head_ilocked(
3739 &buf_node->async_todo);
3740 if (!w) {
3741 buf_node->has_async_transaction = false;
3742 } else {
3743 binder_enqueue_work_ilocked(
3744 w, &proc->todo);
3745 binder_wakeup_proc_ilocked(proc);
3746 }
3747 binder_node_inner_unlock(buf_node);
3748 }
3749 trace_binder_transaction_buffer_release(buffer);
3750 binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3751 binder_alloc_free_buf(&proc->alloc, buffer);
3752}
3753
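/*
 * Note on the async path above: because the next node work is only moved
 * to proc->todo here, async transactions to a given node are effectively
 * serialized; a new one is dispatched from node->async_todo only once the
 * receiver frees the previous async buffer (e.g. via BC_FREE_BUFFER).
 */
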
3754static int binder_thread_write(struct binder_proc *proc,
3755 struct binder_thread *thread,
3756 binder_uintptr_t binder_buffer, size_t size,
3757 binder_size_t *consumed)
3758{
3759 uint32_t cmd;
3760 struct binder_context *context = proc->context;
3761 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3762 void __user *ptr = buffer + *consumed;
3763 void __user *end = buffer + size;
3764
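	/*
	 * Illustrative layout of the write buffer consumed below (a
	 * sketch, not an exhaustive list; sizes come from the struct
	 * definitions): a packed stream of u32 command codes, each
	 * followed by that command's fixed-size payload, e.g.
	 *
	 *   u32 BC_FREE_BUFFER  followed by a binder_uintptr_t
	 *   u32 BC_TRANSACTION  followed by struct binder_transaction_data
	 *
	 * Some commands (e.g. BC_REGISTER_LOOPER) carry no payload.
	 * *consumed is advanced past each fully handled command, so a
	 * partially processed buffer can be resumed by the caller.
	 */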
3765 while (ptr < end && thread->return_error.cmd == BR_OK) {
3766 int ret;
3767
3768 if (get_user(cmd, (uint32_t __user *)ptr))
3769 return -EFAULT;
3770 ptr += sizeof(uint32_t);
3771 trace_binder_command(cmd);
3772 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3773 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3774 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3775 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3776 }
3777 switch (cmd) {
3778 case BC_INCREFS:
3779 case BC_ACQUIRE:
3780 case BC_RELEASE:
3781 case BC_DECREFS: {
3782 uint32_t target;
3783 const char *debug_string;
3784 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3785 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3786 struct binder_ref_data rdata;
3787
3788 if (get_user(target, (uint32_t __user *)ptr))
3789 return -EFAULT;
3790
3791 ptr += sizeof(uint32_t);
3792 ret = -1;
3793 if (increment && !target) {
3794 struct binder_node *ctx_mgr_node;
3795
3796 mutex_lock(&context->context_mgr_node_lock);
3797 ctx_mgr_node = context->binder_context_mgr_node;
3798 if (ctx_mgr_node) {
3799 if (ctx_mgr_node->proc == proc) {
3800 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3801 proc->pid, thread->pid);
3802 mutex_unlock(&context->context_mgr_node_lock);
3803 return -EINVAL;
3804 }
3805 ret = binder_inc_ref_for_node(
3806 proc, ctx_mgr_node,
3807 strong, NULL, &rdata);
3808 }
3809 mutex_unlock(&context->context_mgr_node_lock);
3810 }
3811 if (ret)
3812 ret = binder_update_ref_for_handle(
3813 proc, target, increment, strong,
3814 &rdata);
3815 if (!ret && rdata.desc != target) {
3816 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3817 proc->pid, thread->pid,
3818 target, rdata.desc);
3819 }
3820 switch (cmd) {
3821 case BC_INCREFS:
3822 debug_string = "IncRefs";
3823 break;
3824 case BC_ACQUIRE:
3825 debug_string = "Acquire";
3826 break;
3827 case BC_RELEASE:
3828 debug_string = "Release";
3829 break;
3830 case BC_DECREFS:
3831 default:
3832 debug_string = "DecRefs";
3833 break;
3834 }
3835 if (ret) {
3836 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3837 proc->pid, thread->pid, debug_string,
3838 strong, target, ret);
3839 break;
3840 }
3841 binder_debug(BINDER_DEBUG_USER_REFS,
3842 "%d:%d %s ref %d desc %d s %d w %d\n",
3843 proc->pid, thread->pid, debug_string,
3844 rdata.debug_id, rdata.desc, rdata.strong,
3845 rdata.weak);
3846 break;
3847 }
3848 case BC_INCREFS_DONE:
3849 case BC_ACQUIRE_DONE: {
3850 binder_uintptr_t node_ptr;
3851 binder_uintptr_t cookie;
3852 struct binder_node *node;
3853 bool free_node;
3854
3855 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3856 return -EFAULT;
3857 ptr += sizeof(binder_uintptr_t);
3858 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3859 return -EFAULT;
3860 ptr += sizeof(binder_uintptr_t);
3861 node = binder_get_node(proc, node_ptr);
3862 if (node == NULL) {
3863 binder_user_error("%d:%d %s u%016llx no match\n",
3864 proc->pid, thread->pid,
3865 cmd == BC_INCREFS_DONE ?
3866 "BC_INCREFS_DONE" :
3867 "BC_ACQUIRE_DONE",
3868 (u64)node_ptr);
3869 break;
3870 }
3871 if (cookie != node->cookie) {
3872 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3873 proc->pid, thread->pid,
3874 cmd == BC_INCREFS_DONE ?
3875 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3876 (u64)node_ptr, node->debug_id,
3877 (u64)cookie, (u64)node->cookie);
3878 binder_put_node(node);
3879 break;
3880 }
3881 binder_node_inner_lock(node);
3882 if (cmd == BC_ACQUIRE_DONE) {
3883 if (node->pending_strong_ref == 0) {
3884 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3885 proc->pid, thread->pid,
3886 node->debug_id);
3887 binder_node_inner_unlock(node);
3888 binder_put_node(node);
3889 break;
3890 }
3891 node->pending_strong_ref = 0;
3892 } else {
3893 if (node->pending_weak_ref == 0) {
3894 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3895 proc->pid, thread->pid,
3896 node->debug_id);
3897 binder_node_inner_unlock(node);
3898 binder_put_node(node);
3899 break;
3900 }
3901 node->pending_weak_ref = 0;
3902 }
3903 free_node = binder_dec_node_nilocked(node,
3904 cmd == BC_ACQUIRE_DONE, 0);
3905 WARN_ON(free_node);
3906 binder_debug(BINDER_DEBUG_USER_REFS,
3907 "%d:%d %s node %d ls %d lw %d tr %d\n",
3908 proc->pid, thread->pid,
3909 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3910 node->debug_id, node->local_strong_refs,
3911 node->local_weak_refs, node->tmp_refs);
3912 binder_node_inner_unlock(node);
3913 binder_put_node(node);
3914 break;
3915 }
3916 case BC_ATTEMPT_ACQUIRE:
3917 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3918 return -EINVAL;
3919 case BC_ACQUIRE_RESULT:
3920 pr_err("BC_ACQUIRE_RESULT not supported\n");
3921 return -EINVAL;
3922
3923 case BC_FREE_BUFFER: {
3924 binder_uintptr_t data_ptr;
3925 struct binder_buffer *buffer;
3926
3927 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3928 return -EFAULT;
3929 ptr += sizeof(binder_uintptr_t);
3930
3931 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3932 data_ptr);
3933 if (IS_ERR_OR_NULL(buffer)) {
3934 if (PTR_ERR(buffer) == -EPERM) {
3935 binder_user_error(
3936 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3937 proc->pid, thread->pid,
3938 (u64)data_ptr);
3939 } else {
3940 binder_user_error(
3941 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3942 proc->pid, thread->pid,
3943 (u64)data_ptr);
3944 }
3945 break;
3946 }
3947 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3948 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3949 proc->pid, thread->pid, (u64)data_ptr,
3950 buffer->debug_id,
3951 buffer->transaction ? "active" : "finished");
3952 binder_free_buf(proc, thread, buffer, false);
3953 break;
3954 }
3955
3956 case BC_TRANSACTION_SG:
3957 case BC_REPLY_SG: {
3958 struct binder_transaction_data_sg tr;
3959
3960 if (copy_from_user(&tr, ptr, sizeof(tr)))
3961 return -EFAULT;
3962 ptr += sizeof(tr);
3963 binder_transaction(proc, thread, &tr.transaction_data,
3964 cmd == BC_REPLY_SG, tr.buffers_size);
3965 break;
3966 }
3967 case BC_TRANSACTION:
3968 case BC_REPLY: {
3969 struct binder_transaction_data tr;
3970
3971 if (copy_from_user(&tr, ptr, sizeof(tr)))
3972 return -EFAULT;
3973 ptr += sizeof(tr);
3974 binder_transaction(proc, thread, &tr,
3975 cmd == BC_REPLY, 0);
3976 break;
3977 }
3978
3979 case BC_REGISTER_LOOPER:
3980 binder_debug(BINDER_DEBUG_THREADS,
3981 "%d:%d BC_REGISTER_LOOPER\n",
3982 proc->pid, thread->pid);
3983 binder_inner_proc_lock(proc);
3984 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3985 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3986 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3987 proc->pid, thread->pid);
3988 } else if (proc->requested_threads == 0) {
3989 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3990 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3991 proc->pid, thread->pid);
3992 } else {
3993 proc->requested_threads--;
3994 proc->requested_threads_started++;
3995 }
3996 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3997 binder_inner_proc_unlock(proc);
3998 break;
3999 case BC_ENTER_LOOPER:
4000 binder_debug(BINDER_DEBUG_THREADS,
4001 "%d:%d BC_ENTER_LOOPER\n",
4002 proc->pid, thread->pid);
4003 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4004 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4005 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4006 proc->pid, thread->pid);
4007 }
4008 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4009 break;
4010 case BC_EXIT_LOOPER:
4011 binder_debug(BINDER_DEBUG_THREADS,
4012 "%d:%d BC_EXIT_LOOPER\n",
4013 proc->pid, thread->pid);
4014 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4015 break;
4016
4017 case BC_REQUEST_DEATH_NOTIFICATION:
4018 case BC_CLEAR_DEATH_NOTIFICATION: {
4019 uint32_t target;
4020 binder_uintptr_t cookie;
4021 struct binder_ref *ref;
4022 struct binder_ref_death *death = NULL;
4023
4024 if (get_user(target, (uint32_t __user *)ptr))
4025 return -EFAULT;
4026 ptr += sizeof(uint32_t);
4027 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4028 return -EFAULT;
4029 ptr += sizeof(binder_uintptr_t);
4030 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4031 /*
4032			 * Allocate memory for the death notification
4033			 * before taking the lock
4034 */
4035 death = kzalloc(sizeof(*death), GFP_KERNEL);
4036 if (death == NULL) {
4037 WARN_ON(thread->return_error.cmd !=
4038 BR_OK);
4039 thread->return_error.cmd = BR_ERROR;
4040 binder_enqueue_thread_work(
4041 thread,
4042 &thread->return_error.work);
4043 binder_debug(
4044 BINDER_DEBUG_FAILED_TRANSACTION,
4045 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4046 proc->pid, thread->pid);
4047 break;
4048 }
4049 }
4050 binder_proc_lock(proc);
4051 ref = binder_get_ref_olocked(proc, target, false);
4052 if (ref == NULL) {
4053 binder_user_error("%d:%d %s invalid ref %d\n",
4054 proc->pid, thread->pid,
4055 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4056 "BC_REQUEST_DEATH_NOTIFICATION" :
4057 "BC_CLEAR_DEATH_NOTIFICATION",
4058 target);
4059 binder_proc_unlock(proc);
4060 kfree(death);
4061 break;
4062 }
4063
4064 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4065 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4066 proc->pid, thread->pid,
4067 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4068 "BC_REQUEST_DEATH_NOTIFICATION" :
4069 "BC_CLEAR_DEATH_NOTIFICATION",
4070 (u64)cookie, ref->data.debug_id,
4071 ref->data.desc, ref->data.strong,
4072 ref->data.weak, ref->node->debug_id);
4073
4074 binder_node_lock(ref->node);
4075 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4076 if (ref->death) {
4077 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4078 proc->pid, thread->pid);
4079 binder_node_unlock(ref->node);
4080 binder_proc_unlock(proc);
4081 kfree(death);
4082 break;
4083 }
4084 binder_stats_created(BINDER_STAT_DEATH);
4085 INIT_LIST_HEAD(&death->work.entry);
4086 death->cookie = cookie;
4087 ref->death = death;
4088 if (ref->node->proc == NULL) {
4089 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4090
4091 binder_inner_proc_lock(proc);
4092 binder_enqueue_work_ilocked(
4093 &ref->death->work, &proc->todo);
4094 binder_wakeup_proc_ilocked(proc);
4095 binder_inner_proc_unlock(proc);
4096 }
4097 } else {
4098 if (ref->death == NULL) {
4099 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4100 proc->pid, thread->pid);
4101 binder_node_unlock(ref->node);
4102 binder_proc_unlock(proc);
4103 break;
4104 }
4105 death = ref->death;
4106 if (death->cookie != cookie) {
4107 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4108 proc->pid, thread->pid,
4109 (u64)death->cookie,
4110 (u64)cookie);
4111 binder_node_unlock(ref->node);
4112 binder_proc_unlock(proc);
4113 break;
4114 }
4115 ref->death = NULL;
4116 binder_inner_proc_lock(proc);
4117 if (list_empty(&death->work.entry)) {
4118 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4119 if (thread->looper &
4120 (BINDER_LOOPER_STATE_REGISTERED |
4121 BINDER_LOOPER_STATE_ENTERED))
4122 binder_enqueue_thread_work_ilocked(
4123 thread,
4124 &death->work);
4125 else {
4126 binder_enqueue_work_ilocked(
4127 &death->work,
4128 &proc->todo);
4129 binder_wakeup_proc_ilocked(
4130 proc);
4131 }
4132 } else {
4133 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4134 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4135 }
4136 binder_inner_proc_unlock(proc);
4137 }
4138 binder_node_unlock(ref->node);
4139 binder_proc_unlock(proc);
4140 } break;
4141 case BC_DEAD_BINDER_DONE: {
4142 struct binder_work *w;
4143 binder_uintptr_t cookie;
4144 struct binder_ref_death *death = NULL;
4145
4146 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4147 return -EFAULT;
4148
4149 ptr += sizeof(cookie);
4150 binder_inner_proc_lock(proc);
4151 list_for_each_entry(w, &proc->delivered_death,
4152 entry) {
4153 struct binder_ref_death *tmp_death =
4154 container_of(w,
4155 struct binder_ref_death,
4156 work);
4157
4158 if (tmp_death->cookie == cookie) {
4159 death = tmp_death;
4160 break;
4161 }
4162 }
4163 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4164 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4165 proc->pid, thread->pid, (u64)cookie,
4166 death);
4167 if (death == NULL) {
4168 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4169 proc->pid, thread->pid, (u64)cookie);
4170 binder_inner_proc_unlock(proc);
4171 break;
4172 }
4173 binder_dequeue_work_ilocked(&death->work);
4174 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4175 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4176 if (thread->looper &
4177 (BINDER_LOOPER_STATE_REGISTERED |
4178 BINDER_LOOPER_STATE_ENTERED))
4179 binder_enqueue_thread_work_ilocked(
4180 thread, &death->work);
4181 else {
4182 binder_enqueue_work_ilocked(
4183 &death->work,
4184 &proc->todo);
4185 binder_wakeup_proc_ilocked(proc);
4186 }
4187 }
4188 binder_inner_proc_unlock(proc);
4189 } break;
4190
4191 default:
4192 pr_err("%d:%d unknown command %u\n",
4193 proc->pid, thread->pid, cmd);
4194 return -EINVAL;
4195 }
4196 *consumed = ptr - buffer;
4197 }
4198 return 0;
4199}
4200
4201static void binder_stat_br(struct binder_proc *proc,
4202 struct binder_thread *thread, uint32_t cmd)
4203{
4204 trace_binder_return(cmd);
4205 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4206 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4207 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4208 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4209 }
4210}
4211
4212static int binder_put_node_cmd(struct binder_proc *proc,
4213 struct binder_thread *thread,
4214 void __user **ptrp,
4215 binder_uintptr_t node_ptr,
4216 binder_uintptr_t node_cookie,
4217 int node_debug_id,
4218 uint32_t cmd, const char *cmd_name)
4219{
4220 void __user *ptr = *ptrp;
4221
4222 if (put_user(cmd, (uint32_t __user *)ptr))
4223 return -EFAULT;
4224 ptr += sizeof(uint32_t);
4225
4226 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4227 return -EFAULT;
4228 ptr += sizeof(binder_uintptr_t);
4229
4230 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4231 return -EFAULT;
4232 ptr += sizeof(binder_uintptr_t);
4233
4234 binder_stat_br(proc, thread, cmd);
4235 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4236 proc->pid, thread->pid, cmd_name, node_debug_id,
4237 (u64)node_ptr, (u64)node_cookie);
4238
4239 *ptrp = ptr;
4240 return 0;
4241}
4242
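/*
 * The triple written above (cmd, node ptr, node cookie) is what
 * BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS carry to userspace. For
 * BR_INCREFS and BR_ACQUIRE, userspace is expected to answer with
 * BC_INCREFS_DONE/BC_ACQUIRE_DONE; see the pending_weak_ref and
 * pending_strong_ref handling in binder_thread_write() above.
 */
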
4243static int binder_wait_for_work(struct binder_thread *thread,
4244 bool do_proc_work)
4245{
4246 DEFINE_WAIT(wait);
4247 struct binder_proc *proc = thread->proc;
4248 int ret = 0;
4249
4250 freezer_do_not_count();
4251 binder_inner_proc_lock(proc);
4252 for (;;) {
4253 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4254 if (binder_has_work_ilocked(thread, do_proc_work))
4255 break;
4256 if (do_proc_work)
4257 list_add(&thread->waiting_thread_node,
4258 &proc->waiting_threads);
4259 binder_inner_proc_unlock(proc);
4260 schedule();
4261 binder_inner_proc_lock(proc);
4262 list_del_init(&thread->waiting_thread_node);
4263 if (signal_pending(current)) {
4264 ret = -EINTR;
4265 break;
4266 }
4267 }
4268 finish_wait(&thread->wait, &wait);
4269 binder_inner_proc_unlock(proc);
4270 freezer_count();
4271
4272 return ret;
4273}
4274
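/*
 * The loop above is the classic prepare_to_wait()/schedule() pattern,
 * with one binder-specific twist: a thread willing to handle process
 * work parks itself on proc->waiting_threads while sleeping, so that
 * binder_wakeup_proc_ilocked() can pick an idle thread to wake.
 */
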
4275/**
4276 * binder_apply_fd_fixups() - finish fd translation
4277 * @proc: binder_proc associated with @t->buffer
4278 * @t: binder transaction with list of fd fixups
4279 *
4280 * Now that we are in the context of the transaction target
4281 * process, we can allocate and install fds. Process the
4282 * list of fds to translate and fix up the buffer with the
4283 * new fds first, and only then install the files.
4284 *
4285 * If we fail to allocate an fd, skip the install and release
4286 * any fds that have already been allocated.
4287 */
4288static int binder_apply_fd_fixups(struct binder_proc *proc,
4289 struct binder_transaction *t)
4290{
4291 struct binder_txn_fd_fixup *fixup, *tmp;
4292 int ret = 0;
4293
4294 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4295 int fd = get_unused_fd_flags(O_CLOEXEC);
4296
4297 if (fd < 0) {
4298 binder_debug(BINDER_DEBUG_TRANSACTION,
4299 "failed fd fixup txn %d fd %d\n",
4300 t->debug_id, fd);
4301 ret = -ENOMEM;
4302 goto err;
4303 }
4304 binder_debug(BINDER_DEBUG_TRANSACTION,
4305 "fd fixup txn %d fd %d\n",
4306 t->debug_id, fd);
4307 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4308 fixup->target_fd = fd;
4309 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4310 fixup->offset, &fd,
4311 sizeof(u32))) {
4312 ret = -EINVAL;
4313 goto err;
4314 }
4315 }
4316 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4317 fd_install(fixup->target_fd, fixup->file);
4318 list_del(&fixup->fixup_entry);
4319 kfree(fixup);
4320 }
4321
4322 return ret;
4323
4324err:
4325 binder_free_txn_fixups(t);
4326 return ret;
4327}
4328
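/*
 * A sketch of the two phases above for a transaction carrying two fds
 * (the fd numbers are examples only):
 *
 *   phase 1: get_unused_fd_flags() reserves e.g. fds 7 and 8, and each
 *            fd number is patched into the buffer at fixup->offset;
 *   phase 2: fd_install(7, file0); fd_install(8, file1).
 *
 * Keeping reservation separate from fd_install() means a failure in
 * phase 1 can still unwind cleanly, since no fd has been made visible
 * to userspace yet.
 */
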
4329static int binder_thread_read(struct binder_proc *proc,
4330 struct binder_thread *thread,
4331 binder_uintptr_t binder_buffer, size_t size,
4332 binder_size_t *consumed, int non_block)
4333{
4334 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4335 void __user *ptr = buffer + *consumed;
4336 void __user *end = buffer + size;
4337
4338 int ret = 0;
4339 int wait_for_proc_work;
4340
4341 if (*consumed == 0) {
4342 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4343 return -EFAULT;
4344 ptr += sizeof(uint32_t);
4345 }
4346
4347retry:
4348 binder_inner_proc_lock(proc);
4349 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4350 binder_inner_proc_unlock(proc);
4351
4352 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4353
4354 trace_binder_wait_for_work(wait_for_proc_work,
4355 !!thread->transaction_stack,
4356 !binder_worklist_empty(proc, &thread->todo));
4357 if (wait_for_proc_work) {
4358 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4359 BINDER_LOOPER_STATE_ENTERED))) {
4360 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4361 proc->pid, thread->pid, thread->looper);
4362 wait_event_interruptible(binder_user_error_wait,
4363 binder_stop_on_user_error < 2);
4364 }
4365 binder_set_nice(proc->default_priority);
4366 }
4367
4368 if (non_block) {
4369 if (!binder_has_work(thread, wait_for_proc_work))
4370 ret = -EAGAIN;
4371 } else {
4372 ret = binder_wait_for_work(thread, wait_for_proc_work);
4373 }
4374
4375 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4376
4377 if (ret)
4378 return ret;
4379
4380 while (1) {
4381 uint32_t cmd;
4382 struct binder_transaction_data_secctx tr;
4383 struct binder_transaction_data *trd = &tr.transaction_data;
4384 struct binder_work *w = NULL;
4385 struct list_head *list = NULL;
4386 struct binder_transaction *t = NULL;
4387 struct binder_thread *t_from;
4388 size_t trsize = sizeof(*trd);
4389
4390 binder_inner_proc_lock(proc);
4391 if (!binder_worklist_empty_ilocked(&thread->todo))
4392 list = &thread->todo;
4393 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4394 wait_for_proc_work)
4395 list = &proc->todo;
4396 else {
4397 binder_inner_proc_unlock(proc);
4398
4399 /* no data added */
4400 if (ptr - buffer == 4 && !thread->looper_need_return)
4401 goto retry;
4402 break;
4403 }
4404
4405 if (end - ptr < sizeof(tr) + 4) {
4406 binder_inner_proc_unlock(proc);
4407 break;
4408 }
4409 w = binder_dequeue_work_head_ilocked(list);
4410 if (binder_worklist_empty_ilocked(&thread->todo))
4411 thread->process_todo = false;
4412
4413 switch (w->type) {
4414 case BINDER_WORK_TRANSACTION: {
4415 binder_inner_proc_unlock(proc);
4416 t = container_of(w, struct binder_transaction, work);
4417 } break;
4418 case BINDER_WORK_RETURN_ERROR: {
4419 struct binder_error *e = container_of(
4420 w, struct binder_error, work);
4421
4422 WARN_ON(e->cmd == BR_OK);
4423 binder_inner_proc_unlock(proc);
4424 if (put_user(e->cmd, (uint32_t __user *)ptr))
4425 return -EFAULT;
4426 cmd = e->cmd;
4427 e->cmd = BR_OK;
4428 ptr += sizeof(uint32_t);
4429
4430 binder_stat_br(proc, thread, cmd);
4431 } break;
4432 case BINDER_WORK_TRANSACTION_COMPLETE:
4433 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4434 if (proc->oneway_spam_detection_enabled &&
4435 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4436 cmd = BR_ONEWAY_SPAM_SUSPECT;
4437 else
4438 cmd = BR_TRANSACTION_COMPLETE;
4439 binder_inner_proc_unlock(proc);
4440 kfree(w);
4441 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4442 if (put_user(cmd, (uint32_t __user *)ptr))
4443 return -EFAULT;
4444 ptr += sizeof(uint32_t);
4445
4446 binder_stat_br(proc, thread, cmd);
4447 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4448 "%d:%d BR_TRANSACTION_COMPLETE\n",
4449 proc->pid, thread->pid);
4450 } break;
4451 case BINDER_WORK_NODE: {
4452 struct binder_node *node = container_of(w, struct binder_node, work);
4453 int strong, weak;
4454 binder_uintptr_t node_ptr = node->ptr;
4455 binder_uintptr_t node_cookie = node->cookie;
4456 int node_debug_id = node->debug_id;
4457 int has_weak_ref;
4458 int has_strong_ref;
4459 void __user *orig_ptr = ptr;
4460
4461 BUG_ON(proc != node->proc);
4462 strong = node->internal_strong_refs ||
4463 node->local_strong_refs;
4464 weak = !hlist_empty(&node->refs) ||
4465 node->local_weak_refs ||
4466 node->tmp_refs || strong;
4467 has_strong_ref = node->has_strong_ref;
4468 has_weak_ref = node->has_weak_ref;
4469
4470 if (weak && !has_weak_ref) {
4471 node->has_weak_ref = 1;
4472 node->pending_weak_ref = 1;
4473 node->local_weak_refs++;
4474 }
4475 if (strong && !has_strong_ref) {
4476 node->has_strong_ref = 1;
4477 node->pending_strong_ref = 1;
4478 node->local_strong_refs++;
4479 }
4480 if (!strong && has_strong_ref)
4481 node->has_strong_ref = 0;
4482 if (!weak && has_weak_ref)
4483 node->has_weak_ref = 0;
4484 if (!weak && !strong) {
4485 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4486 "%d:%d node %d u%016llx c%016llx deleted\n",
4487 proc->pid, thread->pid,
4488 node_debug_id,
4489 (u64)node_ptr,
4490 (u64)node_cookie);
4491 rb_erase(&node->rb_node, &proc->nodes);
4492 binder_inner_proc_unlock(proc);
4493 binder_node_lock(node);
4494 /*
4495 * Acquire the node lock before freeing the
4496 * node to serialize with other threads that
4497 * may have been holding the node lock while
4498 * decrementing this node (avoids race where
4499 * this thread frees while the other thread
4500 * is unlocking the node after the final
4501 * decrement)
4502 */
4503 binder_node_unlock(node);
4504 binder_free_node(node);
4505 } else
4506 binder_inner_proc_unlock(proc);
4507
4508 if (weak && !has_weak_ref)
4509 ret = binder_put_node_cmd(
4510 proc, thread, &ptr, node_ptr,
4511 node_cookie, node_debug_id,
4512 BR_INCREFS, "BR_INCREFS");
4513 if (!ret && strong && !has_strong_ref)
4514 ret = binder_put_node_cmd(
4515 proc, thread, &ptr, node_ptr,
4516 node_cookie, node_debug_id,
4517 BR_ACQUIRE, "BR_ACQUIRE");
4518 if (!ret && !strong && has_strong_ref)
4519 ret = binder_put_node_cmd(
4520 proc, thread, &ptr, node_ptr,
4521 node_cookie, node_debug_id,
4522 BR_RELEASE, "BR_RELEASE");
4523 if (!ret && !weak && has_weak_ref)
4524 ret = binder_put_node_cmd(
4525 proc, thread, &ptr, node_ptr,
4526 node_cookie, node_debug_id,
4527 BR_DECREFS, "BR_DECREFS");
4528 if (orig_ptr == ptr)
4529 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4530 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4531 proc->pid, thread->pid,
4532 node_debug_id,
4533 (u64)node_ptr,
4534 (u64)node_cookie);
4535 if (ret)
4536 return ret;
4537 } break;
4538 case BINDER_WORK_DEAD_BINDER:
4539 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4540 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4541 struct binder_ref_death *death;
4542 uint32_t cmd;
4543 binder_uintptr_t cookie;
4544
4545 death = container_of(w, struct binder_ref_death, work);
4546 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4547 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4548 else
4549 cmd = BR_DEAD_BINDER;
4550 cookie = death->cookie;
4551
4552 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4553 "%d:%d %s %016llx\n",
4554 proc->pid, thread->pid,
4555 cmd == BR_DEAD_BINDER ?
4556 "BR_DEAD_BINDER" :
4557 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4558 (u64)cookie);
4559 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4560 binder_inner_proc_unlock(proc);
4561 kfree(death);
4562 binder_stats_deleted(BINDER_STAT_DEATH);
4563 } else {
4564 binder_enqueue_work_ilocked(
4565 w, &proc->delivered_death);
4566 binder_inner_proc_unlock(proc);
4567 }
4568 if (put_user(cmd, (uint32_t __user *)ptr))
4569 return -EFAULT;
4570 ptr += sizeof(uint32_t);
4571 if (put_user(cookie,
4572 (binder_uintptr_t __user *)ptr))
4573 return -EFAULT;
4574 ptr += sizeof(binder_uintptr_t);
4575 binder_stat_br(proc, thread, cmd);
4576 if (cmd == BR_DEAD_BINDER)
4577 goto done; /* DEAD_BINDER notifications can cause transactions */
4578 } break;
4579 default:
4580 binder_inner_proc_unlock(proc);
4581 pr_err("%d:%d: bad work type %d\n",
4582 proc->pid, thread->pid, w->type);
4583 break;
4584 }
4585
4586 if (!t)
4587 continue;
4588
4589 BUG_ON(t->buffer == NULL);
4590 if (t->buffer->target_node) {
4591 struct binder_node *target_node = t->buffer->target_node;
4592
4593 trd->target.ptr = target_node->ptr;
4594 trd->cookie = target_node->cookie;
4595 t->saved_priority = task_nice(current);
4596 if (t->priority < target_node->min_priority &&
4597 !(t->flags & TF_ONE_WAY))
4598 binder_set_nice(t->priority);
4599 else if (!(t->flags & TF_ONE_WAY) ||
4600 t->saved_priority > target_node->min_priority)
4601 binder_set_nice(target_node->min_priority);
4602 cmd = BR_TRANSACTION;
4603 } else {
4604 trd->target.ptr = 0;
4605 trd->cookie = 0;
4606 cmd = BR_REPLY;
4607 }
4608 trd->code = t->code;
4609 trd->flags = t->flags;
4610 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4611
4612 t_from = binder_get_txn_from(t);
4613 if (t_from) {
4614 struct task_struct *sender = t_from->proc->tsk;
4615
4616 trd->sender_pid =
4617 task_tgid_nr_ns(sender,
4618 task_active_pid_ns(current));
4619 } else {
4620 trd->sender_pid = 0;
4621 }
4622
4623 ret = binder_apply_fd_fixups(proc, t);
4624 if (ret) {
4625 struct binder_buffer *buffer = t->buffer;
4626 bool oneway = !!(t->flags & TF_ONE_WAY);
4627 int tid = t->debug_id;
4628
4629 if (t_from)
4630 binder_thread_dec_tmpref(t_from);
4631 buffer->transaction = NULL;
4632 binder_cleanup_transaction(t, "fd fixups failed",
4633 BR_FAILED_REPLY);
4634 binder_free_buf(proc, thread, buffer, true);
4635 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4636 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4637 proc->pid, thread->pid,
4638 oneway ? "async " :
4639 (cmd == BR_REPLY ? "reply " : ""),
4640 tid, BR_FAILED_REPLY, ret, __LINE__);
4641 if (cmd == BR_REPLY) {
4642 cmd = BR_FAILED_REPLY;
4643 if (put_user(cmd, (uint32_t __user *)ptr))
4644 return -EFAULT;
4645 ptr += sizeof(uint32_t);
4646 binder_stat_br(proc, thread, cmd);
4647 break;
4648 }
4649 continue;
4650 }
4651 trd->data_size = t->buffer->data_size;
4652 trd->offsets_size = t->buffer->offsets_size;
4653 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4654 trd->data.ptr.offsets = trd->data.ptr.buffer +
4655 ALIGN(t->buffer->data_size,
4656 sizeof(void *));
4657
4658 tr.secctx = t->security_ctx;
4659 if (t->security_ctx) {
4660 cmd = BR_TRANSACTION_SEC_CTX;
4661 trsize = sizeof(tr);
4662 }
4663 if (put_user(cmd, (uint32_t __user *)ptr)) {
4664 if (t_from)
4665 binder_thread_dec_tmpref(t_from);
4666
4667 binder_cleanup_transaction(t, "put_user failed",
4668 BR_FAILED_REPLY);
4669
4670 return -EFAULT;
4671 }
4672 ptr += sizeof(uint32_t);
4673 if (copy_to_user(ptr, &tr, trsize)) {
4674 if (t_from)
4675 binder_thread_dec_tmpref(t_from);
4676
4677 binder_cleanup_transaction(t, "copy_to_user failed",
4678 BR_FAILED_REPLY);
4679
4680 return -EFAULT;
4681 }
4682 ptr += trsize;
4683
4684 trace_binder_transaction_received(t);
4685 binder_stat_br(proc, thread, cmd);
4686 binder_debug(BINDER_DEBUG_TRANSACTION,
4687 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4688 proc->pid, thread->pid,
4689 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4690 (cmd == BR_TRANSACTION_SEC_CTX) ?
4691 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4692 t->debug_id, t_from ? t_from->proc->pid : 0,
4693 t_from ? t_from->pid : 0, cmd,
4694 t->buffer->data_size, t->buffer->offsets_size,
4695 (u64)trd->data.ptr.buffer,
4696 (u64)trd->data.ptr.offsets);
4697
4698 if (t_from)
4699 binder_thread_dec_tmpref(t_from);
4700 t->buffer->allow_user_free = 1;
4701 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4702 binder_inner_proc_lock(thread->proc);
4703 t->to_parent = thread->transaction_stack;
4704 t->to_thread = thread;
4705 thread->transaction_stack = t;
4706 binder_inner_proc_unlock(thread->proc);
4707 } else {
4708 binder_free_transaction(t);
4709 }
4710 break;
4711 }
4712
4713done:
4714
4715 *consumed = ptr - buffer;
4716 binder_inner_proc_lock(proc);
4717 if (proc->requested_threads == 0 &&
4718 list_empty(&thread->proc->waiting_threads) &&
4719 proc->requested_threads_started < proc->max_threads &&
4720 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4721	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4722	     /* spawn a new thread if we leave this out */) {
4723 proc->requested_threads++;
4724 binder_inner_proc_unlock(proc);
4725 binder_debug(BINDER_DEBUG_THREADS,
4726 "%d:%d BR_SPAWN_LOOPER\n",
4727 proc->pid, thread->pid);
4728 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4729 return -EFAULT;
4730 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4731 } else
4732 binder_inner_proc_unlock(proc);
4733 return 0;
4734}
4735
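/*
 * Note the interplay with the BR_NOOP written at the start of the buffer
 * when *consumed == 0: it acts as a placeholder, and the BR_SPAWN_LOOPER
 * above is written over the beginning of the buffer, upgrading that
 * no-op into a request for userspace to spawn another looper thread.
 */
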
4736static void binder_release_work(struct binder_proc *proc,
4737 struct list_head *list)
4738{
4739 struct binder_work *w;
4740 enum binder_work_type wtype;
4741
4742 while (1) {
4743 binder_inner_proc_lock(proc);
4744 w = binder_dequeue_work_head_ilocked(list);
4745 wtype = w ? w->type : 0;
4746 binder_inner_proc_unlock(proc);
4747 if (!w)
4748 return;
4749
4750 switch (wtype) {
4751 case BINDER_WORK_TRANSACTION: {
4752 struct binder_transaction *t;
4753
4754 t = container_of(w, struct binder_transaction, work);
4755
4756 binder_cleanup_transaction(t, "process died.",
4757 BR_DEAD_REPLY);
4758 } break;
4759 case BINDER_WORK_RETURN_ERROR: {
4760 struct binder_error *e = container_of(
4761 w, struct binder_error, work);
4762
4763 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4764 "undelivered TRANSACTION_ERROR: %u\n",
4765 e->cmd);
4766 } break;
4767 case BINDER_WORK_TRANSACTION_COMPLETE: {
4768 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4769 "undelivered TRANSACTION_COMPLETE\n");
4770 kfree(w);
4771 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4772 } break;
4773 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4774 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4775 struct binder_ref_death *death;
4776
4777 death = container_of(w, struct binder_ref_death, work);
4778 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4779 "undelivered death notification, %016llx\n",
4780 (u64)death->cookie);
4781 kfree(death);
4782 binder_stats_deleted(BINDER_STAT_DEATH);
4783 } break;
4784 case BINDER_WORK_NODE:
4785 break;
4786 default:
4787 pr_err("unexpected work type, %d, not freed\n",
4788 wtype);
4789 break;
4790 }
4791 }
4792
4793}
4794
4795static struct binder_thread *binder_get_thread_ilocked(
4796 struct binder_proc *proc, struct binder_thread *new_thread)
4797{
4798 struct binder_thread *thread = NULL;
4799 struct rb_node *parent = NULL;
4800 struct rb_node **p = &proc->threads.rb_node;
4801
4802 while (*p) {
4803 parent = *p;
4804 thread = rb_entry(parent, struct binder_thread, rb_node);
4805
4806 if (current->pid < thread->pid)
4807 p = &(*p)->rb_left;
4808 else if (current->pid > thread->pid)
4809 p = &(*p)->rb_right;
4810 else
4811 return thread;
4812 }
4813 if (!new_thread)
4814 return NULL;
4815 thread = new_thread;
4816 binder_stats_created(BINDER_STAT_THREAD);
4817 thread->proc = proc;
4818 thread->pid = current->pid;
4819 atomic_set(&thread->tmp_ref, 0);
4820 init_waitqueue_head(&thread->wait);
4821 INIT_LIST_HEAD(&thread->todo);
4822 rb_link_node(&thread->rb_node, parent, p);
4823 rb_insert_color(&thread->rb_node, &proc->threads);
4824 thread->looper_need_return = true;
4825 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4826 thread->return_error.cmd = BR_OK;
4827 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4828 thread->reply_error.cmd = BR_OK;
4829 thread->ee.command = BR_OK;
4830 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4831 return thread;
4832}
4833
4834static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4835{
4836 struct binder_thread *thread;
4837 struct binder_thread *new_thread;
4838
4839 binder_inner_proc_lock(proc);
4840 thread = binder_get_thread_ilocked(proc, NULL);
4841 binder_inner_proc_unlock(proc);
4842 if (!thread) {
4843 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4844 if (new_thread == NULL)
4845 return NULL;
4846 binder_inner_proc_lock(proc);
4847 thread = binder_get_thread_ilocked(proc, new_thread);
4848 binder_inner_proc_unlock(proc);
4849 if (thread != new_thread)
4850 kfree(new_thread);
4851 }
4852 return thread;
4853}
4854
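/*
 * A note on the allocation pattern above: kzalloc(GFP_KERNEL) may sleep,
 * so it cannot happen under the inner lock. We therefore look up first,
 * allocate on a miss, and retry the lookup/insert with the new structure
 * in hand; if an entry for this pid is found on the second pass, the
 * spare allocation is simply freed.
 */
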
4855static void binder_free_proc(struct binder_proc *proc)
4856{
4857 struct binder_device *device;
4858
4859 BUG_ON(!list_empty(&proc->todo));
4860 BUG_ON(!list_empty(&proc->delivered_death));
4861 if (proc->outstanding_txns)
4862 pr_warn("%s: Unexpected outstanding_txns %d\n",
4863 __func__, proc->outstanding_txns);
4864 device = container_of(proc->context, struct binder_device, context);
4865 if (refcount_dec_and_test(&device->ref)) {
4866 kfree(proc->context->name);
4867 kfree(device);
4868 }
4869 binder_alloc_deferred_release(&proc->alloc);
4870 put_task_struct(proc->tsk);
4871 put_cred(proc->cred);
4872 binder_stats_deleted(BINDER_STAT_PROC);
4873 kfree(proc);
4874}
4875
4876static void binder_free_thread(struct binder_thread *thread)
4877{
4878 BUG_ON(!list_empty(&thread->todo));
4879 binder_stats_deleted(BINDER_STAT_THREAD);
4880 binder_proc_dec_tmpref(thread->proc);
4881 kfree(thread);
4882}
4883
4884static int binder_thread_release(struct binder_proc *proc,
4885 struct binder_thread *thread)
4886{
4887 struct binder_transaction *t;
4888 struct binder_transaction *send_reply = NULL;
4889 int active_transactions = 0;
4890 struct binder_transaction *last_t = NULL;
4891
4892 binder_inner_proc_lock(thread->proc);
4893 /*
4894 * take a ref on the proc so it survives
4895 * after we remove this thread from proc->threads.
4896	 * The corresponding decrement happens when we actually
4897	 * free the thread in binder_free_thread().
4898 */
4899 proc->tmp_ref++;
4900 /*
4901 * take a ref on this thread to ensure it
4902 * survives while we are releasing it
4903 */
4904 atomic_inc(&thread->tmp_ref);
4905 rb_erase(&thread->rb_node, &proc->threads);
4906 t = thread->transaction_stack;
4907 if (t) {
4908 spin_lock(&t->lock);
4909 if (t->to_thread == thread)
4910 send_reply = t;
4911 } else {
4912 __acquire(&t->lock);
4913 }
4914 thread->is_dead = true;
4915
4916 while (t) {
4917 last_t = t;
4918 active_transactions++;
4919 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4920 "release %d:%d transaction %d %s, still active\n",
4921 proc->pid, thread->pid,
4922 t->debug_id,
4923 (t->to_thread == thread) ? "in" : "out");
4924
4925 if (t->to_thread == thread) {
4926 thread->proc->outstanding_txns--;
4927 t->to_proc = NULL;
4928 t->to_thread = NULL;
4929 if (t->buffer) {
4930 t->buffer->transaction = NULL;
4931 t->buffer = NULL;
4932 }
4933 t = t->to_parent;
4934 } else if (t->from == thread) {
4935 t->from = NULL;
4936 t = t->from_parent;
4937 } else
4938 BUG();
4939 spin_unlock(&last_t->lock);
4940 if (t)
4941 spin_lock(&t->lock);
4942 else
4943 __acquire(&t->lock);
4944 }
4945 /* annotation for sparse, lock not acquired in last iteration above */
4946 __release(&t->lock);
4947
4948 /*
4949 * If this thread used poll, make sure we remove the waitqueue from any
4950 * poll data structures holding it.
4951 */
4952 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4953 wake_up_pollfree(&thread->wait);
4954
4955 binder_inner_proc_unlock(thread->proc);
4956
4957 /*
4958 * This is needed to avoid races between wake_up_pollfree() above and
4959 * someone else removing the last entry from the queue for other reasons
4960 * (e.g. ep_remove_wait_queue() being called due to an epoll file
4961 * descriptor being closed). Such other users hold an RCU read lock, so
4962 * we can be sure they're done after we call synchronize_rcu().
4963 */
4964 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4965 synchronize_rcu();
4966
4967 if (send_reply)
4968 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4969 binder_release_work(proc, &thread->todo);
4970 binder_thread_dec_tmpref(thread);
4971 return active_transactions;
4972}
4973
4974static __poll_t binder_poll(struct file *filp,
4975 struct poll_table_struct *wait)
4976{
4977 struct binder_proc *proc = filp->private_data;
4978 struct binder_thread *thread = NULL;
4979 bool wait_for_proc_work;
4980
4981 thread = binder_get_thread(proc);
4982 if (!thread)
4983 return POLLERR;
4984
4985 binder_inner_proc_lock(thread->proc);
4986 thread->looper |= BINDER_LOOPER_STATE_POLL;
4987 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4988
4989 binder_inner_proc_unlock(thread->proc);
4990
4991 poll_wait(filp, &thread->wait, wait);
4992
4993 if (binder_has_work(thread, wait_for_proc_work))
4994 return EPOLLIN;
4995
4996 return 0;
4997}
4998
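/*
 * Only EPOLLIN is ever reported (besides POLLERR on thread-setup
 * failure): binder has no notion of the fd being "writable". The
 * BINDER_LOOPER_STATE_POLL flag set above also matters at teardown,
 * where binder_thread_release() uses it to decide whether
 * wake_up_pollfree()/synchronize_rcu() are needed.
 */
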
4999static int binder_ioctl_write_read(struct file *filp,
5000 unsigned int cmd, unsigned long arg,
5001 struct binder_thread *thread)
5002{
5003 int ret = 0;
5004 struct binder_proc *proc = filp->private_data;
5005 unsigned int size = _IOC_SIZE(cmd);
5006 void __user *ubuf = (void __user *)arg;
5007 struct binder_write_read bwr;
5008
5009 if (size != sizeof(struct binder_write_read)) {
5010 ret = -EINVAL;
5011 goto out;
5012 }
5013 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5014 ret = -EFAULT;
5015 goto out;
5016 }
5017 binder_debug(BINDER_DEBUG_READ_WRITE,
5018 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5019 proc->pid, thread->pid,
5020 (u64)bwr.write_size, (u64)bwr.write_buffer,
5021 (u64)bwr.read_size, (u64)bwr.read_buffer);
5022
5023 if (bwr.write_size > 0) {
5024 ret = binder_thread_write(proc, thread,
5025 bwr.write_buffer,
5026 bwr.write_size,
5027 &bwr.write_consumed);
5028 trace_binder_write_done(ret);
5029 if (ret < 0) {
5030 bwr.read_consumed = 0;
5031 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5032 ret = -EFAULT;
5033 goto out;
5034 }
5035 }
5036 if (bwr.read_size > 0) {
5037 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5038 bwr.read_size,
5039 &bwr.read_consumed,
5040 filp->f_flags & O_NONBLOCK);
5041 trace_binder_read_done(ret);
5042 binder_inner_proc_lock(proc);
5043 if (!binder_worklist_empty_ilocked(&proc->todo))
5044 binder_wakeup_proc_ilocked(proc);
5045 binder_inner_proc_unlock(proc);
5046 if (ret < 0) {
5047 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5048 ret = -EFAULT;
5049 goto out;
5050 }
5051 }
5052 binder_debug(BINDER_DEBUG_READ_WRITE,
5053 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5054 proc->pid, thread->pid,
5055 (u64)bwr.write_consumed, (u64)bwr.write_size,
5056 (u64)bwr.read_consumed, (u64)bwr.read_size);
5057 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5058 ret = -EFAULT;
5059 goto out;
5060 }
5061out:
5062 return ret;
5063}
5064
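/*
 * Illustrative userspace usage of BINDER_WRITE_READ (a sketch; the
 * buffer names and fd variable are examples, not part of this file):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer	= (binder_uintptr_t)cmds,
 *		.write_size	= cmds_len,
 *		.read_buffer	= (binder_uintptr_t)rbuf,
 *		.read_size	= sizeof(rbuf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * On return, bwr.write_consumed and bwr.read_consumed report how much
 * of each buffer the kernel actually processed/filled.
 */
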
5065static int binder_ioctl_set_ctx_mgr(struct file *filp,
5066 struct flat_binder_object *fbo)
5067{
5068 int ret = 0;
5069 struct binder_proc *proc = filp->private_data;
5070 struct binder_context *context = proc->context;
5071 struct binder_node *new_node;
5072 kuid_t curr_euid = current_euid();
5073
5074 mutex_lock(&context->context_mgr_node_lock);
5075 if (context->binder_context_mgr_node) {
5076 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5077 ret = -EBUSY;
5078 goto out;
5079 }
5080 ret = security_binder_set_context_mgr(proc->cred);
5081 if (ret < 0)
5082 goto out;
5083 if (uid_valid(context->binder_context_mgr_uid)) {
5084 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5085 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5086 from_kuid(&init_user_ns, curr_euid),
5087 from_kuid(&init_user_ns,
5088 context->binder_context_mgr_uid));
5089 ret = -EPERM;
5090 goto out;
5091 }
5092 } else {
5093 context->binder_context_mgr_uid = curr_euid;
5094 }
5095 new_node = binder_new_node(proc, fbo);
5096 if (!new_node) {
5097 ret = -ENOMEM;
5098 goto out;
5099 }
5100 binder_node_lock(new_node);
5101 new_node->local_weak_refs++;
5102 new_node->local_strong_refs++;
5103 new_node->has_strong_ref = 1;
5104 new_node->has_weak_ref = 1;
5105 context->binder_context_mgr_node = new_node;
5106 binder_node_unlock(new_node);
5107 binder_put_node(new_node);
5108out:
5109 mutex_unlock(&context->context_mgr_node_lock);
5110 return ret;
5111}
5112
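/*
 * The node registered here is what handle 0 resolves to: see the
 * "increment && !target" path in binder_thread_write(), where a
 * BC_INCREFS/BC_ACQUIRE on desc 0 takes a reference on
 * context->binder_context_mgr_node.
 */
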
5113static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5114 struct binder_node_info_for_ref *info)
5115{
5116 struct binder_node *node;
5117 struct binder_context *context = proc->context;
5118 __u32 handle = info->handle;
5119
5120 if (info->strong_count || info->weak_count || info->reserved1 ||
5121 info->reserved2 || info->reserved3) {
5122 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5123 proc->pid);
5124 return -EINVAL;
5125 }
5126
5127 /* This ioctl may only be used by the context manager */
5128 mutex_lock(&context->context_mgr_node_lock);
5129 if (!context->binder_context_mgr_node ||
5130 context->binder_context_mgr_node->proc != proc) {
5131 mutex_unlock(&context->context_mgr_node_lock);
5132 return -EPERM;
5133 }
5134 mutex_unlock(&context->context_mgr_node_lock);
5135
5136 node = binder_get_node_from_ref(proc, handle, true, NULL);
5137 if (!node)
5138 return -EINVAL;
5139
5140 info->strong_count = node->local_strong_refs +
5141 node->internal_strong_refs;
5142 info->weak_count = node->local_weak_refs;
5143
5144 binder_put_node(node);
5145
5146 return 0;
5147}
5148
5149static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5150 struct binder_node_debug_info *info)
5151{
5152 struct rb_node *n;
5153 binder_uintptr_t ptr = info->ptr;
5154
5155 memset(info, 0, sizeof(*info));
5156
5157 binder_inner_proc_lock(proc);
5158 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5159 struct binder_node *node = rb_entry(n, struct binder_node,
5160 rb_node);
5161 if (node->ptr > ptr) {
5162 info->ptr = node->ptr;
5163 info->cookie = node->cookie;
5164 info->has_strong_ref = node->has_strong_ref;
5165 info->has_weak_ref = node->has_weak_ref;
5166 break;
5167 }
5168 }
5169 binder_inner_proc_unlock(proc);
5170
5171 return 0;
5172}
5173
5174static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5175{
5176 struct rb_node *n;
5177 struct binder_thread *thread;
5178
5179 if (proc->outstanding_txns > 0)
5180 return true;
5181
5182 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5183 thread = rb_entry(n, struct binder_thread, rb_node);
5184 if (thread->transaction_stack)
5185 return true;
5186 }
5187 return false;
5188}
5189
5190static int binder_ioctl_freeze(struct binder_freeze_info *info,
5191 struct binder_proc *target_proc)
5192{
5193 int ret = 0;
5194
5195 if (!info->enable) {
5196 binder_inner_proc_lock(target_proc);
5197 target_proc->sync_recv = false;
5198 target_proc->async_recv = false;
5199 target_proc->is_frozen = false;
5200 binder_inner_proc_unlock(target_proc);
5201 return 0;
5202 }
5203
5204	 * Freezing the target: prevent new transactions by
5205	 * setting the frozen state. If a timeout is specified,
5206	 * wait for outstanding transactions to drain.
5207 * for transactions to drain.
5208 */
5209 binder_inner_proc_lock(target_proc);
5210 target_proc->sync_recv = false;
5211 target_proc->async_recv = false;
5212 target_proc->is_frozen = true;
5213 binder_inner_proc_unlock(target_proc);
5214
5215 if (info->timeout_ms > 0)
5216 ret = wait_event_interruptible_timeout(
5217 target_proc->freeze_wait,
5218 (!target_proc->outstanding_txns),
5219 msecs_to_jiffies(info->timeout_ms));
5220
5221	/* Check for pending transactions that are waiting for a reply */
5222 if (ret >= 0) {
5223 binder_inner_proc_lock(target_proc);
5224 if (binder_txns_pending_ilocked(target_proc))
5225 ret = -EAGAIN;
5226 binder_inner_proc_unlock(target_proc);
5227 }
5228
5229 if (ret < 0) {
5230 binder_inner_proc_lock(target_proc);
5231 target_proc->is_frozen = false;
5232 binder_inner_proc_unlock(target_proc);
5233 }
5234
5235 return ret;
5236}
5237
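/*
 * Illustrative userspace usage of BINDER_FREEZE (a sketch; names are
 * examples):
 *
 *	struct binder_freeze_info info = {
 *		.pid		= target_pid,
 *		.enable		= 1,
 *		.timeout_ms	= 100,
 *	};
 *	ret = ioctl(binder_fd, BINDER_FREEZE, &info);
 *	// errno == EAGAIN means sync transactions were still pending
 *	// and the freeze was rolled back; retry or leave unfrozen.
 */
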
5238static int binder_ioctl_get_freezer_info(
5239 struct binder_frozen_status_info *info)
5240{
5241 struct binder_proc *target_proc;
5242 bool found = false;
5243 __u32 txns_pending;
5244
5245 info->sync_recv = 0;
5246 info->async_recv = 0;
5247
5248 mutex_lock(&binder_procs_lock);
5249 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5250 if (target_proc->pid == info->pid) {
5251 found = true;
5252 binder_inner_proc_lock(target_proc);
5253 txns_pending = binder_txns_pending_ilocked(target_proc);
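			/*
			 * sync_recv packs two bits: bit 0 reflects
			 * target_proc->sync_recv (a sync transaction
			 * arrived while frozen), and bit 1
			 * (txns_pending << 1) reports transactions
			 * still pending on this proc.
			 */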
5254 info->sync_recv |= target_proc->sync_recv |
5255 (txns_pending << 1);
5256 info->async_recv |= target_proc->async_recv;
5257 binder_inner_proc_unlock(target_proc);
5258 }
5259 }
5260 mutex_unlock(&binder_procs_lock);
5261
5262 if (!found)
5263 return -EINVAL;
5264
5265 return 0;
5266}
5267
5268static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5269 void __user *ubuf)
5270{
5271 struct binder_extended_error ee;
5272
5273 binder_inner_proc_lock(thread->proc);
5274 ee = thread->ee;
5275 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5276 binder_inner_proc_unlock(thread->proc);
5277
5278 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5279 return -EFAULT;
5280
5281 return 0;
5282}
5283
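/*
 * Note that reading the extended error is destructive: thread->ee is
 * reset to BR_OK above, so each BINDER_GET_EXTENDED_ERROR returns a
 * given error state at most once.
 */
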
5284static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5285{
5286 int ret;
5287 struct binder_proc *proc = filp->private_data;
5288 struct binder_thread *thread;
5289 unsigned int size = _IOC_SIZE(cmd);
5290 void __user *ubuf = (void __user *)arg;
5291
5292 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5293 proc->pid, current->pid, cmd, arg);*/
5294
5295 binder_selftest_alloc(&proc->alloc);
5296
5297 trace_binder_ioctl(cmd, arg);
5298
5299 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5300 if (ret)
5301 goto err_unlocked;
5302
5303 thread = binder_get_thread(proc);
5304 if (thread == NULL) {
5305 ret = -ENOMEM;
5306 goto err;
5307 }
5308
5309 switch (cmd) {
5310 case BINDER_WRITE_READ:
5311 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5312 if (ret)
5313 goto err;
5314 break;
5315 case BINDER_SET_MAX_THREADS: {
5316 int max_threads;
5317
5318 if (copy_from_user(&max_threads, ubuf,
5319 sizeof(max_threads))) {
5320 ret = -EINVAL;
5321 goto err;
5322 }
5323 binder_inner_proc_lock(proc);
5324 proc->max_threads = max_threads;
5325 binder_inner_proc_unlock(proc);
5326 break;
5327 }
5328 case BINDER_SET_CONTEXT_MGR_EXT: {
5329 struct flat_binder_object fbo;
5330
5331 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5332 ret = -EINVAL;
5333 goto err;
5334 }
5335 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5336 if (ret)
5337 goto err;
5338 break;
5339 }
5340 case BINDER_SET_CONTEXT_MGR:
5341 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5342 if (ret)
5343 goto err;
5344 break;
5345 case BINDER_THREAD_EXIT:
5346 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5347 proc->pid, thread->pid);
5348 binder_thread_release(proc, thread);
5349 thread = NULL;
5350 break;
5351 case BINDER_VERSION: {
5352 struct binder_version __user *ver = ubuf;
5353
5354 if (size != sizeof(struct binder_version)) {
5355 ret = -EINVAL;
5356 goto err;
5357 }
5358 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5359 &ver->protocol_version)) {
5360 ret = -EINVAL;
5361 goto err;
5362 }
5363 break;
5364 }
5365 case BINDER_GET_NODE_INFO_FOR_REF: {
5366 struct binder_node_info_for_ref info;
5367
5368 if (copy_from_user(&info, ubuf, sizeof(info))) {
5369 ret = -EFAULT;
5370 goto err;
5371 }
5372
5373 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5374 if (ret < 0)
5375 goto err;
5376
5377 if (copy_to_user(ubuf, &info, sizeof(info))) {
5378 ret = -EFAULT;
5379 goto err;
5380 }
5381
5382 break;
5383 }
5384 case BINDER_GET_NODE_DEBUG_INFO: {
5385 struct binder_node_debug_info info;
5386
5387 if (copy_from_user(&info, ubuf, sizeof(info))) {
5388 ret = -EFAULT;
5389 goto err;
5390 }
5391
5392 ret = binder_ioctl_get_node_debug_info(proc, &info);
5393 if (ret < 0)
5394 goto err;
5395
5396 if (copy_to_user(ubuf, &info, sizeof(info))) {
5397 ret = -EFAULT;
5398 goto err;
5399 }
5400 break;
5401 }
5402 case BINDER_FREEZE: {
5403 struct binder_freeze_info info;
5404 struct binder_proc **target_procs = NULL, *target_proc;
5405 int target_procs_count = 0, i = 0;
5406
5407 ret = 0;
5408
5409 if (copy_from_user(&info, ubuf, sizeof(info))) {
5410 ret = -EFAULT;
5411 goto err;
5412 }
5413
5414 mutex_lock(&binder_procs_lock);
5415 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5416 if (target_proc->pid == info.pid)
5417 target_procs_count++;
5418 }
5419
5420 if (target_procs_count == 0) {
5421 mutex_unlock(&binder_procs_lock);
5422 ret = -EINVAL;
5423 goto err;
5424 }
5425
5426 target_procs = kcalloc(target_procs_count,
5427 sizeof(struct binder_proc *),
5428 GFP_KERNEL);
5429
5430 if (!target_procs) {
5431 mutex_unlock(&binder_procs_lock);
5432 ret = -ENOMEM;
5433 goto err;
5434 }
5435
5436 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5437 if (target_proc->pid != info.pid)
5438 continue;
5439
5440 binder_inner_proc_lock(target_proc);
5441 target_proc->tmp_ref++;
5442 binder_inner_proc_unlock(target_proc);
5443
5444 target_procs[i++] = target_proc;
5445 }
5446 mutex_unlock(&binder_procs_lock);
5447
5448 for (i = 0; i < target_procs_count; i++) {
5449 if (ret >= 0)
5450 ret = binder_ioctl_freeze(&info,
5451 target_procs[i]);
5452
5453 binder_proc_dec_tmpref(target_procs[i]);
5454 }
5455
5456 kfree(target_procs);
5457
5458 if (ret < 0)
5459 goto err;
5460 break;
5461 }
5462 case BINDER_GET_FROZEN_INFO: {
5463 struct binder_frozen_status_info info;
5464
5465 if (copy_from_user(&info, ubuf, sizeof(info))) {
5466 ret = -EFAULT;
5467 goto err;
5468 }
5469
5470 ret = binder_ioctl_get_freezer_info(&info);
5471 if (ret < 0)
5472 goto err;
5473
5474 if (copy_to_user(ubuf, &info, sizeof(info))) {
5475 ret = -EFAULT;
5476 goto err;
5477 }
5478 break;
5479 }
5480 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5481 uint32_t enable;
5482
5483 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5484 ret = -EFAULT;
5485 goto err;
5486 }
5487 binder_inner_proc_lock(proc);
5488 proc->oneway_spam_detection_enabled = (bool)enable;
5489 binder_inner_proc_unlock(proc);
5490 break;
5491 }
5492 case BINDER_GET_EXTENDED_ERROR:
5493 ret = binder_ioctl_get_extended_error(thread, ubuf);
5494 if (ret < 0)
5495 goto err;
5496 break;
5497 default:
5498 ret = -EINVAL;
5499 goto err;
5500 }
5501 ret = 0;
5502err:
5503 if (thread)
5504 thread->looper_need_return = false;
5505 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5506 if (ret && ret != -EINTR)
5507 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5508err_unlocked:
5509 trace_binder_ioctl_done(ret);
5510 return ret;
5511}
5512
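/*
 * Illustrative sketch of a typical client bring-up against this
 * dispatcher (error handling omitted; the thread count and the 1 MiB
 * mapping size are arbitrary example values, not requirements):
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	struct binder_version vers;
 *	uint32_t max_threads = 15;
 *
 *	if (ioctl(fd, BINDER_VERSION, &vers) ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		exit(1);	// unusable protocol
 *	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 *	mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 */
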
5513static void binder_vma_open(struct vm_area_struct *vma)
5514{
5515 struct binder_proc *proc = vma->vm_private_data;
5516
5517 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5518 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5519 proc->pid, vma->vm_start, vma->vm_end,
5520 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5521 (unsigned long)pgprot_val(vma->vm_page_prot));
5522}
5523
5524static void binder_vma_close(struct vm_area_struct *vma)
5525{
5526 struct binder_proc *proc = vma->vm_private_data;
5527
5528 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5529 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5530 proc->pid, vma->vm_start, vma->vm_end,
5531 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5532 (unsigned long)pgprot_val(vma->vm_page_prot));
5533 binder_alloc_vma_close(&proc->alloc);
5534}
5535
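/*
 * Buffer pages are allocated by the driver and inserted into the
 * mapping from binder_alloc; userspace never demand-faults them in.
 * Any fault on this VMA therefore means an access outside a live
 * buffer and is answered with SIGBUS.
 */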
5536static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5537{
5538 return VM_FAULT_SIGBUS;
5539}
5540
5541static const struct vm_operations_struct binder_vm_ops = {
5542 .open = binder_vma_open,
5543 .close = binder_vma_close,
5544 .fault = binder_vm_fault,
5545};
5546
5547static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5548{
5549 struct binder_proc *proc = filp->private_data;
5550
5551 if (proc->tsk != current->group_leader)
5552 return -EINVAL;
5553
5554 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5555 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5556 __func__, proc->pid, vma->vm_start, vma->vm_end,
5557 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5558 (unsigned long)pgprot_val(vma->vm_page_prot));
5559
5560 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5561 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5562 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5563 return -EPERM;
5564 }
5565 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5566 vma->vm_flags &= ~VM_MAYWRITE;
5567
5568 vma->vm_ops = &binder_vm_ops;
5569 vma->vm_private_data = proc;
5570
5571 return binder_alloc_mmap_handler(&proc->alloc, vma);
5572}
5573
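/*
 * Illustrative consequence of the flag handling above: a writable
 * mapping is refused outright, and because VM_MAYWRITE is cleared the
 * mapping cannot be made writable after the fact either:
 *
 *	mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 *						// fails with EPERM
 *	mprotect(map, sz, PROT_WRITE);		// fails with EACCES
 */
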
5574static int binder_open(struct inode *nodp, struct file *filp)
5575{
5576 struct binder_proc *proc, *itr;
5577 struct binder_device *binder_dev;
5578 struct binderfs_info *info;
5579 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5580 bool existing_pid = false;
5581
5582 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5583 current->group_leader->pid, current->pid);
5584
5585 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5586 if (proc == NULL)
5587 return -ENOMEM;
5588 spin_lock_init(&proc->inner_lock);
5589 spin_lock_init(&proc->outer_lock);
5590 get_task_struct(current->group_leader);
5591 proc->tsk = current->group_leader;
5592 proc->cred = get_cred(filp->f_cred);
5593 INIT_LIST_HEAD(&proc->todo);
5594 init_waitqueue_head(&proc->freeze_wait);
5595 proc->default_priority = task_nice(current);
5596 /* binderfs stashes devices in i_private */
5597 if (is_binderfs_device(nodp)) {
5598 binder_dev = nodp->i_private;
5599 info = nodp->i_sb->s_fs_info;
5600 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5601 } else {
5602 binder_dev = container_of(filp->private_data,
5603 struct binder_device, miscdev);
5604 }
5605 refcount_inc(&binder_dev->ref);
5606 proc->context = &binder_dev->context;
5607 binder_alloc_init(&proc->alloc);
5608
5609 binder_stats_created(BINDER_STAT_PROC);
5610 proc->pid = current->group_leader->pid;
5611 INIT_LIST_HEAD(&proc->delivered_death);
5612 INIT_LIST_HEAD(&proc->waiting_threads);
5613 filp->private_data = proc;
5614
5615 mutex_lock(&binder_procs_lock);
5616 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5617 if (itr->pid == proc->pid) {
5618 existing_pid = true;
5619 break;
5620 }
5621 }
5622 hlist_add_head(&proc->proc_node, &binder_procs);
5623 mutex_unlock(&binder_procs_lock);
5624
5625 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5626 char strbuf[11];
5627
5628 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5629		/*
5630		 * proc debug entries are shared between contexts.
5631		 * Only create for the first PID to avoid debugfs log spamming.
5632		 * The printing code will print all contexts for a given PID
5633		 * anyway, so this is not a problem.
5634		 */
5635 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5636 binder_debugfs_dir_entry_proc,
5637 (void *)(unsigned long)proc->pid,
5638 &proc_fops);
5639 }
5640
5641 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5642 char strbuf[11];
5643 struct dentry *binderfs_entry;
5644
5645 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5646		/*
5647		 * Similar to debugfs, the process-specific log file is shared
5648		 * between contexts. Only create it for the first PID. This is
5649		 * OK since, as with debugfs, the log file will contain
5650		 * information on all contexts of a given PID.
5651		 */
5652 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5653 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5654 if (!IS_ERR(binderfs_entry)) {
5655 proc->binderfs_entry = binderfs_entry;
5656 } else {
5657 int error;
5658
5659 error = PTR_ERR(binderfs_entry);
5660 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5661 strbuf, error);
5662 }
5663 }
5664
5665 return 0;
5666}
5667
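/*
 * Illustrative sketch: with debugfs mounted in the usual location, the
 * per-process entry created above can be inspected with e.g.
 *
 *	cat /sys/kernel/debug/binder/proc/<pid>
 *
 * and covers every binder context the process has opened.
 */
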
5668static int binder_flush(struct file *filp, fl_owner_t id)
5669{
5670 struct binder_proc *proc = filp->private_data;
5671
5672 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5673
5674 return 0;
5675}
5676
5677static void binder_deferred_flush(struct binder_proc *proc)
5678{
5679 struct rb_node *n;
5680 int wake_count = 0;
5681
5682 binder_inner_proc_lock(proc);
5683 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5684 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5685
5686 thread->looper_need_return = true;
5687 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5688 wake_up_interruptible(&thread->wait);
5689 wake_count++;
5690 }
5691 }
5692 binder_inner_proc_unlock(proc);
5693
5694 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5695 "binder_flush: %d woke %d threads\n", proc->pid,
5696 wake_count);
5697}
5698
5699static int binder_release(struct inode *nodp, struct file *filp)
5700{
5701 struct binder_proc *proc = filp->private_data;
5702
5703 debugfs_remove(proc->debugfs_entry);
5704
5705 if (proc->binderfs_entry) {
5706 binderfs_remove_file(proc->binderfs_entry);
5707 proc->binderfs_entry = NULL;
5708 }
5709
5710 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5711
5712 return 0;
5713}
5714
5715static int binder_node_release(struct binder_node *node, int refs)
5716{
5717 struct binder_ref *ref;
5718 int death = 0;
5719 struct binder_proc *proc = node->proc;
5720
5721 binder_release_work(proc, &node->async_todo);
5722
5723 binder_node_lock(node);
5724 binder_inner_proc_lock(proc);
5725 binder_dequeue_work_ilocked(&node->work);
5726	/*
5727	 * The caller must have taken a temporary ref on the node.
5728	 */
5729 BUG_ON(!node->tmp_refs);
5730 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5731 binder_inner_proc_unlock(proc);
5732 binder_node_unlock(node);
5733 binder_free_node(node);
5734
5735 return refs;
5736 }
5737
5738 node->proc = NULL;
5739 node->local_strong_refs = 0;
5740 node->local_weak_refs = 0;
5741 binder_inner_proc_unlock(proc);
5742
5743 spin_lock(&binder_dead_nodes_lock);
5744 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5745 spin_unlock(&binder_dead_nodes_lock);
5746
5747 hlist_for_each_entry(ref, &node->refs, node_entry) {
5748 refs++;
5749 /*
5750 * Need the node lock to synchronize
5751 * with new notification requests and the
5752 * inner lock to synchronize with queued
5753 * death notifications.
5754 */
5755 binder_inner_proc_lock(ref->proc);
5756 if (!ref->death) {
5757 binder_inner_proc_unlock(ref->proc);
5758 continue;
5759 }
5760
5761 death++;
5762
5763 BUG_ON(!list_empty(&ref->death->work.entry));
5764 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5765 binder_enqueue_work_ilocked(&ref->death->work,
5766 &ref->proc->todo);
5767 binder_wakeup_proc_ilocked(ref->proc);
5768 binder_inner_proc_unlock(ref->proc);
5769 }
5770
5771 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5772 "node %d now dead, refs %d, death %d\n",
5773 node->debug_id, refs, death);
5774 binder_node_unlock(node);
5775 binder_put_node(node);
5776
5777 return refs;
5778}
5779
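/*
 * Path of a death notification ending above: a client registers
 * interest with BC_REQUEST_DEATH_NOTIFICATION, which sets ref->death;
 * when the node's owning process dies, binder_node_release() queues
 * BINDER_WORK_DEAD_BINDER on each such proc, the client reads back
 * BR_DEAD_BINDER, and finally acknowledges with BC_DEAD_BINDER_DONE.
 */
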
5780static void binder_deferred_release(struct binder_proc *proc)
5781{
5782 struct binder_context *context = proc->context;
5783 struct rb_node *n;
5784 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5785
5786 mutex_lock(&binder_procs_lock);
5787 hlist_del(&proc->proc_node);
5788 mutex_unlock(&binder_procs_lock);
5789
5790 mutex_lock(&context->context_mgr_node_lock);
5791 if (context->binder_context_mgr_node &&
5792 context->binder_context_mgr_node->proc == proc) {
5793 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5794 "%s: %d context_mgr_node gone\n",
5795 __func__, proc->pid);
5796 context->binder_context_mgr_node = NULL;
5797 }
5798 mutex_unlock(&context->context_mgr_node_lock);
5799 binder_inner_proc_lock(proc);
5800	/*
5801	 * Make sure proc stays alive after we
5802	 * remove all the threads.
5803	 */
5804 proc->tmp_ref++;
5805
5806 proc->is_dead = true;
5807 proc->is_frozen = false;
5808 proc->sync_recv = false;
5809 proc->async_recv = false;
5810 threads = 0;
5811 active_transactions = 0;
5812 while ((n = rb_first(&proc->threads))) {
5813 struct binder_thread *thread;
5814
5815 thread = rb_entry(n, struct binder_thread, rb_node);
5816 binder_inner_proc_unlock(proc);
5817 threads++;
5818 active_transactions += binder_thread_release(proc, thread);
5819 binder_inner_proc_lock(proc);
5820 }
5821
5822 nodes = 0;
5823 incoming_refs = 0;
5824 while ((n = rb_first(&proc->nodes))) {
5825 struct binder_node *node;
5826
5827 node = rb_entry(n, struct binder_node, rb_node);
5828 nodes++;
5829		/*
5830		 * take a temporary ref on the node before calling
5831		 * binder_node_release(), which will either kfree()
5832		 * the node or call binder_put_node().
5833		 */
5834 binder_inc_node_tmpref_ilocked(node);
5835 rb_erase(&node->rb_node, &proc->nodes);
5836 binder_inner_proc_unlock(proc);
5837 incoming_refs = binder_node_release(node, incoming_refs);
5838 binder_inner_proc_lock(proc);
5839 }
5840 binder_inner_proc_unlock(proc);
5841
5842 outgoing_refs = 0;
5843 binder_proc_lock(proc);
5844 while ((n = rb_first(&proc->refs_by_desc))) {
5845 struct binder_ref *ref;
5846
5847 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5848 outgoing_refs++;
5849 binder_cleanup_ref_olocked(ref);
5850 binder_proc_unlock(proc);
5851 binder_free_ref(ref);
5852 binder_proc_lock(proc);
5853 }
5854 binder_proc_unlock(proc);
5855
5856 binder_release_work(proc, &proc->todo);
5857 binder_release_work(proc, &proc->delivered_death);
5858
5859 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5860 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5861 __func__, proc->pid, threads, nodes, incoming_refs,
5862 outgoing_refs, active_transactions);
5863
5864 binder_proc_dec_tmpref(proc);
5865}
5866
5867static void binder_deferred_func(struct work_struct *work)
5868{
5869 struct binder_proc *proc;
5870
5871 int defer;
5872
5873 do {
5874 mutex_lock(&binder_deferred_lock);
5875 if (!hlist_empty(&binder_deferred_list)) {
5876 proc = hlist_entry(binder_deferred_list.first,
5877 struct binder_proc, deferred_work_node);
5878 hlist_del_init(&proc->deferred_work_node);
5879 defer = proc->deferred_work;
5880 proc->deferred_work = 0;
5881 } else {
5882 proc = NULL;
5883 defer = 0;
5884 }
5885 mutex_unlock(&binder_deferred_lock);
5886
5887 if (defer & BINDER_DEFERRED_FLUSH)
5888 binder_deferred_flush(proc);
5889
5890 if (defer & BINDER_DEFERRED_RELEASE)
5891 binder_deferred_release(proc); /* frees proc */
5892 } while (proc);
5893}
5894static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5895
5896static void
5897binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5898{
5899 mutex_lock(&binder_deferred_lock);
5900 proc->deferred_work |= defer;
5901 if (hlist_unhashed(&proc->deferred_work_node)) {
5902 hlist_add_head(&proc->deferred_work_node,
5903 &binder_deferred_list);
5904 schedule_work(&binder_deferred_work);
5905 }
5906 mutex_unlock(&binder_deferred_lock);
5907}
5908
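/*
 * Deferred-work pattern used by flush and release: callers OR their
 * request into proc->deferred_work under binder_deferred_lock and hash
 * the proc onto binder_deferred_list at most once;
 * binder_deferred_func() then drains the list on the system workqueue,
 * keeping the heavy teardown out of the ->flush()/->release() caller's
 * context.
 */
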
5909static void print_binder_transaction_ilocked(struct seq_file *m,
5910 struct binder_proc *proc,
5911 const char *prefix,
5912 struct binder_transaction *t)
5913{
5914 struct binder_proc *to_proc;
5915 struct binder_buffer *buffer = t->buffer;
5916
5917 spin_lock(&t->lock);
5918 to_proc = t->to_proc;
5919 seq_printf(m,
5920 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5921 prefix, t->debug_id, t,
5922 t->from ? t->from->proc->pid : 0,
5923 t->from ? t->from->pid : 0,
5924 to_proc ? to_proc->pid : 0,
5925 t->to_thread ? t->to_thread->pid : 0,
5926 t->code, t->flags, t->priority, t->need_reply);
5927 spin_unlock(&t->lock);
5928
5929 if (proc != to_proc) {
5930		/*
5931		 * We can only safely dereference buffer while holding
5932		 * the correct proc inner lock for this node.
5933		 */
5934 seq_puts(m, "\n");
5935 return;
5936 }
5937
5938 if (buffer == NULL) {
5939 seq_puts(m, " buffer free\n");
5940 return;
5941 }
5942 if (buffer->target_node)
5943 seq_printf(m, " node %d", buffer->target_node->debug_id);
5944 seq_printf(m, " size %zd:%zd data %pK\n",
5945 buffer->data_size, buffer->offsets_size,
5946 buffer->user_data);
5947}
5948
5949static void print_binder_work_ilocked(struct seq_file *m,
5950 struct binder_proc *proc,
5951 const char *prefix,
5952 const char *transaction_prefix,
5953 struct binder_work *w)
5954{
5955 struct binder_node *node;
5956 struct binder_transaction *t;
5957
5958 switch (w->type) {
5959 case BINDER_WORK_TRANSACTION:
5960 t = container_of(w, struct binder_transaction, work);
5961 print_binder_transaction_ilocked(
5962 m, proc, transaction_prefix, t);
5963 break;
5964 case BINDER_WORK_RETURN_ERROR: {
5965 struct binder_error *e = container_of(
5966 w, struct binder_error, work);
5967
5968 seq_printf(m, "%stransaction error: %u\n",
5969 prefix, e->cmd);
5970 } break;
5971 case BINDER_WORK_TRANSACTION_COMPLETE:
5972 seq_printf(m, "%stransaction complete\n", prefix);
5973 break;
5974 case BINDER_WORK_NODE:
5975 node = container_of(w, struct binder_node, work);
5976 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5977 prefix, node->debug_id,
5978 (u64)node->ptr, (u64)node->cookie);
5979 break;
5980 case BINDER_WORK_DEAD_BINDER:
5981 seq_printf(m, "%shas dead binder\n", prefix);
5982 break;
5983 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5984 seq_printf(m, "%shas cleared dead binder\n", prefix);
5985 break;
5986 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5987 seq_printf(m, "%shas cleared death notification\n", prefix);
5988 break;
5989 default:
5990 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5991 break;
5992 }
5993}
5994
5995static void print_binder_thread_ilocked(struct seq_file *m,
5996 struct binder_thread *thread,
5997 int print_always)
5998{
5999 struct binder_transaction *t;
6000 struct binder_work *w;
6001 size_t start_pos = m->count;
6002 size_t header_pos;
6003
6004 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6005 thread->pid, thread->looper,
6006 thread->looper_need_return,
6007 atomic_read(&thread->tmp_ref));
6008 header_pos = m->count;
6009 t = thread->transaction_stack;
6010 while (t) {
6011 if (t->from == thread) {
6012 print_binder_transaction_ilocked(m, thread->proc,
6013 " outgoing transaction", t);
6014 t = t->from_parent;
6015 } else if (t->to_thread == thread) {
6016 print_binder_transaction_ilocked(m, thread->proc,
6017 " incoming transaction", t);
6018 t = t->to_parent;
6019 } else {
6020 print_binder_transaction_ilocked(m, thread->proc,
6021 " bad transaction", t);
6022 t = NULL;
6023 }
6024 }
6025 list_for_each_entry(w, &thread->todo, entry) {
6026 print_binder_work_ilocked(m, thread->proc, " ",
6027 " pending transaction", w);
6028 }
6029 if (!print_always && m->count == header_pos)
6030 m->count = start_pos;
6031}
6032
6033static void print_binder_node_nilocked(struct seq_file *m,
6034 struct binder_node *node)
6035{
6036 struct binder_ref *ref;
6037 struct binder_work *w;
6038 int count;
6039
6040 count = 0;
6041 hlist_for_each_entry(ref, &node->refs, node_entry)
6042 count++;
6043
6044 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6045 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6046 node->has_strong_ref, node->has_weak_ref,
6047 node->local_strong_refs, node->local_weak_refs,
6048 node->internal_strong_refs, count, node->tmp_refs);
6049 if (count) {
6050 seq_puts(m, " proc");
6051 hlist_for_each_entry(ref, &node->refs, node_entry)
6052 seq_printf(m, " %d", ref->proc->pid);
6053 }
6054 seq_puts(m, "\n");
6055 if (node->proc) {
6056 list_for_each_entry(w, &node->async_todo, entry)
6057 print_binder_work_ilocked(m, node->proc, " ",
6058 " pending async transaction", w);
6059 }
6060}
6061
6062static void print_binder_ref_olocked(struct seq_file *m,
6063 struct binder_ref *ref)
6064{
6065 binder_node_lock(ref->node);
6066 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6067 ref->data.debug_id, ref->data.desc,
6068 ref->node->proc ? "" : "dead ",
6069 ref->node->debug_id, ref->data.strong,
6070 ref->data.weak, ref->death);
6071 binder_node_unlock(ref->node);
6072}
6073
6074static void print_binder_proc(struct seq_file *m,
6075 struct binder_proc *proc, int print_all)
6076{
6077 struct binder_work *w;
6078 struct rb_node *n;
6079 size_t start_pos = m->count;
6080 size_t header_pos;
6081 struct binder_node *last_node = NULL;
6082
6083 seq_printf(m, "proc %d\n", proc->pid);
6084 seq_printf(m, "context %s\n", proc->context->name);
6085 header_pos = m->count;
6086
6087 binder_inner_proc_lock(proc);
6088 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6089 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6090 rb_node), print_all);
6091
6092 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6093 struct binder_node *node = rb_entry(n, struct binder_node,
6094 rb_node);
6095 if (!print_all && !node->has_async_transaction)
6096 continue;
6097
6098 /*
6099 * take a temporary reference on the node so it
6100 * survives and isn't removed from the tree
6101 * while we print it.
6102 */
6103 binder_inc_node_tmpref_ilocked(node);
6104 /* Need to drop inner lock to take node lock */
6105 binder_inner_proc_unlock(proc);
6106 if (last_node)
6107 binder_put_node(last_node);
6108 binder_node_inner_lock(node);
6109 print_binder_node_nilocked(m, node);
6110 binder_node_inner_unlock(node);
6111 last_node = node;
6112 binder_inner_proc_lock(proc);
6113 }
6114 binder_inner_proc_unlock(proc);
6115 if (last_node)
6116 binder_put_node(last_node);
6117
6118 if (print_all) {
6119 binder_proc_lock(proc);
6120 for (n = rb_first(&proc->refs_by_desc);
6121 n != NULL;
6122 n = rb_next(n))
6123 print_binder_ref_olocked(m, rb_entry(n,
6124 struct binder_ref,
6125 rb_node_desc));
6126 binder_proc_unlock(proc);
6127 }
6128 binder_alloc_print_allocated(m, &proc->alloc);
6129 binder_inner_proc_lock(proc);
6130 list_for_each_entry(w, &proc->todo, entry)
6131 print_binder_work_ilocked(m, proc, " ",
6132 " pending transaction", w);
6133 list_for_each_entry(w, &proc->delivered_death, entry) {
6134 seq_puts(m, " has delivered dead binder\n");
6135 break;
6136 }
6137 binder_inner_proc_unlock(proc);
6138 if (!print_all && m->count == header_pos)
6139 m->count = start_pos;
6140}
6141
6142static const char * const binder_return_strings[] = {
6143 "BR_ERROR",
6144 "BR_OK",
6145 "BR_TRANSACTION",
6146 "BR_REPLY",
6147 "BR_ACQUIRE_RESULT",
6148 "BR_DEAD_REPLY",
6149 "BR_TRANSACTION_COMPLETE",
6150 "BR_INCREFS",
6151 "BR_ACQUIRE",
6152 "BR_RELEASE",
6153 "BR_DECREFS",
6154 "BR_ATTEMPT_ACQUIRE",
6155 "BR_NOOP",
6156 "BR_SPAWN_LOOPER",
6157 "BR_FINISHED",
6158 "BR_DEAD_BINDER",
6159 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6160 "BR_FAILED_REPLY",
6161 "BR_FROZEN_REPLY",
6162 "BR_ONEWAY_SPAM_SUSPECT",
6163};
6164
6165static const char * const binder_command_strings[] = {
6166 "BC_TRANSACTION",
6167 "BC_REPLY",
6168 "BC_ACQUIRE_RESULT",
6169 "BC_FREE_BUFFER",
6170 "BC_INCREFS",
6171 "BC_ACQUIRE",
6172 "BC_RELEASE",
6173 "BC_DECREFS",
6174 "BC_INCREFS_DONE",
6175 "BC_ACQUIRE_DONE",
6176 "BC_ATTEMPT_ACQUIRE",
6177 "BC_REGISTER_LOOPER",
6178 "BC_ENTER_LOOPER",
6179 "BC_EXIT_LOOPER",
6180 "BC_REQUEST_DEATH_NOTIFICATION",
6181 "BC_CLEAR_DEATH_NOTIFICATION",
6182 "BC_DEAD_BINDER_DONE",
6183 "BC_TRANSACTION_SG",
6184 "BC_REPLY_SG",
6185};
6186
6187static const char * const binder_objstat_strings[] = {
6188 "proc",
6189 "thread",
6190 "node",
6191 "ref",
6192 "death",
6193 "transaction",
6194 "transaction_complete"
6195};
6196
6197static void print_binder_stats(struct seq_file *m, const char *prefix,
6198 struct binder_stats *stats)
6199{
6200 int i;
6201
6202 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6203 ARRAY_SIZE(binder_command_strings));
6204 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6205 int temp = atomic_read(&stats->bc[i]);
6206
6207 if (temp)
6208 seq_printf(m, "%s%s: %d\n", prefix,
6209 binder_command_strings[i], temp);
6210 }
6211
6212 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6213 ARRAY_SIZE(binder_return_strings));
6214 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6215 int temp = atomic_read(&stats->br[i]);
6216
6217 if (temp)
6218 seq_printf(m, "%s%s: %d\n", prefix,
6219 binder_return_strings[i], temp);
6220 }
6221
6222 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6223 ARRAY_SIZE(binder_objstat_strings));
6224 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6225 ARRAY_SIZE(stats->obj_deleted));
6226 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6227 int created = atomic_read(&stats->obj_created[i]);
6228 int deleted = atomic_read(&stats->obj_deleted[i]);
6229
6230 if (created || deleted)
6231 seq_printf(m, "%s%s: active %d total %d\n",
6232 prefix,
6233 binder_objstat_strings[i],
6234 created - deleted,
6235 created);
6236 }
6237}
6238
6239static void print_binder_proc_stats(struct seq_file *m,
6240 struct binder_proc *proc)
6241{
6242 struct binder_work *w;
6243 struct binder_thread *thread;
6244 struct rb_node *n;
6245 int count, strong, weak, ready_threads;
6246 size_t free_async_space =
6247 binder_alloc_get_free_async_space(&proc->alloc);
6248
6249 seq_printf(m, "proc %d\n", proc->pid);
6250 seq_printf(m, "context %s\n", proc->context->name);
6251 count = 0;
6252 ready_threads = 0;
6253 binder_inner_proc_lock(proc);
6254 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6255 count++;
6256
6257 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6258 ready_threads++;
6259
6260 seq_printf(m, " threads: %d\n", count);
6261 seq_printf(m, " requested threads: %d+%d/%d\n"
6262 " ready threads %d\n"
6263 " free async space %zd\n", proc->requested_threads,
6264 proc->requested_threads_started, proc->max_threads,
6265 ready_threads,
6266 free_async_space);
6267 count = 0;
6268 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6269 count++;
6270 binder_inner_proc_unlock(proc);
6271 seq_printf(m, " nodes: %d\n", count);
6272 count = 0;
6273 strong = 0;
6274 weak = 0;
6275 binder_proc_lock(proc);
6276 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6277 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6278 rb_node_desc);
6279 count++;
6280 strong += ref->data.strong;
6281 weak += ref->data.weak;
6282 }
6283 binder_proc_unlock(proc);
6284 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6285
6286 count = binder_alloc_get_allocated_count(&proc->alloc);
6287 seq_printf(m, " buffers: %d\n", count);
6288
6289 binder_alloc_print_pages(m, &proc->alloc);
6290
6291 count = 0;
6292 binder_inner_proc_lock(proc);
6293 list_for_each_entry(w, &proc->todo, entry) {
6294 if (w->type == BINDER_WORK_TRANSACTION)
6295 count++;
6296 }
6297 binder_inner_proc_unlock(proc);
6298 seq_printf(m, " pending transactions: %d\n", count);
6299
6300 print_binder_stats(m, " ", &proc->stats);
6301}
6302
6303static int state_show(struct seq_file *m, void *unused)
6304{
6305 struct binder_proc *proc;
6306 struct binder_node *node;
6307 struct binder_node *last_node = NULL;
6308
6309 seq_puts(m, "binder state:\n");
6310
6311 spin_lock(&binder_dead_nodes_lock);
6312 if (!hlist_empty(&binder_dead_nodes))
6313 seq_puts(m, "dead nodes:\n");
6314 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6315 /*
6316 * take a temporary reference on the node so it
6317 * survives and isn't removed from the list
6318 * while we print it.
6319 */
6320 node->tmp_refs++;
6321 spin_unlock(&binder_dead_nodes_lock);
6322 if (last_node)
6323 binder_put_node(last_node);
6324 binder_node_lock(node);
6325 print_binder_node_nilocked(m, node);
6326 binder_node_unlock(node);
6327 last_node = node;
6328 spin_lock(&binder_dead_nodes_lock);
6329 }
6330 spin_unlock(&binder_dead_nodes_lock);
6331 if (last_node)
6332 binder_put_node(last_node);
6333
6334 mutex_lock(&binder_procs_lock);
6335 hlist_for_each_entry(proc, &binder_procs, proc_node)
6336 print_binder_proc(m, proc, 1);
6337 mutex_unlock(&binder_procs_lock);
6338
6339 return 0;
6340}
6341
6342static int stats_show(struct seq_file *m, void *unused)
6343{
6344 struct binder_proc *proc;
6345
6346 seq_puts(m, "binder stats:\n");
6347
6348 print_binder_stats(m, "", &binder_stats);
6349
6350 mutex_lock(&binder_procs_lock);
6351 hlist_for_each_entry(proc, &binder_procs, proc_node)
6352 print_binder_proc_stats(m, proc);
6353 mutex_unlock(&binder_procs_lock);
6354
6355 return 0;
6356}
6357
6358static int transactions_show(struct seq_file *m, void *unused)
6359{
6360 struct binder_proc *proc;
6361
6362 seq_puts(m, "binder transactions:\n");
6363 mutex_lock(&binder_procs_lock);
6364 hlist_for_each_entry(proc, &binder_procs, proc_node)
6365 print_binder_proc(m, proc, 0);
6366 mutex_unlock(&binder_procs_lock);
6367
6368 return 0;
6369}
6370
6371static int proc_show(struct seq_file *m, void *unused)
6372{
6373 struct binder_proc *itr;
6374 int pid = (unsigned long)m->private;
6375
6376 mutex_lock(&binder_procs_lock);
6377 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6378 if (itr->pid == pid) {
6379 seq_puts(m, "binder proc state:\n");
6380 print_binder_proc(m, itr, 1);
6381 }
6382 }
6383 mutex_unlock(&binder_procs_lock);
6384
6385 return 0;
6386}
6387
6388static void print_binder_transaction_log_entry(struct seq_file *m,
6389 struct binder_transaction_log_entry *e)
6390{
6391 int debug_id = READ_ONCE(e->debug_id_done);
6392 /*
6393 * read barrier to guarantee debug_id_done read before
6394 * we print the log values
6395 */
6396 smp_rmb();
6397 seq_printf(m,
6398 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6399 e->debug_id, (e->call_type == 2) ? "reply" :
6400 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6401 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6402 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6403 e->return_error, e->return_error_param,
6404 e->return_error_line);
6405	/*
6406	 * read barrier to guarantee debug_id_done is read after
6407	 * the fields of the entry have been printed
6408	 */
6409 smp_rmb();
6410 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6411 "\n" : " (incomplete)\n");
6412}
6413
6414static int transaction_log_show(struct seq_file *m, void *unused)
6415{
6416 struct binder_transaction_log *log = m->private;
6417 unsigned int log_cur = atomic_read(&log->cur);
6418 unsigned int count;
6419 unsigned int cur;
6420 int i;
6421
6422 count = log_cur + 1;
6423 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6424 0 : count % ARRAY_SIZE(log->entry);
6425 if (count > ARRAY_SIZE(log->entry) || log->full)
6426 count = ARRAY_SIZE(log->entry);
6427 for (i = 0; i < count; i++) {
6428 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6429
6430 print_binder_transaction_log_entry(m, &log->entry[index]);
6431 }
6432 return 0;
6433}
6434
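/*
 * Worked example of the ring arithmetic above, assuming a 32-entry
 * log: with log->full set and log_cur == 40, count becomes 32 and cur
 * starts at 41 % 32 == 9, so slots print oldest-first from 9 around
 * to 8. Before the first wrap (!log->full), cur is 0 and count ==
 * log_cur + 1 prints only the slots written so far.
 */
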
6435const struct file_operations binder_fops = {
6436 .owner = THIS_MODULE,
6437 .poll = binder_poll,
6438 .unlocked_ioctl = binder_ioctl,
6439 .compat_ioctl = compat_ptr_ioctl,
6440 .mmap = binder_mmap,
6441 .open = binder_open,
6442 .flush = binder_flush,
6443 .release = binder_release,
6444};
6445
6446DEFINE_SHOW_ATTRIBUTE(state);
6447DEFINE_SHOW_ATTRIBUTE(stats);
6448DEFINE_SHOW_ATTRIBUTE(transactions);
6449DEFINE_SHOW_ATTRIBUTE(transaction_log);
6450
6451const struct binder_debugfs_entry binder_debugfs_entries[] = {
6452 {
6453 .name = "state",
6454 .mode = 0444,
6455 .fops = &state_fops,
6456 .data = NULL,
6457 },
6458 {
6459 .name = "stats",
6460 .mode = 0444,
6461 .fops = &stats_fops,
6462 .data = NULL,
6463 },
6464 {
6465 .name = "transactions",
6466 .mode = 0444,
6467 .fops = &transactions_fops,
6468 .data = NULL,
6469 },
6470 {
6471 .name = "transaction_log",
6472 .mode = 0444,
6473 .fops = &transaction_log_fops,
6474 .data = &binder_transaction_log,
6475 },
6476 {
6477 .name = "failed_transaction_log",
6478 .mode = 0444,
6479 .fops = &transaction_log_fops,
6480 .data = &binder_transaction_log_failed,
6481 },
6482 {} /* terminator */
6483};
6484
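/*
 * With debugfs mounted at /sys/kernel/debug, the table above
 * materializes as
 *
 *	/sys/kernel/debug/binder/state
 *	/sys/kernel/debug/binder/stats
 *	/sys/kernel/debug/binder/transactions
 *	/sys/kernel/debug/binder/transaction_log
 *	/sys/kernel/debug/binder/failed_transaction_log
 *
 * all read-only (0444) snapshots taken under the locks described at
 * the top of this file.
 */
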
6485static int __init init_binder_device(const char *name)
6486{
6487 int ret;
6488 struct binder_device *binder_device;
6489
6490 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6491 if (!binder_device)
6492 return -ENOMEM;
6493
6494 binder_device->miscdev.fops = &binder_fops;
6495 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6496 binder_device->miscdev.name = name;
6497
6498 refcount_set(&binder_device->ref, 1);
6499 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6500 binder_device->context.name = name;
6501 mutex_init(&binder_device->context.context_mgr_node_lock);
6502
6503 ret = misc_register(&binder_device->miscdev);
6504 if (ret < 0) {
6505 kfree(binder_device);
6506 return ret;
6507 }
6508
6509 hlist_add_head(&binder_device->hlist, &binder_devices);
6510
6511 return ret;
6512}
6513
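/*
 * Each device registered above appears as a dynamic-minor misc char
 * device, i.e. /dev/<name> once userspace processes the uevent; the
 * binder context shares its name with the device node.
 */
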
6514static int __init binder_init(void)
6515{
6516 int ret;
6517 char *device_name, *device_tmp;
6518 struct binder_device *device;
6519 struct hlist_node *tmp;
6520 char *device_names = NULL;
6521
6522 ret = binder_alloc_shrinker_init();
6523 if (ret)
6524 return ret;
6525
6526 atomic_set(&binder_transaction_log.cur, ~0U);
6527 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6528
6529 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6530 if (binder_debugfs_dir_entry_root) {
6531 const struct binder_debugfs_entry *db_entry;
6532
6533 binder_for_each_debugfs_entry(db_entry)
6534 debugfs_create_file(db_entry->name,
6535 db_entry->mode,
6536 binder_debugfs_dir_entry_root,
6537 db_entry->data,
6538 db_entry->fops);
6539
6540 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6541 binder_debugfs_dir_entry_root);
6542 }
6543
6544 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6545 strcmp(binder_devices_param, "") != 0) {
6546		/*
6547		 * Copy the module parameter string, because we don't want to
6548		 * tokenize it in-place.
6549		 */
6550 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6551 if (!device_names) {
6552 ret = -ENOMEM;
6553 goto err_alloc_device_names_failed;
6554 }
6555
6556 device_tmp = device_names;
6557 while ((device_name = strsep(&device_tmp, ","))) {
6558 ret = init_binder_device(device_name);
6559 if (ret)
6560 goto err_init_binder_device_failed;
6561 }
6562 }
6563
6564 ret = init_binderfs();
6565 if (ret)
6566 goto err_init_binder_device_failed;
6567
6568 return ret;
6569
6570err_init_binder_device_failed:
6571 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6572 misc_deregister(&device->miscdev);
6573 hlist_del(&device->hlist);
6574 kfree(device);
6575 }
6576
6577 kfree(device_names);
6578
6579err_alloc_device_names_failed:
6580 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6581
6582 return ret;
6583}
6584
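/*
 * Illustrative sketch: when binderfs is not enabled, the device list
 * comes from the "devices" module parameter, e.g. booting with
 *
 *	binder.devices=binder,hwbinder,vndbinder
 *
 * creates three independent contexts, while an empty string creates
 * none and leaves device creation to binderfs entirely.
 */
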
6585device_initcall(binder_init);
6586
6587#define CREATE_TRACE_POINTS
6588#include "binder_trace.h"
6589
6590MODULE_LICENSE("GPL v2");