// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
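
/*
 * Illustrative sketch (not driver code): a hypothetical helper that
 * needs both a proc's refs and its todo lists must take the locks in
 * the order documented above and release them in reverse.
 */
#if 0	/* example only; binder_proc_lock() etc. are defined below */
static void example_touch_refs_and_todo(struct binder_proc *proc)
{
	binder_proc_lock(proc);		/* 1) proc->outer_lock */
	binder_inner_proc_lock(proc);	/* 3) proc->inner_lock */
	/* ... safe to touch proc's refs and todo lists here ... */
	binder_inner_proc_unlock(proc);
	binder_proc_unlock(proc);
}
#endif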

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
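
/*
 * Example (illustrative): because the parameter above is created with
 * mode 0644, the mask can also be changed at runtime through sysfs
 * (path assumes the driver's usual module name):
 *
 *	echo 0x307 > /sys/module/binder/parameters/debug_mask
 *
 * keeps the three default bits (0x7) and additionally enables
 * BINDER_DEBUG_THREADS (0x100) and BINDER_DEBUG_TRANSACTION (0x200).
 */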

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)
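
/*
 * Illustrative use of the macro above (a sketch; the actual call sites
 * live in the transaction paths later in this file): record why a
 * transaction failed so userspace can query it afterwards, e.g.
 *
 *	binder_set_extended_error(&thread->ee, t_debug_id,
 *				  BR_FAILED_REPLY, -ENOMEM);
 */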

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
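
/*
 * Sketch of the matching reader side (illustrative only; the in-tree
 * reader lives in the debugfs show path): pair the smp_wmb() above
 * with smp_rmb() and re-check debug_id_done to detect a concurrent
 * overwrite of the slot.
 */
#if 0	/* example only */
static bool example_read_log_entry(struct binder_transaction_log *log,
				   unsigned int i,
				   struct binder_transaction_log_entry *out)
{
	struct binder_transaction_log_entry *e = &log->entry[i];
	int done = READ_ONCE(e->debug_id_done);

	smp_rmb();	/* pairs with smp_wmb() in binder_transaction_log_add() */
	*out = *e;
	smp_rmb();
	/* stable only if the entry was complete before and after the copy */
	return done && done == READ_ONCE(e->debug_id_done);
}
#endif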

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc: struct binder_proc whose outer lock to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc whose inner lock to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node whose lock to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node whose locks to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without the wakeup, such
	 * threads risk waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo);
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
					struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
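
/*
 * Illustrative tmpref usage (a sketch, not driver code): lookups such
 * as binder_get_node() return the node with an implicit tmpref held,
 * which the caller drops via binder_put_node() when done:
 */
#if 0	/* example only */
	struct binder_node *node = binder_get_node(proc, ptr);

	if (node) {
		/* node cannot be freed while the tmpref is held */
		do_something_with(node);	/* hypothetical helper */
		binder_put_node(node);
	}
#endif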

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/* Find the smallest unused descriptor the "slow way" */
static u32 slow_desc_lookup_olocked(struct binder_proc *proc)
{
	struct binder_ref *ref;
	struct rb_node *n;
	u32 desc;

	desc = 1;
	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > desc)
			break;
		desc = ref->data.desc + 1;
	}

	return desc;
}
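
/*
 * For instance (illustrative): with live descriptors {0, 1, 3}, the
 * walk above advances desc to 2, sees 3 > 2 and stops, returning 2 as
 * the smallest unused descriptor.
 */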

/*
 * Find an available reference descriptor ID. The proc->outer_lock might
 * be released in the process, in which case -EAGAIN is returned and the
 * @desc should be considered invalid.
 */
static int get_ref_desc_olocked(struct binder_proc *proc,
				struct binder_node *node,
				u32 *desc)
{
	struct dbitmap *dmap = &proc->dmap;
	unsigned long *new, bit;
	unsigned int nbits;

	/* 0 is reserved for the context manager */
	if (node == proc->context->binder_context_mgr_node) {
		*desc = 0;
		return 0;
	}

	if (!dbitmap_enabled(dmap)) {
		*desc = slow_desc_lookup_olocked(proc);
		return 0;
	}

	if (dbitmap_acquire_first_zero_bit(dmap, &bit) == 0) {
		*desc = bit;
		return 0;
	}

	/*
	 * The dbitmap is full and needs to grow. The proc->outer_lock
	 * is briefly released to allocate the new bitmap safely.
	 */
	nbits = dbitmap_grow_nbits(dmap);
	binder_proc_unlock(proc);
	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_grow(dmap, new, nbits);

	return -EAGAIN;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_ref *ref;
	struct rb_node *parent;
	struct rb_node **p;
	u32 desc;

retry:
	p = &proc->refs_by_node.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	/* might release the proc->outer_lock */
	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
		goto retry;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = desc;
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	struct dbitmap *dmap = &ref->proc->dmap;
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	if (dbitmap_enabled(dmap))
		dbitmap_clear_bit(dmap, ref->data.desc);
	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	ref to be incremented
 * @strong:	if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/* shrink descriptor bitmap if needed */
static void try_shrink_dmap(struct binder_proc *proc)
{
	unsigned long *new;
	int nbits;

	binder_proc_lock(proc);
	nbits = dbitmap_shrink_nbits(&proc->dmap);
	binder_proc_unlock(proc);

	if (!nbits)
		return;

	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_shrink(&proc->dmap, new, nbits);
	binder_proc_unlock(proc);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref) {
		binder_free_ref(ref);
		try_shrink_dmap(proc);
	}
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling binder_cleanup_ref_olocked()
		 * on the new reference with strong=0 and an outstanding
		 * tmp_ref will not decrement the node. The new_ref gets
		 * kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false;
	 * the final check is made under the inner lock below
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction whose fd fixups should be freed
1626 *
1627 * If the transaction is being torn down prior to being
1628 * processed by the target process, free all of the
1629 * fd fixups and fput the file structs. It is safe to
1630 * call this function after the fixups have been
1631 * processed -- in that case, the list will be empty.
1632 */
1633static void binder_free_txn_fixups(struct binder_transaction *t)
1634{
1635 struct binder_txn_fd_fixup *fixup, *tmp;
1636
1637 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1638 fput(fixup->file);
1639 if (fixup->target_fd >= 0)
1640 put_unused_fd(fixup->target_fd);
1641 list_del(&fixup->fixup_entry);
1642 kfree(fixup);
1643 }
1644}
1645
1646static void binder_txn_latency_free(struct binder_transaction *t)
1647{
1648 int from_proc, from_thread, to_proc, to_thread;
1649
1650 spin_lock(&t->lock);
1651 from_proc = t->from ? t->from->proc->pid : 0;
1652 from_thread = t->from ? t->from->pid : 0;
1653 to_proc = t->to_proc ? t->to_proc->pid : 0;
1654 to_thread = t->to_thread ? t->to_thread->pid : 0;
1655 spin_unlock(&t->lock);
1656
1657 trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1658}
1659
1660static void binder_free_transaction(struct binder_transaction *t)
1661{
1662 struct binder_proc *target_proc = t->to_proc;
1663
1664 if (target_proc) {
1665 binder_inner_proc_lock(target_proc);
1666 target_proc->outstanding_txns--;
1667 if (target_proc->outstanding_txns < 0)
1668 pr_warn("%s: Unexpected outstanding_txns %d\n",
1669 __func__, target_proc->outstanding_txns);
1670 if (!target_proc->outstanding_txns && target_proc->is_frozen)
1671 wake_up_interruptible_all(&target_proc->freeze_wait);
1672 if (t->buffer)
1673 t->buffer->transaction = NULL;
1674 binder_inner_proc_unlock(target_proc);
1675 }
1676 if (trace_binder_txn_latency_free_enabled())
1677 binder_txn_latency_free(t);
1678 /*
1679 * If the transaction has no target_proc, then
1680 * t->buffer->transaction has already been cleared.
1681 */
1682 binder_free_txn_fixups(t);
1683 kfree(t);
1684 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1685}
1686
1687static void binder_send_failed_reply(struct binder_transaction *t,
1688 uint32_t error_code)
1689{
1690 struct binder_thread *target_thread;
1691 struct binder_transaction *next;
1692
1693 BUG_ON(t->flags & TF_ONE_WAY);
1694 while (1) {
1695 target_thread = binder_get_txn_from_and_acq_inner(t);
1696 if (target_thread) {
1697 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1698 "send failed reply for transaction %d to %d:%d\n",
1699 t->debug_id,
1700 target_thread->proc->pid,
1701 target_thread->pid);
1702
1703 binder_pop_transaction_ilocked(target_thread, t);
1704 if (target_thread->reply_error.cmd == BR_OK) {
1705 target_thread->reply_error.cmd = error_code;
1706 binder_enqueue_thread_work_ilocked(
1707 target_thread,
1708 &target_thread->reply_error.work);
1709 wake_up_interruptible(&target_thread->wait);
1710 } else {
1711 /*
1712 * Cannot get here for normal operation, but
1713 * we can if multiple synchronous transactions
1714 * are sent without blocking for responses.
1715 * Just ignore the 2nd error in this case.
1716 */
1717 pr_warn("Unexpected reply error: %u\n",
1718 target_thread->reply_error.cmd);
1719 }
1720 binder_inner_proc_unlock(target_thread->proc);
1721 binder_thread_dec_tmpref(target_thread);
1722 binder_free_transaction(t);
1723 return;
1724 }
1725 __release(&target_thread->proc->inner_lock);
1726 next = t->from_parent;
1727
1728 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1729 "send failed reply for transaction %d, target dead\n",
1730 t->debug_id);
1731
1732 binder_free_transaction(t);
1733 if (next == NULL) {
1734 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1735 "reply failed, no target thread at root\n");
1736 return;
1737 }
1738 t = next;
1739 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1740 "reply failed, no target thread -- retry %d\n",
1741 t->debug_id);
1742 }
1743}
1744
1745/**
1746 * binder_cleanup_transaction() - cleans up undelivered transaction
1747 * @t: transaction that needs to be cleaned up
1748 * @reason: reason the transaction wasn't delivered
1749 * @error_code: error to return to caller (if synchronous call)
1750 */
1751static void binder_cleanup_transaction(struct binder_transaction *t,
1752 const char *reason,
1753 uint32_t error_code)
1754{
1755 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1756 binder_send_failed_reply(t, error_code);
1757 } else {
1758 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1759 "undelivered transaction %d, %s\n",
1760 t->debug_id, reason);
1761 binder_free_transaction(t);
1762 }
1763}
1764
1765/**
1766 * binder_get_object() - gets object and checks for valid metadata
1767 * @proc: binder_proc owning the buffer
1768 * @u: sender's user pointer to base of buffer
1769 * @buffer: binder_buffer that we're parsing.
1770 * @offset: offset in the @buffer at which to validate an object.
1771 * @object: struct binder_object to read into
1772 *
1773 * Copy the binder object at the given offset into @object. If @u is
1774 * provided then the copy is from the sender's buffer. If not, then
1775 * it is copied from the target's @buffer.
1776 *
1777 * Return: If there's a valid metadata object at @offset, the
1778 * size of that object. Otherwise, it returns zero. The object
1779 * is read into the struct binder_object pointed to by @object.
1780 */
1781static size_t binder_get_object(struct binder_proc *proc,
1782 const void __user *u,
1783 struct binder_buffer *buffer,
1784 unsigned long offset,
1785 struct binder_object *object)
1786{
1787 size_t read_size;
1788 struct binder_object_header *hdr;
1789 size_t object_size = 0;
1790
1791 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1792 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1793 !IS_ALIGNED(offset, sizeof(u32)))
1794 return 0;
1795
1796 if (u) {
1797 if (copy_from_user(object, u + offset, read_size))
1798 return 0;
1799 } else {
1800 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1801 offset, read_size))
1802 return 0;
1803 }
1804
1805 /* Ok, now see if we read a complete object. */
1806 hdr = &object->hdr;
1807 switch (hdr->type) {
1808 case BINDER_TYPE_BINDER:
1809 case BINDER_TYPE_WEAK_BINDER:
1810 case BINDER_TYPE_HANDLE:
1811 case BINDER_TYPE_WEAK_HANDLE:
1812 object_size = sizeof(struct flat_binder_object);
1813 break;
1814 case BINDER_TYPE_FD:
1815 object_size = sizeof(struct binder_fd_object);
1816 break;
1817 case BINDER_TYPE_PTR:
1818 object_size = sizeof(struct binder_buffer_object);
1819 break;
1820 case BINDER_TYPE_FDA:
1821 object_size = sizeof(struct binder_fd_array_object);
1822 break;
1823 default:
1824 return 0;
1825 }
1826 if (offset <= buffer->data_size - object_size &&
1827 buffer->data_size >= object_size)
1828 return object_size;
1829 else
1830 return 0;
1831}
1832
1833/**
1834 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1835 * @proc: binder_proc owning the buffer
1836 * @b: binder_buffer containing the object
1837 * @object: struct binder_object to read into
1838 * @index: index in offset array at which the binder_buffer_object is
1839 * located
1840 * @start_offset: points to the start of the offset array
1841 * @object_offsetp: offset of @object read from @b
1842 * @num_valid: the number of valid offsets in the offset array
1843 *
1844 * Return: If @index is within the valid range of the offset array
1845 * described by @start and @num_valid, and if there's a valid
1846 * binder_buffer_object at the offset found in index @index
1847 * of the offset array, that object is returned. Otherwise,
1848 * %NULL is returned.
1849 * Note that the offset found in index @index itself is not
1850 * verified; this function assumes that @num_valid elements
1851 * from @start_offset were previously verified to have valid offsets.
1852 * If @object_offsetp is non-NULL, then the offset within
1853 * @b is written to it.
1854 */
1855static struct binder_buffer_object *binder_validate_ptr(
1856 struct binder_proc *proc,
1857 struct binder_buffer *b,
1858 struct binder_object *object,
1859 binder_size_t index,
1860 binder_size_t start_offset,
1861 binder_size_t *object_offsetp,
1862 binder_size_t num_valid)
1863{
1864 size_t object_size;
1865 binder_size_t object_offset;
1866 unsigned long buffer_offset;
1867
1868 if (index >= num_valid)
1869 return NULL;
1870
1871 buffer_offset = start_offset + sizeof(binder_size_t) * index;
1872 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1873 b, buffer_offset,
1874 sizeof(object_offset)))
1875 return NULL;
1876 object_size = binder_get_object(proc, NULL, b, object_offset, object);
1877 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1878 return NULL;
1879 if (object_offsetp)
1880 *object_offsetp = object_offset;
1881
1882 return &object->bbo;
1883}
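
/*
 * Example (hypothetical offset array): if the offsets section of @b
 * holds { 0, 40, 64 } and num_valid is 2, a call with index = 1 reads
 * object_offset = 40, re-parses the object at buffer offset 40 and
 * returns its binder_buffer_object only when hdr.type is
 * BINDER_TYPE_PTR. index = 2 is rejected up front: the array
 * physically holds a third entry, but only the first num_valid
 * entries have been verified by the caller so far.
 */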
1884
1885/**
1886 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1887 * @proc: binder_proc owning the buffer
1888 * @b: transaction buffer
1889 * @objects_start_offset: offset to start of objects buffer
1890 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
1891 * @fixup_offset: start offset in @buffer to fix up
1892 * @last_obj_offset: offset to last binder_buffer_object that we fixed
1893 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
1894 *
1895 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
1896 * allowed.
1897 *
1898 * For safety reasons, we only allow fixups inside a buffer to happen
1899 * at increasing offsets; additionally, we only allow fixup on the last
1900 * buffer object that was verified, or one of its parents.
1901 *
1902 * Example of what is allowed:
1903 *
1904 * A
1905 * B (parent = A, offset = 0)
1906 * C (parent = A, offset = 16)
1907 * D (parent = C, offset = 0)
1908 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1909 *
1910 * Examples of what is not allowed:
1911 *
1912 * Decreasing offsets within the same parent:
1913 * A
1914 * C (parent = A, offset = 16)
1915 * B (parent = A, offset = 0) // decreasing offset within A
1916 *
1917 * Referring to a parent that wasn't the last object or any of its parents:
1918 * A
1919 * B (parent = A, offset = 0)
1921 * C (parent = A, offset = 16)
1922 * D (parent = B, offset = 0) // B is not the last object (C) or any of C's parents
1923 */
1924static bool binder_validate_fixup(struct binder_proc *proc,
1925 struct binder_buffer *b,
1926 binder_size_t objects_start_offset,
1927 binder_size_t buffer_obj_offset,
1928 binder_size_t fixup_offset,
1929 binder_size_t last_obj_offset,
1930 binder_size_t last_min_offset)
1931{
1932 if (!last_obj_offset) {
1933		/* No object has been verified yet, so nothing to fix up in */
1934 return false;
1935 }
1936
1937 while (last_obj_offset != buffer_obj_offset) {
1938 unsigned long buffer_offset;
1939 struct binder_object last_object;
1940 struct binder_buffer_object *last_bbo;
1941 size_t object_size = binder_get_object(proc, NULL, b,
1942 last_obj_offset,
1943 &last_object);
1944 if (object_size != sizeof(*last_bbo))
1945 return false;
1946
1947 last_bbo = &last_object.bbo;
1948 /*
1949 * Safe to retrieve the parent of last_obj, since it
1950 * was already previously verified by the driver.
1951 */
1952 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1953 return false;
1954 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1955 buffer_offset = objects_start_offset +
1956 sizeof(binder_size_t) * last_bbo->parent;
1957 if (binder_alloc_copy_from_buffer(&proc->alloc,
1958 &last_obj_offset,
1959 b, buffer_offset,
1960 sizeof(last_obj_offset)))
1961 return false;
1962 }
1963 return (fixup_offset >= last_min_offset);
1964}
1965
1966/**
1967 * struct binder_task_work_cb - for deferred close
1968 *
1969 * @twork: callback_head for task work
1970 * @file: file to fput() once the task work runs
1971 *
1972 * Structure to pass task work to be handled after
1973 * returning from binder_ioctl() via task_work_add().
1974 */
1975struct binder_task_work_cb {
1976 struct callback_head twork;
1977 struct file *file;
1978};
1979
1980/**
1981 * binder_do_fd_close() - close list of file descriptors
1982 * @twork: callback head for task work
1983 *
1984 * It is not safe to close a file descriptor from inside the
1985 * binder_ioctl() call if there is a chance that binder's own file
1986 * descriptor might be closed. This is to meet the requirements for
1987 * using fdget() (see comments for __fget_light()). Therefore use
1988 * task_work_add() to schedule the close operation once we have
1989 * returned from binder_ioctl(). This function is the callback for
1990 * that mechanism: it drops the reference taken on the file in
1991 * binder_deferred_fd_close() via fput().
1992 */
1993static void binder_do_fd_close(struct callback_head *twork)
1994{
1995 struct binder_task_work_cb *twcb = container_of(twork,
1996 struct binder_task_work_cb, twork);
1997
1998 fput(twcb->file);
1999 kfree(twcb);
2000}
2001
2002/**
2003 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2004 * @fd: file-descriptor to close
2005 *
2006 * See comments in binder_do_fd_close(). This function is used to schedule
2007 * a file-descriptor to be closed after returning from binder_ioctl().
2008 */
2009static void binder_deferred_fd_close(int fd)
2010{
2011 struct binder_task_work_cb *twcb;
2012
2013 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2014 if (!twcb)
2015 return;
2016 init_task_work(&twcb->twork, binder_do_fd_close);
2017 twcb->file = file_close_fd(fd);
2018 if (twcb->file) {
2019 // pin it until binder_do_fd_close(); see comments there
2020 get_file(twcb->file);
2021 filp_close(twcb->file, current->files);
2022 task_work_add(current, &twcb->twork, TWA_RESUME);
2023 } else {
2024 kfree(twcb);
2025 }
2026}
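
/*
 * The hazard being avoided: a transaction may carry an fd that refers
 * to the binder file itself, and closing it synchronously while the
 * ioctl path still depends on its fdget() reference could drop the
 * file out from under us. A minimal sketch of the same deferral
 * pattern in isolation (hypothetical code, names invented):
 *
 *	static void my_deferred_put(struct callback_head *head)
 *	{
 *		struct my_cb *cb = container_of(head, struct my_cb, twork);
 *
 *		fput(cb->file);	// runs as current returns to userspace
 *		kfree(cb);
 *	}
 *
 *	init_task_work(&cb->twork, my_deferred_put);
 *	task_work_add(current, &cb->twork, TWA_RESUME);
 */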
2027
2028static void binder_transaction_buffer_release(struct binder_proc *proc,
2029 struct binder_thread *thread,
2030 struct binder_buffer *buffer,
2031 binder_size_t off_end_offset,
2032 bool is_failure)
2033{
2034 int debug_id = buffer->debug_id;
2035 binder_size_t off_start_offset, buffer_offset;
2036
2037 binder_debug(BINDER_DEBUG_TRANSACTION,
2038 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2039 proc->pid, buffer->debug_id,
2040 buffer->data_size, buffer->offsets_size,
2041 (unsigned long long)off_end_offset);
2042
2043 if (buffer->target_node)
2044 binder_dec_node(buffer->target_node, 1, 0);
2045
2046 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2047
2048 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2049 buffer_offset += sizeof(binder_size_t)) {
2050 struct binder_object_header *hdr;
2051 size_t object_size = 0;
2052 struct binder_object object;
2053 binder_size_t object_offset;
2054
2055 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2056 buffer, buffer_offset,
2057 sizeof(object_offset)))
2058 object_size = binder_get_object(proc, NULL, buffer,
2059 object_offset, &object);
2060 if (object_size == 0) {
2061 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2062 debug_id, (u64)object_offset, buffer->data_size);
2063 continue;
2064 }
2065 hdr = &object.hdr;
2066 switch (hdr->type) {
2067 case BINDER_TYPE_BINDER:
2068 case BINDER_TYPE_WEAK_BINDER: {
2069 struct flat_binder_object *fp;
2070 struct binder_node *node;
2071
2072 fp = to_flat_binder_object(hdr);
2073 node = binder_get_node(proc, fp->binder);
2074 if (node == NULL) {
2075 pr_err("transaction release %d bad node %016llx\n",
2076 debug_id, (u64)fp->binder);
2077 break;
2078 }
2079 binder_debug(BINDER_DEBUG_TRANSACTION,
2080 " node %d u%016llx\n",
2081 node->debug_id, (u64)node->ptr);
2082 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2083 0);
2084 binder_put_node(node);
2085 } break;
2086 case BINDER_TYPE_HANDLE:
2087 case BINDER_TYPE_WEAK_HANDLE: {
2088 struct flat_binder_object *fp;
2089 struct binder_ref_data rdata;
2090 int ret;
2091
2092 fp = to_flat_binder_object(hdr);
2093 ret = binder_dec_ref_for_handle(proc, fp->handle,
2094 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2095
2096 if (ret) {
2097 pr_err("transaction release %d bad handle %d, ret = %d\n",
2098 debug_id, fp->handle, ret);
2099 break;
2100 }
2101 binder_debug(BINDER_DEBUG_TRANSACTION,
2102 " ref %d desc %d\n",
2103 rdata.debug_id, rdata.desc);
2104 } break;
2105
2106 case BINDER_TYPE_FD: {
2107 /*
2108 * No need to close the file here since user-space
2109 * closes it for successfully delivered
2110 * transactions. For transactions that weren't
2111 * delivered, the new fd was never allocated so
2112 * there is no need to close and the fput on the
2113 * file is done when the transaction is torn
2114 * down.
2115 */
2116 } break;
2117 case BINDER_TYPE_PTR:
2118 /*
2119 * Nothing to do here, this will get cleaned up when the
2120 * transaction buffer gets freed
2121 */
2122 break;
2123 case BINDER_TYPE_FDA: {
2124 struct binder_fd_array_object *fda;
2125 struct binder_buffer_object *parent;
2126 struct binder_object ptr_object;
2127 binder_size_t fda_offset;
2128 size_t fd_index;
2129 binder_size_t fd_buf_size;
2130 binder_size_t num_valid;
2131
2132 if (is_failure) {
2133 /*
2134 * The fd fixups have not been applied so no
2135 * fds need to be closed.
2136 */
2137 continue;
2138 }
2139
2140 num_valid = (buffer_offset - off_start_offset) /
2141 sizeof(binder_size_t);
2142 fda = to_binder_fd_array_object(hdr);
2143 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2144 fda->parent,
2145 off_start_offset,
2146 NULL,
2147 num_valid);
2148 if (!parent) {
2149 pr_err("transaction release %d bad parent offset\n",
2150 debug_id);
2151 continue;
2152 }
2153 fd_buf_size = sizeof(u32) * fda->num_fds;
2154 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2155 pr_err("transaction release %d invalid number of fds (%lld)\n",
2156 debug_id, (u64)fda->num_fds);
2157 continue;
2158 }
2159 if (fd_buf_size > parent->length ||
2160 fda->parent_offset > parent->length - fd_buf_size) {
2161 /* No space for all file descriptors here. */
2162 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2163 debug_id, (u64)fda->num_fds);
2164 continue;
2165 }
2166 /*
2167 * the source data for binder_buffer_object is visible
2168 * to user-space and the @buffer element is the user
2169 * pointer to the buffer_object containing the fd_array.
2170 * Convert the address to an offset relative to
2171 * the base of the transaction buffer.
2172 */
2173 fda_offset = parent->buffer - buffer->user_data +
2174 fda->parent_offset;
2175 for (fd_index = 0; fd_index < fda->num_fds;
2176 fd_index++) {
2177 u32 fd;
2178 int err;
2179 binder_size_t offset = fda_offset +
2180 fd_index * sizeof(fd);
2181
2182 err = binder_alloc_copy_from_buffer(
2183 &proc->alloc, &fd, buffer,
2184 offset, sizeof(fd));
2185 WARN_ON(err);
2186 if (!err) {
2187 binder_deferred_fd_close(fd);
2188 /*
2189 * Need to make sure the thread goes
2190 * back to userspace to complete the
2191 * deferred close
2192 */
2193 if (thread)
2194 thread->looper_need_return = true;
2195 }
2196 }
2197 } break;
2198 default:
2199 pr_err("transaction release %d bad object type %x\n",
2200 debug_id, hdr->type);
2201 break;
2202 }
2203 }
2204}
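
/*
 * Worked example of the fd-array offset math used above (addresses
 * hypothetical): if the target mapping has buffer->user_data =
 * 0x7f0000000000, the parent buffer object landed at parent->buffer =
 * 0x7f0000000040 and fda->parent_offset = 8, then
 *
 *	fda_offset = 0x7f0000000040 - 0x7f0000000000 + 8 = 0x48
 *
 * so with num_fds = 3 the fds sit at buffer offsets 0x48, 0x4c and
 * 0x50, each sizeof(u32) apart.
 */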
2205
2206/* Clean up all the objects in the buffer */
2207static inline void binder_release_entire_buffer(struct binder_proc *proc,
2208 struct binder_thread *thread,
2209 struct binder_buffer *buffer,
2210 bool is_failure)
2211{
2212 binder_size_t off_end_offset;
2213
2214 off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2215 off_end_offset += buffer->offsets_size;
2216
2217 binder_transaction_buffer_release(proc, thread, buffer,
2218 off_end_offset, is_failure);
2219}
2220
2221static int binder_translate_binder(struct flat_binder_object *fp,
2222 struct binder_transaction *t,
2223 struct binder_thread *thread)
2224{
2225 struct binder_node *node;
2226 struct binder_proc *proc = thread->proc;
2227 struct binder_proc *target_proc = t->to_proc;
2228 struct binder_ref_data rdata;
2229 int ret = 0;
2230
2231 node = binder_get_node(proc, fp->binder);
2232 if (!node) {
2233 node = binder_new_node(proc, fp);
2234 if (!node)
2235 return -ENOMEM;
2236 }
2237 if (fp->cookie != node->cookie) {
2238 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2239 proc->pid, thread->pid, (u64)fp->binder,
2240 node->debug_id, (u64)fp->cookie,
2241 (u64)node->cookie);
2242 ret = -EINVAL;
2243 goto done;
2244 }
2245 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2246 ret = -EPERM;
2247 goto done;
2248 }
2249
2250 ret = binder_inc_ref_for_node(target_proc, node,
2251 fp->hdr.type == BINDER_TYPE_BINDER,
2252 &thread->todo, &rdata);
2253 if (ret)
2254 goto done;
2255
2256 if (fp->hdr.type == BINDER_TYPE_BINDER)
2257 fp->hdr.type = BINDER_TYPE_HANDLE;
2258 else
2259 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2260 fp->binder = 0;
2261 fp->handle = rdata.desc;
2262 fp->cookie = 0;
2263
2264 trace_binder_transaction_node_to_ref(t, node, &rdata);
2265 binder_debug(BINDER_DEBUG_TRANSACTION,
2266 " node %d u%016llx -> ref %d desc %d\n",
2267 node->debug_id, (u64)node->ptr,
2268 rdata.debug_id, rdata.desc);
2269done:
2270 binder_put_node(node);
2271 return ret;
2272}
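
/*
 * Effect on the wire format (illustrative field values): a sender's
 * local object
 *
 *	{ .hdr.type = BINDER_TYPE_BINDER, .binder = 0x5000, .cookie = 0x6000 }
 *
 * leaves binder_translate_binder() as
 *
 *	{ .hdr.type = BINDER_TYPE_HANDLE, .handle = rdata.desc,
 *	  .binder = 0, .cookie = 0 }
 *
 * where rdata.desc names the strong ref the target now holds on the
 * sender's node: the sender's raw user pointers never cross the
 * process boundary.
 */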
2273
2274static int binder_translate_handle(struct flat_binder_object *fp,
2275 struct binder_transaction *t,
2276 struct binder_thread *thread)
2277{
2278 struct binder_proc *proc = thread->proc;
2279 struct binder_proc *target_proc = t->to_proc;
2280 struct binder_node *node;
2281 struct binder_ref_data src_rdata;
2282 int ret = 0;
2283
2284 node = binder_get_node_from_ref(proc, fp->handle,
2285 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2286 if (!node) {
2287 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2288 proc->pid, thread->pid, fp->handle);
2289 return -EINVAL;
2290 }
2291 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2292 ret = -EPERM;
2293 goto done;
2294 }
2295
2296 binder_node_lock(node);
2297 if (node->proc == target_proc) {
2298 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2299 fp->hdr.type = BINDER_TYPE_BINDER;
2300 else
2301 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2302 fp->binder = node->ptr;
2303 fp->cookie = node->cookie;
2304 if (node->proc)
2305 binder_inner_proc_lock(node->proc);
2306 else
2307 __acquire(&node->proc->inner_lock);
2308 binder_inc_node_nilocked(node,
2309 fp->hdr.type == BINDER_TYPE_BINDER,
2310 0, NULL);
2311 if (node->proc)
2312 binder_inner_proc_unlock(node->proc);
2313 else
2314 __release(&node->proc->inner_lock);
2315 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2316 binder_debug(BINDER_DEBUG_TRANSACTION,
2317 " ref %d desc %d -> node %d u%016llx\n",
2318 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2319 (u64)node->ptr);
2320 binder_node_unlock(node);
2321 } else {
2322 struct binder_ref_data dest_rdata;
2323
2324 binder_node_unlock(node);
2325 ret = binder_inc_ref_for_node(target_proc, node,
2326 fp->hdr.type == BINDER_TYPE_HANDLE,
2327 NULL, &dest_rdata);
2328 if (ret)
2329 goto done;
2330
2331 fp->binder = 0;
2332 fp->handle = dest_rdata.desc;
2333 fp->cookie = 0;
2334 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2335 &dest_rdata);
2336 binder_debug(BINDER_DEBUG_TRANSACTION,
2337 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2338 src_rdata.debug_id, src_rdata.desc,
2339 dest_rdata.debug_id, dest_rdata.desc,
2340 node->debug_id);
2341 }
2342done:
2343 binder_put_node(node);
2344 return ret;
2345}
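
/*
 * The two directions above, illustrated: if process B holds a handle
 * to a node that lives in process A and sends it back to A, the
 * object collapses into A's local form (BINDER_TYPE_HANDLE becomes
 * BINDER_TYPE_BINDER and fp->binder/fp->cookie are restored from the
 * node). Sent anywhere else, it stays a handle but is re-numbered in
 * the target's own descriptor space by binder_inc_ref_for_node().
 */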
2346
2347static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2348 struct binder_transaction *t,
2349 struct binder_thread *thread,
2350 struct binder_transaction *in_reply_to)
2351{
2352 struct binder_proc *proc = thread->proc;
2353 struct binder_proc *target_proc = t->to_proc;
2354 struct binder_txn_fd_fixup *fixup;
2355 struct file *file;
2356 int ret = 0;
2357 bool target_allows_fd;
2358
2359 if (in_reply_to)
2360 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2361 else
2362 target_allows_fd = t->buffer->target_node->accept_fds;
2363 if (!target_allows_fd) {
2364 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2365 proc->pid, thread->pid,
2366 in_reply_to ? "reply" : "transaction",
2367 fd);
2368 ret = -EPERM;
2369 goto err_fd_not_accepted;
2370 }
2371
2372 file = fget(fd);
2373 if (!file) {
2374 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2375 proc->pid, thread->pid, fd);
2376 ret = -EBADF;
2377 goto err_fget;
2378 }
2379 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2380 if (ret < 0) {
2381 ret = -EPERM;
2382 goto err_security;
2383 }
2384
2385 /*
2386 * Add fixup record for this transaction. The allocation
2387 * of the fd in the target needs to be done from a
2388 * target thread.
2389 */
2390 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2391 if (!fixup) {
2392 ret = -ENOMEM;
2393 goto err_alloc;
2394 }
2395 fixup->file = file;
2396 fixup->offset = fd_offset;
2397 fixup->target_fd = -1;
2398 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2399 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2400
2401 return ret;
2402
2403err_alloc:
2404err_security:
2405 fput(file);
2406err_fget:
2407err_fd_not_accepted:
2408 return ret;
2409}
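
/*
 * Note that no fd is installed in the target here: this only
 * validates the file and queues a binder_txn_fd_fixup on
 * t->fd_fixups. A simplified sketch of what the target side later
 * does with each queued fixup (the real work happens when the target
 * thread consumes the transaction):
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	// write fd into t->buffer at fixup->offset, then:
 *	fd_install(fd, fixup->file);
 */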
2410
2411/**
2412 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2413 * @offset:	offset in target buffer to fix up
2414 * @skip_size:	bytes to skip in copy (fixup will be written later)
2415 * @fixup_data:	data to write at fixup offset
2416 * @node:	list node
2417 *
2418 * This is used for the pointer fixup list (pf) which is created and consumed
2419 * during binder_transaction() and is only accessed locally. No
2420 * locking is necessary.
2421 *
2422 * The list is ordered by @offset.
2423 */
2424struct binder_ptr_fixup {
2425 binder_size_t offset;
2426 size_t skip_size;
2427 binder_uintptr_t fixup_data;
2428 struct list_head node;
2429};
2430
2431/**
2432 * struct binder_sg_copy - scatter-gather data to be copied
2433 * @offset:	offset in target buffer
2434 * @sender_uaddr:	user address in source buffer
2435 * @length:	bytes to copy
2436 * @node:	list node
2437 *
2438 * This is used for the sg copy list (sgc) which is created and consumed
2439 * during binder_transaction() and is only accessed locally. No
2440 * locking is necessary.
2441 *
2442 * The list is ordered by @offset.
2443 */
2444struct binder_sg_copy {
2445 binder_size_t offset;
2446 const void __user *sender_uaddr;
2447 size_t length;
2448 struct list_head node;
2449};
2450
2451/**
2452 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2453 * @alloc: binder_alloc associated with @buffer
2454 * @buffer: binder buffer in target process
2455 * @sgc_head: list_head of scatter-gather copy list
2456 * @pf_head: list_head of pointer fixup list
2457 *
2458 * Processes all elements of @sgc_head, applying fixups from @pf_head
2459 * and copying the scatter-gather data from the source process' user
2460 * buffer to the target's buffer. It is expected that the list creation
2461 * and processing all occurs during binder_transaction() so these lists
2462 * are only accessed in local context.
2463 *
2464 * Return: 0=success, else -errno
2465 */
2466static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2467 struct binder_buffer *buffer,
2468 struct list_head *sgc_head,
2469 struct list_head *pf_head)
2470{
2471 int ret = 0;
2472 struct binder_sg_copy *sgc, *tmpsgc;
2473 struct binder_ptr_fixup *tmppf;
2474 struct binder_ptr_fixup *pf =
2475 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2476 node);
2477
2478 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2479 size_t bytes_copied = 0;
2480
2481 while (bytes_copied < sgc->length) {
2482 size_t copy_size;
2483 size_t bytes_left = sgc->length - bytes_copied;
2484 size_t offset = sgc->offset + bytes_copied;
2485
2486 /*
2487 * We copy up to the fixup (pointed to by pf)
2488 */
2489 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2490 : bytes_left;
2491 if (!ret && copy_size)
2492 ret = binder_alloc_copy_user_to_buffer(
2493 alloc, buffer,
2494 offset,
2495 sgc->sender_uaddr + bytes_copied,
2496 copy_size);
2497 bytes_copied += copy_size;
2498 if (copy_size != bytes_left) {
2499 BUG_ON(!pf);
2500 /* we stopped at a fixup offset */
2501 if (pf->skip_size) {
2502 /*
2503 * we are just skipping. This is for
2504 * BINDER_TYPE_FDA where the translated
2505 * fds will be fixed up when we get
2506 * to target context.
2507 */
2508 bytes_copied += pf->skip_size;
2509 } else {
2510 /* apply the fixup indicated by pf */
2511 if (!ret)
2512 ret = binder_alloc_copy_to_buffer(
2513 alloc, buffer,
2514 pf->offset,
2515 &pf->fixup_data,
2516 sizeof(pf->fixup_data));
2517 bytes_copied += sizeof(pf->fixup_data);
2518 }
2519 list_del(&pf->node);
2520 kfree(pf);
2521 pf = list_first_entry_or_null(pf_head,
2522 struct binder_ptr_fixup, node);
2523 }
2524 }
2525 list_del(&sgc->node);
2526 kfree(sgc);
2527 }
2528 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2529 BUG_ON(pf->skip_size == 0);
2530 list_del(&pf->node);
2531 kfree(pf);
2532 }
2533 BUG_ON(!list_empty(sgc_head));
2534
2535 return ret > 0 ? -EINVAL : ret;
2536}
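
/*
 * Worked example (hypothetical offsets): one sg block of 32 bytes at
 * target offset 0, with a single 8-byte pointer fixup at
 * pf->offset = 16, proceeds as
 *
 *	copy bytes [0, 16) from the sender	(up to the fixup)
 *	write pf->fixup_data at [16, 24)	(sender's value discarded)
 *	copy bytes [24, 32) from the sender	(the remainder)
 *
 * For a BINDER_TYPE_FDA fixup the middle step instead just skips
 * pf->skip_size bytes; the fd values are patched in later, in target
 * context, once the target fds actually exist.
 */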
2537
2538/**
2539 * binder_cleanup_deferred_txn_lists() - free specified lists
2540 * @sgc_head: list_head of scatter-gather copy list
2541 * @pf_head: list_head of pointer fixup list
2542 *
2543 * Called to clean up @sgc_head and @pf_head if there is an
2544 * error.
2545 */
2546static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2547 struct list_head *pf_head)
2548{
2549 struct binder_sg_copy *sgc, *tmpsgc;
2550 struct binder_ptr_fixup *pf, *tmppf;
2551
2552 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2553 list_del(&sgc->node);
2554 kfree(sgc);
2555 }
2556 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2557 list_del(&pf->node);
2558 kfree(pf);
2559 }
2560}
2561
2562/**
2563 * binder_defer_copy() - queue a scatter-gather buffer for copy
2564 * @sgc_head: list_head of scatter-gather copy list
2565 * @offset: binder buffer offset in target process
2566 * @sender_uaddr: user address in source process
2567 * @length: bytes to copy
2568 *
2569 * Specify a scatter-gather block to be copied. The actual copy must
2570 * be deferred until all the needed fixups are identified and queued.
2571 * Then the copy and fixups are done together so un-translated values
2572 * from the source are never visible in the target buffer.
2573 *
2574 * We are guaranteed that repeated calls to this function will have
2575 * monotonically increasing @offset values so the list will naturally
2576 * be ordered.
2577 *
2578 * Return: 0=success, else -errno
2579 */
2580static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2581 const void __user *sender_uaddr, size_t length)
2582{
2583 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2584
2585 if (!bc)
2586 return -ENOMEM;
2587
2588 bc->offset = offset;
2589 bc->sender_uaddr = sender_uaddr;
2590 bc->length = length;
2591 INIT_LIST_HEAD(&bc->node);
2592
2593 /*
2594 * We are guaranteed that the deferred copies are in-order
2595 * so just add to the tail.
2596 */
2597 list_add_tail(&bc->node, sgc_head);
2598
2599 return 0;
2600}
2601
2602/**
2603 * binder_add_fixup() - queue a fixup to be applied to sg copy
2604 * @pf_head: list_head of binder ptr fixup list
2605 * @offset: binder buffer offset in target process
2606 * @fixup: bytes to be copied for fixup
2607 * @skip_size: bytes to skip when copying (fixup will be applied later)
2608 *
2609 * Add the specified fixup to a list ordered by @offset. When copying
2610 * the scatter-gather buffers, the fixup will be copied instead of
2611 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2612 * will be applied later (in target process context), so we just skip
2613 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2614 * value in @fixup.
2615 *
2616 * This function is called *mostly* in @offset order, but there are
2617 * exceptions. Since out-of-order inserts are relatively uncommon,
2618 * we insert the new element by searching backward from the tail of
2619 * the list.
2620 *
2621 * Return: 0=success, else -errno
2622 */
2623static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2624 binder_uintptr_t fixup, size_t skip_size)
2625{
2626 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2627 struct binder_ptr_fixup *tmppf;
2628
2629 if (!pf)
2630 return -ENOMEM;
2631
2632 pf->offset = offset;
2633 pf->fixup_data = fixup;
2634 pf->skip_size = skip_size;
2635 INIT_LIST_HEAD(&pf->node);
2636
2637 /* Fixups are *mostly* added in-order, but there are some
2638 * exceptions. Look backwards through list for insertion point.
2639 */
2640 list_for_each_entry_reverse(tmppf, pf_head, node) {
2641 if (tmppf->offset < pf->offset) {
2642 list_add(&pf->node, &tmppf->node);
2643 return 0;
2644 }
2645 }
2646 /*
2647 * if we get here, then the new offset is the lowest so
2648 * insert at the head
2649 */
2650 list_add(&pf->node, pf_head);
2651 return 0;
2652}
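
/*
 * Insertion example (hypothetical offsets): with the list holding
 * fixups at offsets { 8, 40, 72 }, adding offset 56 walks backward
 * from 72, stops at 40 (40 < 56) and links the new node after it,
 * giving { 8, 40, 56, 72 }. Adding offset 4 exhausts the walk and
 * lands at the head. For the common in-order case the backward walk
 * terminates at the tail immediately.
 */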
2653
2654static int binder_translate_fd_array(struct list_head *pf_head,
2655 struct binder_fd_array_object *fda,
2656 const void __user *sender_ubuffer,
2657 struct binder_buffer_object *parent,
2658 struct binder_buffer_object *sender_uparent,
2659 struct binder_transaction *t,
2660 struct binder_thread *thread,
2661 struct binder_transaction *in_reply_to)
2662{
2663 binder_size_t fdi, fd_buf_size;
2664 binder_size_t fda_offset;
2665 const void __user *sender_ufda_base;
2666 struct binder_proc *proc = thread->proc;
2667 int ret;
2668
2669 if (fda->num_fds == 0)
2670 return 0;
2671
2672 fd_buf_size = sizeof(u32) * fda->num_fds;
2673 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2674 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2675 proc->pid, thread->pid, (u64)fda->num_fds);
2676 return -EINVAL;
2677 }
2678 if (fd_buf_size > parent->length ||
2679 fda->parent_offset > parent->length - fd_buf_size) {
2680 /* No space for all file descriptors here. */
2681 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2682 proc->pid, thread->pid, (u64)fda->num_fds);
2683 return -EINVAL;
2684 }
2685 /*
2686 * the source data for binder_buffer_object is visible
2687 * to user-space and the @buffer element is the user
2688 * pointer to the buffer_object containing the fd_array.
2689 * Convert the address to an offset relative to
2690 * the base of the transaction buffer.
2691 */
2692 fda_offset = parent->buffer - t->buffer->user_data +
2693 fda->parent_offset;
2694 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2695 fda->parent_offset;
2696
2697 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2698 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2699 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2700 proc->pid, thread->pid);
2701 return -EINVAL;
2702 }
2703 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2704 if (ret)
2705 return ret;
2706
2707 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2708 u32 fd;
2709 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2710 binder_size_t sender_uoffset = fdi * sizeof(fd);
2711
2712 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2713 if (!ret)
2714 ret = binder_translate_fd(fd, offset, t, thread,
2715 in_reply_to);
2716 if (ret)
2717 return ret > 0 ? -EINVAL : ret;
2718 }
2719 return 0;
2720}
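
/*
 * Example of the two address spaces in play (illustrative): each fd
 * value is read from the sender's copy of the parent buffer at
 * sender_ufda_base + fdi * sizeof(u32), while its fixup target is
 * fda_offset + fdi * sizeof(u32), an offset into the target's
 * transaction buffer. With num_fds = 3 that is three reads from the
 * sender and three deferred fd installs in the target, all covered by
 * the single skip-fixup queued above.
 */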
2721
2722static int binder_fixup_parent(struct list_head *pf_head,
2723 struct binder_transaction *t,
2724 struct binder_thread *thread,
2725 struct binder_buffer_object *bp,
2726 binder_size_t off_start_offset,
2727 binder_size_t num_valid,
2728 binder_size_t last_fixup_obj_off,
2729 binder_size_t last_fixup_min_off)
2730{
2731 struct binder_buffer_object *parent;
2732 struct binder_buffer *b = t->buffer;
2733 struct binder_proc *proc = thread->proc;
2734 struct binder_proc *target_proc = t->to_proc;
2735 struct binder_object object;
2736 binder_size_t buffer_offset;
2737 binder_size_t parent_offset;
2738
2739 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2740 return 0;
2741
2742 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2743 off_start_offset, &parent_offset,
2744 num_valid);
2745 if (!parent) {
2746 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2747 proc->pid, thread->pid);
2748 return -EINVAL;
2749 }
2750
2751 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2752 parent_offset, bp->parent_offset,
2753 last_fixup_obj_off,
2754 last_fixup_min_off)) {
2755 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2756 proc->pid, thread->pid);
2757 return -EINVAL;
2758 }
2759
2760 if (parent->length < sizeof(binder_uintptr_t) ||
2761 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2762 /* No space for a pointer here! */
2763 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2764 proc->pid, thread->pid);
2765 return -EINVAL;
2766 }
2767
2768 buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2769
2770 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2771}
2772
2773/**
2774 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2775 * @t1: the pending async txn in the frozen process
2776 * @t2: the new async txn to supersede the outdated pending one
2777 *
2778 * Return: true if t2 can supersede t1
2779 *         false if t2 cannot supersede t1
2780 */
2781static bool binder_can_update_transaction(struct binder_transaction *t1,
2782 struct binder_transaction *t2)
2783{
2784 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2785 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2786 return false;
2787 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2788 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2789 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2790 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2791 return true;
2792 return false;
2793}
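
/*
 * Example (hypothetical): a status service repeatedly sends one-way
 * TF_UPDATE_TXN transactions with the same code to a frozen app. Each
 * new update matches the queued one on target task, code, flags and
 * target node, so the stale payload can be dropped and only the most
 * recent update is delivered when the app thaws. Updates with
 * different codes do not supersede each other.
 */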
2794
2795/**
2796 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2797 * @t: new async transaction
2798 * @target_list: list to find outdated transaction
2799 *
2800 * Return: the outdated transaction if found
2801 *         NULL if no outdated transaction can be found
2802 *
2803 * Requires the proc->inner_lock to be held.
2804 */
2805static struct binder_transaction *
2806binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2807 struct list_head *target_list)
2808{
2809 struct binder_work *w;
2810
2811 list_for_each_entry(w, target_list, entry) {
2812 struct binder_transaction *t_queued;
2813
2814 if (w->type != BINDER_WORK_TRANSACTION)
2815 continue;
2816 t_queued = container_of(w, struct binder_transaction, work);
2817 if (binder_can_update_transaction(t_queued, t))
2818 return t_queued;
2819 }
2820 return NULL;
2821}
2822
2823/**
2824 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2825 * @t: transaction to send
2826 * @proc: process to send the transaction to
2827 * @thread: thread in @proc to send the transaction to (may be NULL)
2828 *
2829 * This function queues a transaction to the specified process. It will try
2830 * to find a thread in the target process to handle the transaction and
2831 * wake it up. If no thread is found, the work is queued to the proc
2832 * waitqueue.
2833 *
2834 * If the @thread parameter is not NULL, the transaction is always queued
2835 * to the waitlist of that specific thread.
2836 *
2837 * Return: 0 if the transaction was successfully queued
2838 * BR_DEAD_REPLY if the target process or thread is dead
2839 * BR_FROZEN_REPLY if the target process or thread is frozen and
2840 * the sync transaction was rejected
2841 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2842 * and the async transaction was successfully queued
2843 */
2844static int binder_proc_transaction(struct binder_transaction *t,
2845 struct binder_proc *proc,
2846 struct binder_thread *thread)
2847{
2848 struct binder_node *node = t->buffer->target_node;
2849 bool oneway = !!(t->flags & TF_ONE_WAY);
2850 bool pending_async = false;
2851 struct binder_transaction *t_outdated = NULL;
2852 bool frozen = false;
2853
2854 BUG_ON(!node);
2855 binder_node_lock(node);
2856 if (oneway) {
2857 BUG_ON(thread);
2858 if (node->has_async_transaction)
2859 pending_async = true;
2860 else
2861 node->has_async_transaction = true;
2862 }
2863
2864 binder_inner_proc_lock(proc);
2865 if (proc->is_frozen) {
2866 frozen = true;
2867 proc->sync_recv |= !oneway;
2868 proc->async_recv |= oneway;
2869 }
2870
2871 if ((frozen && !oneway) || proc->is_dead ||
2872 (thread && thread->is_dead)) {
2873 binder_inner_proc_unlock(proc);
2874 binder_node_unlock(node);
2875 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2876 }
2877
2878 if (!thread && !pending_async)
2879 thread = binder_select_thread_ilocked(proc);
2880
2881 if (thread) {
2882 binder_enqueue_thread_work_ilocked(thread, &t->work);
2883 } else if (!pending_async) {
2884 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2885 } else {
2886 if ((t->flags & TF_UPDATE_TXN) && frozen) {
2887 t_outdated = binder_find_outdated_transaction_ilocked(t,
2888 &node->async_todo);
2889 if (t_outdated) {
2890 binder_debug(BINDER_DEBUG_TRANSACTION,
2891 "txn %d supersedes %d\n",
2892 t->debug_id, t_outdated->debug_id);
2893 list_del_init(&t_outdated->work.entry);
2894 proc->outstanding_txns--;
2895 }
2896 }
2897 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2898 }
2899
2900 if (!pending_async)
2901 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2902
2903 proc->outstanding_txns++;
2904 binder_inner_proc_unlock(proc);
2905 binder_node_unlock(node);
2906
2907 /*
2908 * To reduce potential contention, free the outdated transaction and
2909 * buffer after releasing the locks.
2910 */
2911 if (t_outdated) {
2912 struct binder_buffer *buffer = t_outdated->buffer;
2913
2914 t_outdated->buffer = NULL;
2915 buffer->transaction = NULL;
2916 trace_binder_transaction_update_buffer_release(buffer);
2917 binder_release_entire_buffer(proc, NULL, buffer, false);
2918 binder_alloc_free_buf(&proc->alloc, buffer);
2919 kfree(t_outdated);
2920 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2921 }
2922
2923 if (oneway && frozen)
2924 return BR_TRANSACTION_PENDING_FROZEN;
2925
2926 return 0;
2927}
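
/*
 * Queueing outcomes of the above, summarized (paraphrasing the code):
 *
 *	sync txn,  target frozen or dead -> BR_FROZEN_REPLY/BR_DEAD_REPLY
 *	sync txn,  waiting thread found  -> thread->todo, wake that thread
 *	sync txn,  no thread             -> proc->todo, wake the proc
 *	async txn, node idle             -> as above, has_async_transaction set
 *	async txn, node busy             -> node->async_todo, no wakeup
 *	async txn, target frozen         -> queued, BR_TRANSACTION_PENDING_FROZEN
 */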
2928
2929/**
2930 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2931 * @node: struct binder_node for which to get refs
2932 * @procp: returns @node->proc if valid
2933 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2934 *
2935 * User-space normally keeps the node alive when creating a transaction
2936 * since it has a reference to the target. The local strong ref keeps it
2937 * alive if the sending process dies before the target process processes
2938 * the transaction. If the source process is malicious or has a reference
2939 * counting bug, relying on the local strong ref can fail.
2940 *
2941 * Since user-space can cause the local strong ref to go away, we also take
2942 * a tmpref on the node to ensure it survives while we are constructing
2943 * the transaction. We also need a tmpref on the proc while we are
2944 * constructing the transaction, so we take that here as well.
2945 *
2946 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2947 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2948 * target proc has died, @error is set to BR_DEAD_REPLY.
2949 */
2950static struct binder_node *binder_get_node_refs_for_txn(
2951 struct binder_node *node,
2952 struct binder_proc **procp,
2953 uint32_t *error)
2954{
2955 struct binder_node *target_node = NULL;
2956
2957 binder_node_inner_lock(node);
2958 if (node->proc) {
2959 target_node = node;
2960 binder_inc_node_nilocked(node, 1, 0, NULL);
2961 binder_inc_node_tmpref_ilocked(node);
2962 node->proc->tmp_ref++;
2963 *procp = node->proc;
2964 } else
2965 *error = BR_DEAD_REPLY;
2966 binder_node_inner_unlock(node);
2967
2968 return target_node;
2969}
2970
2971static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2972 uint32_t command, int32_t param)
2973{
2974 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2975
2976 if (!from) {
2977 /* annotation for sparse */
2978 __release(&from->proc->inner_lock);
2979 return;
2980 }
2981
2982 /* don't override existing errors */
2983 if (from->ee.command == BR_OK)
2984 binder_set_extended_error(&from->ee, id, command, param);
2985 binder_inner_proc_unlock(from->proc);
2986 binder_thread_dec_tmpref(from);
2987}
2988
2989static void binder_transaction(struct binder_proc *proc,
2990 struct binder_thread *thread,
2991 struct binder_transaction_data *tr, int reply,
2992 binder_size_t extra_buffers_size)
2993{
2994 int ret;
2995 struct binder_transaction *t;
2996 struct binder_work *w;
2997 struct binder_work *tcomplete;
2998 binder_size_t buffer_offset = 0;
2999 binder_size_t off_start_offset, off_end_offset;
3000 binder_size_t off_min;
3001 binder_size_t sg_buf_offset, sg_buf_end_offset;
3002 binder_size_t user_offset = 0;
3003 struct binder_proc *target_proc = NULL;
3004 struct binder_thread *target_thread = NULL;
3005 struct binder_node *target_node = NULL;
3006 struct binder_transaction *in_reply_to = NULL;
3007 struct binder_transaction_log_entry *e;
3008 uint32_t return_error = 0;
3009 uint32_t return_error_param = 0;
3010 uint32_t return_error_line = 0;
3011 binder_size_t last_fixup_obj_off = 0;
3012 binder_size_t last_fixup_min_off = 0;
3013 struct binder_context *context = proc->context;
3014 int t_debug_id = atomic_inc_return(&binder_last_id);
3015 ktime_t t_start_time = ktime_get();
3016 char *secctx = NULL;
3017 u32 secctx_sz = 0;
3018 struct list_head sgc_head;
3019 struct list_head pf_head;
3020 const void __user *user_buffer = (const void __user *)
3021 (uintptr_t)tr->data.ptr.buffer;
3022 INIT_LIST_HEAD(&sgc_head);
3023 INIT_LIST_HEAD(&pf_head);
3024
3025 e = binder_transaction_log_add(&binder_transaction_log);
3026 e->debug_id = t_debug_id;
3027 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3028 e->from_proc = proc->pid;
3029 e->from_thread = thread->pid;
3030 e->target_handle = tr->target.handle;
3031 e->data_size = tr->data_size;
3032 e->offsets_size = tr->offsets_size;
3033 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3034
3035 binder_inner_proc_lock(proc);
3036 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3037 binder_inner_proc_unlock(proc);
3038
3039 if (reply) {
3040 binder_inner_proc_lock(proc);
3041 in_reply_to = thread->transaction_stack;
3042 if (in_reply_to == NULL) {
3043 binder_inner_proc_unlock(proc);
3044 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3045 proc->pid, thread->pid);
3046 return_error = BR_FAILED_REPLY;
3047 return_error_param = -EPROTO;
3048 return_error_line = __LINE__;
3049 goto err_empty_call_stack;
3050 }
3051 if (in_reply_to->to_thread != thread) {
3052 spin_lock(&in_reply_to->lock);
3053 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3054 proc->pid, thread->pid, in_reply_to->debug_id,
3055 in_reply_to->to_proc ?
3056 in_reply_to->to_proc->pid : 0,
3057 in_reply_to->to_thread ?
3058 in_reply_to->to_thread->pid : 0);
3059 spin_unlock(&in_reply_to->lock);
3060 binder_inner_proc_unlock(proc);
3061 return_error = BR_FAILED_REPLY;
3062 return_error_param = -EPROTO;
3063 return_error_line = __LINE__;
3064 in_reply_to = NULL;
3065 goto err_bad_call_stack;
3066 }
3067 thread->transaction_stack = in_reply_to->to_parent;
3068 binder_inner_proc_unlock(proc);
3069 binder_set_nice(in_reply_to->saved_priority);
3070 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3071 if (target_thread == NULL) {
3072 /* annotation for sparse */
3073 __release(&target_thread->proc->inner_lock);
3074 binder_txn_error("%d:%d reply target not found\n",
3075 thread->pid, proc->pid);
3076 return_error = BR_DEAD_REPLY;
3077 return_error_line = __LINE__;
3078 goto err_dead_binder;
3079 }
3080 if (target_thread->transaction_stack != in_reply_to) {
3081 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3082 proc->pid, thread->pid,
3083 target_thread->transaction_stack ?
3084 target_thread->transaction_stack->debug_id : 0,
3085 in_reply_to->debug_id);
3086 binder_inner_proc_unlock(target_thread->proc);
3087 return_error = BR_FAILED_REPLY;
3088 return_error_param = -EPROTO;
3089 return_error_line = __LINE__;
3090 in_reply_to = NULL;
3091 target_thread = NULL;
3092 goto err_dead_binder;
3093 }
3094 target_proc = target_thread->proc;
3095 target_proc->tmp_ref++;
3096 binder_inner_proc_unlock(target_thread->proc);
3097 } else {
3098 if (tr->target.handle) {
3099 struct binder_ref *ref;
3100
3101 /*
3102 * There must already be a strong ref
3103 * on this node. If so, do a strong
3104 * increment on the node to ensure it
3105 * stays alive until the transaction is
3106 * done.
3107 */
3108 binder_proc_lock(proc);
3109 ref = binder_get_ref_olocked(proc, tr->target.handle,
3110 true);
3111 if (ref) {
3112 target_node = binder_get_node_refs_for_txn(
3113 ref->node, &target_proc,
3114 &return_error);
3115 } else {
3116 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3117 proc->pid, thread->pid, tr->target.handle);
3118 return_error = BR_FAILED_REPLY;
3119 }
3120 binder_proc_unlock(proc);
3121 } else {
3122 mutex_lock(&context->context_mgr_node_lock);
3123 target_node = context->binder_context_mgr_node;
3124 if (target_node)
3125 target_node = binder_get_node_refs_for_txn(
3126 target_node, &target_proc,
3127 &return_error);
3128 else
3129 return_error = BR_DEAD_REPLY;
3130 mutex_unlock(&context->context_mgr_node_lock);
3131 if (target_node && target_proc->pid == proc->pid) {
3132 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3133 proc->pid, thread->pid);
3134 return_error = BR_FAILED_REPLY;
3135 return_error_param = -EINVAL;
3136 return_error_line = __LINE__;
3137 goto err_invalid_target_handle;
3138 }
3139 }
3140 if (!target_node) {
3141 binder_txn_error("%d:%d cannot find target node\n",
3142 thread->pid, proc->pid);
3143 /*
3144 * return_error is set above
3145 */
3146 return_error_param = -EINVAL;
3147 return_error_line = __LINE__;
3148 goto err_dead_binder;
3149 }
3150 e->to_node = target_node->debug_id;
3151 if (WARN_ON(proc == target_proc)) {
3152 binder_txn_error("%d:%d self transactions not allowed\n",
3153 thread->pid, proc->pid);
3154 return_error = BR_FAILED_REPLY;
3155 return_error_param = -EINVAL;
3156 return_error_line = __LINE__;
3157 goto err_invalid_target_handle;
3158 }
3159 if (security_binder_transaction(proc->cred,
3160 target_proc->cred) < 0) {
3161 binder_txn_error("%d:%d transaction credentials failed\n",
3162 thread->pid, proc->pid);
3163 return_error = BR_FAILED_REPLY;
3164 return_error_param = -EPERM;
3165 return_error_line = __LINE__;
3166 goto err_invalid_target_handle;
3167 }
3168 binder_inner_proc_lock(proc);
3169
3170 w = list_first_entry_or_null(&thread->todo,
3171 struct binder_work, entry);
3172 if (!(tr->flags & TF_ONE_WAY) && w &&
3173 w->type == BINDER_WORK_TRANSACTION) {
3174 /*
3175 * Do not allow new outgoing transaction from a
3176 * thread that has a transaction at the head of
3177 * its todo list. Only need to check the head
3178 * because binder_select_thread_ilocked picks a
3179 * thread from proc->waiting_threads to enqueue
3180 * the transaction, and nothing is queued to the
3181 * todo list while the thread is on waiting_threads.
3182 */
3183 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3184 proc->pid, thread->pid);
3185 binder_inner_proc_unlock(proc);
3186 return_error = BR_FAILED_REPLY;
3187 return_error_param = -EPROTO;
3188 return_error_line = __LINE__;
3189 goto err_bad_todo_list;
3190 }
3191
3192 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3193 struct binder_transaction *tmp;
3194
3195 tmp = thread->transaction_stack;
3196 if (tmp->to_thread != thread) {
3197 spin_lock(&tmp->lock);
3198 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3199 proc->pid, thread->pid, tmp->debug_id,
3200 tmp->to_proc ? tmp->to_proc->pid : 0,
3201 tmp->to_thread ?
3202 tmp->to_thread->pid : 0);
3203 spin_unlock(&tmp->lock);
3204 binder_inner_proc_unlock(proc);
3205 return_error = BR_FAILED_REPLY;
3206 return_error_param = -EPROTO;
3207 return_error_line = __LINE__;
3208 goto err_bad_call_stack;
3209 }
3210 while (tmp) {
3211 struct binder_thread *from;
3212
3213 spin_lock(&tmp->lock);
3214 from = tmp->from;
3215 if (from && from->proc == target_proc) {
3216 atomic_inc(&from->tmp_ref);
3217 target_thread = from;
3218 spin_unlock(&tmp->lock);
3219 break;
3220 }
3221 spin_unlock(&tmp->lock);
3222 tmp = tmp->from_parent;
3223 }
3224 }
3225 binder_inner_proc_unlock(proc);
3226 }
3227 if (target_thread)
3228 e->to_thread = target_thread->pid;
3229 e->to_proc = target_proc->pid;
3230
3231 /* TODO: reuse incoming transaction for reply */
3232 t = kzalloc(sizeof(*t), GFP_KERNEL);
3233 if (t == NULL) {
3234 binder_txn_error("%d:%d cannot allocate transaction\n",
3235 thread->pid, proc->pid);
3236 return_error = BR_FAILED_REPLY;
3237 return_error_param = -ENOMEM;
3238 return_error_line = __LINE__;
3239 goto err_alloc_t_failed;
3240 }
3241 INIT_LIST_HEAD(&t->fd_fixups);
3242 binder_stats_created(BINDER_STAT_TRANSACTION);
3243 spin_lock_init(&t->lock);
3244
3245 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3246 if (tcomplete == NULL) {
3247 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3248 thread->pid, proc->pid);
3249 return_error = BR_FAILED_REPLY;
3250 return_error_param = -ENOMEM;
3251 return_error_line = __LINE__;
3252 goto err_alloc_tcomplete_failed;
3253 }
3254 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3255
3256 t->debug_id = t_debug_id;
3257 t->start_time = t_start_time;
3258
3259 if (reply)
3260 binder_debug(BINDER_DEBUG_TRANSACTION,
3261 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3262 proc->pid, thread->pid, t->debug_id,
3263 target_proc->pid, target_thread->pid,
3264 (u64)tr->data.ptr.buffer,
3265 (u64)tr->data.ptr.offsets,
3266 (u64)tr->data_size, (u64)tr->offsets_size,
3267 (u64)extra_buffers_size);
3268 else
3269 binder_debug(BINDER_DEBUG_TRANSACTION,
3270 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3271 proc->pid, thread->pid, t->debug_id,
3272 target_proc->pid, target_node->debug_id,
3273 (u64)tr->data.ptr.buffer,
3274 (u64)tr->data.ptr.offsets,
3275 (u64)tr->data_size, (u64)tr->offsets_size,
3276 (u64)extra_buffers_size);
3277
3278 if (!reply && !(tr->flags & TF_ONE_WAY))
3279 t->from = thread;
3280 else
3281 t->from = NULL;
3282 t->from_pid = proc->pid;
3283 t->from_tid = thread->pid;
3284 t->sender_euid = task_euid(proc->tsk);
3285 t->to_proc = target_proc;
3286 t->to_thread = target_thread;
3287 t->code = tr->code;
3288 t->flags = tr->flags;
3289 t->priority = task_nice(current);
3290
3291 if (target_node && target_node->txn_security_ctx) {
3292 u32 secid;
3293 size_t added_size;
3294
3295 security_cred_getsecid(proc->cred, &secid);
3296 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3297 if (ret) {
3298 binder_txn_error("%d:%d failed to get security context\n",
3299 thread->pid, proc->pid);
3300 return_error = BR_FAILED_REPLY;
3301 return_error_param = ret;
3302 return_error_line = __LINE__;
3303 goto err_get_secctx_failed;
3304 }
3305 added_size = ALIGN(secctx_sz, sizeof(u64));
3306 extra_buffers_size += added_size;
3307 if (extra_buffers_size < added_size) {
3308 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3309 thread->pid, proc->pid);
3310 return_error = BR_FAILED_REPLY;
3311 return_error_param = -EINVAL;
3312 return_error_line = __LINE__;
3313 goto err_bad_extra_size;
3314 }
3315 }
3316
3317 trace_binder_transaction(reply, t, target_node);
3318
3319 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3320 tr->offsets_size, extra_buffers_size,
3321 !reply && (t->flags & TF_ONE_WAY));
3322 if (IS_ERR(t->buffer)) {
3323 char *s;
3324
3325 ret = PTR_ERR(t->buffer);
3326 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3327 : (ret == -ENOSPC) ? ": no space left"
3328 : (ret == -ENOMEM) ? ": memory allocation failed"
3329 : "";
3330 binder_txn_error("cannot allocate buffer%s", s);
3331
3332 return_error_param = PTR_ERR(t->buffer);
3333 return_error = return_error_param == -ESRCH ?
3334 BR_DEAD_REPLY : BR_FAILED_REPLY;
3335 return_error_line = __LINE__;
3336 t->buffer = NULL;
3337 goto err_binder_alloc_buf_failed;
3338 }
3339 if (secctx) {
3340 int err;
3341 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3342 ALIGN(tr->offsets_size, sizeof(void *)) +
3343 ALIGN(extra_buffers_size, sizeof(void *)) -
3344 ALIGN(secctx_sz, sizeof(u64));
3345
3346 t->security_ctx = t->buffer->user_data + buf_offset;
3347 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3348 t->buffer, buf_offset,
3349 secctx, secctx_sz);
3350 if (err) {
3351 t->security_ctx = 0;
3352 WARN_ON(1);
3353 }
3354 security_release_secctx(secctx, secctx_sz);
3355 secctx = NULL;
3356 }
3357 t->buffer->debug_id = t->debug_id;
3358 t->buffer->transaction = t;
3359 t->buffer->target_node = target_node;
3360 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3361 trace_binder_transaction_alloc_buf(t->buffer);
3362
3363 if (binder_alloc_copy_user_to_buffer(
3364 &target_proc->alloc,
3365 t->buffer,
3366 ALIGN(tr->data_size, sizeof(void *)),
3367 (const void __user *)
3368 (uintptr_t)tr->data.ptr.offsets,
3369 tr->offsets_size)) {
3370 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3371 proc->pid, thread->pid);
3372 return_error = BR_FAILED_REPLY;
3373 return_error_param = -EFAULT;
3374 return_error_line = __LINE__;
3375 goto err_copy_data_failed;
3376 }
3377 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3378 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3379 proc->pid, thread->pid, (u64)tr->offsets_size);
3380 return_error = BR_FAILED_REPLY;
3381 return_error_param = -EINVAL;
3382 return_error_line = __LINE__;
3383 goto err_bad_offset;
3384 }
3385 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3386 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3387 proc->pid, thread->pid,
3388 (u64)extra_buffers_size);
3389 return_error = BR_FAILED_REPLY;
3390 return_error_param = -EINVAL;
3391 return_error_line = __LINE__;
3392 goto err_bad_offset;
3393 }
3394 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3395 buffer_offset = off_start_offset;
3396 off_end_offset = off_start_offset + tr->offsets_size;
3397 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3398 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3399 ALIGN(secctx_sz, sizeof(u64));
3400 off_min = 0;
3401 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3402 buffer_offset += sizeof(binder_size_t)) {
3403 struct binder_object_header *hdr;
3404 size_t object_size;
3405 struct binder_object object;
3406 binder_size_t object_offset;
3407 binder_size_t copy_size;
3408
3409 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3410 &object_offset,
3411 t->buffer,
3412 buffer_offset,
3413 sizeof(object_offset))) {
3414 binder_txn_error("%d:%d copy offset from buffer failed\n",
3415 thread->pid, proc->pid);
3416 return_error = BR_FAILED_REPLY;
3417 return_error_param = -EINVAL;
3418 return_error_line = __LINE__;
3419 goto err_bad_offset;
3420 }
3421
3422 /*
3423 * Copy the source user buffer up to the next object
3424 * that will be processed.
3425 */
3426 copy_size = object_offset - user_offset;
3427 if (copy_size && (user_offset > object_offset ||
3428 binder_alloc_copy_user_to_buffer(
3429 &target_proc->alloc,
3430 t->buffer, user_offset,
3431 user_buffer + user_offset,
3432 copy_size))) {
3433 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3434 proc->pid, thread->pid);
3435 return_error = BR_FAILED_REPLY;
3436 return_error_param = -EFAULT;
3437 return_error_line = __LINE__;
3438 goto err_copy_data_failed;
3439 }
3440 object_size = binder_get_object(target_proc, user_buffer,
3441 t->buffer, object_offset, &object);
3442 if (object_size == 0 || object_offset < off_min) {
3443 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3444 proc->pid, thread->pid,
3445 (u64)object_offset,
3446 (u64)off_min,
3447 (u64)t->buffer->data_size);
3448 return_error = BR_FAILED_REPLY;
3449 return_error_param = -EINVAL;
3450 return_error_line = __LINE__;
3451 goto err_bad_offset;
3452 }
3453 /*
3454 * Set offset to the next buffer fragment to be
3455 * copied
3456 */
3457 user_offset = object_offset + object_size;
3458
3459 hdr = &object.hdr;
3460 off_min = object_offset + object_size;
3461 switch (hdr->type) {
3462 case BINDER_TYPE_BINDER:
3463 case BINDER_TYPE_WEAK_BINDER: {
3464 struct flat_binder_object *fp;
3465
3466 fp = to_flat_binder_object(hdr);
3467 ret = binder_translate_binder(fp, t, thread);
3468
3469 if (ret < 0 ||
3470 binder_alloc_copy_to_buffer(&target_proc->alloc,
3471 t->buffer,
3472 object_offset,
3473 fp, sizeof(*fp))) {
3474 binder_txn_error("%d:%d translate binder failed\n",
3475 thread->pid, proc->pid);
3476 return_error = BR_FAILED_REPLY;
3477 return_error_param = ret;
3478 return_error_line = __LINE__;
3479 goto err_translate_failed;
3480 }
3481 } break;
3482 case BINDER_TYPE_HANDLE:
3483 case BINDER_TYPE_WEAK_HANDLE: {
3484 struct flat_binder_object *fp;
3485
3486 fp = to_flat_binder_object(hdr);
3487 ret = binder_translate_handle(fp, t, thread);
3488 if (ret < 0 ||
3489 binder_alloc_copy_to_buffer(&target_proc->alloc,
3490 t->buffer,
3491 object_offset,
3492 fp, sizeof(*fp))) {
3493 binder_txn_error("%d:%d translate handle failed\n",
3494 thread->pid, proc->pid);
3495 return_error = BR_FAILED_REPLY;
3496 return_error_param = ret;
3497 return_error_line = __LINE__;
3498 goto err_translate_failed;
3499 }
3500 } break;
3501
3502 case BINDER_TYPE_FD: {
3503 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3504 binder_size_t fd_offset = object_offset +
3505 (uintptr_t)&fp->fd - (uintptr_t)fp;
3506 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3507 thread, in_reply_to);
3508
3509 fp->pad_binder = 0;
3510 if (ret < 0 ||
3511 binder_alloc_copy_to_buffer(&target_proc->alloc,
3512 t->buffer,
3513 object_offset,
3514 fp, sizeof(*fp))) {
3515 binder_txn_error("%d:%d translate fd failed\n",
3516 thread->pid, proc->pid);
3517 return_error = BR_FAILED_REPLY;
3518 return_error_param = ret;
3519 return_error_line = __LINE__;
3520 goto err_translate_failed;
3521 }
3522 } break;
3523 case BINDER_TYPE_FDA: {
3524 struct binder_object ptr_object;
3525 binder_size_t parent_offset;
3526 struct binder_object user_object;
3527 size_t user_parent_size;
3528 struct binder_fd_array_object *fda =
3529 to_binder_fd_array_object(hdr);
3530 size_t num_valid = (buffer_offset - off_start_offset) /
3531 sizeof(binder_size_t);
3532 struct binder_buffer_object *parent =
3533 binder_validate_ptr(target_proc, t->buffer,
3534 &ptr_object, fda->parent,
3535 off_start_offset,
3536 &parent_offset,
3537 num_valid);
3538 if (!parent) {
3539 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3540 proc->pid, thread->pid);
3541 return_error = BR_FAILED_REPLY;
3542 return_error_param = -EINVAL;
3543 return_error_line = __LINE__;
3544 goto err_bad_parent;
3545 }
3546 if (!binder_validate_fixup(target_proc, t->buffer,
3547 off_start_offset,
3548 parent_offset,
3549 fda->parent_offset,
3550 last_fixup_obj_off,
3551 last_fixup_min_off)) {
3552 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3553 proc->pid, thread->pid);
3554 return_error = BR_FAILED_REPLY;
3555 return_error_param = -EINVAL;
3556 return_error_line = __LINE__;
3557 goto err_bad_parent;
3558 }
3559 /*
3560 * We need to read the user version of the parent
3561 * object to get the original user offset
3562 */
3563 user_parent_size =
3564 binder_get_object(proc, user_buffer, t->buffer,
3565 parent_offset, &user_object);
3566 if (user_parent_size != sizeof(user_object.bbo)) {
3567 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3568 proc->pid, thread->pid,
3569 user_parent_size,
3570 sizeof(user_object.bbo));
3571 return_error = BR_FAILED_REPLY;
3572 return_error_param = -EINVAL;
3573 return_error_line = __LINE__;
3574 goto err_bad_parent;
3575 }
3576 ret = binder_translate_fd_array(&pf_head, fda,
3577 user_buffer, parent,
3578 &user_object.bbo, t,
3579 thread, in_reply_to);
3580 if (!ret)
3581 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3582 t->buffer,
3583 object_offset,
3584 fda, sizeof(*fda));
3585 if (ret) {
3586 binder_txn_error("%d:%d translate fd array failed\n",
3587 thread->pid, proc->pid);
3588 return_error = BR_FAILED_REPLY;
3589 return_error_param = ret > 0 ? -EINVAL : ret;
3590 return_error_line = __LINE__;
3591 goto err_translate_failed;
3592 }
3593 last_fixup_obj_off = parent_offset;
3594 last_fixup_min_off =
3595 fda->parent_offset + sizeof(u32) * fda->num_fds;
3596 } break;
3597 case BINDER_TYPE_PTR: {
3598 struct binder_buffer_object *bp =
3599 to_binder_buffer_object(hdr);
3600 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3601 size_t num_valid;
3602
3603 if (bp->length > buf_left) {
3604 binder_user_error("%d:%d got transaction with too large buffer\n",
3605 proc->pid, thread->pid);
3606 return_error = BR_FAILED_REPLY;
3607 return_error_param = -EINVAL;
3608 return_error_line = __LINE__;
3609 goto err_bad_offset;
3610 }
3611 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3612 (const void __user *)(uintptr_t)bp->buffer,
3613 bp->length);
3614 if (ret) {
3615 binder_txn_error("%d:%d deferred copy failed\n",
3616 thread->pid, proc->pid);
3617 return_error = BR_FAILED_REPLY;
3618 return_error_param = ret;
3619 return_error_line = __LINE__;
3620 goto err_translate_failed;
3621 }
3622			/* Fix up the buffer pointer to the target proc's address space */
3623 bp->buffer = t->buffer->user_data + sg_buf_offset;
3624 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3625
3626 num_valid = (buffer_offset - off_start_offset) /
3627 sizeof(binder_size_t);
3628 ret = binder_fixup_parent(&pf_head, t,
3629 thread, bp,
3630 off_start_offset,
3631 num_valid,
3632 last_fixup_obj_off,
3633 last_fixup_min_off);
3634 if (ret < 0 ||
3635 binder_alloc_copy_to_buffer(&target_proc->alloc,
3636 t->buffer,
3637 object_offset,
3638 bp, sizeof(*bp))) {
3639 binder_txn_error("%d:%d failed to fixup parent\n",
3640 thread->pid, proc->pid);
3641 return_error = BR_FAILED_REPLY;
3642 return_error_param = ret;
3643 return_error_line = __LINE__;
3644 goto err_translate_failed;
3645 }
3646 last_fixup_obj_off = object_offset;
3647 last_fixup_min_off = 0;
3648 } break;
3649 default:
3650 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3651 proc->pid, thread->pid, hdr->type);
3652 return_error = BR_FAILED_REPLY;
3653 return_error_param = -EINVAL;
3654 return_error_line = __LINE__;
3655 goto err_bad_object_type;
3656 }
3657 }
3658 /* Done processing objects, copy the rest of the buffer */
3659 if (binder_alloc_copy_user_to_buffer(
3660 &target_proc->alloc,
3661 t->buffer, user_offset,
3662 user_buffer + user_offset,
3663 tr->data_size - user_offset)) {
3664 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3665 proc->pid, thread->pid);
3666 return_error = BR_FAILED_REPLY;
3667 return_error_param = -EFAULT;
3668 return_error_line = __LINE__;
3669 goto err_copy_data_failed;
3670 }
3671
3672 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3673 &sgc_head, &pf_head);
3674 if (ret) {
3675 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3676 proc->pid, thread->pid);
3677 return_error = BR_FAILED_REPLY;
3678 return_error_param = ret;
3679 return_error_line = __LINE__;
3680 goto err_copy_data_failed;
3681 }
3682 if (t->buffer->oneway_spam_suspect)
3683 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3684 else
3685 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3686 t->work.type = BINDER_WORK_TRANSACTION;
3687
3688 if (reply) {
3689 binder_enqueue_thread_work(thread, tcomplete);
3690 binder_inner_proc_lock(target_proc);
3691 if (target_thread->is_dead) {
3692 return_error = BR_DEAD_REPLY;
3693 binder_inner_proc_unlock(target_proc);
3694 goto err_dead_proc_or_thread;
3695 }
3696 BUG_ON(t->buffer->async_transaction != 0);
3697 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3698 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3699 target_proc->outstanding_txns++;
3700 binder_inner_proc_unlock(target_proc);
3701 wake_up_interruptible_sync(&target_thread->wait);
3702 binder_free_transaction(in_reply_to);
3703 } else if (!(t->flags & TF_ONE_WAY)) {
3704 BUG_ON(t->buffer->async_transaction != 0);
3705 binder_inner_proc_lock(proc);
3706 /*
3707 * Defer the TRANSACTION_COMPLETE, so we don't return to
3708 * userspace immediately; this allows the target process to
3709 * immediately start processing this transaction, reducing
3710 * latency. We will then return the TRANSACTION_COMPLETE when
3711 * the target replies (or there is an error).
3712 */
3713 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3714 t->need_reply = 1;
3715 t->from_parent = thread->transaction_stack;
3716 thread->transaction_stack = t;
3717 binder_inner_proc_unlock(proc);
3718 return_error = binder_proc_transaction(t,
3719 target_proc, target_thread);
3720 if (return_error) {
3721 binder_inner_proc_lock(proc);
3722 binder_pop_transaction_ilocked(thread, t);
3723 binder_inner_proc_unlock(proc);
3724 goto err_dead_proc_or_thread;
3725 }
3726 } else {
3727 BUG_ON(target_node == NULL);
3728 BUG_ON(t->buffer->async_transaction != 1);
3729 return_error = binder_proc_transaction(t, target_proc, NULL);
3730 /*
3731 * Let the caller know when async transaction reaches a frozen
3732 * process and is put in a pending queue, waiting for the target
3733 * process to be unfrozen.
3734 */
3735 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3736 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3737 binder_enqueue_thread_work(thread, tcomplete);
3738 if (return_error &&
3739 return_error != BR_TRANSACTION_PENDING_FROZEN)
3740 goto err_dead_proc_or_thread;
3741 }
3742 if (target_thread)
3743 binder_thread_dec_tmpref(target_thread);
3744 binder_proc_dec_tmpref(target_proc);
3745 if (target_node)
3746 binder_dec_node_tmpref(target_node);
3747 /*
3748 * write barrier to synchronize with initialization
3749 * of log entry
3750 */
3751 smp_wmb();
3752 WRITE_ONCE(e->debug_id_done, t_debug_id);
3753 return;
3754
3755err_dead_proc_or_thread:
3756 binder_txn_error("%d:%d dead process or thread\n",
3757 thread->pid, proc->pid);
3758 return_error_line = __LINE__;
3759 binder_dequeue_work(proc, tcomplete);
3760err_translate_failed:
3761err_bad_object_type:
3762err_bad_offset:
3763err_bad_parent:
3764err_copy_data_failed:
3765 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3766 binder_free_txn_fixups(t);
3767 trace_binder_transaction_failed_buffer_release(t->buffer);
3768 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3769 buffer_offset, true);
3770 if (target_node)
3771 binder_dec_node_tmpref(target_node);
3772 target_node = NULL;
3773 t->buffer->transaction = NULL;
3774 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3775err_binder_alloc_buf_failed:
3776err_bad_extra_size:
3777 if (secctx)
3778 security_release_secctx(secctx, secctx_sz);
3779err_get_secctx_failed:
3780 kfree(tcomplete);
3781 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3782err_alloc_tcomplete_failed:
3783 if (trace_binder_txn_latency_free_enabled())
3784 binder_txn_latency_free(t);
3785 kfree(t);
3786 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3787err_alloc_t_failed:
3788err_bad_todo_list:
3789err_bad_call_stack:
3790err_empty_call_stack:
3791err_dead_binder:
3792err_invalid_target_handle:
3793 if (target_node) {
3794 binder_dec_node(target_node, 1, 0);
3795 binder_dec_node_tmpref(target_node);
3796 }
3797
3798 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3799 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3800 proc->pid, thread->pid, reply ? "reply" :
3801 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3802 target_proc ? target_proc->pid : 0,
3803 target_thread ? target_thread->pid : 0,
3804 t_debug_id, return_error, return_error_param,
3805 (u64)tr->data_size, (u64)tr->offsets_size,
3806 return_error_line);
3807
3808 if (target_thread)
3809 binder_thread_dec_tmpref(target_thread);
3810 if (target_proc)
3811 binder_proc_dec_tmpref(target_proc);
3812
3813 {
3814 struct binder_transaction_log_entry *fe;
3815
3816 e->return_error = return_error;
3817 e->return_error_param = return_error_param;
3818 e->return_error_line = return_error_line;
3819 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3820 *fe = *e;
3821 /*
3822 * write barrier to synchronize with initialization
3823 * of log entry
3824 */
3825 smp_wmb();
3826 WRITE_ONCE(e->debug_id_done, t_debug_id);
3827 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3828 }
3829
3830 BUG_ON(thread->return_error.cmd != BR_OK);
3831 if (in_reply_to) {
3832 binder_set_txn_from_error(in_reply_to, t_debug_id,
3833 return_error, return_error_param);
3834 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3835 binder_enqueue_thread_work(thread, &thread->return_error.work);
3836 binder_send_failed_reply(in_reply_to, return_error);
3837 } else {
3838 binder_inner_proc_lock(proc);
3839 binder_set_extended_error(&thread->ee, t_debug_id,
3840 return_error, return_error_param);
3841 binder_inner_proc_unlock(proc);
3842 thread->return_error.cmd = return_error;
3843 binder_enqueue_thread_work(thread, &thread->return_error.work);
3844 }
3845}
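/*
 * Illustrative note (not driver code): every failure path above records
 * return_error, return_error_param and the kernel source line in the
 * failed-transaction log, so an aborted IPC can usually be triaged from
 * debugfs without tracing, e.g. (assuming the usual debugfs mount point):
 *
 *	cat /sys/kernel/debug/binder/failed_transaction_log
 */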
3846
3847/**
3848 * binder_free_buf() - free the specified buffer
3849 * @proc:	binder proc that owns buffer
 * @thread:	binder thread performing the buffer release
3850 * @buffer: buffer to be freed
3851 * @is_failure: failed to send transaction
3852 *
3853 * If the buffer is for an async transaction, enqueue the next async
3854 * transaction from the node.
3855 *
3856 * Cleanup buffer and free it.
3857 */
3858static void
3859binder_free_buf(struct binder_proc *proc,
3860 struct binder_thread *thread,
3861 struct binder_buffer *buffer, bool is_failure)
3862{
3863 binder_inner_proc_lock(proc);
3864 if (buffer->transaction) {
3865 buffer->transaction->buffer = NULL;
3866 buffer->transaction = NULL;
3867 }
3868 binder_inner_proc_unlock(proc);
3869 if (buffer->async_transaction && buffer->target_node) {
3870 struct binder_node *buf_node;
3871 struct binder_work *w;
3872
3873 buf_node = buffer->target_node;
3874 binder_node_inner_lock(buf_node);
3875 BUG_ON(!buf_node->has_async_transaction);
3876 BUG_ON(buf_node->proc != proc);
3877 w = binder_dequeue_work_head_ilocked(
3878 &buf_node->async_todo);
3879 if (!w) {
3880 buf_node->has_async_transaction = false;
3881 } else {
3882 binder_enqueue_work_ilocked(
3883 w, &proc->todo);
3884 binder_wakeup_proc_ilocked(proc);
3885 }
3886 binder_node_inner_unlock(buf_node);
3887 }
3888 trace_binder_transaction_buffer_release(buffer);
3889 binder_release_entire_buffer(proc, thread, buffer, is_failure);
3890 binder_alloc_free_buf(&proc->alloc, buffer);
3891}
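/*
 * Illustrative sketch (hypothetical userspace, not driver code): a
 * consumed transaction buffer is handed back with BC_FREE_BUFFER, which
 * is what ultimately reaches binder_free_buf() above. binder_fd is a
 * placeholder for an open /dev/binder descriptor:
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t ptr;
 *	} __attribute__((packed)) wr = {
 *		.cmd = BC_FREE_BUFFER,
 *		.ptr = tr.data.ptr.buffer,	// buffer from a BR_TRANSACTION
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(wr),
 *		.write_buffer = (binder_uintptr_t)&wr,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */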
3892
3893static int binder_thread_write(struct binder_proc *proc,
3894 struct binder_thread *thread,
3895 binder_uintptr_t binder_buffer, size_t size,
3896 binder_size_t *consumed)
3897{
3898 uint32_t cmd;
3899 struct binder_context *context = proc->context;
3900 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3901 void __user *ptr = buffer + *consumed;
3902 void __user *end = buffer + size;
3903
3904 while (ptr < end && thread->return_error.cmd == BR_OK) {
3905 int ret;
3906
3907 if (get_user(cmd, (uint32_t __user *)ptr))
3908 return -EFAULT;
3909 ptr += sizeof(uint32_t);
3910 trace_binder_command(cmd);
3911 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3912 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3913 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3914 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3915 }
3916 switch (cmd) {
3917 case BC_INCREFS:
3918 case BC_ACQUIRE:
3919 case BC_RELEASE:
3920 case BC_DECREFS: {
3921 uint32_t target;
3922 const char *debug_string;
3923 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3924 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3925 struct binder_ref_data rdata;
3926
3927 if (get_user(target, (uint32_t __user *)ptr))
3928 return -EFAULT;
3929
3930 ptr += sizeof(uint32_t);
3931 ret = -1;
3932 if (increment && !target) {
3933 struct binder_node *ctx_mgr_node;
3934
3935 mutex_lock(&context->context_mgr_node_lock);
3936 ctx_mgr_node = context->binder_context_mgr_node;
3937 if (ctx_mgr_node) {
3938 if (ctx_mgr_node->proc == proc) {
3939 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3940 proc->pid, thread->pid);
3941 mutex_unlock(&context->context_mgr_node_lock);
3942 return -EINVAL;
3943 }
3944 ret = binder_inc_ref_for_node(
3945 proc, ctx_mgr_node,
3946 strong, NULL, &rdata);
3947 }
3948 mutex_unlock(&context->context_mgr_node_lock);
3949 }
3950 if (ret)
3951 ret = binder_update_ref_for_handle(
3952 proc, target, increment, strong,
3953 &rdata);
3954 if (!ret && rdata.desc != target) {
3955 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3956 proc->pid, thread->pid,
3957 target, rdata.desc);
3958 }
3959 switch (cmd) {
3960 case BC_INCREFS:
3961 debug_string = "IncRefs";
3962 break;
3963 case BC_ACQUIRE:
3964 debug_string = "Acquire";
3965 break;
3966 case BC_RELEASE:
3967 debug_string = "Release";
3968 break;
3969 case BC_DECREFS:
3970 default:
3971 debug_string = "DecRefs";
3972 break;
3973 }
3974 if (ret) {
3975 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3976 proc->pid, thread->pid, debug_string,
3977 strong, target, ret);
3978 break;
3979 }
3980 binder_debug(BINDER_DEBUG_USER_REFS,
3981 "%d:%d %s ref %d desc %d s %d w %d\n",
3982 proc->pid, thread->pid, debug_string,
3983 rdata.debug_id, rdata.desc, rdata.strong,
3984 rdata.weak);
3985 break;
3986 }
3987 case BC_INCREFS_DONE:
3988 case BC_ACQUIRE_DONE: {
3989 binder_uintptr_t node_ptr;
3990 binder_uintptr_t cookie;
3991 struct binder_node *node;
3992 bool free_node;
3993
3994 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3995 return -EFAULT;
3996 ptr += sizeof(binder_uintptr_t);
3997 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3998 return -EFAULT;
3999 ptr += sizeof(binder_uintptr_t);
4000 node = binder_get_node(proc, node_ptr);
4001 if (node == NULL) {
4002 binder_user_error("%d:%d %s u%016llx no match\n",
4003 proc->pid, thread->pid,
4004 cmd == BC_INCREFS_DONE ?
4005 "BC_INCREFS_DONE" :
4006 "BC_ACQUIRE_DONE",
4007 (u64)node_ptr);
4008 break;
4009 }
4010 if (cookie != node->cookie) {
4011 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4012 proc->pid, thread->pid,
4013 cmd == BC_INCREFS_DONE ?
4014 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4015 (u64)node_ptr, node->debug_id,
4016 (u64)cookie, (u64)node->cookie);
4017 binder_put_node(node);
4018 break;
4019 }
4020 binder_node_inner_lock(node);
4021 if (cmd == BC_ACQUIRE_DONE) {
4022 if (node->pending_strong_ref == 0) {
4023 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4024 proc->pid, thread->pid,
4025 node->debug_id);
4026 binder_node_inner_unlock(node);
4027 binder_put_node(node);
4028 break;
4029 }
4030 node->pending_strong_ref = 0;
4031 } else {
4032 if (node->pending_weak_ref == 0) {
4033 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4034 proc->pid, thread->pid,
4035 node->debug_id);
4036 binder_node_inner_unlock(node);
4037 binder_put_node(node);
4038 break;
4039 }
4040 node->pending_weak_ref = 0;
4041 }
4042 free_node = binder_dec_node_nilocked(node,
4043 cmd == BC_ACQUIRE_DONE, 0);
4044 WARN_ON(free_node);
4045 binder_debug(BINDER_DEBUG_USER_REFS,
4046 "%d:%d %s node %d ls %d lw %d tr %d\n",
4047 proc->pid, thread->pid,
4048 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4049 node->debug_id, node->local_strong_refs,
4050 node->local_weak_refs, node->tmp_refs);
4051 binder_node_inner_unlock(node);
4052 binder_put_node(node);
4053 break;
4054 }
4055 case BC_ATTEMPT_ACQUIRE:
4056 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4057 return -EINVAL;
4058 case BC_ACQUIRE_RESULT:
4059 pr_err("BC_ACQUIRE_RESULT not supported\n");
4060 return -EINVAL;
4061
4062 case BC_FREE_BUFFER: {
4063 binder_uintptr_t data_ptr;
4064 struct binder_buffer *buffer;
4065
4066 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4067 return -EFAULT;
4068 ptr += sizeof(binder_uintptr_t);
4069
4070 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4071 data_ptr);
4072 if (IS_ERR_OR_NULL(buffer)) {
4073 if (PTR_ERR(buffer) == -EPERM) {
4074 binder_user_error(
4075 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4076 proc->pid, thread->pid,
4077 (u64)data_ptr);
4078 } else {
4079 binder_user_error(
4080 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
4081 proc->pid, thread->pid,
4082 (u64)data_ptr);
4083 }
4084 break;
4085 }
4086 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4087 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4088 proc->pid, thread->pid, (u64)data_ptr,
4089 buffer->debug_id,
4090 buffer->transaction ? "active" : "finished");
4091 binder_free_buf(proc, thread, buffer, false);
4092 break;
4093 }
4094
4095 case BC_TRANSACTION_SG:
4096 case BC_REPLY_SG: {
4097 struct binder_transaction_data_sg tr;
4098
4099 if (copy_from_user(&tr, ptr, sizeof(tr)))
4100 return -EFAULT;
4101 ptr += sizeof(tr);
4102 binder_transaction(proc, thread, &tr.transaction_data,
4103 cmd == BC_REPLY_SG, tr.buffers_size);
4104 break;
4105 }
4106 case BC_TRANSACTION:
4107 case BC_REPLY: {
4108 struct binder_transaction_data tr;
4109
4110 if (copy_from_user(&tr, ptr, sizeof(tr)))
4111 return -EFAULT;
4112 ptr += sizeof(tr);
4113 binder_transaction(proc, thread, &tr,
4114 cmd == BC_REPLY, 0);
4115 break;
4116 }
4117
4118 case BC_REGISTER_LOOPER:
4119 binder_debug(BINDER_DEBUG_THREADS,
4120 "%d:%d BC_REGISTER_LOOPER\n",
4121 proc->pid, thread->pid);
4122 binder_inner_proc_lock(proc);
4123 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4124 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4125 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4126 proc->pid, thread->pid);
4127 } else if (proc->requested_threads == 0) {
4128 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4129 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4130 proc->pid, thread->pid);
4131 } else {
4132 proc->requested_threads--;
4133 proc->requested_threads_started++;
4134 }
4135 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4136 binder_inner_proc_unlock(proc);
4137 break;
4138 case BC_ENTER_LOOPER:
4139 binder_debug(BINDER_DEBUG_THREADS,
4140 "%d:%d BC_ENTER_LOOPER\n",
4141 proc->pid, thread->pid);
4142 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4143 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4144 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4145 proc->pid, thread->pid);
4146 }
4147 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4148 break;
4149 case BC_EXIT_LOOPER:
4150 binder_debug(BINDER_DEBUG_THREADS,
4151 "%d:%d BC_EXIT_LOOPER\n",
4152 proc->pid, thread->pid);
4153 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4154 break;
4155
4156 case BC_REQUEST_DEATH_NOTIFICATION:
4157 case BC_CLEAR_DEATH_NOTIFICATION: {
4158 uint32_t target;
4159 binder_uintptr_t cookie;
4160 struct binder_ref *ref;
4161 struct binder_ref_death *death = NULL;
4162
4163 if (get_user(target, (uint32_t __user *)ptr))
4164 return -EFAULT;
4165 ptr += sizeof(uint32_t);
4166 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4167 return -EFAULT;
4168 ptr += sizeof(binder_uintptr_t);
4169 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4170 /*
4171 * Allocate memory for death notification
4172 * before taking lock
4173 */
4174 death = kzalloc(sizeof(*death), GFP_KERNEL);
4175 if (death == NULL) {
4176 WARN_ON(thread->return_error.cmd !=
4177 BR_OK);
4178 thread->return_error.cmd = BR_ERROR;
4179 binder_enqueue_thread_work(
4180 thread,
4181 &thread->return_error.work);
4182 binder_debug(
4183 BINDER_DEBUG_FAILED_TRANSACTION,
4184 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4185 proc->pid, thread->pid);
4186 break;
4187 }
4188 }
4189 binder_proc_lock(proc);
4190 ref = binder_get_ref_olocked(proc, target, false);
4191 if (ref == NULL) {
4192 binder_user_error("%d:%d %s invalid ref %d\n",
4193 proc->pid, thread->pid,
4194 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4195 "BC_REQUEST_DEATH_NOTIFICATION" :
4196 "BC_CLEAR_DEATH_NOTIFICATION",
4197 target);
4198 binder_proc_unlock(proc);
4199 kfree(death);
4200 break;
4201 }
4202
4203 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4204 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4205 proc->pid, thread->pid,
4206 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4207 "BC_REQUEST_DEATH_NOTIFICATION" :
4208 "BC_CLEAR_DEATH_NOTIFICATION",
4209 (u64)cookie, ref->data.debug_id,
4210 ref->data.desc, ref->data.strong,
4211 ref->data.weak, ref->node->debug_id);
4212
4213 binder_node_lock(ref->node);
4214 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4215 if (ref->death) {
4216 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4217 proc->pid, thread->pid);
4218 binder_node_unlock(ref->node);
4219 binder_proc_unlock(proc);
4220 kfree(death);
4221 break;
4222 }
4223 binder_stats_created(BINDER_STAT_DEATH);
4224 INIT_LIST_HEAD(&death->work.entry);
4225 death->cookie = cookie;
4226 ref->death = death;
4227 if (ref->node->proc == NULL) {
4228 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4229
4230 binder_inner_proc_lock(proc);
4231 binder_enqueue_work_ilocked(
4232 &ref->death->work, &proc->todo);
4233 binder_wakeup_proc_ilocked(proc);
4234 binder_inner_proc_unlock(proc);
4235 }
4236 } else {
4237 if (ref->death == NULL) {
4238 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4239 proc->pid, thread->pid);
4240 binder_node_unlock(ref->node);
4241 binder_proc_unlock(proc);
4242 break;
4243 }
4244 death = ref->death;
4245 if (death->cookie != cookie) {
4246 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4247 proc->pid, thread->pid,
4248 (u64)death->cookie,
4249 (u64)cookie);
4250 binder_node_unlock(ref->node);
4251 binder_proc_unlock(proc);
4252 break;
4253 }
4254 ref->death = NULL;
4255 binder_inner_proc_lock(proc);
4256 if (list_empty(&death->work.entry)) {
4257 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4258 if (thread->looper &
4259 (BINDER_LOOPER_STATE_REGISTERED |
4260 BINDER_LOOPER_STATE_ENTERED))
4261 binder_enqueue_thread_work_ilocked(
4262 thread,
4263 &death->work);
4264 else {
4265 binder_enqueue_work_ilocked(
4266 &death->work,
4267 &proc->todo);
4268 binder_wakeup_proc_ilocked(
4269 proc);
4270 }
4271 } else {
4272 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4273 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4274 }
4275 binder_inner_proc_unlock(proc);
4276 }
4277 binder_node_unlock(ref->node);
4278 binder_proc_unlock(proc);
4279 } break;
4280 case BC_DEAD_BINDER_DONE: {
4281 struct binder_work *w;
4282 binder_uintptr_t cookie;
4283 struct binder_ref_death *death = NULL;
4284
4285 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4286 return -EFAULT;
4287
4288 ptr += sizeof(cookie);
4289 binder_inner_proc_lock(proc);
4290 list_for_each_entry(w, &proc->delivered_death,
4291 entry) {
4292 struct binder_ref_death *tmp_death =
4293 container_of(w,
4294 struct binder_ref_death,
4295 work);
4296
4297 if (tmp_death->cookie == cookie) {
4298 death = tmp_death;
4299 break;
4300 }
4301 }
4302 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4303 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4304 proc->pid, thread->pid, (u64)cookie,
4305 death);
4306 if (death == NULL) {
4307 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4308 proc->pid, thread->pid, (u64)cookie);
4309 binder_inner_proc_unlock(proc);
4310 break;
4311 }
4312 binder_dequeue_work_ilocked(&death->work);
4313 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4314 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4315 if (thread->looper &
4316 (BINDER_LOOPER_STATE_REGISTERED |
4317 BINDER_LOOPER_STATE_ENTERED))
4318 binder_enqueue_thread_work_ilocked(
4319 thread, &death->work);
4320 else {
4321 binder_enqueue_work_ilocked(
4322 &death->work,
4323 &proc->todo);
4324 binder_wakeup_proc_ilocked(proc);
4325 }
4326 }
4327 binder_inner_proc_unlock(proc);
4328 } break;
4329
4330 default:
4331 pr_err("%d:%d unknown command %u\n",
4332 proc->pid, thread->pid, cmd);
4333 return -EINVAL;
4334 }
4335 *consumed = ptr - buffer;
4336 }
4337 return 0;
4338}
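/*
 * Illustrative sketch (hypothetical userspace, not driver code): the
 * write buffer parsed above is a packed stream of [u32 cmd][payload]
 * records. A minimal looper thread announces itself with a single
 * payload-less command (binder_fd is a placeholder):
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(cmd),
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		perror("BINDER_WRITE_READ");
 *	// bwr.write_consumed reports how much of the stream was parsed
 */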
4339
4340static void binder_stat_br(struct binder_proc *proc,
4341 struct binder_thread *thread, uint32_t cmd)
4342{
4343 trace_binder_return(cmd);
4344 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4345 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4346 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4347 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4348 }
4349}
4350
4351static int binder_put_node_cmd(struct binder_proc *proc,
4352 struct binder_thread *thread,
4353 void __user **ptrp,
4354 binder_uintptr_t node_ptr,
4355 binder_uintptr_t node_cookie,
4356 int node_debug_id,
4357 uint32_t cmd, const char *cmd_name)
4358{
4359 void __user *ptr = *ptrp;
4360
4361 if (put_user(cmd, (uint32_t __user *)ptr))
4362 return -EFAULT;
4363 ptr += sizeof(uint32_t);
4364
4365 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4366 return -EFAULT;
4367 ptr += sizeof(binder_uintptr_t);
4368
4369 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4370 return -EFAULT;
4371 ptr += sizeof(binder_uintptr_t);
4372
4373 binder_stat_br(proc, thread, cmd);
4374 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4375 proc->pid, thread->pid, cmd_name, node_debug_id,
4376 (u64)node_ptr, (u64)node_cookie);
4377
4378 *ptrp = ptr;
4379 return 0;
4380}
4381
4382static int binder_wait_for_work(struct binder_thread *thread,
4383 bool do_proc_work)
4384{
4385 DEFINE_WAIT(wait);
4386 struct binder_proc *proc = thread->proc;
4387 int ret = 0;
4388
4389 binder_inner_proc_lock(proc);
4390 for (;;) {
4391 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4392 if (binder_has_work_ilocked(thread, do_proc_work))
4393 break;
4394 if (do_proc_work)
4395 list_add(&thread->waiting_thread_node,
4396 &proc->waiting_threads);
4397 binder_inner_proc_unlock(proc);
4398 schedule();
4399 binder_inner_proc_lock(proc);
4400 list_del_init(&thread->waiting_thread_node);
4401 if (signal_pending(current)) {
4402 ret = -EINTR;
4403 break;
4404 }
4405 }
4406 finish_wait(&thread->wait, &wait);
4407 binder_inner_proc_unlock(proc);
4408
4409 return ret;
4410}
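/*
 * Note: the loop above is the standard open-coded wait pattern.
 * prepare_to_wait() publishes the waiter before the work check runs
 * again under the lock, so a wakeup racing with the check cannot be
 * lost, and TASK_FREEZABLE lets the freezer treat the sleep as safe.
 */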
4411
4412/**
4413 * binder_apply_fd_fixups() - finish fd translation
4414 * @proc:	binder_proc associated with @t->buffer
4415 * @t: binder transaction with list of fd fixups
4416 *
4417 * Now that we are in the context of the transaction target
4418 * process, we can allocate and install fds. Process the
4419 * list of fds to translate and fix up the buffer with the
4420 * new fds first, and only then install the files.
4421 *
4422 * If we fail to allocate an fd, skip the install and release
4423 * any fds that have already been allocated.
4424 */
4425static int binder_apply_fd_fixups(struct binder_proc *proc,
4426 struct binder_transaction *t)
4427{
4428 struct binder_txn_fd_fixup *fixup, *tmp;
4429 int ret = 0;
4430
4431 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4432 int fd = get_unused_fd_flags(O_CLOEXEC);
4433
4434 if (fd < 0) {
4435 binder_debug(BINDER_DEBUG_TRANSACTION,
4436 "failed fd fixup txn %d fd %d\n",
4437 t->debug_id, fd);
4438 ret = -ENOMEM;
4439 goto err;
4440 }
4441 binder_debug(BINDER_DEBUG_TRANSACTION,
4442 "fd fixup txn %d fd %d\n",
4443 t->debug_id, fd);
4444 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4445 fixup->target_fd = fd;
4446 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4447 fixup->offset, &fd,
4448 sizeof(u32))) {
4449 ret = -EINVAL;
4450 goto err;
4451 }
4452 }
4453 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4454 fd_install(fixup->target_fd, fixup->file);
4455 list_del(&fixup->fixup_entry);
4456 kfree(fixup);
4457 }
4458
4459 return ret;
4460
4461err:
4462 binder_free_txn_fixups(t);
4463 return ret;
4464}
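/*
 * Design note: descriptors are reserved and patched into the buffer in
 * a first pass, and fd_install()ed only once every fixup has succeeded,
 * because fd_install() cannot be undone -- after it runs, userspace may
 * already see (and race on) the new descriptor.
 */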
4465
4466static int binder_thread_read(struct binder_proc *proc,
4467 struct binder_thread *thread,
4468 binder_uintptr_t binder_buffer, size_t size,
4469 binder_size_t *consumed, int non_block)
4470{
4471 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4472 void __user *ptr = buffer + *consumed;
4473 void __user *end = buffer + size;
4474
4475 int ret = 0;
4476 int wait_for_proc_work;
4477
4478 if (*consumed == 0) {
4479 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4480 return -EFAULT;
4481 ptr += sizeof(uint32_t);
4482 }
4483
4484retry:
4485 binder_inner_proc_lock(proc);
4486 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4487 binder_inner_proc_unlock(proc);
4488
4489 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4490
4491 trace_binder_wait_for_work(wait_for_proc_work,
4492 !!thread->transaction_stack,
4493 !binder_worklist_empty(proc, &thread->todo));
4494 if (wait_for_proc_work) {
4495 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4496 BINDER_LOOPER_STATE_ENTERED))) {
4497 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4498 proc->pid, thread->pid, thread->looper);
4499 wait_event_interruptible(binder_user_error_wait,
4500 binder_stop_on_user_error < 2);
4501 }
4502 binder_set_nice(proc->default_priority);
4503 }
4504
4505 if (non_block) {
4506 if (!binder_has_work(thread, wait_for_proc_work))
4507 ret = -EAGAIN;
4508 } else {
4509 ret = binder_wait_for_work(thread, wait_for_proc_work);
4510 }
4511
4512 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4513
4514 if (ret)
4515 return ret;
4516
4517 while (1) {
4518 uint32_t cmd;
4519 struct binder_transaction_data_secctx tr;
4520 struct binder_transaction_data *trd = &tr.transaction_data;
4521 struct binder_work *w = NULL;
4522 struct list_head *list = NULL;
4523 struct binder_transaction *t = NULL;
4524 struct binder_thread *t_from;
4525 size_t trsize = sizeof(*trd);
4526
4527 binder_inner_proc_lock(proc);
4528 if (!binder_worklist_empty_ilocked(&thread->todo))
4529 list = &thread->todo;
4530 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4531 wait_for_proc_work)
4532 list = &proc->todo;
4533 else {
4534 binder_inner_proc_unlock(proc);
4535
4536 /* no data added */
4537 if (ptr - buffer == 4 && !thread->looper_need_return)
4538 goto retry;
4539 break;
4540 }
4541
4542 if (end - ptr < sizeof(tr) + 4) {
4543 binder_inner_proc_unlock(proc);
4544 break;
4545 }
4546 w = binder_dequeue_work_head_ilocked(list);
4547 if (binder_worklist_empty_ilocked(&thread->todo))
4548 thread->process_todo = false;
4549
4550 switch (w->type) {
4551 case BINDER_WORK_TRANSACTION: {
4552 binder_inner_proc_unlock(proc);
4553 t = container_of(w, struct binder_transaction, work);
4554 } break;
4555 case BINDER_WORK_RETURN_ERROR: {
4556 struct binder_error *e = container_of(
4557 w, struct binder_error, work);
4558
4559 WARN_ON(e->cmd == BR_OK);
4560 binder_inner_proc_unlock(proc);
4561 if (put_user(e->cmd, (uint32_t __user *)ptr))
4562 return -EFAULT;
4563 cmd = e->cmd;
4564 e->cmd = BR_OK;
4565 ptr += sizeof(uint32_t);
4566
4567 binder_stat_br(proc, thread, cmd);
4568 } break;
4569 case BINDER_WORK_TRANSACTION_COMPLETE:
4570 case BINDER_WORK_TRANSACTION_PENDING:
4571 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4572 if (proc->oneway_spam_detection_enabled &&
4573 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4574 cmd = BR_ONEWAY_SPAM_SUSPECT;
4575 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4576 cmd = BR_TRANSACTION_PENDING_FROZEN;
4577 else
4578 cmd = BR_TRANSACTION_COMPLETE;
4579 binder_inner_proc_unlock(proc);
4580 kfree(w);
4581 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4582 if (put_user(cmd, (uint32_t __user *)ptr))
4583 return -EFAULT;
4584 ptr += sizeof(uint32_t);
4585
4586 binder_stat_br(proc, thread, cmd);
4587 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4588 "%d:%d BR_TRANSACTION_COMPLETE\n",
4589 proc->pid, thread->pid);
4590 } break;
4591 case BINDER_WORK_NODE: {
4592 struct binder_node *node = container_of(w, struct binder_node, work);
4593 int strong, weak;
4594 binder_uintptr_t node_ptr = node->ptr;
4595 binder_uintptr_t node_cookie = node->cookie;
4596 int node_debug_id = node->debug_id;
4597 int has_weak_ref;
4598 int has_strong_ref;
4599 void __user *orig_ptr = ptr;
4600
4601 BUG_ON(proc != node->proc);
4602 strong = node->internal_strong_refs ||
4603 node->local_strong_refs;
4604 weak = !hlist_empty(&node->refs) ||
4605 node->local_weak_refs ||
4606 node->tmp_refs || strong;
4607 has_strong_ref = node->has_strong_ref;
4608 has_weak_ref = node->has_weak_ref;
4609
4610 if (weak && !has_weak_ref) {
4611 node->has_weak_ref = 1;
4612 node->pending_weak_ref = 1;
4613 node->local_weak_refs++;
4614 }
4615 if (strong && !has_strong_ref) {
4616 node->has_strong_ref = 1;
4617 node->pending_strong_ref = 1;
4618 node->local_strong_refs++;
4619 }
4620 if (!strong && has_strong_ref)
4621 node->has_strong_ref = 0;
4622 if (!weak && has_weak_ref)
4623 node->has_weak_ref = 0;
4624 if (!weak && !strong) {
4625 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4626 "%d:%d node %d u%016llx c%016llx deleted\n",
4627 proc->pid, thread->pid,
4628 node_debug_id,
4629 (u64)node_ptr,
4630 (u64)node_cookie);
4631 rb_erase(&node->rb_node, &proc->nodes);
4632 binder_inner_proc_unlock(proc);
4633 binder_node_lock(node);
4634 /*
4635 * Acquire the node lock before freeing the
4636 * node to serialize with other threads that
4637 * may have been holding the node lock while
4638 * decrementing this node (avoids race where
4639 * this thread frees while the other thread
4640 * is unlocking the node after the final
4641 * decrement)
4642 */
4643 binder_node_unlock(node);
4644 binder_free_node(node);
4645 } else
4646 binder_inner_proc_unlock(proc);
4647
4648 if (weak && !has_weak_ref)
4649 ret = binder_put_node_cmd(
4650 proc, thread, &ptr, node_ptr,
4651 node_cookie, node_debug_id,
4652 BR_INCREFS, "BR_INCREFS");
4653 if (!ret && strong && !has_strong_ref)
4654 ret = binder_put_node_cmd(
4655 proc, thread, &ptr, node_ptr,
4656 node_cookie, node_debug_id,
4657 BR_ACQUIRE, "BR_ACQUIRE");
4658 if (!ret && !strong && has_strong_ref)
4659 ret = binder_put_node_cmd(
4660 proc, thread, &ptr, node_ptr,
4661 node_cookie, node_debug_id,
4662 BR_RELEASE, "BR_RELEASE");
4663 if (!ret && !weak && has_weak_ref)
4664 ret = binder_put_node_cmd(
4665 proc, thread, &ptr, node_ptr,
4666 node_cookie, node_debug_id,
4667 BR_DECREFS, "BR_DECREFS");
4668 if (orig_ptr == ptr)
4669 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4670 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4671 proc->pid, thread->pid,
4672 node_debug_id,
4673 (u64)node_ptr,
4674 (u64)node_cookie);
4675 if (ret)
4676 return ret;
4677 } break;
4678 case BINDER_WORK_DEAD_BINDER:
4679 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4680 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4681 struct binder_ref_death *death;
4682 uint32_t cmd;
4683 binder_uintptr_t cookie;
4684
4685 death = container_of(w, struct binder_ref_death, work);
4686 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4687 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4688 else
4689 cmd = BR_DEAD_BINDER;
4690 cookie = death->cookie;
4691
4692 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4693 "%d:%d %s %016llx\n",
4694 proc->pid, thread->pid,
4695 cmd == BR_DEAD_BINDER ?
4696 "BR_DEAD_BINDER" :
4697 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4698 (u64)cookie);
4699 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4700 binder_inner_proc_unlock(proc);
4701 kfree(death);
4702 binder_stats_deleted(BINDER_STAT_DEATH);
4703 } else {
4704 binder_enqueue_work_ilocked(
4705 w, &proc->delivered_death);
4706 binder_inner_proc_unlock(proc);
4707 }
4708 if (put_user(cmd, (uint32_t __user *)ptr))
4709 return -EFAULT;
4710 ptr += sizeof(uint32_t);
4711 if (put_user(cookie,
4712 (binder_uintptr_t __user *)ptr))
4713 return -EFAULT;
4714 ptr += sizeof(binder_uintptr_t);
4715 binder_stat_br(proc, thread, cmd);
4716 if (cmd == BR_DEAD_BINDER)
4717 goto done; /* DEAD_BINDER notifications can cause transactions */
4718 } break;
4719 default:
4720 binder_inner_proc_unlock(proc);
4721 pr_err("%d:%d: bad work type %d\n",
4722 proc->pid, thread->pid, w->type);
4723 break;
4724 }
4725
4726 if (!t)
4727 continue;
4728
4729 BUG_ON(t->buffer == NULL);
4730 if (t->buffer->target_node) {
4731 struct binder_node *target_node = t->buffer->target_node;
4732
4733 trd->target.ptr = target_node->ptr;
4734 trd->cookie = target_node->cookie;
4735 t->saved_priority = task_nice(current);
4736 if (t->priority < target_node->min_priority &&
4737 !(t->flags & TF_ONE_WAY))
4738 binder_set_nice(t->priority);
4739 else if (!(t->flags & TF_ONE_WAY) ||
4740 t->saved_priority > target_node->min_priority)
4741 binder_set_nice(target_node->min_priority);
4742 cmd = BR_TRANSACTION;
4743 } else {
4744 trd->target.ptr = 0;
4745 trd->cookie = 0;
4746 cmd = BR_REPLY;
4747 }
4748 trd->code = t->code;
4749 trd->flags = t->flags;
4750 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4751
4752 t_from = binder_get_txn_from(t);
4753 if (t_from) {
4754 struct task_struct *sender = t_from->proc->tsk;
4755
4756 trd->sender_pid =
4757 task_tgid_nr_ns(sender,
4758 task_active_pid_ns(current));
4759 } else {
4760 trd->sender_pid = 0;
4761 }
4762
4763 ret = binder_apply_fd_fixups(proc, t);
4764 if (ret) {
4765 struct binder_buffer *buffer = t->buffer;
4766 bool oneway = !!(t->flags & TF_ONE_WAY);
4767 int tid = t->debug_id;
4768
4769 if (t_from)
4770 binder_thread_dec_tmpref(t_from);
4771 buffer->transaction = NULL;
4772 binder_cleanup_transaction(t, "fd fixups failed",
4773 BR_FAILED_REPLY);
4774 binder_free_buf(proc, thread, buffer, true);
4775 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4776 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4777 proc->pid, thread->pid,
4778 oneway ? "async " :
4779 (cmd == BR_REPLY ? "reply " : ""),
4780 tid, BR_FAILED_REPLY, ret, __LINE__);
4781 if (cmd == BR_REPLY) {
4782 cmd = BR_FAILED_REPLY;
4783 if (put_user(cmd, (uint32_t __user *)ptr))
4784 return -EFAULT;
4785 ptr += sizeof(uint32_t);
4786 binder_stat_br(proc, thread, cmd);
4787 break;
4788 }
4789 continue;
4790 }
4791 trd->data_size = t->buffer->data_size;
4792 trd->offsets_size = t->buffer->offsets_size;
4793 trd->data.ptr.buffer = t->buffer->user_data;
4794 trd->data.ptr.offsets = trd->data.ptr.buffer +
4795 ALIGN(t->buffer->data_size,
4796 sizeof(void *));
4797
4798 tr.secctx = t->security_ctx;
4799 if (t->security_ctx) {
4800 cmd = BR_TRANSACTION_SEC_CTX;
4801 trsize = sizeof(tr);
4802 }
4803 if (put_user(cmd, (uint32_t __user *)ptr)) {
4804 if (t_from)
4805 binder_thread_dec_tmpref(t_from);
4806
4807 binder_cleanup_transaction(t, "put_user failed",
4808 BR_FAILED_REPLY);
4809
4810 return -EFAULT;
4811 }
4812 ptr += sizeof(uint32_t);
4813 if (copy_to_user(ptr, &tr, trsize)) {
4814 if (t_from)
4815 binder_thread_dec_tmpref(t_from);
4816
4817 binder_cleanup_transaction(t, "copy_to_user failed",
4818 BR_FAILED_REPLY);
4819
4820 return -EFAULT;
4821 }
4822 ptr += trsize;
4823
4824 trace_binder_transaction_received(t);
4825 binder_stat_br(proc, thread, cmd);
4826 binder_debug(BINDER_DEBUG_TRANSACTION,
4827 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4828 proc->pid, thread->pid,
4829 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4830 (cmd == BR_TRANSACTION_SEC_CTX) ?
4831 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4832 t->debug_id, t_from ? t_from->proc->pid : 0,
4833 t_from ? t_from->pid : 0, cmd,
4834 t->buffer->data_size, t->buffer->offsets_size,
4835 (u64)trd->data.ptr.buffer,
4836 (u64)trd->data.ptr.offsets);
4837
4838 if (t_from)
4839 binder_thread_dec_tmpref(t_from);
4840 t->buffer->allow_user_free = 1;
4841 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4842 binder_inner_proc_lock(thread->proc);
4843 t->to_parent = thread->transaction_stack;
4844 t->to_thread = thread;
4845 thread->transaction_stack = t;
4846 binder_inner_proc_unlock(thread->proc);
4847 } else {
4848 binder_free_transaction(t);
4849 }
4850 break;
4851 }
4852
4853done:
4854
4855 *consumed = ptr - buffer;
4856 binder_inner_proc_lock(proc);
4857 if (proc->requested_threads == 0 &&
4858 list_empty(&thread->proc->waiting_threads) &&
4859 proc->requested_threads_started < proc->max_threads &&
4860 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4861	     BINDER_LOOPER_STATE_ENTERED))
4862	     /* the user-space code fails to spawn a new thread if we leave this out */) {
4863 proc->requested_threads++;
4864 binder_inner_proc_unlock(proc);
4865 binder_debug(BINDER_DEBUG_THREADS,
4866 "%d:%d BR_SPAWN_LOOPER\n",
4867 proc->pid, thread->pid);
4868 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4869 return -EFAULT;
4870 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4871 } else
4872 binder_inner_proc_unlock(proc);
4873 return 0;
4874}
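/*
 * Illustrative sketch (hypothetical userspace, not driver code): the
 * read buffer filled above is parsed as a [u32 cmd][payload] stream,
 * mirroring the write side (binder_fd is a placeholder):
 *
 *	char buf[256];
 *	struct binder_write_read bwr = {
 *		.read_size = sizeof(buf),
 *		.read_buffer = (binder_uintptr_t)buf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	for (char *p = buf; p < buf + bwr.read_consumed;) {
 *		uint32_t cmd;
 *
 *		memcpy(&cmd, p, sizeof(cmd));
 *		p += sizeof(cmd);
 *		// BR_NOOP has no payload; BR_TRANSACTION is followed by
 *		// a struct binder_transaction_data, and so on
 *	}
 */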
4875
4876static void binder_release_work(struct binder_proc *proc,
4877 struct list_head *list)
4878{
4879 struct binder_work *w;
4880 enum binder_work_type wtype;
4881
4882 while (1) {
4883 binder_inner_proc_lock(proc);
4884 w = binder_dequeue_work_head_ilocked(list);
4885 wtype = w ? w->type : 0;
4886 binder_inner_proc_unlock(proc);
4887 if (!w)
4888 return;
4889
4890 switch (wtype) {
4891 case BINDER_WORK_TRANSACTION: {
4892 struct binder_transaction *t;
4893
4894 t = container_of(w, struct binder_transaction, work);
4895
4896 binder_cleanup_transaction(t, "process died.",
4897 BR_DEAD_REPLY);
4898 } break;
4899 case BINDER_WORK_RETURN_ERROR: {
4900 struct binder_error *e = container_of(
4901 w, struct binder_error, work);
4902
4903 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4904 "undelivered TRANSACTION_ERROR: %u\n",
4905 e->cmd);
4906 } break;
4907 case BINDER_WORK_TRANSACTION_PENDING:
4908 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
4909 case BINDER_WORK_TRANSACTION_COMPLETE: {
4910 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4911 "undelivered TRANSACTION_COMPLETE\n");
4912 kfree(w);
4913 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4914 } break;
4915 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4916 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4917 struct binder_ref_death *death;
4918
4919 death = container_of(w, struct binder_ref_death, work);
4920 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4921 "undelivered death notification, %016llx\n",
4922 (u64)death->cookie);
4923 kfree(death);
4924 binder_stats_deleted(BINDER_STAT_DEATH);
4925 } break;
4926 case BINDER_WORK_NODE:
4927 break;
4928 default:
4929 pr_err("unexpected work type, %d, not freed\n",
4930 wtype);
4931 break;
4932 }
4933 }
4934
4935}
4936
4937static struct binder_thread *binder_get_thread_ilocked(
4938 struct binder_proc *proc, struct binder_thread *new_thread)
4939{
4940 struct binder_thread *thread = NULL;
4941 struct rb_node *parent = NULL;
4942 struct rb_node **p = &proc->threads.rb_node;
4943
4944 while (*p) {
4945 parent = *p;
4946 thread = rb_entry(parent, struct binder_thread, rb_node);
4947
4948 if (current->pid < thread->pid)
4949 p = &(*p)->rb_left;
4950 else if (current->pid > thread->pid)
4951 p = &(*p)->rb_right;
4952 else
4953 return thread;
4954 }
4955 if (!new_thread)
4956 return NULL;
4957 thread = new_thread;
4958 binder_stats_created(BINDER_STAT_THREAD);
4959 thread->proc = proc;
4960 thread->pid = current->pid;
4961 atomic_set(&thread->tmp_ref, 0);
4962 init_waitqueue_head(&thread->wait);
4963 INIT_LIST_HEAD(&thread->todo);
4964 rb_link_node(&thread->rb_node, parent, p);
4965 rb_insert_color(&thread->rb_node, &proc->threads);
4966 thread->looper_need_return = true;
4967 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4968 thread->return_error.cmd = BR_OK;
4969 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4970 thread->reply_error.cmd = BR_OK;
4971 thread->ee.command = BR_OK;
4972 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4973 return thread;
4974}
4975
4976static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4977{
4978 struct binder_thread *thread;
4979 struct binder_thread *new_thread;
4980
4981 binder_inner_proc_lock(proc);
4982 thread = binder_get_thread_ilocked(proc, NULL);
4983 binder_inner_proc_unlock(proc);
4984 if (!thread) {
4985 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4986 if (new_thread == NULL)
4987 return NULL;
4988 binder_inner_proc_lock(proc);
4989 thread = binder_get_thread_ilocked(proc, new_thread);
4990 binder_inner_proc_unlock(proc);
4991 if (thread != new_thread)
4992 kfree(new_thread);
4993 }
4994 return thread;
4995}
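/*
 * Design note: binder_get_thread() allocates the candidate thread with
 * GFP_KERNEL outside the inner lock and then retries the lookup under
 * it, freeing the allocation if another thread won the race -- the
 * usual pattern when a structure cannot be allocated while holding a
 * spinlock.
 */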
4996
4997static void binder_free_proc(struct binder_proc *proc)
4998{
4999 struct binder_device *device;
5000
5001 BUG_ON(!list_empty(&proc->todo));
5002 BUG_ON(!list_empty(&proc->delivered_death));
5003 if (proc->outstanding_txns)
5004 pr_warn("%s: Unexpected outstanding_txns %d\n",
5005 __func__, proc->outstanding_txns);
5006 device = container_of(proc->context, struct binder_device, context);
5007 if (refcount_dec_and_test(&device->ref)) {
5008 kfree(proc->context->name);
5009 kfree(device);
5010 }
5011 binder_alloc_deferred_release(&proc->alloc);
5012 put_task_struct(proc->tsk);
5013 put_cred(proc->cred);
5014 binder_stats_deleted(BINDER_STAT_PROC);
5015 dbitmap_free(&proc->dmap);
5016 kfree(proc);
5017}
5018
5019static void binder_free_thread(struct binder_thread *thread)
5020{
5021 BUG_ON(!list_empty(&thread->todo));
5022 binder_stats_deleted(BINDER_STAT_THREAD);
5023 binder_proc_dec_tmpref(thread->proc);
5024 kfree(thread);
5025}
5026
5027static int binder_thread_release(struct binder_proc *proc,
5028 struct binder_thread *thread)
5029{
5030 struct binder_transaction *t;
5031 struct binder_transaction *send_reply = NULL;
5032 int active_transactions = 0;
5033 struct binder_transaction *last_t = NULL;
5034
5035 binder_inner_proc_lock(thread->proc);
5036 /*
5037 * take a ref on the proc so it survives
5038 * after we remove this thread from proc->threads.
5039 * The corresponding dec is when we actually
5040 * free the thread in binder_free_thread()
5041 */
5042 proc->tmp_ref++;
5043 /*
5044 * take a ref on this thread to ensure it
5045 * survives while we are releasing it
5046 */
5047 atomic_inc(&thread->tmp_ref);
5048 rb_erase(&thread->rb_node, &proc->threads);
5049 t = thread->transaction_stack;
5050 if (t) {
5051 spin_lock(&t->lock);
5052 if (t->to_thread == thread)
5053 send_reply = t;
5054 } else {
5055 __acquire(&t->lock);
5056 }
5057 thread->is_dead = true;
5058
5059 while (t) {
5060 last_t = t;
5061 active_transactions++;
5062 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5063 "release %d:%d transaction %d %s, still active\n",
5064 proc->pid, thread->pid,
5065 t->debug_id,
5066 (t->to_thread == thread) ? "in" : "out");
5067
5068 if (t->to_thread == thread) {
5069 thread->proc->outstanding_txns--;
5070 t->to_proc = NULL;
5071 t->to_thread = NULL;
5072 if (t->buffer) {
5073 t->buffer->transaction = NULL;
5074 t->buffer = NULL;
5075 }
5076 t = t->to_parent;
5077 } else if (t->from == thread) {
5078 t->from = NULL;
5079 t = t->from_parent;
5080 } else
5081 BUG();
5082 spin_unlock(&last_t->lock);
5083 if (t)
5084 spin_lock(&t->lock);
5085 else
5086 __acquire(&t->lock);
5087 }
5088 /* annotation for sparse, lock not acquired in last iteration above */
5089 __release(&t->lock);
5090
5091 /*
5092 * If this thread used poll, make sure we remove the waitqueue from any
5093 * poll data structures holding it.
5094 */
5095 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5096 wake_up_pollfree(&thread->wait);
5097
5098 binder_inner_proc_unlock(thread->proc);
5099
5100 /*
5101 * This is needed to avoid races between wake_up_pollfree() above and
5102 * someone else removing the last entry from the queue for other reasons
5103 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5104 * descriptor being closed). Such other users hold an RCU read lock, so
5105 * we can be sure they're done after we call synchronize_rcu().
5106 */
5107 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5108 synchronize_rcu();
5109
5110 if (send_reply)
5111 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5112 binder_release_work(proc, &thread->todo);
5113 binder_thread_dec_tmpref(thread);
5114 return active_transactions;
5115}
5116
5117static __poll_t binder_poll(struct file *filp,
5118 struct poll_table_struct *wait)
5119{
5120 struct binder_proc *proc = filp->private_data;
5121 struct binder_thread *thread = NULL;
5122 bool wait_for_proc_work;
5123
5124 thread = binder_get_thread(proc);
5125 if (!thread)
5126 return EPOLLERR;
5127
5128 binder_inner_proc_lock(thread->proc);
5129 thread->looper |= BINDER_LOOPER_STATE_POLL;
5130 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5131
5132 binder_inner_proc_unlock(thread->proc);
5133
5134 poll_wait(filp, &thread->wait, wait);
5135
5136 if (binder_has_work(thread, wait_for_proc_work))
5137 return EPOLLIN;
5138
5139 return 0;
5140}
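/*
 * Illustrative sketch (hypothetical userspace, not driver code): the
 * poll hook above makes the fd usable with epoll; EPOLLIN means a
 * BINDER_WRITE_READ with a nonzero read_size should not block
 * (binder_fd and epfd are placeholders):
 *
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, binder_fd, &ev);
 *	if (epoll_wait(epfd, &ev, 1, -1) == 1)
 *		; // drain work via BINDER_WRITE_READ
 */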
5141
5142static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5143 struct binder_thread *thread)
5144{
5145 int ret = 0;
5146 struct binder_proc *proc = filp->private_data;
5147 void __user *ubuf = (void __user *)arg;
5148 struct binder_write_read bwr;
5149
5150 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5151 ret = -EFAULT;
5152 goto out;
5153 }
5154 binder_debug(BINDER_DEBUG_READ_WRITE,
5155 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5156 proc->pid, thread->pid,
5157 (u64)bwr.write_size, (u64)bwr.write_buffer,
5158 (u64)bwr.read_size, (u64)bwr.read_buffer);
5159
5160 if (bwr.write_size > 0) {
5161 ret = binder_thread_write(proc, thread,
5162 bwr.write_buffer,
5163 bwr.write_size,
5164 &bwr.write_consumed);
5165 trace_binder_write_done(ret);
5166 if (ret < 0) {
5167 bwr.read_consumed = 0;
5168 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5169 ret = -EFAULT;
5170 goto out;
5171 }
5172 }
5173 if (bwr.read_size > 0) {
5174 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5175 bwr.read_size,
5176 &bwr.read_consumed,
5177 filp->f_flags & O_NONBLOCK);
5178 trace_binder_read_done(ret);
5179 binder_inner_proc_lock(proc);
5180 if (!binder_worklist_empty_ilocked(&proc->todo))
5181 binder_wakeup_proc_ilocked(proc);
5182 binder_inner_proc_unlock(proc);
5183 if (ret < 0) {
5184 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5185 ret = -EFAULT;
5186 goto out;
5187 }
5188 }
5189 binder_debug(BINDER_DEBUG_READ_WRITE,
5190 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5191 proc->pid, thread->pid,
5192 (u64)bwr.write_consumed, (u64)bwr.write_size,
5193 (u64)bwr.read_consumed, (u64)bwr.read_size);
5194 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5195 ret = -EFAULT;
5196 goto out;
5197 }
5198out:
5199 return ret;
5200}
5201
5202static int binder_ioctl_set_ctx_mgr(struct file *filp,
5203 struct flat_binder_object *fbo)
5204{
5205 int ret = 0;
5206 struct binder_proc *proc = filp->private_data;
5207 struct binder_context *context = proc->context;
5208 struct binder_node *new_node;
5209 kuid_t curr_euid = current_euid();
5210
5211 mutex_lock(&context->context_mgr_node_lock);
5212 if (context->binder_context_mgr_node) {
5213 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5214 ret = -EBUSY;
5215 goto out;
5216 }
5217 ret = security_binder_set_context_mgr(proc->cred);
5218 if (ret < 0)
5219 goto out;
5220 if (uid_valid(context->binder_context_mgr_uid)) {
5221 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5222 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5223 from_kuid(&init_user_ns, curr_euid),
5224 from_kuid(&init_user_ns,
5225 context->binder_context_mgr_uid));
5226 ret = -EPERM;
5227 goto out;
5228 }
5229 } else {
5230 context->binder_context_mgr_uid = curr_euid;
5231 }
5232 new_node = binder_new_node(proc, fbo);
5233 if (!new_node) {
5234 ret = -ENOMEM;
5235 goto out;
5236 }
5237 binder_node_lock(new_node);
5238 new_node->local_weak_refs++;
5239 new_node->local_strong_refs++;
5240 new_node->has_strong_ref = 1;
5241 new_node->has_weak_ref = 1;
5242 context->binder_context_mgr_node = new_node;
5243 binder_node_unlock(new_node);
5244 binder_put_node(new_node);
5245out:
5246 mutex_unlock(&context->context_mgr_node_lock);
5247 return ret;
5248}
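/*
 * Illustrative sketch (hypothetical userspace, not driver code): a
 * context manager (e.g. servicemanager) registers itself exactly once,
 * either plainly or via the _EXT variant that supplies a
 * flat_binder_object (binder_fd is a placeholder):
 *
 *	ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
 *
 *	struct flat_binder_object fbo = { .flags = 0 };
 *
 *	ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &fbo);
 */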
5249
5250static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5251 struct binder_node_info_for_ref *info)
5252{
5253 struct binder_node *node;
5254 struct binder_context *context = proc->context;
5255 __u32 handle = info->handle;
5256
5257 if (info->strong_count || info->weak_count || info->reserved1 ||
5258 info->reserved2 || info->reserved3) {
5259 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5260 proc->pid);
5261 return -EINVAL;
5262 }
5263
5264 /* This ioctl may only be used by the context manager */
5265 mutex_lock(&context->context_mgr_node_lock);
5266 if (!context->binder_context_mgr_node ||
5267 context->binder_context_mgr_node->proc != proc) {
5268 mutex_unlock(&context->context_mgr_node_lock);
5269 return -EPERM;
5270 }
5271 mutex_unlock(&context->context_mgr_node_lock);
5272
5273 node = binder_get_node_from_ref(proc, handle, true, NULL);
5274 if (!node)
5275 return -EINVAL;
5276
5277 info->strong_count = node->local_strong_refs +
5278 node->internal_strong_refs;
5279 info->weak_count = node->local_weak_refs;
5280
5281 binder_put_node(node);
5282
5283 return 0;
5284}
5285
5286static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5287 struct binder_node_debug_info *info)
5288{
5289 struct rb_node *n;
5290 binder_uintptr_t ptr = info->ptr;
5291
5292 memset(info, 0, sizeof(*info));
5293
5294 binder_inner_proc_lock(proc);
5295 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5296 struct binder_node *node = rb_entry(n, struct binder_node,
5297 rb_node);
5298 if (node->ptr > ptr) {
5299 info->ptr = node->ptr;
5300 info->cookie = node->cookie;
5301 info->has_strong_ref = node->has_strong_ref;
5302 info->has_weak_ref = node->has_weak_ref;
5303 break;
5304 }
5305 }
5306 binder_inner_proc_unlock(proc);
5307
5308 return 0;
5309}
5310
5311static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5312{
5313 struct rb_node *n;
5314 struct binder_thread *thread;
5315
5316 if (proc->outstanding_txns > 0)
5317 return true;
5318
5319 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5320 thread = rb_entry(n, struct binder_thread, rb_node);
5321 if (thread->transaction_stack)
5322 return true;
5323 }
5324 return false;
5325}
5326
5327static int binder_ioctl_freeze(struct binder_freeze_info *info,
5328 struct binder_proc *target_proc)
5329{
5330 int ret = 0;
5331
5332 if (!info->enable) {
5333 binder_inner_proc_lock(target_proc);
5334 target_proc->sync_recv = false;
5335 target_proc->async_recv = false;
5336 target_proc->is_frozen = false;
5337 binder_inner_proc_unlock(target_proc);
5338 return 0;
5339 }
5340
5341 /*
5342 * Freezing the target. Prevent new transactions by
5343 * setting frozen state. If timeout specified, wait
5344 * for transactions to drain.
5345 */
5346 binder_inner_proc_lock(target_proc);
5347 target_proc->sync_recv = false;
5348 target_proc->async_recv = false;
5349 target_proc->is_frozen = true;
5350 binder_inner_proc_unlock(target_proc);
5351
5352 if (info->timeout_ms > 0)
5353 ret = wait_event_interruptible_timeout(
5354 target_proc->freeze_wait,
5355 (!target_proc->outstanding_txns),
5356 msecs_to_jiffies(info->timeout_ms));
5357
5358 /* Check pending transactions that wait for reply */
5359 if (ret >= 0) {
5360 binder_inner_proc_lock(target_proc);
5361 if (binder_txns_pending_ilocked(target_proc))
5362 ret = -EAGAIN;
5363 binder_inner_proc_unlock(target_proc);
5364 }
5365
5366 if (ret < 0) {
5367 binder_inner_proc_lock(target_proc);
5368 target_proc->is_frozen = false;
5369 binder_inner_proc_unlock(target_proc);
5370 }
5371
5372 return ret;
5373}
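/*
 * Illustrative sketch (hypothetical userspace, not driver code):
 * freezing a target with a drain timeout and thawing it again
 * (binder_fd and target_pid are placeholders):
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *
 *	if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN)
 *		; // transactions still pending: retry or give up
 *	info.enable = 0;
 *	ioctl(binder_fd, BINDER_FREEZE, &info);	// thaw
 */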

static int binder_ioctl_get_freezer_info(
		struct binder_frozen_status_info *info)
{
	struct binder_proc *target_proc;
	bool found = false;
	__u32 txns_pending;

	info->sync_recv = 0;
	info->async_recv = 0;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
		if (target_proc->pid == info->pid) {
			found = true;
			binder_inner_proc_lock(target_proc);
			txns_pending = binder_txns_pending_ilocked(target_proc);
			info->sync_recv |= target_proc->sync_recv |
					(txns_pending << 1);
			info->async_recv |= target_proc->async_recv;
			binder_inner_proc_unlock(target_proc);
		}
	}
	mutex_unlock(&binder_procs_lock);

	if (!found)
		return -EINVAL;

	return 0;
}
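
/*
 * Illustrative userspace sketch (an assumption, not part of the
 * driver): sync_recv packs two flags, bit 0 (a sync transaction was
 * rejected while frozen) and bit 1 (transactions still pending), per
 * the (txns_pending << 1) above:
 *
 *	struct binder_frozen_status_info info = { .pid = pid };
 *
 *	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info) == 0) {
 *		bool sync_recv = info.sync_recv & 1;
 *		bool txns_pending = info.sync_recv & 2;
 *		bool async_recv = info.async_recv;
 *	}
 */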

static int binder_ioctl_get_extended_error(struct binder_thread *thread,
					   void __user *ubuf)
{
	struct binder_extended_error ee;

	binder_inner_proc_lock(thread->proc);
	ee = thread->ee;
	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
	binder_inner_proc_unlock(thread->proc);

	if (copy_to_user(ubuf, &ee, sizeof(ee)))
		return -EFAULT;

	return 0;
}
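
/*
 * Illustrative userspace sketch (an assumption, not part of the
 * driver): a thread fetches, and thereby clears, its own extended
 * error after a failed operation:
 *
 *	struct binder_extended_error ee;
 *
 *	if (ioctl(binder_fd, BINDER_GET_EXTENDED_ERROR, &ee) == 0)
 *		fprintf(stderr, "txn %u: cmd %u param %d\n",
 *			ee.id, ee.command, ee.param);
 */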

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		u32 max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_FREEZE: {
		struct binder_freeze_info info;
		struct binder_proc **target_procs = NULL, *target_proc;
		int target_procs_count = 0, i = 0;

		ret = 0;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		mutex_lock(&binder_procs_lock);
		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid == info.pid)
				target_procs_count++;
		}

		if (target_procs_count == 0) {
			mutex_unlock(&binder_procs_lock);
			ret = -EINVAL;
			goto err;
		}

		target_procs = kcalloc(target_procs_count,
				       sizeof(struct binder_proc *),
				       GFP_KERNEL);

		if (!target_procs) {
			mutex_unlock(&binder_procs_lock);
			ret = -ENOMEM;
			goto err;
		}

		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid != info.pid)
				continue;

			binder_inner_proc_lock(target_proc);
			target_proc->tmp_ref++;
			binder_inner_proc_unlock(target_proc);

			target_procs[i++] = target_proc;
		}
		mutex_unlock(&binder_procs_lock);

		for (i = 0; i < target_procs_count; i++) {
			if (ret >= 0)
				ret = binder_ioctl_freeze(&info,
							  target_procs[i]);

			binder_proc_dec_tmpref(target_procs[i]);
		}

		kfree(target_procs);

		if (ret < 0)
			goto err;
		break;
	}
	case BINDER_GET_FROZEN_INFO: {
		struct binder_frozen_status_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_freezer_info(&info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
		uint32_t enable;

		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
			ret = -EFAULT;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->oneway_spam_detection_enabled = (bool)enable;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_GET_EXTENDED_ERROR:
		ret = binder_ioctl_get_extended_error(thread, ubuf);
		if (ret < 0)
			goto err;
		break;
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -EINTR)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
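
/*
 * Illustrative userspace sketch (an assumption, not part of the
 * driver): the usual open-time handshake issues BINDER_VERSION and
 * verifies the protocol before the fd is used for anything else:
 *
 *	struct binder_version vers;
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 *	if (fd < 0 ||
 *	    ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;
 */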

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct binder_proc *proc = filp->private_data;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
		return -EPERM;
	}
	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	return binder_alloc_mmap_handler(&proc->alloc, vma);
}
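
/*
 * Illustrative userspace sketch (an assumption, not part of the
 * driver): because FORBIDDEN_MMAP_FLAGS rejects VM_WRITE, the buffer
 * area is mapped read-only and only the kernel writes it during
 * transactions; the mapping size here is the caller's choice:
 *
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE | MAP_NORESERVE, fd, 0);
 */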

static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc, *itr;
	struct binder_device *binder_dev;
	struct binderfs_info *info;
	struct dentry *binder_binderfs_dir_entry_proc = NULL;
	bool existing_pid = false;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;

	dbitmap_init(&proc->dmap);
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	proc->cred = get_cred(filp->f_cred);
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->freeze_wait);
	proc->default_priority = task_nice(current);
	/* binderfs stashes devices in i_private */
	if (is_binderfs_device(nodp)) {
		binder_dev = nodp->i_private;
		info = nodp->i_sb->s_fs_info;
		binder_binderfs_dir_entry_proc = info->proc_log_dir;
	} else {
		binder_dev = container_of(filp->private_data,
					  struct binder_device, miscdev);
	}
	refcount_inc(&binder_dev->ref);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == proc->pid) {
			existing_pid = true;
			break;
		}
	}
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts.
		 * Only create for the first PID to avoid debugfs log spamming.
		 * The printing code will print all contexts for a given
		 * PID anyway, so this is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	if (binder_binderfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];
		struct dentry *binderfs_entry;

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process specific log file is shared
		 * between contexts. Only create for the first PID.
		 * This is OK since, as with debugfs, the log file will contain
		 * information on all contexts of a given PID.
		 */
		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
		if (!IS_ERR(binderfs_entry)) {
			proc->binderfs_entry = binderfs_entry;
		} else {
			int error;

			error = PTR_ERR(binderfs_entry);
			pr_warn("Unable to create file %s in binderfs (error %d)\n",
				strbuf, error);
		}
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);

	if (proc->binderfs_entry) {
		binderfs_remove_file(proc->binderfs_entry);
		proc->binderfs_entry = NULL;
	}

	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
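
/*
 * Illustrative userspace sketch (an assumption, not part of the
 * driver): the BINDER_WORK_DEAD_BINDER queued above is read back as
 * BR_DEAD_BINDER by clients that earlier registered through
 * BINDER_WRITE_READ with:
 *
 *	struct {
 *		uint32_t cmd;
 *		struct binder_handle_cookie hc;
 *	} __attribute__((packed)) req = {
 *		.cmd = BC_REQUEST_DEATH_NOTIFICATION,
 *		.hc = { .handle = handle, .cookie = cookie },
 *	};
 *
 * On receiving BR_DEAD_BINDER carrying that cookie, the client must
 * answer with BC_DEAD_BINDER_DONE so the work item parked on
 * proc->delivered_death can be retired.
 */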

static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	proc->is_frozen = false;
	proc->sync_recv = false;
	proc->async_recv = false;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;
	ktime_t current_time = ktime_get();

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
		   prefix, t->debug_id, t,
		   t->from_pid,
		   t->from_tid,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply,
		   ktime_ms_delta(current_time, t->start_time));
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd offset %lx\n",
		   buffer->data_size, buffer->offsets_size,
		   proc->alloc.buffer - buffer->user_data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = hlist_count_nodes(&node->refs);

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
						  "    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY",
	"BR_FROZEN_REPLY",
	"BR_ONEWAY_SPAM_SUSPECT",
	"BR_TRANSACTION_PENDING_FROZEN"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

static int state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
		   "\n" : " (incomplete)\n");
}

static int transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
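
/*
 * Worked example of the ring arithmetic above, assuming the usual
 * 32-entry log (ARRAY_SIZE(log->entry) == 32): before the first wrap
 * with log_cur == 4, count becomes 5 and cur stays 0, so entries 0..4
 * print in order. Once full with log_cur == 4, count becomes 32 and
 * cur = 5 % 32 = 5, so the walk starts at the oldest entry and prints
 * indices 5..31 then 0..4, i.e. the whole buffer oldest-first.
 */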

const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transaction_log);

const struct binder_debugfs_entry binder_debugfs_entries[] = {
	{
		.name = "state",
		.mode = 0444,
		.fops = &state_fops,
		.data = NULL,
	},
	{
		.name = "stats",
		.mode = 0444,
		.fops = &stats_fops,
		.data = NULL,
	},
	{
		.name = "transactions",
		.mode = 0444,
		.fops = &transactions_fops,
		.data = NULL,
	},
	{
		.name = "transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log,
	},
	{
		.name = "failed_transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log_failed,
	},
	{} /* terminator */
};

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;
	const struct binder_debugfs_entry *db_entry;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);

	binder_for_each_debugfs_entry(db_entry)
		debugfs_create_file(db_entry->name,
				    db_entry->mode,
				    binder_debugfs_dir_entry_root,
				    db_entry->data,
				    db_entry->fops);

	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						binder_debugfs_dir_entry_root);

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
	binder_alloc_shrinker_exit();

	return ret;
}
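
/*
 * Illustrative note (an assumption about the usual configuration, not
 * new driver behavior): with CONFIG_ANDROID_BINDERFS disabled, the
 * strsep() loop above tokenizes the comma-separated device list in
 * place, so booting with, for instance,
 *
 *	binder.devices=binder,hwbinder,vndbinder
 *
 * registers three misc devices, each with its own binder_context.
 * Tokenizing the kstrdup() copy rather than binder_devices_param
 * itself keeps the module parameter readable after init.
 */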

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");