Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Staging: android: binder: Add some tracepoints

Add tracepoints:
- ioctl entry and exit
- Main binder lock: lock, locked and unlock
- Command and return buffer opcodes
- Transaction: create and receive
- Transaction buffer: create and free
- Object and file descriptor transfer
- binder_update_page_range

Signed-off-by: Arve Hjønnevåg <arve@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Arve Hjønnevåg; committed by Greg Kroah-Hartman.
975a1ac9 89334ab4

+400 -20
+2
drivers/staging/android/Makefile
··· 1 + ccflags-y += -I$(src) # needed for trace events 2 + 1 3 obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o 2 4 obj-$(CONFIG_ASHMEM) += ashmem.o 3 5 obj-$(CONFIG_ANDROID_LOGGER) += logger.o
+71 -20
drivers/staging/android/binder.c
··· 35 35 #include <linux/slab.h> 36 36 37 37 #include "binder.h" 38 + #include "binder_trace.h" 38 39 39 - static DEFINE_MUTEX(binder_lock); 40 + static DEFINE_MUTEX(binder_main_lock); 40 41 static DEFINE_MUTEX(binder_deferred_lock); 41 42 static DEFINE_MUTEX(binder_mmap_lock); 42 43 ··· 412 411 return retval; 413 412 } 414 413 414 + static inline void binder_lock(const char *tag) 415 + { 416 + trace_binder_lock(tag); 417 + mutex_lock(&binder_main_lock); 418 + trace_binder_locked(tag); 419 + } 420 + 421 + static inline void binder_unlock(const char *tag) 422 + { 423 + trace_binder_unlock(tag); 424 + mutex_unlock(&binder_main_lock); 425 + } 426 + 415 427 static void binder_set_nice(long nice) 416 428 { 417 429 long min_nice; ··· 550 536 551 537 if (end <= start) 552 538 return 0; 539 + 540 + trace_binder_update_page_range(proc, allocate, start, end); 553 541 554 542 if (vma) 555 543 mm = NULL; ··· 1477 1461 t->code = tr->code; 1478 1462 t->flags = tr->flags; 1479 1463 t->priority = task_nice(current); 1464 + 1465 + trace_binder_transaction(reply, t, target_node); 1466 + 1480 1467 t->buffer = binder_alloc_buf(target_proc, tr->data_size, 1481 1468 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); 1482 1469 if (t->buffer == NULL) { ··· 1490 1471 t->buffer->debug_id = t->debug_id; 1491 1472 t->buffer->transaction = t; 1492 1473 t->buffer->target_node = target_node; 1474 + trace_binder_transaction_alloc_buf(t->buffer); 1493 1475 if (target_node) 1494 1476 binder_inc_node(target_node, 1, 0, NULL); 1495 1477 ··· 1563 1543 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, 1564 1544 &thread->todo); 1565 1545 1546 + trace_binder_transaction_node_to_ref(t, node, ref); 1566 1547 binder_debug(BINDER_DEBUG_TRANSACTION, 1567 1548 " node %d u%p -> ref %d desc %d\n", 1568 1549 node->debug_id, node->ptr, ref->debug_id, ··· 1588 1567 fp->binder = ref->node->ptr; 1589 1568 fp->cookie = ref->node->cookie; 1590 1569 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, 
NULL); 1570 + trace_binder_transaction_ref_to_node(t, ref); 1591 1571 binder_debug(BINDER_DEBUG_TRANSACTION, 1592 1572 " ref %d desc %d -> node %d u%p\n", 1593 1573 ref->debug_id, ref->desc, ref->node->debug_id, ··· 1602 1580 } 1603 1581 fp->handle = new_ref->desc; 1604 1582 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); 1583 + trace_binder_transaction_ref_to_ref(t, ref, 1584 + new_ref); 1605 1585 binder_debug(BINDER_DEBUG_TRANSACTION, 1606 1586 " ref %d desc %d -> ref %d desc %d (node %d)\n", 1607 1587 ref->debug_id, ref->desc, new_ref->debug_id, ··· 1643 1619 goto err_get_unused_fd_failed; 1644 1620 } 1645 1621 task_fd_install(target_proc, target_fd, file); 1622 + trace_binder_transaction_fd(t, fp->handle, target_fd); 1646 1623 binder_debug(BINDER_DEBUG_TRANSACTION, 1647 1624 " fd %ld -> %d\n", fp->handle, target_fd); 1648 1625 /* TODO: fput? */ ··· 1692 1667 err_bad_object_type: 1693 1668 err_bad_offset: 1694 1669 err_copy_data_failed: 1670 + trace_binder_transaction_failed_buffer_release(t->buffer); 1695 1671 binder_transaction_buffer_release(target_proc, t->buffer, offp); 1696 1672 t->buffer->transaction = NULL; 1697 1673 binder_free_buf(target_proc, t->buffer); ··· 1738 1712 if (get_user(cmd, (uint32_t __user *)ptr)) 1739 1713 return -EFAULT; 1740 1714 ptr += sizeof(uint32_t); 1715 + trace_binder_command(cmd); 1741 1716 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 1742 1717 binder_stats.bc[_IOC_NR(cmd)]++; 1743 1718 proc->stats.bc[_IOC_NR(cmd)]++; ··· 1908 1881 else 1909 1882 list_move_tail(buffer->target_node->async_todo.next, &thread->todo); 1910 1883 } 1884 + trace_binder_transaction_buffer_release(buffer); 1911 1885 binder_transaction_buffer_release(proc, buffer, NULL); 1912 1886 binder_free_buf(proc, buffer); 1913 1887 break; ··· 2117 2089 void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, 2118 2090 uint32_t cmd) 2119 2091 { 2092 + trace_binder_return(cmd); 2120 2093 if (_IOC_NR(cmd) < 
ARRAY_SIZE(binder_stats.br)) { 2121 2094 binder_stats.br[_IOC_NR(cmd)]++; 2122 2095 proc->stats.br[_IOC_NR(cmd)]++; ··· 2181 2152 thread->looper |= BINDER_LOOPER_STATE_WAITING; 2182 2153 if (wait_for_proc_work) 2183 2154 proc->ready_threads++; 2184 - mutex_unlock(&binder_lock); 2155 + 2156 + binder_unlock(__func__); 2157 + 2158 + trace_binder_wait_for_work(wait_for_proc_work, 2159 + !!thread->transaction_stack, 2160 + !list_empty(&thread->todo)); 2185 2161 if (wait_for_proc_work) { 2186 2162 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 2187 2163 BINDER_LOOPER_STATE_ENTERED))) { ··· 2210 2176 } else 2211 2177 ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread)); 2212 2178 } 2213 - mutex_lock(&binder_lock); 2179 + 2180 + binder_lock(__func__); 2181 + 2214 2182 if (wait_for_proc_work) 2215 2183 proc->ready_threads--; 2216 2184 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; ··· 2403 2367 return -EFAULT; 2404 2368 ptr += sizeof(tr); 2405 2369 2370 + trace_binder_transaction_received(t); 2406 2371 binder_stat_br(proc, thread, cmd); 2407 2372 binder_debug(BINDER_DEBUG_TRANSACTION, 2408 2373 "binder: %d:%d %s %d %d:%d, cmd %d" ··· 2557 2520 struct binder_thread *thread = NULL; 2558 2521 int wait_for_proc_work; 2559 2522 2560 - mutex_lock(&binder_lock); 2523 + binder_lock(__func__); 2524 + 2561 2525 thread = binder_get_thread(proc); 2562 2526 2563 2527 wait_for_proc_work = thread->transaction_stack == NULL && 2564 2528 list_empty(&thread->todo) && thread->return_error == BR_OK; 2565 - mutex_unlock(&binder_lock); 2529 + 2530 + binder_unlock(__func__); 2566 2531 2567 2532 if (wait_for_proc_work) { 2568 2533 if (binder_has_proc_work(proc, thread)) ··· 2592 2553 2593 2554 /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ 2594 2555 2556 + trace_binder_ioctl(cmd, arg); 2557 + 2595 2558 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 2596 2559 if (ret) 2597 - return ret; 2560 
+ goto err_unlocked; 2598 2561 2599 - mutex_lock(&binder_lock); 2562 + binder_lock(__func__); 2600 2563 thread = binder_get_thread(proc); 2601 2564 if (thread == NULL) { 2602 2565 ret = -ENOMEM; ··· 2623 2582 2624 2583 if (bwr.write_size > 0) { 2625 2584 ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed); 2585 + trace_binder_write_done(ret); 2626 2586 if (ret < 0) { 2627 2587 bwr.read_consumed = 0; 2628 2588 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ··· 2633 2591 } 2634 2592 if (bwr.read_size > 0) { 2635 2593 ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); 2594 + trace_binder_read_done(ret); 2636 2595 if (!list_empty(&proc->todo)) 2637 2596 wake_up_interruptible(&proc->wait); 2638 2597 if (ret < 0) { ··· 2709 2666 err: 2710 2667 if (thread) 2711 2668 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; 2712 - mutex_unlock(&binder_lock); 2669 + binder_unlock(__func__); 2713 2670 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 2714 2671 if (ret && ret != -ERESTARTSYS) 2715 2672 pr_info("binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 2673 + err_unlocked: 2674 + trace_binder_ioctl_done(ret); 2716 2675 return ret; 2717 2676 } 2718 2677 ··· 2860 2815 INIT_LIST_HEAD(&proc->todo); 2861 2816 init_waitqueue_head(&proc->wait); 2862 2817 proc->default_priority = task_nice(current); 2863 - mutex_lock(&binder_lock); 2818 + 2819 + binder_lock(__func__); 2820 + 2864 2821 binder_stats_created(BINDER_STAT_PROC); 2865 2822 hlist_add_head(&proc->proc_node, &binder_procs); 2866 2823 proc->pid = current->group_leader->pid; 2867 2824 INIT_LIST_HEAD(&proc->delivered_death); 2868 2825 filp->private_data = proc; 2869 - mutex_unlock(&binder_lock); 2826 + 2827 + binder_unlock(__func__); 2870 2828 2871 2829 if (binder_debugfs_dir_entry_proc) { 2872 2830 char strbuf[11]; ··· 3049 
3001 3050 3002 int defer; 3051 3003 do { 3052 - mutex_lock(&binder_lock); 3004 + binder_lock(__func__); 3053 3005 mutex_lock(&binder_deferred_lock); 3054 3006 if (!hlist_empty(&binder_deferred_list)) { 3055 3007 proc = hlist_entry(binder_deferred_list.first, ··· 3076 3028 if (defer & BINDER_DEFERRED_RELEASE) 3077 3029 binder_deferred_release(proc); /* frees proc */ 3078 3030 3079 - mutex_unlock(&binder_lock); 3031 + binder_unlock(__func__); 3080 3032 if (files) 3081 3033 put_files_struct(files); 3082 3034 } while (proc); ··· 3417 3369 int do_lock = !binder_debug_no_lock; 3418 3370 3419 3371 if (do_lock) 3420 - mutex_lock(&binder_lock); 3372 + binder_lock(__func__); 3421 3373 3422 3374 seq_puts(m, "binder state:\n"); 3423 3375 ··· 3429 3381 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) 3430 3382 print_binder_proc(m, proc, 1); 3431 3383 if (do_lock) 3432 - mutex_unlock(&binder_lock); 3384 + binder_unlock(__func__); 3433 3385 return 0; 3434 3386 } 3435 3387 ··· 3440 3392 int do_lock = !binder_debug_no_lock; 3441 3393 3442 3394 if (do_lock) 3443 - mutex_lock(&binder_lock); 3395 + binder_lock(__func__); 3444 3396 3445 3397 seq_puts(m, "binder stats:\n"); 3446 3398 ··· 3449 3401 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) 3450 3402 print_binder_proc_stats(m, proc); 3451 3403 if (do_lock) 3452 - mutex_unlock(&binder_lock); 3404 + binder_unlock(__func__); 3453 3405 return 0; 3454 3406 } 3455 3407 ··· 3460 3412 int do_lock = !binder_debug_no_lock; 3461 3413 3462 3414 if (do_lock) 3463 - mutex_lock(&binder_lock); 3415 + binder_lock(__func__); 3464 3416 3465 3417 seq_puts(m, "binder transactions:\n"); 3466 3418 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) 3467 3419 print_binder_proc(m, proc, 0); 3468 3420 if (do_lock) 3469 - mutex_unlock(&binder_lock); 3421 + binder_unlock(__func__); 3470 3422 return 0; 3471 3423 } 3472 3424 ··· 3476 3428 int do_lock = !binder_debug_no_lock; 3477 3429 3478 3430 if (do_lock) 3479 - 
mutex_lock(&binder_lock); 3431 + binder_lock(__func__); 3480 3432 seq_puts(m, "binder proc state:\n"); 3481 3433 print_binder_proc(m, proc, 1); 3482 3434 if (do_lock) 3483 - mutex_unlock(&binder_lock); 3435 + binder_unlock(__func__); 3484 3436 return 0; 3485 3437 } 3486 3438 ··· 3574 3526 } 3575 3527 3576 3528 device_initcall(binder_init); 3529 + 3530 + #define CREATE_TRACE_POINTS 3531 + #include "binder_trace.h" 3577 3532 3578 3533 MODULE_LICENSE("GPL v2");
+327
drivers/staging/android/binder_trace.h
··· 1 + /* 2 + * Copyright (C) 2012 Google, Inc. 3 + * 4 + * This software is licensed under the terms of the GNU General Public 5 + * License version 2, as published by the Free Software Foundation, and 6 + * may be copied, distributed, and modified under those terms. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + */ 14 + 15 + #undef TRACE_SYSTEM 16 + #define TRACE_SYSTEM binder 17 + 18 + #if !defined(_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) 19 + #define _BINDER_TRACE_H 20 + 21 + #include <linux/tracepoint.h> 22 + 23 + struct binder_buffer; 24 + struct binder_node; 25 + struct binder_proc; 26 + struct binder_ref; 27 + struct binder_thread; 28 + struct binder_transaction; 29 + 30 + TRACE_EVENT(binder_ioctl, 31 + TP_PROTO(unsigned int cmd, unsigned long arg), 32 + TP_ARGS(cmd, arg), 33 + 34 + TP_STRUCT__entry( 35 + __field(unsigned int, cmd) 36 + __field(unsigned long, arg) 37 + ), 38 + TP_fast_assign( 39 + __entry->cmd = cmd; 40 + __entry->arg = arg; 41 + ), 42 + TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg) 43 + ); 44 + 45 + DECLARE_EVENT_CLASS(binder_lock_class, 46 + TP_PROTO(const char *tag), 47 + TP_ARGS(tag), 48 + TP_STRUCT__entry( 49 + __field(const char *, tag) 50 + ), 51 + TP_fast_assign( 52 + __entry->tag = tag; 53 + ), 54 + TP_printk("tag=%s", __entry->tag) 55 + ); 56 + 57 + #define DEFINE_BINDER_LOCK_EVENT(name) \ 58 + DEFINE_EVENT(binder_lock_class, name, \ 59 + TP_PROTO(const char *func), \ 60 + TP_ARGS(func)) 61 + 62 + DEFINE_BINDER_LOCK_EVENT(binder_lock); 63 + DEFINE_BINDER_LOCK_EVENT(binder_locked); 64 + DEFINE_BINDER_LOCK_EVENT(binder_unlock); 65 + 66 + DECLARE_EVENT_CLASS(binder_function_return_class, 67 + TP_PROTO(int ret), 68 + TP_ARGS(ret), 69 + TP_STRUCT__entry( 70 + __field(int, ret) 71 + 
), 72 + TP_fast_assign( 73 + __entry->ret = ret; 74 + ), 75 + TP_printk("ret=%d", __entry->ret) 76 + ); 77 + 78 + #define DEFINE_BINDER_FUNCTION_RETURN_EVENT(name) \ 79 + DEFINE_EVENT(binder_function_return_class, name, \ 80 + TP_PROTO(int ret), \ 81 + TP_ARGS(ret)) 82 + 83 + DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done); 84 + DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done); 85 + DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done); 86 + 87 + TRACE_EVENT(binder_wait_for_work, 88 + TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo), 89 + TP_ARGS(proc_work, transaction_stack, thread_todo), 90 + 91 + TP_STRUCT__entry( 92 + __field(bool, proc_work) 93 + __field(bool, transaction_stack) 94 + __field(bool, thread_todo) 95 + ), 96 + TP_fast_assign( 97 + __entry->proc_work = proc_work; 98 + __entry->transaction_stack = transaction_stack; 99 + __entry->thread_todo = thread_todo; 100 + ), 101 + TP_printk("proc_work=%d transaction_stack=%d thread_todo=%d", 102 + __entry->proc_work, __entry->transaction_stack, 103 + __entry->thread_todo) 104 + ); 105 + 106 + TRACE_EVENT(binder_transaction, 107 + TP_PROTO(bool reply, struct binder_transaction *t, 108 + struct binder_node *target_node), 109 + TP_ARGS(reply, t, target_node), 110 + TP_STRUCT__entry( 111 + __field(int, debug_id) 112 + __field(int, target_node) 113 + __field(int, to_proc) 114 + __field(int, to_thread) 115 + __field(int, reply) 116 + __field(unsigned int, code) 117 + __field(unsigned int, flags) 118 + ), 119 + TP_fast_assign( 120 + __entry->debug_id = t->debug_id; 121 + __entry->target_node = target_node ? target_node->debug_id : 0; 122 + __entry->to_proc = t->to_proc->pid; 123 + __entry->to_thread = t->to_thread ? 
t->to_thread->pid : 0; 124 + __entry->reply = reply; 125 + __entry->code = t->code; 126 + __entry->flags = t->flags; 127 + ), 128 + TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x", 129 + __entry->debug_id, __entry->target_node, 130 + __entry->to_proc, __entry->to_thread, 131 + __entry->reply, __entry->flags, __entry->code) 132 + ); 133 + 134 + TRACE_EVENT(binder_transaction_received, 135 + TP_PROTO(struct binder_transaction *t), 136 + TP_ARGS(t), 137 + 138 + TP_STRUCT__entry( 139 + __field(int, debug_id) 140 + ), 141 + TP_fast_assign( 142 + __entry->debug_id = t->debug_id; 143 + ), 144 + TP_printk("transaction=%d", __entry->debug_id) 145 + ); 146 + 147 + TRACE_EVENT(binder_transaction_node_to_ref, 148 + TP_PROTO(struct binder_transaction *t, struct binder_node *node, 149 + struct binder_ref *ref), 150 + TP_ARGS(t, node, ref), 151 + 152 + TP_STRUCT__entry( 153 + __field(int, debug_id) 154 + __field(int, node_debug_id) 155 + __field(void __user *, node_ptr) 156 + __field(int, ref_debug_id) 157 + __field(uint32_t, ref_desc) 158 + ), 159 + TP_fast_assign( 160 + __entry->debug_id = t->debug_id; 161 + __entry->node_debug_id = node->debug_id; 162 + __entry->node_ptr = node->ptr; 163 + __entry->ref_debug_id = ref->debug_id; 164 + __entry->ref_desc = ref->desc; 165 + ), 166 + TP_printk("transaction=%d node=%d src_ptr=0x%p ==> dest_ref=%d dest_desc=%d", 167 + __entry->debug_id, __entry->node_debug_id, __entry->node_ptr, 168 + __entry->ref_debug_id, __entry->ref_desc) 169 + ); 170 + 171 + TRACE_EVENT(binder_transaction_ref_to_node, 172 + TP_PROTO(struct binder_transaction *t, struct binder_ref *ref), 173 + TP_ARGS(t, ref), 174 + 175 + TP_STRUCT__entry( 176 + __field(int, debug_id) 177 + __field(int, ref_debug_id) 178 + __field(uint32_t, ref_desc) 179 + __field(int, node_debug_id) 180 + __field(void __user *, node_ptr) 181 + ), 182 + TP_fast_assign( 183 + __entry->debug_id = t->debug_id; 184 + __entry->ref_debug_id = 
ref->debug_id; 185 + __entry->ref_desc = ref->desc; 186 + __entry->node_debug_id = ref->node->debug_id; 187 + __entry->node_ptr = ref->node->ptr; 188 + ), 189 + TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%p", 190 + __entry->debug_id, __entry->node_debug_id, 191 + __entry->ref_debug_id, __entry->ref_desc, __entry->node_ptr) 192 + ); 193 + 194 + TRACE_EVENT(binder_transaction_ref_to_ref, 195 + TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref, 196 + struct binder_ref *dest_ref), 197 + TP_ARGS(t, src_ref, dest_ref), 198 + 199 + TP_STRUCT__entry( 200 + __field(int, debug_id) 201 + __field(int, node_debug_id) 202 + __field(int, src_ref_debug_id) 203 + __field(uint32_t, src_ref_desc) 204 + __field(int, dest_ref_debug_id) 205 + __field(uint32_t, dest_ref_desc) 206 + ), 207 + TP_fast_assign( 208 + __entry->debug_id = t->debug_id; 209 + __entry->node_debug_id = src_ref->node->debug_id; 210 + __entry->src_ref_debug_id = src_ref->debug_id; 211 + __entry->src_ref_desc = src_ref->desc; 212 + __entry->dest_ref_debug_id = dest_ref->debug_id; 213 + __entry->dest_ref_desc = dest_ref->desc; 214 + ), 215 + TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ref=%d dest_desc=%d", 216 + __entry->debug_id, __entry->node_debug_id, 217 + __entry->src_ref_debug_id, __entry->src_ref_desc, 218 + __entry->dest_ref_debug_id, __entry->dest_ref_desc) 219 + ); 220 + 221 + TRACE_EVENT(binder_transaction_fd, 222 + TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd), 223 + TP_ARGS(t, src_fd, dest_fd), 224 + 225 + TP_STRUCT__entry( 226 + __field(int, debug_id) 227 + __field(int, src_fd) 228 + __field(int, dest_fd) 229 + ), 230 + TP_fast_assign( 231 + __entry->debug_id = t->debug_id; 232 + __entry->src_fd = src_fd; 233 + __entry->dest_fd = dest_fd; 234 + ), 235 + TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d", 236 + __entry->debug_id, __entry->src_fd, __entry->dest_fd) 237 + ); 238 + 239 + 
DECLARE_EVENT_CLASS(binder_buffer_class, 240 + TP_PROTO(struct binder_buffer *buf), 241 + TP_ARGS(buf), 242 + TP_STRUCT__entry( 243 + __field(int, debug_id) 244 + __field(size_t, data_size) 245 + __field(size_t, offsets_size) 246 + ), 247 + TP_fast_assign( 248 + __entry->debug_id = buf->debug_id; 249 + __entry->data_size = buf->data_size; 250 + __entry->offsets_size = buf->offsets_size; 251 + ), 252 + TP_printk("transaction=%d data_size=%zd offsets_size=%zd", 253 + __entry->debug_id, __entry->data_size, __entry->offsets_size) 254 + ); 255 + 256 + DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf, 257 + TP_PROTO(struct binder_buffer *buffer), 258 + TP_ARGS(buffer)); 259 + 260 + DEFINE_EVENT(binder_buffer_class, binder_transaction_buffer_release, 261 + TP_PROTO(struct binder_buffer *buffer), 262 + TP_ARGS(buffer)); 263 + 264 + DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release, 265 + TP_PROTO(struct binder_buffer *buffer), 266 + TP_ARGS(buffer)); 267 + 268 + TRACE_EVENT(binder_update_page_range, 269 + TP_PROTO(struct binder_proc *proc, bool allocate, 270 + void *start, void *end), 271 + TP_ARGS(proc, allocate, start, end), 272 + TP_STRUCT__entry( 273 + __field(int, proc) 274 + __field(bool, allocate) 275 + __field(size_t, offset) 276 + __field(size_t, size) 277 + ), 278 + TP_fast_assign( 279 + __entry->proc = proc->pid; 280 + __entry->allocate = allocate; 281 + __entry->offset = start - proc->buffer; 282 + __entry->size = end - start; 283 + ), 284 + TP_printk("proc=%d allocate=%d offset=%zu size=%zu", 285 + __entry->proc, __entry->allocate, 286 + __entry->offset, __entry->size) 287 + ); 288 + 289 + TRACE_EVENT(binder_command, 290 + TP_PROTO(uint32_t cmd), 291 + TP_ARGS(cmd), 292 + TP_STRUCT__entry( 293 + __field(uint32_t, cmd) 294 + ), 295 + TP_fast_assign( 296 + __entry->cmd = cmd; 297 + ), 298 + TP_printk("cmd=0x%x %s", 299 + __entry->cmd, 300 + _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_command_strings) ? 
301 + binder_command_strings[_IOC_NR(__entry->cmd)] : 302 + "unknown") 303 + ); 304 + 305 + TRACE_EVENT(binder_return, 306 + TP_PROTO(uint32_t cmd), 307 + TP_ARGS(cmd), 308 + TP_STRUCT__entry( 309 + __field(uint32_t, cmd) 310 + ), 311 + TP_fast_assign( 312 + __entry->cmd = cmd; 313 + ), 314 + TP_printk("cmd=0x%x %s", 315 + __entry->cmd, 316 + _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_return_strings) ? 317 + binder_return_strings[_IOC_NR(__entry->cmd)] : 318 + "unknown") 319 + ); 320 + 321 + #endif /* _BINDER_TRACE_H */ 322 + 323 + #undef TRACE_INCLUDE_PATH 324 + #undef TRACE_INCLUDE_FILE 325 + #define TRACE_INCLUDE_PATH . 326 + #define TRACE_INCLUDE_FILE binder_trace 327 + #include <trace/define_trace.h>