Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Staging: android: add binder driver

It builds, but not as a module, and with lots of warnings.

I also had to fix up a few syntax errors to get it to build
properly; I doubt that anyone has built it in a while :(

Signed-off-by: Arve Hjønnevåg <arve@android.com>
Signed-off-by: Brian Swetland <swetland@google.com>
Cc: Robert Love <rlove@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

Authored by Arve Hjønnevåg; committed by Greg Kroah-Hartman.
457b9a6f 6dc9c9e8

+3834
+4
drivers/staging/android/Kconfig
··· 6 6 ---help--- 7 7 Enable support for various drivers needed on the Android platform 8 8 9 + config ANDROID_BINDER_IPC 10 + tristate "Android Binder IPC Driver" 11 + default y 12 + 9 13 endmenu
+1
drivers/staging/android/Makefile
··· 1 1 obj-$(CONFIG_ANDROID) += android.o 2 + obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
+3499
drivers/staging/android/binder.c
··· 1 + /* binder.c 2 + * 3 + * Android IPC Subsystem 4 + * 5 + * Copyright (C) 2007-2008 Google, Inc. 6 + * 7 + * This software is licensed under the terms of the GNU General Public 8 + * License version 2, as published by the Free Software Foundation, and 9 + * may be copied, distributed, and modified under those terms. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + */ 17 + 18 + #include <asm/cacheflush.h> 19 + #include <linux/fdtable.h> 20 + #include <linux/file.h> 21 + #include <linux/fs.h> 22 + #include <linux/list.h> 23 + #include <linux/miscdevice.h> 24 + #include <linux/mm.h> 25 + #include <linux/module.h> 26 + #include <linux/mutex.h> 27 + #include <linux/nsproxy.h> 28 + #include <linux/poll.h> 29 + #include <linux/proc_fs.h> 30 + #include <linux/rbtree.h> 31 + #include <linux/sched.h> 32 + #include <linux/uaccess.h> 33 + #include <linux/vmalloc.h> 34 + #include "binder.h" 35 + 36 + static DEFINE_MUTEX(binder_lock); 37 + static HLIST_HEAD(binder_procs); 38 + static struct binder_node *binder_context_mgr_node; 39 + static uid_t binder_context_mgr_uid = -1; 40 + static int binder_last_id; 41 + static struct proc_dir_entry *binder_proc_dir_entry_root; 42 + static struct proc_dir_entry *binder_proc_dir_entry_proc; 43 + static struct hlist_head binder_dead_nodes; 44 + 45 + static int binder_read_proc_proc( 46 + char *page, char **start, off_t off, int count, int *eof, void *data); 47 + 48 + /* This is only defined in include/asm-arm/sizes.h */ 49 + #ifndef SZ_1K 50 + #define SZ_1K 0x400 51 + #endif 52 + 53 + #ifndef SZ_4M 54 + #define SZ_4M 0x400000 55 + #endif 56 + 57 + #ifndef __i386__ 58 + #define FORBIDDEN_MMAP_FLAGS (VM_WRITE | VM_EXEC) 59 + #else 60 + #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) 61 + #endif 62 + 63 + #define 
BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) 64 + 65 + enum { 66 + BINDER_DEBUG_USER_ERROR = 1U << 0, 67 + BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, 68 + BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, 69 + BINDER_DEBUG_OPEN_CLOSE = 1U << 3, 70 + BINDER_DEBUG_DEAD_BINDER = 1U << 4, 71 + BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, 72 + BINDER_DEBUG_READ_WRITE = 1U << 6, 73 + BINDER_DEBUG_USER_REFS = 1U << 7, 74 + BINDER_DEBUG_THREADS = 1U << 8, 75 + BINDER_DEBUG_TRANSACTION = 1U << 9, 76 + BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, 77 + BINDER_DEBUG_FREE_BUFFER = 1U << 11, 78 + BINDER_DEBUG_INTERNAL_REFS = 1U << 12, 79 + BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, 80 + BINDER_DEBUG_PRIORITY_CAP = 1U << 14, 81 + BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, 82 + }; 83 + static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | 84 + BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; 85 + module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); 86 + static int binder_debug_no_lock; 87 + module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); 88 + static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); 89 + static int binder_stop_on_user_error; 90 + static int binder_set_stop_on_user_error( 91 + const char *val, struct kernel_param *kp) 92 + { 93 + int ret; 94 + ret = param_set_int(val, kp); 95 + if (binder_stop_on_user_error < 2) 96 + wake_up(&binder_user_error_wait); 97 + return ret; 98 + } 99 + module_param_call(stop_on_user_error, binder_set_stop_on_user_error, 100 + param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); 101 + 102 + #define binder_user_error(x...) 
\ 103 + do { \ 104 + if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ 105 + printk(KERN_INFO x); \ 106 + if (binder_stop_on_user_error) \ 107 + binder_stop_on_user_error = 2; \ 108 + } while (0) 109 + 110 + enum { 111 + BINDER_STAT_PROC, 112 + BINDER_STAT_THREAD, 113 + BINDER_STAT_NODE, 114 + BINDER_STAT_REF, 115 + BINDER_STAT_DEATH, 116 + BINDER_STAT_TRANSACTION, 117 + BINDER_STAT_TRANSACTION_COMPLETE, 118 + BINDER_STAT_COUNT 119 + }; 120 + 121 + struct binder_stats { 122 + int br[_IOC_NR(BR_FAILED_REPLY) + 1]; 123 + int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; 124 + int obj_created[BINDER_STAT_COUNT]; 125 + int obj_deleted[BINDER_STAT_COUNT]; 126 + }; 127 + 128 + static struct binder_stats binder_stats; 129 + 130 + struct binder_transaction_log_entry { 131 + int debug_id; 132 + int call_type; 133 + int from_proc; 134 + int from_thread; 135 + int target_handle; 136 + int to_proc; 137 + int to_thread; 138 + int to_node; 139 + int data_size; 140 + int offsets_size; 141 + }; 142 + struct binder_transaction_log { 143 + int next; 144 + int full; 145 + struct binder_transaction_log_entry entry[32]; 146 + }; 147 + struct binder_transaction_log binder_transaction_log; 148 + struct binder_transaction_log binder_transaction_log_failed; 149 + 150 + static struct binder_transaction_log_entry *binder_transaction_log_add( 151 + struct binder_transaction_log *log) 152 + { 153 + struct binder_transaction_log_entry *e; 154 + e = &log->entry[log->next]; 155 + memset(e, 0, sizeof(*e)); 156 + log->next++; 157 + if (log->next == ARRAY_SIZE(log->entry)) { 158 + log->next = 0; 159 + log->full = 1; 160 + } 161 + return e; 162 + } 163 + 164 + struct binder_work { 165 + struct list_head entry; 166 + enum { 167 + BINDER_WORK_TRANSACTION = 1, 168 + BINDER_WORK_TRANSACTION_COMPLETE, 169 + BINDER_WORK_NODE, 170 + BINDER_WORK_DEAD_BINDER, 171 + BINDER_WORK_DEAD_BINDER_AND_CLEAR, 172 + BINDER_WORK_CLEAR_DEATH_NOTIFICATION, 173 + } type; 174 + }; 175 + 176 + struct binder_node { 177 + int 
debug_id; 178 + struct binder_work work; 179 + union { 180 + struct rb_node rb_node; 181 + struct hlist_node dead_node; 182 + }; 183 + struct binder_proc *proc; 184 + struct hlist_head refs; 185 + int internal_strong_refs; 186 + int local_weak_refs; 187 + int local_strong_refs; 188 + void __user *ptr; 189 + void __user *cookie; 190 + unsigned has_strong_ref : 1; 191 + unsigned pending_strong_ref : 1; 192 + unsigned has_weak_ref : 1; 193 + unsigned pending_weak_ref : 1; 194 + unsigned has_async_transaction : 1; 195 + unsigned accept_fds : 1; 196 + int min_priority : 8; 197 + struct list_head async_todo; 198 + }; 199 + 200 + struct binder_ref_death { 201 + struct binder_work work; 202 + void __user *cookie; 203 + }; 204 + 205 + struct binder_ref { 206 + /* Lookups needed: */ 207 + /* node + proc => ref (transaction) */ 208 + /* desc + proc => ref (transaction, inc/dec ref) */ 209 + /* node => refs + procs (proc exit) */ 210 + int debug_id; 211 + struct rb_node rb_node_desc; 212 + struct rb_node rb_node_node; 213 + struct hlist_node node_entry; 214 + struct binder_proc *proc; 215 + struct binder_node *node; 216 + uint32_t desc; 217 + int strong; 218 + int weak; 219 + struct binder_ref_death *death; 220 + }; 221 + 222 + struct binder_buffer { 223 + struct list_head entry; /* free and allocated entries by addesss */ 224 + struct rb_node rb_node; /* free entry by size or allocated entry */ 225 + /* by address */ 226 + unsigned free : 1; 227 + unsigned allow_user_free : 1; 228 + unsigned async_transaction : 1; 229 + unsigned debug_id : 29; 230 + 231 + struct binder_transaction *transaction; 232 + 233 + struct binder_node *target_node; 234 + size_t data_size; 235 + size_t offsets_size; 236 + uint8_t data[0]; 237 + }; 238 + 239 + struct binder_proc { 240 + struct hlist_node proc_node; 241 + struct rb_root threads; 242 + struct rb_root nodes; 243 + struct rb_root refs_by_desc; 244 + struct rb_root refs_by_node; 245 + int pid; 246 + struct vm_area_struct *vma; 247 + struct 
task_struct *tsk; 248 + void *buffer; 249 + size_t user_buffer_offset; 250 + 251 + struct list_head buffers; 252 + struct rb_root free_buffers; 253 + struct rb_root allocated_buffers; 254 + size_t free_async_space; 255 + 256 + struct page **pages; 257 + size_t buffer_size; 258 + uint32_t buffer_free; 259 + struct list_head todo; 260 + wait_queue_head_t wait; 261 + struct binder_stats stats; 262 + struct list_head delivered_death; 263 + int max_threads; 264 + int requested_threads; 265 + int requested_threads_started; 266 + int ready_threads; 267 + long default_priority; 268 + }; 269 + 270 + enum { 271 + BINDER_LOOPER_STATE_REGISTERED = 0x01, 272 + BINDER_LOOPER_STATE_ENTERED = 0x02, 273 + BINDER_LOOPER_STATE_EXITED = 0x04, 274 + BINDER_LOOPER_STATE_INVALID = 0x08, 275 + BINDER_LOOPER_STATE_WAITING = 0x10, 276 + BINDER_LOOPER_STATE_NEED_RETURN = 0x20 277 + }; 278 + 279 + struct binder_thread { 280 + struct binder_proc *proc; 281 + struct rb_node rb_node; 282 + int pid; 283 + int looper; 284 + struct binder_transaction *transaction_stack; 285 + struct list_head todo; 286 + uint32_t return_error; /* Write failed, return error code in read buf */ 287 + uint32_t return_error2; /* Write failed, return error code in read */ 288 + /* buffer. 
Used when sending a reply to a dead process that */ 289 + /* we are also waiting on */ 290 + wait_queue_head_t wait; 291 + struct binder_stats stats; 292 + }; 293 + 294 + struct binder_transaction { 295 + int debug_id; 296 + struct binder_work work; 297 + struct binder_thread *from; 298 + struct binder_transaction *from_parent; 299 + struct binder_proc *to_proc; 300 + struct binder_thread *to_thread; 301 + struct binder_transaction *to_parent; 302 + unsigned need_reply : 1; 303 + /*unsigned is_dead : 1;*/ /* not used at the moment */ 304 + 305 + struct binder_buffer *buffer; 306 + unsigned int code; 307 + unsigned int flags; 308 + long priority; 309 + long saved_priority; 310 + uid_t sender_euid; 311 + }; 312 + 313 + /* 314 + * copied from get_unused_fd_flags 315 + */ 316 + int task_get_unused_fd_flags(struct task_struct *tsk, int flags) 317 + { 318 + struct files_struct *files = get_files_struct(tsk); 319 + int fd, error; 320 + struct fdtable *fdt; 321 + unsigned long rlim_cur; 322 + 323 + if (files == NULL) 324 + return -ESRCH; 325 + 326 + error = -EMFILE; 327 + spin_lock(&files->file_lock); 328 + 329 + repeat: 330 + fdt = files_fdtable(files); 331 + fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds, 332 + files->next_fd); 333 + 334 + /* 335 + * N.B. For clone tasks sharing a files structure, this test 336 + * will limit the total number of files that can be opened. 337 + */ 338 + rcu_read_lock(); 339 + if (tsk->signal) 340 + rlim_cur = tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; 341 + else 342 + rlim_cur = 0; 343 + rcu_read_unlock(); 344 + if (fd >= rlim_cur) 345 + goto out; 346 + 347 + /* Do we need to expand the fd array or fd set? */ 348 + error = expand_files(files, fd); 349 + if (error < 0) 350 + goto out; 351 + 352 + if (error) { 353 + /* 354 + * If we needed to expand the fs array we 355 + * might have blocked - try again. 
356 + */ 357 + error = -EMFILE; 358 + goto repeat; 359 + } 360 + 361 + FD_SET(fd, fdt->open_fds); 362 + if (flags & O_CLOEXEC) 363 + FD_SET(fd, fdt->close_on_exec); 364 + else 365 + FD_CLR(fd, fdt->close_on_exec); 366 + files->next_fd = fd + 1; 367 + #if 1 368 + /* Sanity check */ 369 + if (fdt->fd[fd] != NULL) { 370 + printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd); 371 + fdt->fd[fd] = NULL; 372 + } 373 + #endif 374 + error = fd; 375 + 376 + out: 377 + spin_unlock(&files->file_lock); 378 + put_files_struct(files); 379 + return error; 380 + } 381 + 382 + /* 383 + * copied from fd_install 384 + */ 385 + static void task_fd_install( 386 + struct task_struct *tsk, unsigned int fd, struct file *file) 387 + { 388 + struct files_struct *files = get_files_struct(tsk); 389 + struct fdtable *fdt; 390 + 391 + if (files == NULL) 392 + return; 393 + 394 + spin_lock(&files->file_lock); 395 + fdt = files_fdtable(files); 396 + BUG_ON(fdt->fd[fd] != NULL); 397 + rcu_assign_pointer(fdt->fd[fd], file); 398 + spin_unlock(&files->file_lock); 399 + put_files_struct(files); 400 + } 401 + 402 + /* 403 + * copied from __put_unused_fd in open.c 404 + */ 405 + static void __put_unused_fd(struct files_struct *files, unsigned int fd) 406 + { 407 + struct fdtable *fdt = files_fdtable(files); 408 + __FD_CLR(fd, fdt->open_fds); 409 + if (fd < files->next_fd) 410 + files->next_fd = fd; 411 + } 412 + 413 + /* 414 + * copied from sys_close 415 + */ 416 + static long task_close_fd(struct task_struct *tsk, unsigned int fd) 417 + { 418 + struct file *filp; 419 + struct files_struct *files = get_files_struct(tsk); 420 + struct fdtable *fdt; 421 + int retval; 422 + 423 + if (files == NULL) 424 + return -ESRCH; 425 + 426 + spin_lock(&files->file_lock); 427 + fdt = files_fdtable(files); 428 + if (fd >= fdt->max_fds) 429 + goto out_unlock; 430 + filp = fdt->fd[fd]; 431 + if (!filp) 432 + goto out_unlock; 433 + rcu_assign_pointer(fdt->fd[fd], NULL); 434 + FD_CLR(fd, fdt->close_on_exec); 435 + 
__put_unused_fd(files, fd); 436 + spin_unlock(&files->file_lock); 437 + retval = filp_close(filp, files); 438 + 439 + /* can't restart close syscall because file table entry was cleared */ 440 + if (unlikely(retval == -ERESTARTSYS || 441 + retval == -ERESTARTNOINTR || 442 + retval == -ERESTARTNOHAND || 443 + retval == -ERESTART_RESTARTBLOCK)) 444 + retval = -EINTR; 445 + 446 + put_files_struct(files); 447 + return retval; 448 + 449 + out_unlock: 450 + spin_unlock(&files->file_lock); 451 + put_files_struct(files); 452 + return -EBADF; 453 + } 454 + 455 + static void binder_set_nice(long nice) 456 + { 457 + long min_nice; 458 + if (can_nice(current, nice)) { 459 + set_user_nice(current, nice); 460 + return; 461 + } 462 + min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur; 463 + if (binder_debug_mask & BINDER_DEBUG_PRIORITY_CAP) 464 + printk(KERN_INFO "binder: %d: nice value %ld not allowed use " 465 + "%ld instead\n", current->pid, nice, min_nice); 466 + set_user_nice(current, min_nice); 467 + if (min_nice < 20) 468 + return; 469 + binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid); 470 + } 471 + 472 + static size_t binder_buffer_size( 473 + struct binder_proc *proc, struct binder_buffer *buffer) 474 + { 475 + if (list_is_last(&buffer->entry, &proc->buffers)) 476 + return proc->buffer + proc->buffer_size - (void *)buffer->data; 477 + else 478 + return (size_t)list_entry(buffer->entry.next, 479 + struct binder_buffer, entry) - (size_t)buffer->data; 480 + } 481 + 482 + static void binder_insert_free_buffer( 483 + struct binder_proc *proc, struct binder_buffer *new_buffer) 484 + { 485 + struct rb_node **p = &proc->free_buffers.rb_node; 486 + struct rb_node *parent = NULL; 487 + struct binder_buffer *buffer; 488 + size_t buffer_size; 489 + size_t new_buffer_size; 490 + 491 + BUG_ON(!new_buffer->free); 492 + 493 + new_buffer_size = binder_buffer_size(proc, new_buffer); 494 + 495 + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) 496 + 
printk(KERN_INFO "binder: %d: add free buffer, size %d, " 497 + "at %p\n", proc->pid, new_buffer_size, new_buffer); 498 + 499 + while (*p) { 500 + parent = *p; 501 + buffer = rb_entry(parent, struct binder_buffer, rb_node); 502 + BUG_ON(!buffer->free); 503 + 504 + buffer_size = binder_buffer_size(proc, buffer); 505 + 506 + if (new_buffer_size < buffer_size) 507 + p = &parent->rb_left; 508 + else 509 + p = &parent->rb_right; 510 + } 511 + rb_link_node(&new_buffer->rb_node, parent, p); 512 + rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); 513 + } 514 + 515 + static void binder_insert_allocated_buffer( 516 + struct binder_proc *proc, struct binder_buffer *new_buffer) 517 + { 518 + struct rb_node **p = &proc->allocated_buffers.rb_node; 519 + struct rb_node *parent = NULL; 520 + struct binder_buffer *buffer; 521 + 522 + BUG_ON(new_buffer->free); 523 + 524 + while (*p) { 525 + parent = *p; 526 + buffer = rb_entry(parent, struct binder_buffer, rb_node); 527 + BUG_ON(buffer->free); 528 + 529 + if (new_buffer < buffer) 530 + p = &parent->rb_left; 531 + else if (new_buffer > buffer) 532 + p = &parent->rb_right; 533 + else 534 + BUG(); 535 + } 536 + rb_link_node(&new_buffer->rb_node, parent, p); 537 + rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); 538 + } 539 + 540 + static struct binder_buffer *binder_buffer_lookup( 541 + struct binder_proc *proc, void __user *user_ptr) 542 + { 543 + struct rb_node *n = proc->allocated_buffers.rb_node; 544 + struct binder_buffer *buffer; 545 + struct binder_buffer *kern_ptr; 546 + 547 + kern_ptr = user_ptr - proc->user_buffer_offset 548 + - offsetof(struct binder_buffer, data); 549 + 550 + while (n) { 551 + buffer = rb_entry(n, struct binder_buffer, rb_node); 552 + BUG_ON(buffer->free); 553 + 554 + if (kern_ptr < buffer) 555 + n = n->rb_left; 556 + else if (kern_ptr > buffer) 557 + n = n->rb_right; 558 + else 559 + return buffer; 560 + } 561 + return NULL; 562 + } 563 + 564 + static int 
binder_update_page_range(struct binder_proc *proc, int allocate, 565 + void *start, void *end, struct vm_area_struct *vma) 566 + { 567 + void *page_addr; 568 + unsigned long user_page_addr; 569 + struct vm_struct tmp_area; 570 + struct page **page; 571 + struct mm_struct *mm; 572 + 573 + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) 574 + printk(KERN_INFO "binder: %d: %s pages %p-%p\n", 575 + proc->pid, allocate ? "allocate" : "free", start, end); 576 + 577 + if (end <= start) 578 + return 0; 579 + 580 + if (vma) 581 + mm = NULL; 582 + else 583 + mm = get_task_mm(proc->tsk); 584 + 585 + if (mm) { 586 + down_write(&mm->mmap_sem); 587 + vma = proc->vma; 588 + } 589 + 590 + if (allocate == 0) 591 + goto free_range; 592 + 593 + if (vma == NULL) { 594 + printk(KERN_ERR "binder: %d: binder_alloc_buf failed to " 595 + "map pages in userspace, no vma\n", proc->pid); 596 + goto err_no_vma; 597 + } 598 + 599 + for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { 600 + int ret; 601 + struct page **page_array_ptr; 602 + page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 603 + 604 + BUG_ON(*page); 605 + *page = alloc_page(GFP_KERNEL | __GFP_ZERO); 606 + if (*page == NULL) { 607 + printk(KERN_ERR "binder: %d: binder_alloc_buf failed " 608 + "for page at %p\n", proc->pid, page_addr); 609 + goto err_alloc_page_failed; 610 + } 611 + tmp_area.addr = page_addr; 612 + tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? 
*/; 613 + page_array_ptr = page; 614 + ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr); 615 + if (ret) { 616 + printk(KERN_ERR "binder: %d: binder_alloc_buf failed " 617 + "to map page at %p in kernel\n", 618 + proc->pid, page_addr); 619 + goto err_map_kernel_failed; 620 + } 621 + user_page_addr = (size_t)page_addr + proc->user_buffer_offset; 622 + ret = vm_insert_page(vma, user_page_addr, page[0]); 623 + if (ret) { 624 + printk(KERN_ERR "binder: %d: binder_alloc_buf failed " 625 + "to map page at %lx in userspace\n", 626 + proc->pid, user_page_addr); 627 + goto err_vm_insert_page_failed; 628 + } 629 + /* vm_insert_page does not seem to increment the refcount */ 630 + } 631 + if (mm) { 632 + up_write(&mm->mmap_sem); 633 + mmput(mm); 634 + } 635 + return 0; 636 + 637 + free_range: 638 + for (page_addr = end - PAGE_SIZE; page_addr >= start; 639 + page_addr -= PAGE_SIZE) { 640 + page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 641 + if (vma) 642 + zap_page_range(vma, (size_t)page_addr + 643 + proc->user_buffer_offset, PAGE_SIZE, NULL); 644 + err_vm_insert_page_failed: 645 + unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); 646 + err_map_kernel_failed: 647 + __free_page(*page); 648 + *page = NULL; 649 + err_alloc_page_failed: 650 + ; 651 + } 652 + err_no_vma: 653 + if (mm) { 654 + up_write(&mm->mmap_sem); 655 + mmput(mm); 656 + } 657 + return -ENOMEM; 658 + } 659 + 660 + static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, 661 + size_t data_size, size_t offsets_size, int is_async) 662 + { 663 + struct rb_node *n = proc->free_buffers.rb_node; 664 + struct binder_buffer *buffer; 665 + size_t buffer_size; 666 + struct rb_node *best_fit = NULL; 667 + void *has_page_addr; 668 + void *end_page_addr; 669 + size_t size; 670 + 671 + if (proc->vma == NULL) { 672 + printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n", 673 + proc->pid); 674 + return NULL; 675 + } 676 + 677 + size = ALIGN(data_size, sizeof(void *)) + 678 + 
ALIGN(offsets_size, sizeof(void *)); 679 + 680 + if (size < data_size || size < offsets_size) { 681 + binder_user_error("binder: %d: got transaction with invalid " 682 + "size %d-%d\n", proc->pid, data_size, offsets_size); 683 + return NULL; 684 + } 685 + 686 + if (is_async && 687 + proc->free_async_space < size + sizeof(struct binder_buffer)) { 688 + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) 689 + printk(KERN_ERR "binder: %d: binder_alloc_buf size %d f" 690 + "ailed, no async space left\n", proc->pid, size); 691 + return NULL; 692 + } 693 + 694 + while (n) { 695 + buffer = rb_entry(n, struct binder_buffer, rb_node); 696 + BUG_ON(!buffer->free); 697 + buffer_size = binder_buffer_size(proc, buffer); 698 + 699 + if (size < buffer_size) { 700 + best_fit = n; 701 + n = n->rb_left; 702 + } else if (size > buffer_size) 703 + n = n->rb_right; 704 + else { 705 + best_fit = n; 706 + break; 707 + } 708 + } 709 + if (best_fit == NULL) { 710 + printk(KERN_ERR "binder: %d: binder_alloc_buf size %d failed, " 711 + "no address space\n", proc->pid, size); 712 + return NULL; 713 + } 714 + if (n == NULL) { 715 + buffer = rb_entry(best_fit, struct binder_buffer, rb_node); 716 + buffer_size = binder_buffer_size(proc, buffer); 717 + } 718 + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) 719 + printk(KERN_INFO "binder: %d: binder_alloc_buf size %d got buff" 720 + "er %p size %d\n", proc->pid, size, buffer, buffer_size); 721 + 722 + has_page_addr = 723 + (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK); 724 + if (n == NULL) { 725 + if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) 726 + buffer_size = size; /* no room for other buffers */ 727 + else 728 + buffer_size = size + sizeof(struct binder_buffer); 729 + } 730 + end_page_addr = (void *)PAGE_ALIGN((size_t)buffer->data + buffer_size); 731 + if (end_page_addr > has_page_addr) 732 + end_page_addr = has_page_addr; 733 + if (binder_update_page_range(proc, 1, 734 + (void *)PAGE_ALIGN((size_t)buffer->data), 
end_page_addr, NULL)) 735 + return NULL; 736 + 737 + rb_erase(best_fit, &proc->free_buffers); 738 + buffer->free = 0; 739 + binder_insert_allocated_buffer(proc, buffer); 740 + if (buffer_size != size) { 741 + struct binder_buffer *new_buffer = (void *)buffer->data + size; 742 + list_add(&new_buffer->entry, &buffer->entry); 743 + new_buffer->free = 1; 744 + binder_insert_free_buffer(proc, new_buffer); 745 + } 746 + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) 747 + printk(KERN_INFO "binder: %d: binder_alloc_buf size %d got " 748 + "%p\n", proc->pid, size, buffer); 749 + buffer->data_size = data_size; 750 + buffer->offsets_size = offsets_size; 751 + buffer->async_transaction = is_async; 752 + if (is_async) { 753 + proc->free_async_space -= size + sizeof(struct binder_buffer); 754 + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC) 755 + printk(KERN_INFO "binder: %d: binder_alloc_buf size %d " 756 + "async free %d\n", proc->pid, size, 757 + proc->free_async_space); 758 + } 759 + 760 + return buffer; 761 + } 762 + 763 + static void *buffer_start_page(struct binder_buffer *buffer) 764 + { 765 + return (void *)((size_t)buffer & PAGE_MASK); 766 + } 767 + 768 + static void *buffer_end_page(struct binder_buffer *buffer) 769 + { 770 + return (void *)(((size_t)(buffer + 1) - 1) & PAGE_MASK); 771 + } 772 + 773 + static void binder_delete_free_buffer( 774 + struct binder_proc *proc, struct binder_buffer *buffer) 775 + { 776 + struct binder_buffer *prev, *next = NULL; 777 + int free_page_end = 1; 778 + int free_page_start = 1; 779 + 780 + BUG_ON(proc->buffers.next == &buffer->entry); 781 + prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); 782 + BUG_ON(!prev->free); 783 + if (buffer_end_page(prev) == buffer_start_page(buffer)) { 784 + free_page_start = 0; 785 + if (buffer_end_page(prev) == buffer_end_page(buffer)) 786 + free_page_end = 0; 787 + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) 788 + printk(KERN_INFO "binder: %d: merge free, buffer 
%p " 789 + "share page with %p\n", proc->pid, buffer, prev); 790 + } 791 + 792 + if (!list_is_last(&buffer->entry, &proc->buffers)) { 793 + next = list_entry(buffer->entry.next, 794 + struct binder_buffer, entry); 795 + if (buffer_start_page(next) == buffer_end_page(buffer)) { 796 + free_page_end = 0; 797 + if (buffer_start_page(next) == 798 + buffer_start_page(buffer)) 799 + free_page_start = 0; 800 + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) 801 + printk(KERN_INFO "binder: %d: merge free, " 802 + "buffer %p share page with %p\n", 803 + proc->pid, buffer, prev); 804 + } 805 + } 806 + list_del(&buffer->entry); 807 + if (free_page_start || free_page_end) { 808 + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) 809 + printk(KERN_INFO "binder: %d: merge free, buffer %p do " 810 + "not share page%s%s with with %p or %p\n", 811 + proc->pid, buffer, free_page_start ? "" : " end", 812 + free_page_end ? "" : " start", prev, next); 813 + binder_update_page_range(proc, 0, free_page_start ? 814 + buffer_start_page(buffer) : buffer_end_page(buffer), 815 + (free_page_end ? 
buffer_end_page(buffer) : 816 + buffer_start_page(buffer)) + PAGE_SIZE, NULL); 817 + } 818 + } 819 + 820 + static void binder_free_buf( 821 + struct binder_proc *proc, struct binder_buffer *buffer) 822 + { 823 + size_t size, buffer_size; 824 + 825 + buffer_size = binder_buffer_size(proc, buffer); 826 + 827 + size = ALIGN(buffer->data_size, sizeof(void *)) + 828 + ALIGN(buffer->offsets_size, sizeof(void *)); 829 + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) 830 + printk(KERN_INFO "binder: %d: binder_free_buf %p size %d buffer" 831 + "_size %d\n", proc->pid, buffer, size, buffer_size); 832 + 833 + BUG_ON(buffer->free); 834 + BUG_ON(size > buffer_size); 835 + BUG_ON(buffer->transaction != NULL); 836 + BUG_ON((void *)buffer < proc->buffer); 837 + BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); 838 + 839 + if (buffer->async_transaction) { 840 + proc->free_async_space += size + sizeof(struct binder_buffer); 841 + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC) 842 + printk(KERN_INFO "binder: %d: binder_free_buf size %d " 843 + "async free %d\n", proc->pid, size, 844 + proc->free_async_space); 845 + } 846 + 847 + binder_update_page_range(proc, 0, 848 + (void *)PAGE_ALIGN((size_t)buffer->data), 849 + (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK), 850 + NULL); 851 + rb_erase(&buffer->rb_node, &proc->allocated_buffers); 852 + buffer->free = 1; 853 + if (!list_is_last(&buffer->entry, &proc->buffers)) { 854 + struct binder_buffer *next = list_entry(buffer->entry.next, 855 + struct binder_buffer, entry); 856 + if (next->free) { 857 + rb_erase(&next->rb_node, &proc->free_buffers); 858 + binder_delete_free_buffer(proc, next); 859 + } 860 + } 861 + if (proc->buffers.next != &buffer->entry) { 862 + struct binder_buffer *prev = list_entry(buffer->entry.prev, 863 + struct binder_buffer, entry); 864 + if (prev->free) { 865 + binder_delete_free_buffer(proc, buffer); 866 + rb_erase(&prev->rb_node, &proc->free_buffers); 867 + buffer = prev; 868 + } 
	}
	binder_insert_free_buffer(proc, buffer);
}

/*
 * binder_get_node() - look up the node for a user-space binder object.
 * @proc:	owning process
 * @ptr:	user-space address identifying the binder object
 *
 * Walks proc->nodes (rb-tree keyed by the raw user pointer) and returns
 * the matching node, or NULL if no node exists for @ptr.
 */
static struct binder_node *
binder_get_node(struct binder_proc *proc, void __user *ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * binder_new_node() - allocate and insert a node for a binder object.
 * @proc:	owning process
 * @ptr:	user-space address identifying the binder object
 * @cookie:	opaque user-space cookie stored alongside @ptr
 *
 * NOTE: returns NULL both when a node for @ptr already exists and when
 * the allocation fails; callers that must distinguish the two cases
 * have to call binder_get_node() first.
 */
static struct binder_node *
binder_new_node(struct binder_proc *proc, void __user *ptr, void __user *cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;	/* duplicate: a node already exists */
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats.obj_created[BINDER_STAT_NODE]++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
		printk(KERN_INFO "binder: %d:%d node %d u%p c%p created\n",
		       proc->pid, current->pid, node->debug_id,
		       node->ptr, node->cookie);
	return node;
}

/*
 * binder_inc_node() - take a strong or weak reference on a node.
 * @node:	node to reference
 * @strong:	nonzero for a strong reference, zero for weak
 * @internal:	nonzero when the reference is held by a binder_ref
 *		(remote process) rather than by the local process
 * @target_list: work list to queue the node on when userspace still
 *		 has to be told about the new reference (may be NULL)
 *
 * Returns 0 on success or -EINVAL for an invalid increment (e.g. the
 * first internal strong ref with no list to deliver the work on).
 */
static int
binder_inc_node(struct binder_node *node, int strong, int internal,
		struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			/*
			 * The context manager node keeps has_strong_ref set,
			 * so it alone may gain internal strong refs without
			 * a target_list to notify.
			 */
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				printk(KERN_ERR "binder: invalid inc strong "
					"node for %d\n", node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			/* re-queue so userspace learns about the strong ref */
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				printk(KERN_ERR "binder: invalid inc weak node "
					"for %d\n", node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

/*
 * binder_dec_node() - drop a strong or weak reference on a node.
 *
 * When the last reference goes away the node is either queued on its
 * owning process's todo list (so userspace can drop its own refs) or,
 * if userspace holds no refs, unlinked and freed.  Always returns 0.
 */
static int
binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;	/* still strongly referenced */
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;	/* still weakly referenced */
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		/* userspace still holds refs: ask it to release them */
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
					printk(KERN_INFO "binder: refless node %d deleted\n", node->debug_id);
			} else {
				/* node outlived its process: on dead list */
				hlist_del(&node->dead_node);
				if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
					printk(KERN_INFO "binder: dead node %d deleted\n", node->debug_id);
			}
			kfree(node);
			binder_stats.obj_deleted[BINDER_STAT_NODE]++;
		}
	}

	return 0;
}


/*
 * binder_get_ref() - look up @proc's reference with descriptor @desc
 * in the refs_by_desc rb-tree.  Returns NULL if no such ref exists.
 */
static struct binder_ref *
binder_get_ref(struct binder_proc *proc, uint32_t desc)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc)
			n = n->rb_left;
		else if (desc > ref->desc)
			n = n->rb_right;
		else
			return ref;
	}
	return NULL;
}

/*
 * binder_get_ref_for_node() - find or create @proc's reference to @node.
 *
 * Looks @node up in refs_by_node; if absent, allocates a new ref and
 * assigns it the lowest unused descriptor (0 is reserved for the
 * context manager).  Returns the ref, or NULL on allocation failure.
 */
static struct binder_ref *
binder_get_ref_for_node(struct binder_proc *proc, struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats.obj_created[BINDER_STAT_REF]++;
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/* pick the smallest free descriptor: scan refs in desc order */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;	/* found a gap */
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();	/* descriptor chosen above must be unused */
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);
		if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
			printk(KERN_INFO "binder: %d new ref %d desc %d for "
				"node %d\n", proc->pid, new_ref->debug_id,
				new_ref->desc, node->debug_id);
	} else {
		if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
			printk(KERN_INFO "binder: %d new ref %d desc %d for "
				"dead node\n", proc->pid, new_ref->debug_id,
				new_ref->desc);
	}
	return new_ref;
}

/*
 * binder_delete_ref() - unlink and free a reference.
 *
 * Drops the strong ref (if held) and the implicit weak ref on the node,
 * and discards any pending death notification.
 */
static void
binder_delete_ref(struct binder_ref *ref)
{
	if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
		printk(KERN_INFO "binder: %d delete ref %d desc %d for "
			"node %d\n", ref->proc->pid, ref->debug_id,
			ref->desc, ref->node->debug_id);
	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
			printk(KERN_INFO "binder: %d delete ref %d desc %d "
				"has death notification\n", ref->proc->pid,
				ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats.obj_deleted[BINDER_STAT_DEATH]++;
	}
	kfree(ref);
	binder_stats.obj_deleted[BINDER_STAT_REF]++;
}

/*
 * binder_inc_ref() - increment a reference's strong or weak count,
 * taking the matching node reference on the 0 -> 1 transition.
 * Returns 0 on success or the error from binder_inc_node().
 */
static int
binder_inc_ref(
	struct binder_ref *ref, int strong, struct list_head *target_list)
{
	int ret;
	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}


/*
 * binder_dec_ref() - decrement a reference's strong or weak count.
 *
 * Rejects decrements below zero with -EINVAL.  Deletes the ref (and
 * thus its node references) when both counts reach zero.
 */
static int
binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("binder: %d invalid dec strong, "
					  "ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;
			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("binder: %d invalid dec weak, "
					  "ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}

/*
 * binder_pop_transaction() - unlink @t from @target_thread's transaction
 * stack (if any) and free it.  The transaction's buffer, if still
 * attached, is detached but not freed here.
 */
static void
binder_pop_transaction(
	struct binder_thread *target_thread, struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
}

/*
 * binder_send_failed_reply() - deliver @error_code to the sender of a
 * failed transaction, walking up the from_parent chain when the direct
 * sender thread is already gone.
 */
static void
binder_send_failed_reply(struct binder_transaction *t, uint32_t error_code)
{
	struct binder_thread *target_thread;
	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				/* stash the older error in the second slot */
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
					printk(KERN_INFO "binder: send failed reply for transaction %d to %d:%d\n",
					       t->debug_id, target_thread->proc->pid, target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				printk(KERN_ERR "binder: reply failed, target "
					"thread, %d:%d, has error code %d "
					"already\n", target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		} else {
			struct binder_transaction *next = t->from_parent;

			if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
				printk(KERN_INFO "binder: send failed reply "
					"for transaction %d, target dead\n",
					t->debug_id);

			binder_pop_transaction(target_thread, t);
			if (next == NULL) {
				if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
					printk(KERN_INFO "binder: reply failed,"
						" no target thread at root\n");
				return;
			}
			t = next;
1242 + if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) 1243 + printk(KERN_INFO "binder: reply failed, no targ" 1244 + "et thread -- retry %d\n", t->debug_id); 1245 + } 1246 + } 1247 + } 1248 + 1249 + static void 1250 + binder_transaction_buffer_release(struct binder_proc *proc, 1251 + struct binder_buffer *buffer, size_t *failed_at); 1252 + 1253 + static void 1254 + binder_transaction(struct binder_proc *proc, struct binder_thread *thread, 1255 + struct binder_transaction_data *tr, int reply) 1256 + { 1257 + struct binder_transaction *t; 1258 + struct binder_work *tcomplete; 1259 + size_t *offp, *off_end; 1260 + struct binder_proc *target_proc; 1261 + struct binder_thread *target_thread = NULL; 1262 + struct binder_node *target_node = NULL; 1263 + struct list_head *target_list; 1264 + wait_queue_head_t *target_wait; 1265 + struct binder_transaction *in_reply_to = NULL; 1266 + struct binder_transaction_log_entry *e; 1267 + uint32_t return_error; 1268 + 1269 + e = binder_transaction_log_add(&binder_transaction_log); 1270 + e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 1271 + e->from_proc = proc->pid; 1272 + e->from_thread = thread->pid; 1273 + e->target_handle = tr->target.handle; 1274 + e->data_size = tr->data_size; 1275 + e->offsets_size = tr->offsets_size; 1276 + 1277 + if (reply) { 1278 + in_reply_to = thread->transaction_stack; 1279 + if (in_reply_to == NULL) { 1280 + binder_user_error("binder: %d:%d got reply transaction " 1281 + "with no transaction stack\n", 1282 + proc->pid, thread->pid); 1283 + return_error = BR_FAILED_REPLY; 1284 + goto err_empty_call_stack; 1285 + } 1286 + binder_set_nice(in_reply_to->saved_priority); 1287 + if (in_reply_to->to_thread != thread) { 1288 + binder_user_error("binder: %d:%d got reply transaction " 1289 + "with bad transaction stack," 1290 + " transaction %d has target %d:%d\n", 1291 + proc->pid, thread->pid, in_reply_to->debug_id, 1292 + in_reply_to->to_proc ? 
1293 + in_reply_to->to_proc->pid : 0, 1294 + in_reply_to->to_thread ? 1295 + in_reply_to->to_thread->pid : 0); 1296 + return_error = BR_FAILED_REPLY; 1297 + in_reply_to = NULL; 1298 + goto err_bad_call_stack; 1299 + } 1300 + thread->transaction_stack = in_reply_to->to_parent; 1301 + target_thread = in_reply_to->from; 1302 + if (target_thread == NULL) { 1303 + return_error = BR_DEAD_REPLY; 1304 + goto err_dead_binder; 1305 + } 1306 + if (target_thread->transaction_stack != in_reply_to) { 1307 + binder_user_error("binder: %d:%d got reply transaction " 1308 + "with bad target transaction stack %d, " 1309 + "expected %d\n", 1310 + proc->pid, thread->pid, 1311 + target_thread->transaction_stack ? 1312 + target_thread->transaction_stack->debug_id : 0, 1313 + in_reply_to->debug_id); 1314 + return_error = BR_FAILED_REPLY; 1315 + in_reply_to = NULL; 1316 + target_thread = NULL; 1317 + goto err_dead_binder; 1318 + } 1319 + target_proc = target_thread->proc; 1320 + } else { 1321 + if (tr->target.handle) { 1322 + struct binder_ref *ref; 1323 + ref = binder_get_ref(proc, tr->target.handle); 1324 + if (ref == NULL) { 1325 + binder_user_error("binder: %d:%d got " 1326 + "transaction to invalid handle\n", 1327 + proc->pid, thread->pid); 1328 + return_error = BR_FAILED_REPLY; 1329 + goto err_invalid_target_handle; 1330 + } 1331 + target_node = ref->node; 1332 + } else { 1333 + target_node = binder_context_mgr_node; 1334 + if (target_node == NULL) { 1335 + return_error = BR_DEAD_REPLY; 1336 + goto err_no_context_mgr_node; 1337 + } 1338 + } 1339 + e->to_node = target_node->debug_id; 1340 + target_proc = target_node->proc; 1341 + if (target_proc == NULL) { 1342 + return_error = BR_DEAD_REPLY; 1343 + goto err_dead_binder; 1344 + } 1345 + if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 1346 + struct binder_transaction *tmp; 1347 + tmp = thread->transaction_stack; 1348 + while (tmp) { 1349 + if (tmp->from && tmp->from->proc == target_proc) 1350 + target_thread = tmp->from; 
1351 + tmp = tmp->from_parent; 1352 + } 1353 + } 1354 + } 1355 + if (target_thread) { 1356 + e->to_thread = target_thread->pid; 1357 + target_list = &target_thread->todo; 1358 + target_wait = &target_thread->wait; 1359 + } else { 1360 + target_list = &target_proc->todo; 1361 + target_wait = &target_proc->wait; 1362 + } 1363 + e->to_proc = target_proc->pid; 1364 + 1365 + /* TODO: reuse incoming transaction for reply */ 1366 + t = kzalloc(sizeof(*t), GFP_KERNEL); 1367 + if (t == NULL) { 1368 + return_error = BR_FAILED_REPLY; 1369 + goto err_alloc_t_failed; 1370 + } 1371 + binder_stats.obj_created[BINDER_STAT_TRANSACTION]++; 1372 + 1373 + tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 1374 + if (tcomplete == NULL) { 1375 + return_error = BR_FAILED_REPLY; 1376 + goto err_alloc_tcomplete_failed; 1377 + } 1378 + binder_stats.obj_created[BINDER_STAT_TRANSACTION_COMPLETE]++; 1379 + 1380 + t->debug_id = ++binder_last_id; 1381 + e->debug_id = t->debug_id; 1382 + 1383 + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) { 1384 + if (reply) 1385 + printk(KERN_INFO "binder: %d:%d BC_REPLY %d -> %d:%d, " 1386 + "data %p-%p size %d-%d\n", 1387 + proc->pid, thread->pid, t->debug_id, 1388 + target_proc->pid, target_thread->pid, 1389 + tr->data.ptr.buffer, tr->data.ptr.offsets, 1390 + tr->data_size, tr->offsets_size); 1391 + else 1392 + printk(KERN_INFO "binder: %d:%d BC_TRANSACTION %d -> " 1393 + "%d - node %d, data %p-%p size %d-%d\n", 1394 + proc->pid, thread->pid, t->debug_id, 1395 + target_proc->pid, target_node->debug_id, 1396 + tr->data.ptr.buffer, tr->data.ptr.offsets, 1397 + tr->data_size, tr->offsets_size); 1398 + } 1399 + 1400 + if (!reply && !(tr->flags & TF_ONE_WAY)) 1401 + t->from = thread; 1402 + else 1403 + t->from = NULL; 1404 + t->sender_euid = proc->tsk->euid; 1405 + t->to_proc = target_proc; 1406 + t->to_thread = target_thread; 1407 + t->code = tr->code; 1408 + t->flags = tr->flags; 1409 + t->priority = task_nice(current); 1410 + t->buffer = 
binder_alloc_buf(target_proc, tr->data_size, 1411 + tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); 1412 + if (t->buffer == NULL) { 1413 + return_error = BR_FAILED_REPLY; 1414 + goto err_binder_alloc_buf_failed; 1415 + } 1416 + t->buffer->allow_user_free = 0; 1417 + t->buffer->debug_id = t->debug_id; 1418 + t->buffer->transaction = t; 1419 + t->buffer->target_node = target_node; 1420 + if (target_node) 1421 + binder_inc_node(target_node, 1, 0, NULL); 1422 + 1423 + offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); 1424 + 1425 + if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) { 1426 + binder_user_error("binder: %d:%d got transaction with invalid " 1427 + "data ptr\n", proc->pid, thread->pid); 1428 + return_error = BR_FAILED_REPLY; 1429 + goto err_copy_data_failed; 1430 + } 1431 + if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) { 1432 + binder_user_error("binder: %d:%d got transaction with invalid " 1433 + "offsets ptr\n", proc->pid, thread->pid); 1434 + return_error = BR_FAILED_REPLY; 1435 + goto err_copy_data_failed; 1436 + } 1437 + off_end = (void *)offp + tr->offsets_size; 1438 + for (; offp < off_end; offp++) { 1439 + struct flat_binder_object *fp; 1440 + if (*offp > t->buffer->data_size - sizeof(*fp)) { 1441 + binder_user_error("binder: %d:%d got transaction with " 1442 + "invalid offset, %d\n", 1443 + proc->pid, thread->pid, *offp); 1444 + return_error = BR_FAILED_REPLY; 1445 + goto err_bad_offset; 1446 + } 1447 + fp = (struct flat_binder_object *)(t->buffer->data + *offp); 1448 + switch (fp->type) { 1449 + case BINDER_TYPE_BINDER: 1450 + case BINDER_TYPE_WEAK_BINDER: { 1451 + struct binder_ref *ref; 1452 + struct binder_node *node = binder_get_node(proc, fp->binder); 1453 + if (node == NULL) { 1454 + node = binder_new_node(proc, fp->binder, fp->cookie); 1455 + if (node == NULL) { 1456 + return_error = BR_FAILED_REPLY; 1457 + goto err_binder_new_node_failed; 1458 + } 1459 + 
node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; 1460 + node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); 1461 + } 1462 + if (fp->cookie != node->cookie) { 1463 + binder_user_error("binder: %d:%d sending u%p " 1464 + "node %d, cookie mismatch %p != %p\n", 1465 + proc->pid, thread->pid, 1466 + fp->binder, node->debug_id, 1467 + fp->cookie, node->cookie); 1468 + goto err_binder_get_ref_for_node_failed; 1469 + } 1470 + ref = binder_get_ref_for_node(target_proc, node); 1471 + if (ref == NULL) { 1472 + return_error = BR_FAILED_REPLY; 1473 + goto err_binder_get_ref_for_node_failed; 1474 + } 1475 + if (fp->type == BINDER_TYPE_BINDER) 1476 + fp->type = BINDER_TYPE_HANDLE; 1477 + else 1478 + fp->type = BINDER_TYPE_WEAK_HANDLE; 1479 + fp->handle = ref->desc; 1480 + binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo); 1481 + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1482 + printk(KERN_INFO " node %d u%p -> ref %d desc %d\n", 1483 + node->debug_id, node->ptr, ref->debug_id, ref->desc); 1484 + } break; 1485 + case BINDER_TYPE_HANDLE: 1486 + case BINDER_TYPE_WEAK_HANDLE: { 1487 + struct binder_ref *ref = binder_get_ref(proc, fp->handle); 1488 + if (ref == NULL) { 1489 + binder_user_error("binder: %d:%d got " 1490 + "transaction with invalid " 1491 + "handle, %ld\n", proc->pid, 1492 + thread->pid, fp->handle); 1493 + return_error = BR_FAILED_REPLY; 1494 + goto err_binder_get_ref_failed; 1495 + } 1496 + if (ref->node->proc == target_proc) { 1497 + if (fp->type == BINDER_TYPE_HANDLE) 1498 + fp->type = BINDER_TYPE_BINDER; 1499 + else 1500 + fp->type = BINDER_TYPE_WEAK_BINDER; 1501 + fp->binder = ref->node->ptr; 1502 + fp->cookie = ref->node->cookie; 1503 + binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); 1504 + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1505 + printk(KERN_INFO " ref %d desc %d -> node %d u%p\n", 1506 + ref->debug_id, ref->desc, ref->node->debug_id, ref->node->ptr); 1507 + } else { 1508 
+ struct binder_ref *new_ref; 1509 + new_ref = binder_get_ref_for_node(target_proc, ref->node); 1510 + if (new_ref == NULL) { 1511 + return_error = BR_FAILED_REPLY; 1512 + goto err_binder_get_ref_for_node_failed; 1513 + } 1514 + fp->handle = new_ref->desc; 1515 + binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); 1516 + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1517 + printk(KERN_INFO " ref %d desc %d -> ref %d desc %d (node %d)\n", 1518 + ref->debug_id, ref->desc, new_ref->debug_id, new_ref->desc, ref->node->debug_id); 1519 + } 1520 + } break; 1521 + 1522 + case BINDER_TYPE_FD: { 1523 + int target_fd; 1524 + struct file *file; 1525 + 1526 + if (reply) { 1527 + if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { 1528 + binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n", 1529 + proc->pid, thread->pid, fp->handle); 1530 + return_error = BR_FAILED_REPLY; 1531 + goto err_fd_not_allowed; 1532 + } 1533 + } else if (!target_node->accept_fds) { 1534 + binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n", 1535 + proc->pid, thread->pid, fp->handle); 1536 + return_error = BR_FAILED_REPLY; 1537 + goto err_fd_not_allowed; 1538 + } 1539 + 1540 + file = fget(fp->handle); 1541 + if (file == NULL) { 1542 + binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n", 1543 + proc->pid, thread->pid, fp->handle); 1544 + return_error = BR_FAILED_REPLY; 1545 + goto err_fget_failed; 1546 + } 1547 + target_fd = task_get_unused_fd_flags(target_proc->tsk, O_CLOEXEC); 1548 + if (target_fd < 0) { 1549 + fput(file); 1550 + return_error = BR_FAILED_REPLY; 1551 + goto err_get_unused_fd_failed; 1552 + } 1553 + task_fd_install(target_proc->tsk, target_fd, file); 1554 + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1555 + printk(KERN_INFO " fd %ld -> %d\n", fp->handle, target_fd); 1556 + /* TODO: fput? 
*/ 1557 + fp->handle = target_fd; 1558 + } break; 1559 + 1560 + default: 1561 + binder_user_error("binder: %d:%d got transactio" 1562 + "n with invalid object type, %lx\n", 1563 + proc->pid, thread->pid, fp->type); 1564 + return_error = BR_FAILED_REPLY; 1565 + goto err_bad_object_type; 1566 + } 1567 + } 1568 + if (reply) { 1569 + BUG_ON(t->buffer->async_transaction != 0); 1570 + binder_pop_transaction(target_thread, in_reply_to); 1571 + } else if (!(t->flags & TF_ONE_WAY)) { 1572 + BUG_ON(t->buffer->async_transaction != 0); 1573 + t->need_reply = 1; 1574 + t->from_parent = thread->transaction_stack; 1575 + thread->transaction_stack = t; 1576 + } else { 1577 + BUG_ON(target_node == NULL); 1578 + BUG_ON(t->buffer->async_transaction != 1); 1579 + if (target_node->has_async_transaction) { 1580 + target_list = &target_node->async_todo; 1581 + target_wait = NULL; 1582 + } else 1583 + target_node->has_async_transaction = 1; 1584 + } 1585 + t->work.type = BINDER_WORK_TRANSACTION; 1586 + list_add_tail(&t->work.entry, target_list); 1587 + tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 1588 + list_add_tail(&tcomplete->entry, &thread->todo); 1589 + if (target_wait) 1590 + wake_up_interruptible(target_wait); 1591 + return; 1592 + 1593 + err_get_unused_fd_failed: 1594 + err_fget_failed: 1595 + err_fd_not_allowed: 1596 + err_binder_get_ref_for_node_failed: 1597 + err_binder_get_ref_failed: 1598 + err_binder_new_node_failed: 1599 + err_bad_object_type: 1600 + err_bad_offset: 1601 + err_copy_data_failed: 1602 + binder_transaction_buffer_release(target_proc, t->buffer, offp); 1603 + t->buffer->transaction = NULL; 1604 + binder_free_buf(target_proc, t->buffer); 1605 + err_binder_alloc_buf_failed: 1606 + kfree(tcomplete); 1607 + binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++; 1608 + err_alloc_tcomplete_failed: 1609 + kfree(t); 1610 + binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++; 1611 + err_alloc_t_failed: 1612 + err_bad_call_stack: 1613 + 
err_empty_call_stack: 1614 + err_dead_binder: 1615 + err_invalid_target_handle: 1616 + err_no_context_mgr_node: 1617 + if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION) 1618 + printk(KERN_INFO "binder: %d:%d transaction failed %d, size %d-%d\n", 1619 + proc->pid, thread->pid, return_error, 1620 + tr->data_size, tr->offsets_size); 1621 + 1622 + { 1623 + struct binder_transaction_log_entry *fe; 1624 + fe = binder_transaction_log_add(&binder_transaction_log_failed); 1625 + *fe = *e; 1626 + } 1627 + 1628 + BUG_ON(thread->return_error != BR_OK); 1629 + if (in_reply_to) { 1630 + thread->return_error = BR_TRANSACTION_COMPLETE; 1631 + binder_send_failed_reply(in_reply_to, return_error); 1632 + } else 1633 + thread->return_error = return_error; 1634 + } 1635 + 1636 + static void 1637 + binder_transaction_buffer_release(struct binder_proc *proc, struct binder_buffer *buffer, size_t *failed_at) 1638 + { 1639 + size_t *offp, *off_end; 1640 + int debug_id = buffer->debug_id; 1641 + 1642 + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1643 + printk(KERN_INFO "binder: %d buffer release %d, size %d-%d, failed at %p\n", 1644 + proc->pid, buffer->debug_id, 1645 + buffer->data_size, buffer->offsets_size, failed_at); 1646 + 1647 + if (buffer->target_node) 1648 + binder_dec_node(buffer->target_node, 1, 0); 1649 + 1650 + offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *))); 1651 + if (failed_at) 1652 + off_end = failed_at; 1653 + else 1654 + off_end = (void *)offp + buffer->offsets_size; 1655 + for (; offp < off_end; offp++) { 1656 + struct flat_binder_object *fp; 1657 + if (*offp > buffer->data_size - sizeof(*fp)) { 1658 + printk(KERN_ERR "binder: transaction release %d bad offset %d, size %d\n", debug_id, *offp, buffer->data_size); 1659 + continue; 1660 + } 1661 + fp = (struct flat_binder_object *)(buffer->data + *offp); 1662 + switch (fp->type) { 1663 + case BINDER_TYPE_BINDER: 1664 + case BINDER_TYPE_WEAK_BINDER: { 1665 + struct binder_node *node = 
binder_get_node(proc, fp->binder); 1666 + if (node == NULL) { 1667 + printk(KERN_ERR "binder: transaction release %d bad node %p\n", debug_id, fp->binder); 1668 + break; 1669 + } 1670 + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1671 + printk(KERN_INFO " node %d u%p\n", 1672 + node->debug_id, node->ptr); 1673 + binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); 1674 + } break; 1675 + case BINDER_TYPE_HANDLE: 1676 + case BINDER_TYPE_WEAK_HANDLE: { 1677 + struct binder_ref *ref = binder_get_ref(proc, fp->handle); 1678 + if (ref == NULL) { 1679 + printk(KERN_ERR "binder: transaction release %d bad handle %ld\n", debug_id, fp->handle); 1680 + break; 1681 + } 1682 + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1683 + printk(KERN_INFO " ref %d desc %d (node %d)\n", 1684 + ref->debug_id, ref->desc, ref->node->debug_id); 1685 + binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); 1686 + } break; 1687 + 1688 + case BINDER_TYPE_FD: 1689 + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) 1690 + printk(KERN_INFO " fd %ld\n", fp->handle); 1691 + if (failed_at) 1692 + task_close_fd(proc->tsk, fp->handle); 1693 + break; 1694 + 1695 + default: 1696 + printk(KERN_ERR "binder: transaction release %d bad object type %lx\n", debug_id, fp->type); 1697 + break; 1698 + } 1699 + } 1700 + } 1701 + 1702 + int 1703 + binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, 1704 + void __user *buffer, int size, signed long *consumed) 1705 + { 1706 + uint32_t cmd; 1707 + void __user *ptr = buffer + *consumed; 1708 + void __user *end = buffer + size; 1709 + 1710 + while (ptr < end && thread->return_error == BR_OK) { 1711 + if (get_user(cmd, (uint32_t __user *)ptr)) 1712 + return -EFAULT; 1713 + ptr += sizeof(uint32_t); 1714 + if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 1715 + binder_stats.bc[_IOC_NR(cmd)]++; 1716 + proc->stats.bc[_IOC_NR(cmd)]++; 1717 + thread->stats.bc[_IOC_NR(cmd)]++; 1718 + } 1719 + switch (cmd) { 1720 + case BC_INCREFS: 1721 + 
case BC_ACQUIRE: 1722 + case BC_RELEASE: 1723 + case BC_DECREFS: { 1724 + uint32_t target; 1725 + struct binder_ref *ref; 1726 + const char *debug_string; 1727 + 1728 + if (get_user(target, (uint32_t __user *)ptr)) 1729 + return -EFAULT; 1730 + ptr += sizeof(uint32_t); 1731 + if (target == 0 && binder_context_mgr_node && 1732 + (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { 1733 + ref = binder_get_ref_for_node(proc, 1734 + binder_context_mgr_node); 1735 + if (ref->desc != target) { 1736 + binder_user_error("binder: %d:" 1737 + "%d tried to acquire " 1738 + "reference to desc 0, " 1739 + "got %d instead\n", 1740 + proc->pid, thread->pid, 1741 + ref->desc); 1742 + } 1743 + } else 1744 + ref = binder_get_ref(proc, target); 1745 + if (ref == NULL) { 1746 + binder_user_error("binder: %d:%d refcou" 1747 + "nt change on invalid ref %d\n", 1748 + proc->pid, thread->pid, target); 1749 + break; 1750 + } 1751 + switch (cmd) { 1752 + case BC_INCREFS: 1753 + debug_string = "IncRefs"; 1754 + binder_inc_ref(ref, 0, NULL); 1755 + break; 1756 + case BC_ACQUIRE: 1757 + debug_string = "Acquire"; 1758 + binder_inc_ref(ref, 1, NULL); 1759 + break; 1760 + case BC_RELEASE: 1761 + debug_string = "Release"; 1762 + binder_dec_ref(ref, 1); 1763 + break; 1764 + case BC_DECREFS: 1765 + default: 1766 + debug_string = "DecRefs"; 1767 + binder_dec_ref(ref, 0); 1768 + break; 1769 + } 1770 + if (binder_debug_mask & BINDER_DEBUG_USER_REFS) 1771 + printk(KERN_INFO "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n", 1772 + proc->pid, thread->pid, debug_string, ref->debug_id, ref->desc, ref->strong, ref->weak, ref->node->debug_id); 1773 + break; 1774 + } 1775 + case BC_INCREFS_DONE: 1776 + case BC_ACQUIRE_DONE: { 1777 + void __user *node_ptr; 1778 + void *cookie; 1779 + struct binder_node *node; 1780 + 1781 + if (get_user(node_ptr, (void * __user *)ptr)) 1782 + return -EFAULT; 1783 + ptr += sizeof(void *); 1784 + if (get_user(cookie, (void * __user *)ptr)) 1785 + return -EFAULT; 1786 + ptr += 
sizeof(void *); 1787 + node = binder_get_node(proc, node_ptr); 1788 + if (node == NULL) { 1789 + binder_user_error("binder: %d:%d " 1790 + "%s u%p no match\n", 1791 + proc->pid, thread->pid, 1792 + cmd == BC_INCREFS_DONE ? 1793 + "BC_INCREFS_DONE" : 1794 + "BC_ACQUIRE_DONE", 1795 + node_ptr); 1796 + break; 1797 + } 1798 + if (cookie != node->cookie) { 1799 + binder_user_error("binder: %d:%d %s u%p node %d" 1800 + " cookie mismatch %p != %p\n", 1801 + proc->pid, thread->pid, 1802 + cmd == BC_INCREFS_DONE ? 1803 + "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 1804 + node_ptr, node->debug_id, 1805 + cookie, node->cookie); 1806 + break; 1807 + } 1808 + if (cmd == BC_ACQUIRE_DONE) { 1809 + if (node->pending_strong_ref == 0) { 1810 + binder_user_error("binder: %d:%d " 1811 + "BC_ACQUIRE_DONE node %d has " 1812 + "no pending acquire request\n", 1813 + proc->pid, thread->pid, 1814 + node->debug_id); 1815 + break; 1816 + } 1817 + node->pending_strong_ref = 0; 1818 + } else { 1819 + if (node->pending_weak_ref == 0) { 1820 + binder_user_error("binder: %d:%d " 1821 + "BC_INCREFS_DONE node %d has " 1822 + "no pending increfs request\n", 1823 + proc->pid, thread->pid, 1824 + node->debug_id); 1825 + break; 1826 + } 1827 + node->pending_weak_ref = 0; 1828 + } 1829 + binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); 1830 + if (binder_debug_mask & BINDER_DEBUG_USER_REFS) 1831 + printk(KERN_INFO "binder: %d:%d %s node %d ls %d lw %d\n", 1832 + proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? 
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", node->debug_id, node->local_strong_refs, node->local_weak_refs); 1833 + break; 1834 + } 1835 + case BC_ATTEMPT_ACQUIRE: 1836 + printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n"); 1837 + return -EINVAL; 1838 + case BC_ACQUIRE_RESULT: 1839 + printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n"); 1840 + return -EINVAL; 1841 + 1842 + case BC_FREE_BUFFER: { 1843 + void __user *data_ptr; 1844 + struct binder_buffer *buffer; 1845 + 1846 + if (get_user(data_ptr, (void * __user *)ptr)) 1847 + return -EFAULT; 1848 + ptr += sizeof(void *); 1849 + 1850 + buffer = binder_buffer_lookup(proc, data_ptr); 1851 + if (buffer == NULL) { 1852 + binder_user_error("binder: %d:%d " 1853 + "BC_FREE_BUFFER u%p no match\n", 1854 + proc->pid, thread->pid, data_ptr); 1855 + break; 1856 + } 1857 + if (!buffer->allow_user_free) { 1858 + binder_user_error("binder: %d:%d " 1859 + "BC_FREE_BUFFER u%p matched " 1860 + "unreturned buffer\n", 1861 + proc->pid, thread->pid, data_ptr); 1862 + break; 1863 + } 1864 + if (binder_debug_mask & BINDER_DEBUG_FREE_BUFFER) 1865 + printk(KERN_INFO "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n", 1866 + proc->pid, thread->pid, data_ptr, buffer->debug_id, 1867 + buffer->transaction ? 
"active" : "finished"); 1868 + 1869 + if (buffer->transaction) { 1870 + buffer->transaction->buffer = NULL; 1871 + buffer->transaction = NULL; 1872 + } 1873 + if (buffer->async_transaction && buffer->target_node) { 1874 + BUG_ON(!buffer->target_node->has_async_transaction); 1875 + if (list_empty(&buffer->target_node->async_todo)) 1876 + buffer->target_node->has_async_transaction = 0; 1877 + else 1878 + list_move_tail(buffer->target_node->async_todo.next, &thread->todo); 1879 + } 1880 + binder_transaction_buffer_release(proc, buffer, NULL); 1881 + binder_free_buf(proc, buffer); 1882 + break; 1883 + } 1884 + 1885 + case BC_TRANSACTION: 1886 + case BC_REPLY: { 1887 + struct binder_transaction_data tr; 1888 + 1889 + if (copy_from_user(&tr, ptr, sizeof(tr))) 1890 + return -EFAULT; 1891 + ptr += sizeof(tr); 1892 + binder_transaction(proc, thread, &tr, cmd == BC_REPLY); 1893 + break; 1894 + } 1895 + 1896 + case BC_REGISTER_LOOPER: 1897 + if (binder_debug_mask & BINDER_DEBUG_THREADS) 1898 + printk(KERN_INFO "binder: %d:%d BC_REGISTER_LOOPER\n", 1899 + proc->pid, thread->pid); 1900 + if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 1901 + thread->looper |= BINDER_LOOPER_STATE_INVALID; 1902 + binder_user_error("binder: %d:%d ERROR:" 1903 + " BC_REGISTER_LOOPER called " 1904 + "after BC_ENTER_LOOPER\n", 1905 + proc->pid, thread->pid); 1906 + } else if (proc->requested_threads == 0) { 1907 + thread->looper |= BINDER_LOOPER_STATE_INVALID; 1908 + binder_user_error("binder: %d:%d ERROR:" 1909 + " BC_REGISTER_LOOPER called " 1910 + "without request\n", 1911 + proc->pid, thread->pid); 1912 + } else { 1913 + proc->requested_threads--; 1914 + proc->requested_threads_started++; 1915 + } 1916 + thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 1917 + break; 1918 + case BC_ENTER_LOOPER: 1919 + if (binder_debug_mask & BINDER_DEBUG_THREADS) 1920 + printk(KERN_INFO "binder: %d:%d BC_ENTER_LOOPER\n", 1921 + proc->pid, thread->pid); 1922 + if (thread->looper & 
BINDER_LOOPER_STATE_REGISTERED) { 1923 + thread->looper |= BINDER_LOOPER_STATE_INVALID; 1924 + binder_user_error("binder: %d:%d ERROR:" 1925 + " BC_ENTER_LOOPER called after " 1926 + "BC_REGISTER_LOOPER\n", 1927 + proc->pid, thread->pid); 1928 + } 1929 + thread->looper |= BINDER_LOOPER_STATE_ENTERED; 1930 + break; 1931 + case BC_EXIT_LOOPER: 1932 + if (binder_debug_mask & BINDER_DEBUG_THREADS) 1933 + printk(KERN_INFO "binder: %d:%d BC_EXIT_LOOPER\n", 1934 + proc->pid, thread->pid); 1935 + thread->looper |= BINDER_LOOPER_STATE_EXITED; 1936 + break; 1937 + 1938 + case BC_REQUEST_DEATH_NOTIFICATION: 1939 + case BC_CLEAR_DEATH_NOTIFICATION: { 1940 + uint32_t target; 1941 + void __user *cookie; 1942 + struct binder_ref *ref; 1943 + struct binder_ref_death *death; 1944 + 1945 + if (get_user(target, (uint32_t __user *)ptr)) 1946 + return -EFAULT; 1947 + ptr += sizeof(uint32_t); 1948 + if (get_user(cookie, (void __user * __user *)ptr)) 1949 + return -EFAULT; 1950 + ptr += sizeof(void *); 1951 + ref = binder_get_ref(proc, target); 1952 + if (ref == NULL) { 1953 + binder_user_error("binder: %d:%d %s " 1954 + "invalid ref %d\n", 1955 + proc->pid, thread->pid, 1956 + cmd == BC_REQUEST_DEATH_NOTIFICATION ? 1957 + "BC_REQUEST_DEATH_NOTIFICATION" : 1958 + "BC_CLEAR_DEATH_NOTIFICATION", 1959 + target); 1960 + break; 1961 + } 1962 + 1963 + if (binder_debug_mask & BINDER_DEBUG_DEATH_NOTIFICATION) 1964 + printk(KERN_INFO "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n", 1965 + proc->pid, thread->pid, 1966 + cmd == BC_REQUEST_DEATH_NOTIFICATION ? 
1967 + "BC_REQUEST_DEATH_NOTIFICATION" : 1968 + "BC_CLEAR_DEATH_NOTIFICATION", 1969 + cookie, ref->debug_id, ref->desc, 1970 + ref->strong, ref->weak, ref->node->debug_id); 1971 + 1972 + if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 1973 + if (ref->death) { 1974 + binder_user_error("binder: %d:%" 1975 + "d BC_REQUEST_DEATH_NOTI" 1976 + "FICATION death notific" 1977 + "ation already set\n", 1978 + proc->pid, thread->pid); 1979 + break; 1980 + } 1981 + death = kzalloc(sizeof(*death), GFP_KERNEL); 1982 + if (death == NULL) { 1983 + thread->return_error = BR_ERROR; 1984 + if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION) 1985 + printk(KERN_INFO "binder: %d:%d " 1986 + "BC_REQUEST_DEATH_NOTIFICATION failed\n", 1987 + proc->pid, thread->pid); 1988 + break; 1989 + } 1990 + binder_stats.obj_created[BINDER_STAT_DEATH]++; 1991 + INIT_LIST_HEAD(&death->work.entry); 1992 + death->cookie = cookie; 1993 + ref->death = death; 1994 + if (ref->node->proc == NULL) { 1995 + ref->death->work.type = BINDER_WORK_DEAD_BINDER; 1996 + if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 1997 + list_add_tail(&ref->death->work.entry, &thread->todo); 1998 + } else { 1999 + list_add_tail(&ref->death->work.entry, &proc->todo); 2000 + wake_up_interruptible(&proc->wait); 2001 + } 2002 + } 2003 + } else { 2004 + if (ref->death == NULL) { 2005 + binder_user_error("binder: %d:%" 2006 + "d BC_CLEAR_DEATH_NOTIFI" 2007 + "CATION death notificat" 2008 + "ion not active\n", 2009 + proc->pid, thread->pid); 2010 + break; 2011 + } 2012 + death = ref->death; 2013 + if (death->cookie != cookie) { 2014 + binder_user_error("binder: %d:%" 2015 + "d BC_CLEAR_DEATH_NOTIFI" 2016 + "CATION death notificat" 2017 + "ion cookie mismatch " 2018 + "%p != %p\n", 2019 + proc->pid, thread->pid, 2020 + death->cookie, cookie); 2021 + break; 2022 + } 2023 + ref->death = NULL; 2024 + if (list_empty(&death->work.entry)) { 2025 + death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2026 
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2027 + list_add_tail(&death->work.entry, &thread->todo); 2028 + } else { 2029 + list_add_tail(&death->work.entry, &proc->todo); 2030 + wake_up_interruptible(&proc->wait); 2031 + } 2032 + } else { 2033 + BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); 2034 + death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 2035 + } 2036 + } 2037 + } break; 2038 + case BC_DEAD_BINDER_DONE: { 2039 + struct binder_work *w; 2040 + void __user *cookie; 2041 + struct binder_ref_death *death = NULL; 2042 + if (get_user(cookie, (void __user * __user *)ptr)) 2043 + return -EFAULT; 2044 + 2045 + ptr += sizeof(void *); 2046 + list_for_each_entry(w, &proc->delivered_death, entry) { 2047 + struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); 2048 + if (tmp_death->cookie == cookie) { 2049 + death = tmp_death; 2050 + break; 2051 + } 2052 + } 2053 + if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) 2054 + printk(KERN_INFO "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n", 2055 + proc->pid, thread->pid, cookie, death); 2056 + if (death == NULL) { 2057 + binder_user_error("binder: %d:%d BC_DEAD" 2058 + "_BINDER_DONE %p not found\n", 2059 + proc->pid, thread->pid, cookie); 2060 + break; 2061 + } 2062 + 2063 + list_del_init(&death->work.entry); 2064 + if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 2065 + death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2066 + if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2067 + list_add_tail(&death->work.entry, &thread->todo); 2068 + } else { 2069 + list_add_tail(&death->work.entry, &proc->todo); 2070 + wake_up_interruptible(&proc->wait); 2071 + } 2072 + } 2073 + } break; 2074 + 2075 + default: 2076 + printk(KERN_ERR "binder: %d:%d unknown command %d\n", proc->pid, thread->pid, cmd); 2077 + return -EINVAL; 2078 + } 2079 + *consumed = ptr - buffer; 2080 + } 2081 + return 0; 2082 
}

/*
 * Account a BR_* (binder return) command in the global, per-process and
 * per-thread statistics tables.  Commands whose _IOC_NR falls outside the
 * stats arrays are silently ignored.
 */
void
binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, uint32_t cmd)
{
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		binder_stats.br[_IOC_NR(cmd)]++;
		proc->stats.br[_IOC_NR(cmd)]++;
		thread->stats.br[_IOC_NR(cmd)]++;
	}
}

/*
 * Non-zero when there is process-level work to pick up, or when this
 * thread has been told to return to user space (NEED_RETURN is set on
 * thread creation and by binder_flush()).
 */
static int
binder_has_proc_work(struct binder_proc *proc, struct binder_thread *thread)
{
	return !list_empty(&proc->todo) || (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

/*
 * Non-zero when this thread has private work queued, has a pending error
 * code to deliver, or must return to user space.
 */
static int
binder_has_thread_work(struct binder_thread *thread)
{
	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

/*
 * Fill @buffer (user space, @size bytes) with BR_* commands for this
 * thread.  Called with binder_lock held; the lock is dropped and
 * re-acquired around the blocking wait.  On return *@consumed is advanced
 * past the bytes written.  Returns 0 or a negative errno (-EFAULT on a
 * user-copy failure, -EAGAIN for a non-blocking caller with no work, or
 * the result of an interrupted wait).
 */
static int
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
	void __user *buffer, int size, signed long *consumed, int non_block)
{
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	/* Lead with a BR_NOOP so the returned stream is never empty. */
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	/* A thread with no transaction stack and no private work serves the
	 * process-wide todo list instead of its own. */
	wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);

	/* Deliver pending error codes before any other work.  return_error2
	 * goes first and is only cleared once there is room left to also
	 * deliver return_error on this or a later call. */
	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		thread->return_error = BR_OK;
		goto done;
	}


	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;
	/* Drop the global lock for the (possibly blocking) wait. */
	mutex_unlock(&binder_lock);
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("binder: %d:%d ERROR: Thread waiting "
				"for process work before calling BC_REGISTER_"
				"LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			/* Exclusive wait: wake only one waiter per work item. */
			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
	}
	mutex_lock(&binder_lock);
	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		/* Private work first, then process work if we volunteered. */
		if (!list_empty(&thread->todo))
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		else if (!list_empty(&proc->todo) && wait_for_proc_work)
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		else {
			/* Only the initial BR_NOOP was written (4 bytes): go
			 * back and wait again rather than return nothing. */
			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
				goto retry;
			break;
		}

		/* Stop once a full command + transaction_data no longer fits. */
		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			/* Copied out to user space after the switch. */
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION_COMPLETE)
				printk(KERN_INFO "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
					proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
			/* Tell user space about at most one ref-state change per
			 * pass; pending_*_ref holds an extra local ref until
			 * user space acknowledges with BC_*_DONE. */
			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);
				if (put_user(node->cookie, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);

				binder_stat_br(proc, thread, cmd);
				if (binder_debug_mask & BINDER_DEBUG_USER_REFS)
					printk(KERN_INFO "binder: %d:%d %s %d u%p c%p\n",
						proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
			} else {
				/* User space is already in sync; a node with no
				 * refs left at all can be destroyed. */
				list_del_init(&w->entry);
				if (!weak && !strong) {
					if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
						printk(KERN_INFO "binder: %d:%d node %d u%p c%p deleted\n",
							proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats.obj_deleted[BINDER_STAT_NODE]++;
				} else {
					if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
						printk(KERN_INFO "binder: %d:%d node %d u%p c%p state unchanged\n",
							proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death = container_of(w, struct binder_ref_death, work);
			uint32_t cmd;
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			if (binder_debug_mask & BINDER_DEBUG_DEATH_NOTIFICATION)
				printk(KERN_INFO "binder: %d:%d %s %p\n",
					proc->pid, thread->pid,
					cmd == BR_DEAD_BINDER ?
					"BR_DEAD_BINDER" :
					"BR_CLEAR_DEATH_NOTIFICATION_DONE",
					death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats.obj_deleted[BINDER_STAT_DEATH]++;
			} else
				/* Keep the item until BC_DEAD_BINDER_DONE. */
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		/* Copy the transaction out to user space. */
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			/* Inherit the sender's priority for synchronous calls,
			 * bounded below by the node's minimum. */
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = NULL;
			tr.cookie = NULL;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = t->sender_euid;

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;
			/* Translate the sender pid into this task's pid ns. */
			tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
		} else {
			tr.sender_pid = 0;
		}

		/* Hand out user-space addresses into the shared buffer; the
		 * offsets array follows the data, pointer-aligned. */
		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (void *)((void *)t->buffer->data + proc->user_buffer_offset);
		tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		binder_stat_br(proc, thread, cmd);
		if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
			printk(KERN_INFO "binder: %d:%d %s %d %d:%d, cmd %d size %d-%d ptr %p-%p\n",
				proc->pid, thread->pid,
				(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY",
				t->debug_id, t->from ? t->from->proc->pid : 0,
				t->from ? t->from->pid : 0, cmd,
				t->buffer->data_size, t->buffer->offsets_size,
				tr.data.ptr.buffer, tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			/* Synchronous request: park it on our stack until the
			 * reply comes back. */
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			/* One-way or reply: nothing more to track. */
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
		}
		/* At most one transaction per read. */
		break;
	}

done:

	*consumed = ptr - buffer;
	/* Ask user space to spawn a looper when the thread pool has run dry.
	 * The looper-state check matters: the user-space code fails to spawn
	 * a new thread if we leave it out.  BR_SPAWN_LOOPER is written at the
	 * start of the buffer (over the BR_NOOP slot). */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))) {
		proc->requested_threads++;
		if (binder_debug_mask & BINDER_DEBUG_THREADS)
			printk(KERN_INFO "binder: %d:%d BR_SPAWN_LOOPER\n",
				proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
	}
	return 0;
}

/*
 * Discard all work still queued on @list.  Senders of undelivered
 * synchronous transactions get BR_DEAD_REPLY; completed-transaction
 * markers are simply freed.
 */
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;
	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
!(t->flags & TF_ONE_WAY))
				binder_send_failed_reply(t, BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			kfree(w);
			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
		} break;
		default:
			/* Other work types have nothing to release here. */
			break;
		}
	}

}

/*
 * Find the binder_thread for the current task in @proc's thread rbtree
 * (keyed by pid), creating and inserting one on first use.  Newly created
 * threads start with NEED_RETURN set so their first read returns to user
 * space immediately.  Called with binder_lock held (presumably -- all
 * visible callers hold it); returns NULL if allocation fails.
 */
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			break;
	}
	/* *p == NULL means the search fell off the tree: not found. */
	if (*p == NULL) {
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (thread == NULL)
			return NULL;
		binder_stats.obj_created[BINDER_STAT_THREAD]++;
		thread->proc = proc;
		thread->pid = current->pid;
		init_waitqueue_head(&thread->wait);
		INIT_LIST_HEAD(&thread->todo);
		rb_link_node(&thread->rb_node, parent, p);
		rb_insert_color(&thread->rb_node, &proc->threads);
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		thread->return_error = BR_OK;
		thread->return_error2 = BR_OK;
	}
	return thread;
}

/*
 * Tear down @thread (BINDER_THREAD_EXIT or process release): remove it
 * from the rbtree, detach it from every transaction on its stack, fail an
 * in-progress incoming transaction with BR_DEAD_REPLY, and discard its
 * queued work.  Returns the number of transactions still active.
 */
static int binder_free_thread(struct binder_proc *proc, struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	/* A transaction targeted at this thread is still awaiting our reply;
	 * remember it so the sender gets BR_DEAD_REPLY below. */
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		if (binder_debug_mask & BINDER_DEBUG_DEAD_TRANSACTION)
			printk(KERN_INFO "binder: release %d:%d transaction
%d %s, still active\n", 2465 + proc->pid, thread->pid, t->debug_id, (t->to_thread == thread) ? "in" : "out"); 2466 + if (t->to_thread == thread) { 2467 + t->to_proc = NULL; 2468 + t->to_thread = NULL; 2469 + if (t->buffer) { 2470 + t->buffer->transaction = NULL; 2471 + t->buffer = NULL; 2472 + } 2473 + t = t->to_parent; 2474 + } else if (t->from == thread) { 2475 + t->from = NULL; 2476 + t = t->from_parent; 2477 + } else 2478 + BUG(); 2479 + } 2480 + if (send_reply) 2481 + binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 2482 + binder_release_work(&thread->todo); 2483 + kfree(thread); 2484 + binder_stats.obj_deleted[BINDER_STAT_THREAD]++; 2485 + return active_transactions; 2486 + } 2487 + 2488 + static unsigned int binder_poll(struct file *filp, struct poll_table_struct *wait) 2489 + { 2490 + struct binder_proc *proc = filp->private_data; 2491 + struct binder_thread *thread = NULL; 2492 + int wait_for_proc_work; 2493 + 2494 + mutex_lock(&binder_lock); 2495 + thread = binder_get_thread(proc); 2496 + 2497 + wait_for_proc_work = thread->transaction_stack == NULL && 2498 + list_empty(&thread->todo) && thread->return_error == BR_OK; 2499 + mutex_unlock(&binder_lock); 2500 + 2501 + if (wait_for_proc_work) { 2502 + if (binder_has_proc_work(proc, thread)) 2503 + return POLLIN; 2504 + poll_wait(filp, &proc->wait, wait); 2505 + if (binder_has_proc_work(proc, thread)) 2506 + return POLLIN; 2507 + } else { 2508 + if (binder_has_thread_work(thread)) 2509 + return POLLIN; 2510 + poll_wait(filp, &thread->wait, wait); 2511 + if (binder_has_thread_work(thread)) 2512 + return POLLIN; 2513 + } 2514 + return 0; 2515 + } 2516 + 2517 + static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 2518 + { 2519 + int ret; 2520 + struct binder_proc *proc = filp->private_data; 2521 + struct binder_thread *thread; 2522 + unsigned int size = _IOC_SIZE(cmd); 2523 + void __user *ubuf = (void __user *)arg; 2524 + 2525 + /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", 
proc->pid, current->pid, cmd, arg);*/

	/* Park here while user-error debugging has frozen the driver. */
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		return ret;

	mutex_lock(&binder_lock);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ: {
		struct binder_write_read bwr;
		if (size != sizeof(struct binder_write_read)) {
			ret = -EINVAL;
			goto err;
		}
		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		if (binder_debug_mask & BINDER_DEBUG_READ_WRITE)
			printk(KERN_INFO "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
				proc->pid, thread->pid, bwr.write_size, bwr.write_buffer, bwr.read_size, bwr.read_buffer);
		if (bwr.write_size > 0) {
			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
			if (ret < 0) {
				/* Report how much was consumed, nothing read. */
				bwr.read_consumed = 0;
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		if (bwr.read_size > 0) {
			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
			/* If process work remains after our read, kick another
			 * waiting thread. */
			if (!list_empty(&proc->todo))
				wake_up_interruptible(&proc->wait);
			if (ret < 0) {
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		if (binder_debug_mask & BINDER_DEBUG_READ_WRITE)
			printk(KERN_INFO "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
				proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, bwr.read_consumed, bwr.read_size);
		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_SET_MAX_THREADS:
		/* NOTE(review): a copy_from_user() failure conventionally
		 * returns -EFAULT, not -EINVAL -- confirm before changing
		 * what user space observes. */
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		/* Only one context manager (handle 0) may ever exist, and
		 * only the euid that first claimed the slot may register. */
		if (binder_context_mgr_node != NULL) {
			printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
			ret = -EBUSY;
			goto err;
		}
		if (binder_context_mgr_uid != -1) {
			if (binder_context_mgr_uid != current->euid) {
				printk(KERN_ERR "binder: BINDER_SET_"
					"CONTEXT_MGR bad uid %d != %d\n",
					current->euid,
					binder_context_mgr_uid);
				ret = -EPERM;
				goto err;
			}
		} else
			binder_context_mgr_uid = current->euid;
		binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
		if (binder_context_mgr_node == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		/* Pin the node so it can never be released. */
		binder_context_mgr_node->local_weak_refs++;
		binder_context_mgr_node->local_strong_refs++;
		binder_context_mgr_node->has_strong_ref = 1;
		binder_context_mgr_node->has_weak_ref = 1;
		break;
	case BINDER_THREAD_EXIT:
		if (binder_debug_mask & BINDER_DEBUG_THREADS)
			printk(KERN_INFO "binder: %d:%d exit\n",
				proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		/* thread was freed above; clear it so the err path does not
		 * touch it. */
		thread = NULL;
		break;
	case BINDER_VERSION:
		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		/* NOTE(review): put_user() failure conventionally yields
		 * -EFAULT, not -EINVAL; also missing a __user annotation on
		 * the cast -- confirm before changing. */
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	mutex_unlock(&binder_lock);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned
%d\n", proc->pid, current->pid, cmd, arg, ret); 2642 + return ret; 2643 + } 2644 + 2645 + static void binder_vma_open(struct vm_area_struct *vma) 2646 + { 2647 + struct binder_proc *proc = vma->vm_private_data; 2648 + if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) 2649 + printk(KERN_INFO "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot); 2650 + dump_stack(); 2651 + } 2652 + static void binder_vma_close(struct vm_area_struct *vma) 2653 + { 2654 + struct binder_proc *proc = vma->vm_private_data; 2655 + if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) 2656 + printk(KERN_INFO "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot); 2657 + proc->vma = NULL; 2658 + } 2659 + 2660 + static struct vm_operations_struct binder_vm_ops = { 2661 + .open = binder_vma_open, 2662 + .close = binder_vma_close, 2663 + }; 2664 + 2665 + static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 2666 + { 2667 + int ret; 2668 + struct vm_struct *area; 2669 + struct binder_proc *proc = filp->private_data; 2670 + const char *failure_string; 2671 + struct binder_buffer *buffer; 2672 + 2673 + if ((vma->vm_end - vma->vm_start) > SZ_4M) 2674 + vma->vm_end = vma->vm_start + SZ_4M; 2675 + 2676 + if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) 2677 + printk(KERN_INFO "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot); 2678 + 2679 + if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 2680 + ret = -EPERM; 2681 + failure_string = "bad vm_flags"; 2682 + goto err_bad_arg; 2683 + } 2684 + vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 2685 + 2686 + area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); 2687 + if (area == 
NULL) { 2688 + ret = -ENOMEM; 2689 + failure_string = "get_vm_area"; 2690 + goto err_get_vm_area_failed; 2691 + } 2692 + proc->buffer = area->addr; 2693 + proc->user_buffer_offset = vma->vm_start - (size_t)proc->buffer; 2694 + 2695 + #ifdef CONFIG_CPU_CACHE_VIPT 2696 + if (cache_is_vipt_aliasing()) { 2697 + while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { 2698 + printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); 2699 + vma->vm_start += PAGE_SIZE; 2700 + } 2701 + } 2702 + #endif 2703 + proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); 2704 + if (proc->pages == NULL) { 2705 + ret = -ENOMEM; 2706 + failure_string = "alloc page array"; 2707 + goto err_alloc_pages_failed; 2708 + } 2709 + proc->buffer_size = vma->vm_end - vma->vm_start; 2710 + 2711 + vma->vm_ops = &binder_vm_ops; 2712 + vma->vm_private_data = proc; 2713 + 2714 + if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { 2715 + ret = -ENOMEM; 2716 + failure_string = "alloc small buf"; 2717 + goto err_alloc_small_buf_failed; 2718 + } 2719 + buffer = proc->buffer; 2720 + INIT_LIST_HEAD(&proc->buffers); 2721 + list_add(&buffer->entry, &proc->buffers); 2722 + buffer->free = 1; 2723 + binder_insert_free_buffer(proc, buffer); 2724 + proc->free_async_space = proc->buffer_size / 2; 2725 + barrier(); 2726 + proc->vma = vma; 2727 + 2728 + /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ 2729 + return 0; 2730 + 2731 + err_alloc_small_buf_failed: 2732 + kfree(proc->pages); 2733 + err_alloc_pages_failed: 2734 + vfree(proc->buffer); 2735 + err_get_vm_area_failed: 2736 + mutex_unlock(&binder_lock); 2737 + err_bad_arg: 2738 + printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 2739 + return ret; 2740 + } 2741 + 2742 + static 
int binder_open(struct inode *nodp, struct file *filp) 2743 + { 2744 + struct binder_proc *proc; 2745 + 2746 + if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) 2747 + printk(KERN_INFO "binder_open: %d:%d\n", current->group_leader->pid, current->pid); 2748 + 2749 + proc = kzalloc(sizeof(*proc), GFP_KERNEL); 2750 + if (proc == NULL) 2751 + return -ENOMEM; 2752 + get_task_struct(current); 2753 + proc->tsk = current; 2754 + INIT_LIST_HEAD(&proc->todo); 2755 + init_waitqueue_head(&proc->wait); 2756 + proc->default_priority = task_nice(current); 2757 + mutex_lock(&binder_lock); 2758 + binder_stats.obj_created[BINDER_STAT_PROC]++; 2759 + hlist_add_head(&proc->proc_node, &binder_procs); 2760 + proc->pid = current->group_leader->pid; 2761 + INIT_LIST_HEAD(&proc->delivered_death); 2762 + filp->private_data = proc; 2763 + mutex_unlock(&binder_lock); 2764 + 2765 + if (binder_proc_dir_entry_proc) { 2766 + char strbuf[11]; 2767 + snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 2768 + create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc, binder_read_proc_proc, proc); 2769 + } 2770 + 2771 + return 0; 2772 + } 2773 + 2774 + static int binder_flush(struct file *filp, fl_owner_t id) 2775 + { 2776 + struct rb_node *n; 2777 + struct binder_proc *proc = filp->private_data; 2778 + int wake_count = 0; 2779 + 2780 + mutex_lock(&binder_lock); 2781 + for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 2782 + struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 2783 + thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; 2784 + if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 2785 + wake_up_interruptible(&thread->wait); 2786 + wake_count++; 2787 + } 2788 + } 2789 + wake_up_interruptible_all(&proc->wait); 2790 + mutex_unlock(&binder_lock); 2791 + 2792 + if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) 2793 + printk(KERN_INFO "binder_flush: %d woke %d threads\n", proc->pid, wake_count); 2794 + 2795 + return 0; 2796 + } 2797 + 2798 + static int 
binder_release(struct inode *nodp, struct file *filp) 2799 + { 2800 + struct hlist_node *pos; 2801 + struct binder_transaction *t; 2802 + struct rb_node *n; 2803 + struct binder_proc *proc = filp->private_data; 2804 + int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; 2805 + 2806 + if (binder_proc_dir_entry_proc) { 2807 + char strbuf[11]; 2808 + snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 2809 + remove_proc_entry(strbuf, binder_proc_dir_entry_proc); 2810 + } 2811 + mutex_lock(&binder_lock); 2812 + hlist_del(&proc->proc_node); 2813 + if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { 2814 + if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) 2815 + printk(KERN_INFO "binder_release: %d context_mgr_node gone\n", proc->pid); 2816 + binder_context_mgr_node = NULL; 2817 + } 2818 + 2819 + threads = 0; 2820 + active_transactions = 0; 2821 + while ((n = rb_first(&proc->threads))) { 2822 + struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 2823 + threads++; 2824 + active_transactions += binder_free_thread(proc, thread); 2825 + } 2826 + nodes = 0; 2827 + incoming_refs = 0; 2828 + while ((n = rb_first(&proc->nodes))) { 2829 + struct binder_node *node = rb_entry(n, struct binder_node, rb_node); 2830 + 2831 + nodes++; 2832 + rb_erase(&node->rb_node, &proc->nodes); 2833 + list_del_init(&node->work.entry); 2834 + if (hlist_empty(&node->refs)) { 2835 + kfree(node); 2836 + binder_stats.obj_deleted[BINDER_STAT_NODE]++; 2837 + } else { 2838 + struct binder_ref *ref; 2839 + int death = 0; 2840 + 2841 + node->proc = NULL; 2842 + node->local_strong_refs = 0; 2843 + node->local_weak_refs = 0; 2844 + hlist_add_head(&node->dead_node, &binder_dead_nodes); 2845 + 2846 + hlist_for_each_entry(ref, pos, &node->refs, node_entry) { 2847 + incoming_refs++; 2848 + if (ref->death) { 2849 + death++; 2850 + if (list_empty(&ref->death->work.entry)) { 2851 + ref->death->work.type = BINDER_WORK_DEAD_BINDER; 2852 + 
list_add_tail(&ref->death->work.entry, &ref->proc->todo); 2853 + wake_up_interruptible(&ref->proc->wait); 2854 + } else 2855 + BUG(); 2856 + } 2857 + } 2858 + if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) 2859 + printk(KERN_INFO "binder: node %d now dead, refs %d, death %d\n", node->debug_id, incoming_refs, death); 2860 + } 2861 + } 2862 + outgoing_refs = 0; 2863 + while ((n = rb_first(&proc->refs_by_desc))) { 2864 + struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc); 2865 + outgoing_refs++; 2866 + binder_delete_ref(ref); 2867 + } 2868 + binder_release_work(&proc->todo); 2869 + buffers = 0; 2870 + 2871 + while ((n = rb_first(&proc->allocated_buffers))) { 2872 + struct binder_buffer *buffer = rb_entry(n, struct binder_buffer, rb_node); 2873 + t = buffer->transaction; 2874 + if (t) { 2875 + t->buffer = NULL; 2876 + buffer->transaction = NULL; 2877 + printk(KERN_ERR "binder: release proc %d, transaction %d, not freed\n", proc->pid, t->debug_id); 2878 + /*BUG();*/ 2879 + } 2880 + binder_free_buf(proc, buffer); 2881 + buffers++; 2882 + } 2883 + 2884 + binder_stats.obj_deleted[BINDER_STAT_PROC]++; 2885 + mutex_unlock(&binder_lock); 2886 + 2887 + page_count = 0; 2888 + if (proc->pages) { 2889 + int i; 2890 + for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { 2891 + if (proc->pages[i]) { 2892 + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) 2893 + printk(KERN_INFO "binder_release: %d: page %d at %p not freed\n", proc->pid, i, proc->buffer + i * PAGE_SIZE); 2894 + __free_page(proc->pages[i]); 2895 + page_count++; 2896 + } 2897 + } 2898 + kfree(proc->pages); 2899 + vfree(proc->buffer); 2900 + } 2901 + 2902 + put_task_struct(proc->tsk); 2903 + 2904 + if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) 2905 + printk(KERN_INFO "binder_release: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", 2906 + proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions, buffers, page_count); 2907 + 2908 
+ kfree(proc); 2909 + return 0; 2910 + } 2911 + 2912 + static char *print_binder_transaction(char *buf, char *end, const char *prefix, struct binder_transaction *t) 2913 + { 2914 + buf += snprintf(buf, end - buf, "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", 2915 + prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0, 2916 + t->from ? t->from->pid : 0, 2917 + t->to_proc ? t->to_proc->pid : 0, 2918 + t->to_thread ? t->to_thread->pid : 0, 2919 + t->code, t->flags, t->priority, t->need_reply); 2920 + if (buf >= end) 2921 + return buf; 2922 + if (t->buffer == NULL) { 2923 + buf += snprintf(buf, end - buf, " buffer free\n"); 2924 + return buf; 2925 + } 2926 + if (t->buffer->target_node) { 2927 + buf += snprintf(buf, end - buf, " node %d", 2928 + t->buffer->target_node->debug_id); 2929 + if (buf >= end) 2930 + return buf; 2931 + } 2932 + buf += snprintf(buf, end - buf, " size %d:%d data %p\n", 2933 + t->buffer->data_size, t->buffer->offsets_size, 2934 + t->buffer->data); 2935 + return buf; 2936 + } 2937 + 2938 + static char *print_binder_buffer(char *buf, char *end, const char *prefix, struct binder_buffer *buffer) 2939 + { 2940 + buf += snprintf(buf, end - buf, "%s %d: %p size %d:%d %s\n", 2941 + prefix, buffer->debug_id, buffer->data, 2942 + buffer->data_size, buffer->offsets_size, 2943 + buffer->transaction ? 
"active" : "delivered"); 2944 + return buf; 2945 + } 2946 + 2947 + static char *print_binder_work(char *buf, char *end, const char *prefix, 2948 + const char *transaction_prefix, struct binder_work *w) 2949 + { 2950 + struct binder_node *node; 2951 + struct binder_transaction *t; 2952 + 2953 + switch (w->type) { 2954 + case BINDER_WORK_TRANSACTION: 2955 + t = container_of(w, struct binder_transaction, work); 2956 + buf = print_binder_transaction(buf, end, transaction_prefix, t); 2957 + break; 2958 + case BINDER_WORK_TRANSACTION_COMPLETE: 2959 + buf += snprintf(buf, end - buf, 2960 + "%stransaction complete\n", prefix); 2961 + break; 2962 + case BINDER_WORK_NODE: 2963 + node = container_of(w, struct binder_node, work); 2964 + buf += snprintf(buf, end - buf, "%snode work %d: u%p c%p\n", 2965 + prefix, node->debug_id, node->ptr, node->cookie); 2966 + break; 2967 + case BINDER_WORK_DEAD_BINDER: 2968 + buf += snprintf(buf, end - buf, "%shas dead binder\n", prefix); 2969 + break; 2970 + case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 2971 + buf += snprintf(buf, end - buf, 2972 + "%shas cleared dead binder\n", prefix); 2973 + break; 2974 + case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 2975 + buf += snprintf(buf, end - buf, 2976 + "%shas cleared death notification\n", prefix); 2977 + break; 2978 + default: 2979 + buf += snprintf(buf, end - buf, "%sunknown work: type %d\n", 2980 + prefix, w->type); 2981 + break; 2982 + } 2983 + return buf; 2984 + } 2985 + 2986 + static char *print_binder_thread(char *buf, char *end, struct binder_thread *thread, int print_always) 2987 + { 2988 + struct binder_transaction *t; 2989 + struct binder_work *w; 2990 + char *start_buf = buf; 2991 + char *header_buf; 2992 + 2993 + buf += snprintf(buf, end - buf, " thread %d: l %02x\n", thread->pid, thread->looper); 2994 + header_buf = buf; 2995 + t = thread->transaction_stack; 2996 + while (t) { 2997 + if (buf >= end) 2998 + break; 2999 + if (t->from == thread) { 3000 + buf = print_binder_transaction(buf, end, 
" outgoing transaction", t); 3001 + t = t->from_parent; 3002 + } else if (t->to_thread == thread) { 3003 + buf = print_binder_transaction(buf, end, " incoming transaction", t); 3004 + t = t->to_parent; 3005 + } else { 3006 + buf = print_binder_transaction(buf, end, " bad transaction", t); 3007 + t = NULL; 3008 + } 3009 + } 3010 + list_for_each_entry(w, &thread->todo, entry) { 3011 + if (buf >= end) 3012 + break; 3013 + buf = print_binder_work(buf, end, " ", 3014 + " pending transaction", w); 3015 + } 3016 + if (!print_always && buf == header_buf) 3017 + buf = start_buf; 3018 + return buf; 3019 + } 3020 + 3021 + static char *print_binder_node(char *buf, char *end, struct binder_node *node) 3022 + { 3023 + struct binder_ref *ref; 3024 + struct hlist_node *pos; 3025 + struct binder_work *w; 3026 + int count; 3027 + count = 0; 3028 + hlist_for_each_entry(ref, pos, &node->refs, node_entry) 3029 + count++; 3030 + 3031 + buf += snprintf(buf, end - buf, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d", 3032 + node->debug_id, node->ptr, node->cookie, 3033 + node->has_strong_ref, node->has_weak_ref, 3034 + node->local_strong_refs, node->local_weak_refs, 3035 + node->internal_strong_refs, count); 3036 + if (buf >= end) 3037 + return buf; 3038 + if (count) { 3039 + buf += snprintf(buf, end - buf, " proc"); 3040 + if (buf >= end) 3041 + return buf; 3042 + hlist_for_each_entry(ref, pos, &node->refs, node_entry) { 3043 + buf += snprintf(buf, end - buf, " %d", ref->proc->pid); 3044 + if (buf >= end) 3045 + return buf; 3046 + } 3047 + } 3048 + buf += snprintf(buf, end - buf, "\n"); 3049 + list_for_each_entry(w, &node->async_todo, entry) { 3050 + if (buf >= end) 3051 + break; 3052 + buf = print_binder_work(buf, end, " ", 3053 + " pending async transaction", w); 3054 + } 3055 + return buf; 3056 + } 3057 + 3058 + static char *print_binder_ref(char *buf, char *end, struct binder_ref *ref) 3059 + { 3060 + buf += snprintf(buf, end - buf, " ref %d: desc %d %snode %d s %d w %d d 
%p\n", 3061 + ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", 3062 + ref->node->debug_id, ref->strong, ref->weak, ref->death); 3063 + return buf; 3064 + } 3065 + 3066 + static char *print_binder_proc(char *buf, char *end, struct binder_proc *proc, int print_all) 3067 + { 3068 + struct binder_work *w; 3069 + struct rb_node *n; 3070 + char *start_buf = buf; 3071 + char *header_buf; 3072 + 3073 + buf += snprintf(buf, end - buf, "proc %d\n", proc->pid); 3074 + header_buf = buf; 3075 + 3076 + for (n = rb_first(&proc->threads); n != NULL && buf < end; n = rb_next(n)) 3077 + buf = print_binder_thread(buf, end, rb_entry(n, struct binder_thread, rb_node), print_all); 3078 + for (n = rb_first(&proc->nodes); n != NULL && buf < end; n = rb_next(n)) { 3079 + struct binder_node *node = rb_entry(n, struct binder_node, rb_node); 3080 + if (print_all || node->has_async_transaction) 3081 + buf = print_binder_node(buf, end, node); 3082 + } 3083 + if (print_all) { 3084 + for (n = rb_first(&proc->refs_by_desc); n != NULL && buf < end; n = rb_next(n)) 3085 + buf = print_binder_ref(buf, end, rb_entry(n, struct binder_ref, rb_node_desc)); 3086 + } 3087 + for (n = rb_first(&proc->allocated_buffers); n != NULL && buf < end; n = rb_next(n)) 3088 + buf = print_binder_buffer(buf, end, " buffer", rb_entry(n, struct binder_buffer, rb_node)); 3089 + list_for_each_entry(w, &proc->todo, entry) { 3090 + if (buf >= end) 3091 + break; 3092 + buf = print_binder_work(buf, end, " ", 3093 + " pending transaction", w); 3094 + } 3095 + list_for_each_entry(w, &proc->delivered_death, entry) { 3096 + if (buf >= end) 3097 + break; 3098 + buf += snprintf(buf, end - buf, " has delivered dead binder\n"); 3099 + break; 3100 + } 3101 + if (!print_all && buf == header_buf) 3102 + buf = start_buf; 3103 + return buf; 3104 + } 3105 + 3106 + static const char *binder_return_strings[] = { 3107 + "BR_ERROR", 3108 + "BR_OK", 3109 + "BR_TRANSACTION", 3110 + "BR_REPLY", 3111 + "BR_ACQUIRE_RESULT", 3112 + 
"BR_DEAD_REPLY", 3113 + "BR_TRANSACTION_COMPLETE", 3114 + "BR_INCREFS", 3115 + "BR_ACQUIRE", 3116 + "BR_RELEASE", 3117 + "BR_DECREFS", 3118 + "BR_ATTEMPT_ACQUIRE", 3119 + "BR_NOOP", 3120 + "BR_SPAWN_LOOPER", 3121 + "BR_FINISHED", 3122 + "BR_DEAD_BINDER", 3123 + "BR_CLEAR_DEATH_NOTIFICATION_DONE", 3124 + "BR_FAILED_REPLY" 3125 + }; 3126 + 3127 + static const char *binder_command_strings[] = { 3128 + "BC_TRANSACTION", 3129 + "BC_REPLY", 3130 + "BC_ACQUIRE_RESULT", 3131 + "BC_FREE_BUFFER", 3132 + "BC_INCREFS", 3133 + "BC_ACQUIRE", 3134 + "BC_RELEASE", 3135 + "BC_DECREFS", 3136 + "BC_INCREFS_DONE", 3137 + "BC_ACQUIRE_DONE", 3138 + "BC_ATTEMPT_ACQUIRE", 3139 + "BC_REGISTER_LOOPER", 3140 + "BC_ENTER_LOOPER", 3141 + "BC_EXIT_LOOPER", 3142 + "BC_REQUEST_DEATH_NOTIFICATION", 3143 + "BC_CLEAR_DEATH_NOTIFICATION", 3144 + "BC_DEAD_BINDER_DONE" 3145 + }; 3146 + 3147 + static const char *binder_objstat_strings[] = { 3148 + "proc", 3149 + "thread", 3150 + "node", 3151 + "ref", 3152 + "death", 3153 + "transaction", 3154 + "transaction_complete" 3155 + }; 3156 + 3157 + static char *print_binder_stats(char *buf, char *end, const char *prefix, struct binder_stats *stats) 3158 + { 3159 + int i; 3160 + 3161 + BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != ARRAY_SIZE(binder_command_strings)); 3162 + for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 3163 + if (stats->bc[i]) 3164 + buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix, 3165 + binder_command_strings[i], stats->bc[i]); 3166 + if (buf >= end) 3167 + return buf; 3168 + } 3169 + 3170 + BUILD_BUG_ON(ARRAY_SIZE(stats->br) != ARRAY_SIZE(binder_return_strings)); 3171 + for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 3172 + if (stats->br[i]) 3173 + buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix, 3174 + binder_return_strings[i], stats->br[i]); 3175 + if (buf >= end) 3176 + return buf; 3177 + } 3178 + 3179 + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(binder_objstat_strings)); 3180 + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 
ARRAY_SIZE(stats->obj_deleted)); 3181 + for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 3182 + if (stats->obj_created[i] || stats->obj_deleted[i]) 3183 + buf += snprintf(buf, end - buf, "%s%s: active %d total %d\n", prefix, 3184 + binder_objstat_strings[i], 3185 + stats->obj_created[i] - stats->obj_deleted[i], 3186 + stats->obj_created[i]); 3187 + if (buf >= end) 3188 + return buf; 3189 + } 3190 + return buf; 3191 + } 3192 + 3193 + static char *print_binder_proc_stats(char *buf, char *end, struct binder_proc *proc) 3194 + { 3195 + struct binder_work *w; 3196 + struct rb_node *n; 3197 + int count, strong, weak; 3198 + 3199 + buf += snprintf(buf, end - buf, "proc %d\n", proc->pid); 3200 + if (buf >= end) 3201 + return buf; 3202 + count = 0; 3203 + for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3204 + count++; 3205 + buf += snprintf(buf, end - buf, " threads: %d\n", count); 3206 + if (buf >= end) 3207 + return buf; 3208 + buf += snprintf(buf, end - buf, " requested threads: %d+%d/%d\n" 3209 + " ready threads %d\n" 3210 + " free async space %d\n", proc->requested_threads, 3211 + proc->requested_threads_started, proc->max_threads, 3212 + proc->ready_threads, proc->free_async_space); 3213 + if (buf >= end) 3214 + return buf; 3215 + count = 0; 3216 + for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 3217 + count++; 3218 + buf += snprintf(buf, end - buf, " nodes: %d\n", count); 3219 + if (buf >= end) 3220 + return buf; 3221 + count = 0; 3222 + strong = 0; 3223 + weak = 0; 3224 + for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 3225 + struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc); 3226 + count++; 3227 + strong += ref->strong; 3228 + weak += ref->weak; 3229 + } 3230 + buf += snprintf(buf, end - buf, " refs: %d s %d w %d\n", count, strong, weak); 3231 + if (buf >= end) 3232 + return buf; 3233 + 3234 + count = 0; 3235 + for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3236 + 
count++; 3237 + buf += snprintf(buf, end - buf, " buffers: %d\n", count); 3238 + if (buf >= end) 3239 + return buf; 3240 + 3241 + count = 0; 3242 + list_for_each_entry(w, &proc->todo, entry) { 3243 + switch (w->type) { 3244 + case BINDER_WORK_TRANSACTION: 3245 + count++; 3246 + break; 3247 + default: 3248 + break; 3249 + } 3250 + } 3251 + buf += snprintf(buf, end - buf, " pending transactions: %d\n", count); 3252 + if (buf >= end) 3253 + return buf; 3254 + 3255 + buf = print_binder_stats(buf, end, " ", &proc->stats); 3256 + 3257 + return buf; 3258 + } 3259 + 3260 + 3261 + static int binder_read_proc_state( 3262 + char *page, char **start, off_t off, int count, int *eof, void *data) 3263 + { 3264 + struct binder_proc *proc; 3265 + struct hlist_node *pos; 3266 + struct binder_node *node; 3267 + int len = 0; 3268 + char *buf = page; 3269 + char *end = page + PAGE_SIZE; 3270 + int do_lock = !binder_debug_no_lock; 3271 + 3272 + if (off) 3273 + return 0; 3274 + 3275 + if (do_lock) 3276 + mutex_lock(&binder_lock); 3277 + 3278 + buf += snprintf(buf, end - buf, "binder state:\n"); 3279 + 3280 + if (!hlist_empty(&binder_dead_nodes)) 3281 + buf += snprintf(buf, end - buf, "dead nodes:\n"); 3282 + hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) { 3283 + if (buf >= end) 3284 + break; 3285 + buf = print_binder_node(buf, end, node); 3286 + } 3287 + 3288 + hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { 3289 + if (buf >= end) 3290 + break; 3291 + buf = print_binder_proc(buf, end, proc, 1); 3292 + } 3293 + if (do_lock) 3294 + mutex_unlock(&binder_lock); 3295 + if (buf > page + PAGE_SIZE) 3296 + buf = page + PAGE_SIZE; 3297 + 3298 + *start = page + off; 3299 + 3300 + len = buf - page; 3301 + if (len > off) 3302 + len -= off; 3303 + else 3304 + len = 0; 3305 + 3306 + return len < count ? 
len : count; 3307 + } 3308 + 3309 + static int binder_read_proc_stats( 3310 + char *page, char **start, off_t off, int count, int *eof, void *data) 3311 + { 3312 + struct binder_proc *proc; 3313 + struct hlist_node *pos; 3314 + int len = 0; 3315 + char *p = page; 3316 + int do_lock = !binder_debug_no_lock; 3317 + 3318 + if (off) 3319 + return 0; 3320 + 3321 + if (do_lock) 3322 + mutex_lock(&binder_lock); 3323 + 3324 + p += snprintf(p, PAGE_SIZE, "binder stats:\n"); 3325 + 3326 + p = print_binder_stats(p, page + PAGE_SIZE, "", &binder_stats); 3327 + 3328 + hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { 3329 + if (p >= page + PAGE_SIZE) 3330 + break; 3331 + p = print_binder_proc_stats(p, page + PAGE_SIZE, proc); 3332 + } 3333 + if (do_lock) 3334 + mutex_unlock(&binder_lock); 3335 + if (p > page + PAGE_SIZE) 3336 + p = page + PAGE_SIZE; 3337 + 3338 + *start = page + off; 3339 + 3340 + len = p - page; 3341 + if (len > off) 3342 + len -= off; 3343 + else 3344 + len = 0; 3345 + 3346 + return len < count ? 
len : count; 3347 + } 3348 + 3349 + static int binder_read_proc_transactions( 3350 + char *page, char **start, off_t off, int count, int *eof, void *data) 3351 + { 3352 + struct binder_proc *proc; 3353 + struct hlist_node *pos; 3354 + int len = 0; 3355 + char *buf = page; 3356 + char *end = page + PAGE_SIZE; 3357 + int do_lock = !binder_debug_no_lock; 3358 + 3359 + if (off) 3360 + return 0; 3361 + 3362 + if (do_lock) 3363 + mutex_lock(&binder_lock); 3364 + 3365 + buf += snprintf(buf, end - buf, "binder transactions:\n"); 3366 + hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { 3367 + if (buf >= end) 3368 + break; 3369 + buf = print_binder_proc(buf, end, proc, 0); 3370 + } 3371 + if (do_lock) 3372 + mutex_unlock(&binder_lock); 3373 + if (buf > page + PAGE_SIZE) 3374 + buf = page + PAGE_SIZE; 3375 + 3376 + *start = page + off; 3377 + 3378 + len = buf - page; 3379 + if (len > off) 3380 + len -= off; 3381 + else 3382 + len = 0; 3383 + 3384 + return len < count ? len : count; 3385 + } 3386 + 3387 + static int binder_read_proc_proc( 3388 + char *page, char **start, off_t off, int count, int *eof, void *data) 3389 + { 3390 + struct binder_proc *proc = data; 3391 + int len = 0; 3392 + char *p = page; 3393 + int do_lock = !binder_debug_no_lock; 3394 + 3395 + if (off) 3396 + return 0; 3397 + 3398 + if (do_lock) 3399 + mutex_lock(&binder_lock); 3400 + p += snprintf(p, PAGE_SIZE, "binder proc state:\n"); 3401 + p = print_binder_proc(p, page + PAGE_SIZE, proc, 1); 3402 + if (do_lock) 3403 + mutex_unlock(&binder_lock); 3404 + 3405 + if (p > page + PAGE_SIZE) 3406 + p = page + PAGE_SIZE; 3407 + *start = page + off; 3408 + 3409 + len = p - page; 3410 + if (len > off) 3411 + len -= off; 3412 + else 3413 + len = 0; 3414 + 3415 + return len < count ? 
len : count; 3416 + } 3417 + 3418 + static char *print_binder_transaction_log_entry(char *buf, char *end, struct binder_transaction_log_entry *e) 3419 + { 3420 + buf += snprintf(buf, end - buf, "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", 3421 + e->debug_id, (e->call_type == 2) ? "reply" : 3422 + ((e->call_type == 1) ? "async" : "call "), e->from_proc, 3423 + e->from_thread, e->to_proc, e->to_thread, e->to_node, 3424 + e->target_handle, e->data_size, e->offsets_size); 3425 + return buf; 3426 + } 3427 + 3428 + static int binder_read_proc_transaction_log( 3429 + char *page, char **start, off_t off, int count, int *eof, void *data) 3430 + { 3431 + struct binder_transaction_log *log = data; 3432 + int len = 0; 3433 + int i; 3434 + char *buf = page; 3435 + char *end = page + PAGE_SIZE; 3436 + 3437 + if (off) 3438 + return 0; 3439 + 3440 + if (log->full) { 3441 + for (i = log->next; i < ARRAY_SIZE(log->entry); i++) { 3442 + if (buf >= end) 3443 + break; 3444 + buf = print_binder_transaction_log_entry(buf, end, &log->entry[i]); 3445 + } 3446 + } 3447 + for (i = 0; i < log->next; i++) { 3448 + if (buf >= end) 3449 + break; 3450 + buf = print_binder_transaction_log_entry(buf, end, &log->entry[i]); 3451 + } 3452 + 3453 + *start = page + off; 3454 + 3455 + len = buf - page; 3456 + if (len > off) 3457 + len -= off; 3458 + else 3459 + len = 0; 3460 + 3461 + return len < count ? 
len : count; 3462 + } 3463 + 3464 + static struct file_operations binder_fops = { 3465 + .owner = THIS_MODULE, 3466 + .poll = binder_poll, 3467 + .unlocked_ioctl = binder_ioctl, 3468 + .mmap = binder_mmap, 3469 + .open = binder_open, 3470 + .flush = binder_flush, 3471 + .release = binder_release, 3472 + }; 3473 + 3474 + static struct miscdevice binder_miscdev = { 3475 + .minor = MISC_DYNAMIC_MINOR, 3476 + .name = "binder", 3477 + .fops = &binder_fops 3478 + }; 3479 + 3480 + static int __init binder_init(void) 3481 + { 3482 + int ret; 3483 + 3484 + binder_proc_dir_entry_root = proc_mkdir("binder", NULL); 3485 + if (binder_proc_dir_entry_root) 3486 + binder_proc_dir_entry_proc = proc_mkdir("proc", binder_proc_dir_entry_root); 3487 + ret = misc_register(&binder_miscdev); 3488 + if (binder_proc_dir_entry_root) { 3489 + create_proc_read_entry("state", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_state, NULL); 3490 + create_proc_read_entry("stats", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_stats, NULL); 3491 + create_proc_read_entry("transactions", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transactions, NULL); 3492 + create_proc_read_entry("transaction_log", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transaction_log, &binder_transaction_log); 3493 + create_proc_read_entry("failed_transaction_log", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transaction_log, &binder_transaction_log_failed); 3494 + } 3495 + return ret; 3496 + } 3497 + 3498 + device_initcall(binder_init); 3499 +
+330
drivers/staging/android/binder.h
/*
 * Copyright (C) 2008 Google, Inc.
 *
 * Based on, but no longer compatible with, the original
 * OpenBinder.org binder driver interface, which is:
 *
 * Copyright (c) 2005 Palmsource, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _LINUX_BINDER_H
#define _LINUX_BINDER_H

#include <linux/ioctl.h>

/* Pack four characters into a 32-bit object-type tag (big-endian order). */
#define B_PACK_CHARS(c1, c2, c3, c4) \
	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85

/* Type tags for flat_binder_object.type. */
enum {
	BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
};

enum {
	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
};

/*
 * This is the flattened representation of a Binder object for transfer
 * between processes.  The 'offsets' supplied as part of a binder transaction
 * contains offsets into the data where these structures occur.  The Binder
 * driver takes care of re-writing the structure type and data as it moves
 * between processes.
 */
struct flat_binder_object {
	/* 8 bytes for large_flat_header. */
	unsigned long		type;
	unsigned long		flags;

	/* 8 bytes of data. */
	union {
		void		*binder;	/* local object */
		signed long	handle;		/* remote object */
	};

	/* extra data associated with local object */
	void			*cookie;
};

/*
 * On 64-bit platforms where user code may run in 32-bits the driver must
 * translate the buffer (and local binder) addresses appropriately.
 */

struct binder_write_read {
	signed long	write_size;	/* bytes to write */
	signed long	write_consumed;	/* bytes consumed by driver */
	unsigned long	write_buffer;
	signed long	read_size;	/* bytes to read */
	signed long	read_consumed;	/* bytes consumed by driver */
	unsigned long	read_buffer;
};

/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
	/* driver protocol version -- increment with incompatible change */
	signed long	protocol_version;
};

/* This is the current protocol version. */
#define BINDER_CURRENT_PROTOCOL_VERSION 7

#define BINDER_WRITE_READ   		_IOWR('b', 1, struct binder_write_read)
#define	BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, int64_t)
#define	BINDER_SET_MAX_THREADS		_IOW('b', 5, size_t)
#define	BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, int)
#define	BINDER_SET_CONTEXT_MGR		_IOW('b', 7, int)
#define	BINDER_THREAD_EXIT		_IOW('b', 8, int)
#define BINDER_VERSION			_IOWR('b', 9, struct binder_version)

/*
 * NOTE: Two special error codes you should check for when calling
 * in to the driver are:
 *
 * EINTR -- The operation has been interrupted.  This should be
 * handled by retrying the ioctl() until a different error code
 * is returned.
 *
 * ECONNREFUSED -- The driver is no longer accepting operations
 * from your process.  That is, the process is being destroyed.
 * You should handle this by exiting from your process.  Note
 * that once this error code is returned, all further calls to
 * the driver from any thread will return this same code.
 */

enum transaction_flags {
	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
};

struct binder_transaction_data {
	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
	 * identifying the target and contents of the transaction.
	 */
	union {
		size_t	handle;	/* target descriptor of command transaction */
		void	*ptr;	/* target descriptor of return transaction */
	} target;
	void		*cookie;	/* target object cookie */
	unsigned int	code;		/* transaction command */

	/* General information about the transaction. */
	unsigned int	flags;
	pid_t		sender_pid;
	uid_t		sender_euid;
	size_t		data_size;	/* number of bytes of data */
	size_t		offsets_size;	/* number of bytes of offsets */

	/* If this transaction is inline, the data immediately
	 * follows here; otherwise, it ends with a pointer to
	 * the data buffer.
	 */
	union {
		struct {
			/* transaction data */
			const void	*buffer;
			/* offsets from buffer to flat_binder_object structs */
			const void	*offsets;
		} ptr;
		uint8_t	buf[8];
	} data;
};

struct binder_ptr_cookie {
	void *ptr;
	void *cookie;
};

struct binder_pri_desc {
	int priority;
	int desc;
};

struct binder_pri_ptr_cookie {
	int priority;
	void *ptr;
	void *cookie;
};

enum BinderDriverReturnProtocol {
	BR_ERROR = _IOR('r', 0, int),
	/*
	 * int: error code
	 */

	BR_OK = _IO('r', 1),
	/* No parameters! */

	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the received command.
	 */

	BR_ACQUIRE_RESULT = _IOR('r', 4, int),
	/*
	 * not currently supported
	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
	 * Else the remote object has acquired a primary reference.
	 */

	BR_DEAD_REPLY = _IO('r', 5),
	/*
	 * The target of the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
	 */

	BR_TRANSACTION_COMPLETE = _IO('r', 6),
	/*
	 * No parameters... always refers to the last transaction requested
	 * (including replies).  Note that this will be sent even for
	 * asynchronous transactions.
	 */

	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
	/*
	 * void *:	ptr to binder
	 * void *: cookie for binder
	 */

	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
	/*
	 * not currently supported
	 * int:	priority
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BR_NOOP = _IO('r', 12),
	/*
	 * No parameters.  Do nothing and examine the next command.  It exists
	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
	 */

	BR_SPAWN_LOOPER = _IO('r', 13),
	/*
	 * No parameters.  The driver has determined that a process has no
	 * threads waiting to service incoming transactions.  When a process
	 * receives this command, it must spawn a new service thread and
	 * register it via bcENTER_LOOPER.
	 */

	BR_FINISHED = _IO('r', 14),
	/*
	 * not currently supported
	 * stop threadpool thread
	 */

	BR_DEAD_BINDER = _IOR('r', 15, void *),
	/*
	 * void *: cookie
	 */
	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
	/*
	 * void *: cookie
	 */

	BR_FAILED_REPLY = _IO('r', 17),
	/*
	 * The last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
	 */
};

enum BinderDriverCommandProtocol {
	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
	BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the sent command.
	 */

	BC_ACQUIRE_RESULT = _IOW('c', 2, int),
	/*
	 * not currently supported
	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
	 * Else you have acquired a primary reference on the object.
	 */

	BC_FREE_BUFFER = _IOW('c', 3, int),
	/*
	 * void *: ptr to transaction data received on a read
	 */

	BC_INCREFS = _IOW('c', 4, int),
	BC_ACQUIRE = _IOW('c', 5, int),
	BC_RELEASE = _IOW('c', 6, int),
	BC_DECREFS = _IOW('c', 7, int),
	/*
	 * int:	descriptor
	 */

	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
	/*
	 * not currently supported
	 * int: priority
	 * int: descriptor
	 */

	BC_REGISTER_LOOPER = _IO('c', 11),
	/*
	 * No parameters.
	 * Register a spawned looper thread with the device.
	 */

	BC_ENTER_LOOPER = _IO('c', 12),
	BC_EXIT_LOOPER = _IO('c', 13),
	/*
	 * No parameters.
	 * These two commands are sent as an application-level thread
	 * enters and exits the binder loop, respectively.  They are
	 * used so the binder can have an accurate count of the number
	 * of looping threads it has available.
	 */

	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie
	 */

	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie
	 */

	BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
	/*
	 * void *: cookie
	 */
};

#endif /* _LINUX_BINDER_H */