Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

binder: add functions to copy to/from binder buffers

Avoid vm_area when copying to or from binder buffers.
Instead, new copy functions are added that copy between
kernel space and binder buffer space. These use
kmap_atomic() and kunmap_atomic() to create temporary
page mappings, and memcpy() is then used to copy within
each mapped page.

Also, kmap_atomic() / kunmap_atomic() perform the appropriate
cache flushing needed to support VIVT cache architectures.
Allow binder to build if CPU_CACHE_VIVT is defined.

Several uses of the new functions are added here. More
to follow in subsequent patches.

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Todd Kjos; committed by Greg Kroah-Hartman.

Commit hashes: 8ced0c62 1a7c3d9b

+145 -43
+1 -1
drivers/android/Kconfig
··· 10 10 11 11 config ANDROID_BINDER_IPC 12 12 bool "Android Binder IPC Driver" 13 - depends on MMU && !CPU_CACHE_VIVT 13 + depends on MMU 14 14 default n 15 15 ---help--- 16 16 Binder is used in Android for both communication between processes,
+73 -42
drivers/android/binder.c
··· 2244 2244 off_end = (void *)off_start + buffer->offsets_size; 2245 2245 for (offp = off_start; offp < off_end; offp++) { 2246 2246 struct binder_object_header *hdr; 2247 - size_t object_size = binder_validate_object(buffer, *offp); 2247 + size_t object_size; 2248 + binder_size_t object_offset; 2249 + binder_size_t buffer_offset = (uintptr_t)offp - 2250 + (uintptr_t)buffer->data; 2248 2251 2252 + binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, 2253 + buffer, buffer_offset, 2254 + sizeof(object_offset)); 2255 + object_size = binder_validate_object(buffer, object_offset); 2249 2256 if (object_size == 0) { 2250 2257 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 2251 - debug_id, (u64)*offp, buffer->data_size); 2258 + debug_id, (u64)object_offset, buffer->data_size); 2252 2259 continue; 2253 2260 } 2254 - hdr = (struct binder_object_header *)(buffer->data + *offp); 2261 + hdr = (struct binder_object_header *) 2262 + (buffer->data + object_offset); 2255 2263 switch (hdr->type) { 2256 2264 case BINDER_TYPE_BINDER: 2257 2265 case BINDER_TYPE_WEAK_BINDER: { ··· 2367 2359 continue; 2368 2360 } 2369 2361 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); 2370 - for (fd_index = 0; fd_index < fda->num_fds; fd_index++) 2371 - binder_deferred_fd_close(fd_array[fd_index]); 2362 + for (fd_index = 0; fd_index < fda->num_fds; 2363 + fd_index++) { 2364 + u32 fd; 2365 + binder_size_t offset = 2366 + (uintptr_t)&fd_array[fd_index] - 2367 + (uintptr_t)buffer->data; 2368 + 2369 + binder_alloc_copy_from_buffer(&proc->alloc, 2370 + &fd, 2371 + buffer, 2372 + offset, 2373 + sizeof(fd)); 2374 + binder_deferred_fd_close(fd); 2375 + } 2372 2376 } break; 2373 2377 default: 2374 2378 pr_err("transaction release %d bad object type %x\n", ··· 2516 2496 return ret; 2517 2497 } 2518 2498 2519 - static int binder_translate_fd(u32 *fdp, 2499 + static int binder_translate_fd(u32 fd, binder_size_t fd_offset, 2520 2500 struct binder_transaction *t, 
2521 2501 struct binder_thread *thread, 2522 2502 struct binder_transaction *in_reply_to) ··· 2527 2507 struct file *file; 2528 2508 int ret = 0; 2529 2509 bool target_allows_fd; 2530 - int fd = *fdp; 2531 2510 2532 2511 if (in_reply_to) 2533 2512 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); ··· 2565 2546 goto err_alloc; 2566 2547 } 2567 2548 fixup->file = file; 2568 - fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data; 2549 + fixup->offset = fd_offset; 2569 2550 trace_binder_transaction_fd_send(t, fd, fixup->offset); 2570 2551 list_add_tail(&fixup->fixup_entry, &t->fd_fixups); 2571 2552 ··· 2617 2598 return -EINVAL; 2618 2599 } 2619 2600 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2620 - int ret = binder_translate_fd(&fd_array[fdi], t, thread, 2621 - in_reply_to); 2601 + u32 fd; 2602 + int ret; 2603 + binder_size_t offset = 2604 + (uintptr_t)&fd_array[fdi] - 2605 + (uintptr_t)t->buffer->data; 2606 + 2607 + binder_alloc_copy_from_buffer(&target_proc->alloc, 2608 + &fd, t->buffer, 2609 + offset, sizeof(fd)); 2610 + ret = binder_translate_fd(fd, offset, t, thread, 2611 + in_reply_to); 2622 2612 if (ret < 0) 2623 2613 return ret; 2624 2614 } ··· 3094 3066 3095 3067 t->security_ctx = (uintptr_t)kptr + 3096 3068 binder_alloc_get_user_buffer_offset(&target_proc->alloc); 3097 - memcpy(kptr, secctx, secctx_sz); 3069 + binder_alloc_copy_to_buffer(&target_proc->alloc, 3070 + t->buffer, buf_offset, 3071 + secctx, secctx_sz); 3098 3072 security_release_secctx(secctx, secctx_sz); 3099 3073 secctx = NULL; 3100 3074 } ··· 3158 3128 off_min = 0; 3159 3129 for (; offp < off_end; offp++) { 3160 3130 struct binder_object_header *hdr; 3161 - size_t object_size = binder_validate_object(t->buffer, *offp); 3131 + size_t object_size; 3132 + binder_size_t object_offset; 3133 + binder_size_t buffer_offset = 3134 + (uintptr_t)offp - (uintptr_t)t->buffer->data; 3162 3135 3163 - if (object_size == 0 || *offp < off_min) { 3136 + 
binder_alloc_copy_from_buffer(&target_proc->alloc, 3137 + &object_offset, 3138 + t->buffer, 3139 + buffer_offset, 3140 + sizeof(object_offset)); 3141 + object_size = binder_validate_object(t->buffer, object_offset); 3142 + if (object_size == 0 || object_offset < off_min) { 3164 3143 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 3165 - proc->pid, thread->pid, (u64)*offp, 3144 + proc->pid, thread->pid, 3145 + (u64)object_offset, 3166 3146 (u64)off_min, 3167 3147 (u64)t->buffer->data_size); 3168 3148 return_error = BR_FAILED_REPLY; ··· 3181 3141 goto err_bad_offset; 3182 3142 } 3183 3143 3184 - hdr = (struct binder_object_header *)(t->buffer->data + *offp); 3185 - off_min = *offp + object_size; 3144 + hdr = (struct binder_object_header *) 3145 + (t->buffer->data + object_offset); 3146 + off_min = object_offset + object_size; 3186 3147 switch (hdr->type) { 3187 3148 case BINDER_TYPE_BINDER: 3188 3149 case BINDER_TYPE_WEAK_BINDER: { ··· 3214 3173 3215 3174 case BINDER_TYPE_FD: { 3216 3175 struct binder_fd_object *fp = to_binder_fd_object(hdr); 3217 - int ret = binder_translate_fd(&fp->fd, t, thread, 3218 - in_reply_to); 3176 + binder_size_t fd_offset = object_offset + 3177 + (uintptr_t)&fp->fd - (uintptr_t)fp; 3178 + int ret = binder_translate_fd(fp->fd, fd_offset, t, 3179 + thread, in_reply_to); 3219 3180 3220 3181 if (ret < 0) { 3221 3182 return_error = BR_FAILED_REPLY; ··· 4010 3967 4011 3968 /** 4012 3969 * binder_apply_fd_fixups() - finish fd translation 3970 + * @proc: binder_proc associated @t->buffer 4013 3971 * @t: binder transaction with list of fd fixups 4014 3972 * 4015 3973 * Now that we are in the context of the transaction target ··· 4022 3978 * fput'ing files that have not been processed and ksys_close'ing 4023 3979 * any fds that have already been allocated. 
4024 3980 */ 4025 - static int binder_apply_fd_fixups(struct binder_transaction *t) 3981 + static int binder_apply_fd_fixups(struct binder_proc *proc, 3982 + struct binder_transaction *t) 4026 3983 { 4027 3984 struct binder_txn_fd_fixup *fixup, *tmp; 4028 3985 int ret = 0; 4029 3986 4030 3987 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { 4031 3988 int fd = get_unused_fd_flags(O_CLOEXEC); 4032 - u32 *fdp; 4033 3989 4034 3990 if (fd < 0) { 4035 3991 binder_debug(BINDER_DEBUG_TRANSACTION, ··· 4044 4000 trace_binder_transaction_fd_recv(t, fd, fixup->offset); 4045 4001 fd_install(fd, fixup->file); 4046 4002 fixup->file = NULL; 4047 - fdp = (u32 *)(t->buffer->data + fixup->offset); 4048 - /* 4049 - * This store can cause problems for CPUs with a 4050 - * VIVT cache (eg ARMv5) since the cache cannot 4051 - * detect virtual aliases to the same physical cacheline. 4052 - * To support VIVT, this address and the user-space VA 4053 - * would both need to be flushed. Since this kernel 4054 - * VA is not constructed via page_to_virt(), we can't 4055 - * use flush_dcache_page() on it, so we'd have to use 4056 - * an internal function. If devices with VIVT ever 4057 - * need to run Android, we'll either need to go back 4058 - * to patching the translated fd from the sender side 4059 - * (using the non-standard kernel functions), or rework 4060 - * how the kernel uses the buffer to use page_to_virt() 4061 - * addresses instead of allocating in our own vm area. 4062 - * 4063 - * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT. 
4064 - */ 4065 - *fdp = fd; 4003 + binder_alloc_copy_to_buffer(&proc->alloc, t->buffer, 4004 + fixup->offset, &fd, 4005 + sizeof(u32)); 4066 4006 } 4067 4007 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { 4068 4008 if (fixup->file) { 4069 4009 fput(fixup->file); 4070 4010 } else if (ret) { 4071 - u32 *fdp = (u32 *)(t->buffer->data + fixup->offset); 4011 + u32 fd; 4072 4012 4073 - binder_deferred_fd_close(*fdp); 4013 + binder_alloc_copy_from_buffer(&proc->alloc, &fd, 4014 + t->buffer, fixup->offset, 4015 + sizeof(fd)); 4016 + binder_deferred_fd_close(fd); 4074 4017 } 4075 4018 list_del(&fixup->fixup_entry); 4076 4019 kfree(fixup); ··· 4355 4324 trd->sender_pid = 0; 4356 4325 } 4357 4326 4358 - ret = binder_apply_fd_fixups(t); 4327 + ret = binder_apply_fd_fixups(proc, t); 4359 4328 if (ret) { 4360 4329 struct binder_buffer *buffer = t->buffer; 4361 4330 bool oneway = !!(t->flags & TF_ONE_WAY);
+59
drivers/android/binder_alloc.c
··· 1166 1166 } 1167 1167 return 0; 1168 1168 } 1169 + 1170 + static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc, 1171 + bool to_buffer, 1172 + struct binder_buffer *buffer, 1173 + binder_size_t buffer_offset, 1174 + void *ptr, 1175 + size_t bytes) 1176 + { 1177 + /* All copies must be 32-bit aligned and 32-bit size */ 1178 + BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes)); 1179 + 1180 + while (bytes) { 1181 + unsigned long size; 1182 + struct page *page; 1183 + pgoff_t pgoff; 1184 + void *tmpptr; 1185 + void *base_ptr; 1186 + 1187 + page = binder_alloc_get_page(alloc, buffer, 1188 + buffer_offset, &pgoff); 1189 + size = min_t(size_t, bytes, PAGE_SIZE - pgoff); 1190 + base_ptr = kmap_atomic(page); 1191 + tmpptr = base_ptr + pgoff; 1192 + if (to_buffer) 1193 + memcpy(tmpptr, ptr, size); 1194 + else 1195 + memcpy(ptr, tmpptr, size); 1196 + /* 1197 + * kunmap_atomic() takes care of flushing the cache 1198 + * if this device has VIVT cache arch 1199 + */ 1200 + kunmap_atomic(base_ptr); 1201 + bytes -= size; 1202 + pgoff = 0; 1203 + ptr = ptr + size; 1204 + buffer_offset += size; 1205 + } 1206 + } 1207 + 1208 + void binder_alloc_copy_to_buffer(struct binder_alloc *alloc, 1209 + struct binder_buffer *buffer, 1210 + binder_size_t buffer_offset, 1211 + void *src, 1212 + size_t bytes) 1213 + { 1214 + binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, 1215 + src, bytes); 1216 + } 1217 + 1218 + void binder_alloc_copy_from_buffer(struct binder_alloc *alloc, 1219 + void *dest, 1220 + struct binder_buffer *buffer, 1221 + binder_size_t buffer_offset, 1222 + size_t bytes) 1223 + { 1224 + binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, 1225 + dest, bytes); 1226 + } 1227 +
+12
drivers/android/binder_alloc.h
··· 191 191 const void __user *from, 192 192 size_t bytes); 193 193 194 + void binder_alloc_copy_to_buffer(struct binder_alloc *alloc, 195 + struct binder_buffer *buffer, 196 + binder_size_t buffer_offset, 197 + void *src, 198 + size_t bytes); 199 + 200 + void binder_alloc_copy_from_buffer(struct binder_alloc *alloc, 201 + void *dest, 202 + struct binder_buffer *buffer, 203 + binder_size_t buffer_offset, 204 + size_t bytes); 205 + 194 206 #endif /* _LINUX_BINDER_ALLOC_H */ 195 207