Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

binder: defer copies of pre-patched txn data

BINDER_TYPE_PTR objects point to memory areas in the
source process to be copied into the target buffer
as part of a transaction. This implements a scatter-
gather model where non-contiguous memory in a source
process is "gathered" into a contiguous region in
the target buffer.
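
To make the model concrete (this example is not part of the patch), the
sketch below shows how a sender might describe two non-contiguous regions
with struct binder_buffer_object from the binder UAPI header. The payload
struct, some_array and fill_sg_objects() are hypothetical names used only
for illustration:

    #include <stddef.h>
    #include <string.h>
    #include <linux/android/binder.h>

    /* Hypothetical payload: a header whose 'data' field points to a
     * separate array that must also be sent (and fixed up).
     */
    struct payload {
            __u32 count;
            binder_uintptr_t data;          /* pointer the kernel fixes up */
    };

    static __u32 some_array[16];            /* second, non-contiguous region */

    static void fill_sg_objects(struct payload *p,
                                struct binder_buffer_object *parent_obj,
                                struct binder_buffer_object *child_obj)
    {
            p->count = 16;
            p->data = (binder_uintptr_t)some_array;

            /* First BINDER_TYPE_PTR object: the payload struct itself */
            memset(parent_obj, 0, sizeof(*parent_obj));
            parent_obj->hdr.type = BINDER_TYPE_PTR;
            parent_obj->buffer = (binder_uintptr_t)p;
            parent_obj->length = sizeof(*p);

            /* Second object: the array p->data points to. parent and
             * parent_offset tell the kernel which field of the first
             * region must be rewritten to point at the copied array in
             * the target buffer.
             */
            memset(child_obj, 0, sizeof(*child_obj));
            child_obj->hdr.type = BINDER_TYPE_PTR;
            child_obj->flags = BINDER_BUFFER_FLAG_HAS_PARENT;
            child_obj->buffer = (binder_uintptr_t)some_array;
            child_obj->length = sizeof(some_array);
            child_obj->parent = 0;  /* index of parent_obj in offsets[] */
            child_obj->parent_offset = offsetof(struct payload, data);
    }

The kernel gathers both regions into the target buffer and rewrites the
'data' field, at parent_offset within the copied parent region, so it
points at the copy of some_array in the target buffer.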

The data can include pointers that must be fixed up
to correctly point to the copied data. To avoid making
source process pointers visible to the target process,
this patch defers the copy until the fixups are known
and then the copies and fixups are done together.
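
The new helpers (binder_defer_copy(), binder_add_fixup() and
binder_do_deferred_txn_copies() in the diff below) queue the
scatter-gather copies and the fixups on two lists ordered by
target-buffer offset. As a rough, userspace-only model of the combined
copy-and-fixup loop (not the kernel code itself; copy_with_fixups() and
its types are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* One queued fixup: either overwrite 8 bytes at 'offset' with 'data',
     * or (when skip_size != 0) leave skip_size bytes un-copied so they
     * can be patched later.
     */
    struct fixup {
            size_t offset;
            uint64_t data;
            size_t skip_size;
    };

    /* Copy 'len' source bytes to dst + dst_off, stopping at each fixup
     * offset (fixups[] must be ordered and fall inside the range) and
     * writing the fixup value, or skipping, instead of the source bytes.
     */
    static void copy_with_fixups(uint8_t *dst, size_t dst_off,
                                 const uint8_t *src, size_t len,
                                 const struct fixup *fixups, size_t nfixups)
    {
            size_t copied = 0, fi = 0;

            while (copied < len) {
                    size_t off = dst_off + copied;
                    size_t left = len - copied;
                    size_t chunk = left;

                    if (fi < nfixups && fixups[fi].offset - off < left)
                            chunk = fixups[fi].offset - off;

                    memcpy(dst + off, src + copied, chunk);
                    copied += chunk;

                    if (chunk != left) {    /* stopped at a fixup offset */
                            if (fixups[fi].skip_size) {
                                    /* e.g. an fd array patched later */
                                    copied += fixups[fi].skip_size;
                            } else {
                                    memcpy(dst + fixups[fi].offset,
                                           &fixups[fi].data,
                                           sizeof(fixups[fi].data));
                                    copied += sizeof(fixups[fi].data);
                            }
                            fi++;
                    }
            }
    }

    int main(void)
    {
            uint8_t src[24];
            uint8_t dst[24];
            /* pretend the 8 bytes at offset 8 are a pointer that must be
             * replaced with its translated (target-side) value
             */
            struct fixup f = { .offset = 8, .data = 0x1000, .skip_size = 0 };

            memset(src, 'A', sizeof(src));
            memset(dst, 0, sizeof(dst));
            copy_with_fixups(dst, 0, src, sizeof(src), &f, 1);
            printf("dst[0]=%c, bytes 8..15 hold the fixup, dst[16]=%c\n",
                   dst[0], dst[16]);
            return 0;
    }

Source bytes are copied up to each queued fixup offset; at the fixup,
either the translated value is written or, when skip_size is set, the
bytes are left to be patched later.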

There is a special case of BINDER_TYPE_FDA which applies
the fixup later in the target process context. In this
case the user data is skipped (so no untranslated fds
become visible to the target).
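
For reference, the sender describes such an fd array with struct
binder_fd_array_object (BINDER_TYPE_FDA) from the binder UAPI, pointing
into an earlier BINDER_TYPE_PTR buffer. A minimal sketch, where
fd_payload and fill_fda_object() are hypothetical:

    #include <stddef.h>
    #include <string.h>
    #include <linux/android/binder.h>

    /* Hypothetical parent payload carrying the raw fds in-line */
    struct fd_payload {
            __u32 num;
            __u32 fds[3];           /* sender-local fd numbers */
    };

    static void fill_fda_object(struct binder_fd_array_object *fda,
                                binder_size_t parent_index)
    {
            memset(fda, 0, sizeof(*fda));
            fda->hdr.type = BINDER_TYPE_FDA;
            fda->num_fds = 3;
            /* which earlier BINDER_TYPE_PTR buffer carries the fd slots */
            fda->parent = parent_index;
            /* byte offset of the first fd slot inside that buffer */
            fda->parent_offset = offsetof(struct fd_payload, fds);
    }

With this patch, the fd slots at parent_offset are skipped during the
deferred copy (skip_size = num_fds * sizeof(u32)) and the translated fds
are written later, in the target process context.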

Reviewed-by: Martijn Coenen <maco@android.com>
Signed-off-by: Todd Kjos <tkjos@google.com>
Link: https://lore.kernel.org/r/20211130185152.437403-5-tkjos@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Todd Kjos, committed by Greg Kroah-Hartman
09184ae9 656e01f3

+274 -25
drivers/android/binder.c
···
         return ret;
 }
 
-static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+/**
+ * struct binder_ptr_fixup - data to be fixed-up in target buffer
+ * @offset     offset in target buffer to fixup
+ * @skip_size  bytes to skip in copy (fixup will be written later)
+ * @fixup_data data to write at fixup offset
+ * @node       list node
+ *
+ * This is used for the pointer fixup list (pf) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_ptr_fixup {
+        binder_size_t offset;
+        size_t skip_size;
+        binder_uintptr_t fixup_data;
+        struct list_head node;
+};
+
+/**
+ * struct binder_sg_copy - scatter-gather data to be copied
+ * @offset       offset in target buffer
+ * @sender_uaddr user address in source buffer
+ * @length       bytes to copy
+ * @node         list node
+ *
+ * This is used for the sg copy list (sgc) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_sg_copy {
+        binder_size_t offset;
+        const void __user *sender_uaddr;
+        size_t length;
+        struct list_head node;
+};
+
+/**
+ * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
+ * @alloc:      binder_alloc associated with @buffer
+ * @buffer:     binder buffer in target process
+ * @sgc_head:   list_head of scatter-gather copy list
+ * @pf_head:    list_head of pointer fixup list
+ *
+ * Processes all elements of @sgc_head, applying fixups from @pf_head
+ * and copying the scatter-gather data from the source process' user
+ * buffer to the target's buffer. It is expected that the list creation
+ * and processing all occurs during binder_transaction() so these lists
+ * are only accessed in local context.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
+                                         struct binder_buffer *buffer,
+                                         struct list_head *sgc_head,
+                                         struct list_head *pf_head)
+{
+        int ret = 0;
+        struct binder_sg_copy *sgc, *tmpsgc;
+        struct binder_ptr_fixup *pf =
+                list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
+                                         node);
+
+        list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+                size_t bytes_copied = 0;
+
+                while (bytes_copied < sgc->length) {
+                        size_t copy_size;
+                        size_t bytes_left = sgc->length - bytes_copied;
+                        size_t offset = sgc->offset + bytes_copied;
+
+                        /*
+                         * We copy up to the fixup (pointed to by pf)
+                         */
+                        copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
+                                       : bytes_left;
+                        if (!ret && copy_size)
+                                ret = binder_alloc_copy_user_to_buffer(
+                                                alloc, buffer,
+                                                offset,
+                                                sgc->sender_uaddr + bytes_copied,
+                                                copy_size);
+                        bytes_copied += copy_size;
+                        if (copy_size != bytes_left) {
+                                BUG_ON(!pf);
+                                /* we stopped at a fixup offset */
+                                if (pf->skip_size) {
+                                        /*
+                                         * we are just skipping. This is for
+                                         * BINDER_TYPE_FDA where the translated
+                                         * fds will be fixed up when we get
+                                         * to target context.
+                                         */
+                                        bytes_copied += pf->skip_size;
+                                } else {
+                                        /* apply the fixup indicated by pf */
+                                        if (!ret)
+                                                ret = binder_alloc_copy_to_buffer(
+                                                        alloc, buffer,
+                                                        pf->offset,
+                                                        &pf->fixup_data,
+                                                        sizeof(pf->fixup_data));
+                                        bytes_copied += sizeof(pf->fixup_data);
+                                }
+                                list_del(&pf->node);
+                                kfree(pf);
+                                pf = list_first_entry_or_null(pf_head,
+                                                struct binder_ptr_fixup, node);
+                        }
+                }
+                list_del(&sgc->node);
+                kfree(sgc);
+        }
+        BUG_ON(!list_empty(pf_head));
+        BUG_ON(!list_empty(sgc_head));
+
+        return ret > 0 ? -EINVAL : ret;
+}
+
+/**
+ * binder_cleanup_deferred_txn_lists() - free specified lists
+ * @sgc_head:   list_head of scatter-gather copy list
+ * @pf_head:    list_head of pointer fixup list
+ *
+ * Called to clean up @sgc_head and @pf_head if there is an
+ * error.
+ */
+static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
+                                              struct list_head *pf_head)
+{
+        struct binder_sg_copy *sgc, *tmpsgc;
+        struct binder_ptr_fixup *pf, *tmppf;
+
+        list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+                list_del(&sgc->node);
+                kfree(sgc);
+        }
+        list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+                list_del(&pf->node);
+                kfree(pf);
+        }
+}
+
+/**
+ * binder_defer_copy() - queue a scatter-gather buffer for copy
+ * @sgc_head:      list_head of scatter-gather copy list
+ * @offset:        binder buffer offset in target process
+ * @sender_uaddr:  user address in source process
+ * @length:        bytes to copy
+ *
+ * Specify a scatter-gather block to be copied. The actual copy must
+ * be deferred until all the needed fixups are identified and queued.
+ * Then the copy and fixups are done together so un-translated values
+ * from the source are never visible in the target buffer.
+ *
+ * We are guaranteed that repeated calls to this function will have
+ * monotonically increasing @offset values so the list will naturally
+ * be ordered.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
+                             const void __user *sender_uaddr, size_t length)
+{
+        struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
+
+        if (!bc)
+                return -ENOMEM;
+
+        bc->offset = offset;
+        bc->sender_uaddr = sender_uaddr;
+        bc->length = length;
+        INIT_LIST_HEAD(&bc->node);
+
+        /*
+         * We are guaranteed that the deferred copies are in-order
+         * so just add to the tail.
+         */
+        list_add_tail(&bc->node, sgc_head);
+
+        return 0;
+}
+
+/**
+ * binder_add_fixup() - queue a fixup to be applied to sg copy
+ * @pf_head:    list_head of binder ptr fixup list
+ * @offset:     binder buffer offset in target process
+ * @fixup:      bytes to be copied for fixup
+ * @skip_size:  bytes to skip when copying (fixup will be applied later)
+ *
+ * Add the specified fixup to a list ordered by @offset. When copying
+ * the scatter-gather buffers, the fixup will be copied instead of
+ * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
+ * will be applied later (in target process context), so we just skip
+ * the bytes specified by @skip_size. If @skip_size is 0, we copy the
+ * value in @fixup.
+ *
+ * This function is called *mostly* in @offset order, but there are
+ * exceptions. Since out-of-order inserts are relatively uncommon,
+ * we insert the new element by searching backward from the tail of
+ * the list.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
+                            binder_uintptr_t fixup, size_t skip_size)
+{
+        struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+        struct binder_ptr_fixup *tmppf;
+
+        if (!pf)
+                return -ENOMEM;
+
+        pf->offset = offset;
+        pf->fixup_data = fixup;
+        pf->skip_size = skip_size;
+        INIT_LIST_HEAD(&pf->node);
+
+        /* Fixups are *mostly* added in-order, but there are some
+         * exceptions. Look backwards through list for insertion point.
+         */
+        list_for_each_entry_reverse(tmppf, pf_head, node) {
+                if (tmppf->offset < pf->offset) {
+                        list_add(&pf->node, &tmppf->node);
+                        return 0;
+                }
+        }
+        /*
+         * if we get here, then the new offset is the lowest so
+         * insert at the head
+         */
+        list_add(&pf->node, pf_head);
+        return 0;
+}
+
+static int binder_translate_fd_array(struct list_head *pf_head,
+                                     struct binder_fd_array_object *fda,
                                      const void __user *sender_ubuffer,
                                      struct binder_buffer_object *parent,
                                      struct binder_buffer_object *sender_uparent,
···
         binder_size_t fda_offset;
         const void __user *sender_ufda_base;
         struct binder_proc *proc = thread->proc;
+        int ret;
 
         fd_buf_size = sizeof(u32) * fda->num_fds;
         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
···
                           proc->pid, thread->pid);
                 return -EINVAL;
         }
+        ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
+        if (ret)
+                return ret;
+
         for (fdi = 0; fdi < fda->num_fds; fdi++) {
                 u32 fd;
-                int ret;
                 binder_size_t offset = fda_offset + fdi * sizeof(fd);
                 binder_size_t sender_uoffset = fdi * sizeof(fd);
···
         return 0;
 }
 
-static int binder_fixup_parent(struct binder_transaction *t,
+static int binder_fixup_parent(struct list_head *pf_head,
+                               struct binder_transaction *t,
                                struct binder_thread *thread,
                                struct binder_buffer_object *bp,
                                binder_size_t off_start_offset,
···
         }
         buffer_offset = bp->parent_offset +
                 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
-        if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
-                                        &bp->buffer, sizeof(bp->buffer))) {
-                binder_user_error("%d:%d got transaction with invalid parent offset\n",
-                                  proc->pid, thread->pid);
-                return -EINVAL;
-        }
-
-        return 0;
+        return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
 }
 
 /**
···
         int t_debug_id = atomic_inc_return(&binder_last_id);
         char *secctx = NULL;
         u32 secctx_sz = 0;
+        struct list_head sgc_head;
+        struct list_head pf_head;
         const void __user *user_buffer = (const void __user *)
                                 (uintptr_t)tr->data.ptr.buffer;
+        INIT_LIST_HEAD(&sgc_head);
+        INIT_LIST_HEAD(&pf_head);
 
         e = binder_transaction_log_add(&binder_transaction_log);
         e->debug_id = t_debug_id;
···
                                 return_error_line = __LINE__;
                                 goto err_bad_parent;
                         }
-                        ret = binder_translate_fd_array(fda, user_buffer,
-                                                        parent,
+                        ret = binder_translate_fd_array(&pf_head, fda,
+                                                        user_buffer, parent,
                                                         &user_object.bbo, t,
                                                         thread, in_reply_to);
                         if (!ret)
···
                                 return_error_line = __LINE__;
                                 goto err_bad_offset;
                         }
-                        if (binder_alloc_copy_user_to_buffer(
-                                                &target_proc->alloc,
-                                                t->buffer,
-                                                sg_buf_offset,
-                                                (const void __user *)
-                                                        (uintptr_t)bp->buffer,
-                                                bp->length)) {
-                                binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
-                                                  proc->pid, thread->pid);
-                                return_error_param = -EFAULT;
+                        ret = binder_defer_copy(&sgc_head, sg_buf_offset,
+                                (const void __user *)(uintptr_t)bp->buffer,
+                                bp->length);
+                        if (ret) {
                                 return_error = BR_FAILED_REPLY;
+                                return_error_param = ret;
                                 return_error_line = __LINE__;
-                                goto err_copy_data_failed;
+                                goto err_translate_failed;
                         }
                         /* Fixup buffer pointer to target proc address space */
                         bp->buffer = (uintptr_t)
···
 
                         num_valid = (buffer_offset - off_start_offset) /
                                         sizeof(binder_size_t);
-                        ret = binder_fixup_parent(t, thread, bp,
+                        ret = binder_fixup_parent(&pf_head, t,
+                                                  thread, bp,
                                                   off_start_offset,
                                                   num_valid,
                                                   last_fixup_obj_off,
···
                           proc->pid, thread->pid);
                 return_error = BR_FAILED_REPLY;
                 return_error_param = -EFAULT;
+                return_error_line = __LINE__;
+                goto err_copy_data_failed;
+        }
+
+        ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
+                                            &sgc_head, &pf_head);
+        if (ret) {
+                binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+                                  proc->pid, thread->pid);
+                return_error = BR_FAILED_REPLY;
+                return_error_param = ret;
                 return_error_line = __LINE__;
                 goto err_copy_data_failed;
         }
···
 err_bad_offset:
 err_bad_parent:
 err_copy_data_failed:
+        binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
         binder_free_txn_fixups(t);
         trace_binder_transaction_failed_buffer_release(t->buffer);
         binder_transaction_buffer_release(target_proc, NULL, t->buffer,