Merge tag 'char-misc-6.19-final' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull binder fixes from Greg KH:
"Here are some small, last-minute binder C and Rust driver fixes for
reported issues. They include a number of fixes for reported crashes
and other problems.

All of these have been in linux-next this week, and longer"

* tag 'char-misc-6.19-final' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
binderfs: fix ida_alloc_max() upper bound
rust_binderfs: fix ida_alloc_max() upper bound
binder: fix BR_FROZEN_REPLY error log
rust_binder: add additional alignment checks
binder: fix UAF in binder_netlink_report()
rust_binder: correctly handle FDA objects of length zero

4 files changed, 94 insertions(+), 50 deletions(-)

drivers/android/binder.c (+16 -3)
···
  * @t: the binder transaction that failed
  * @data_size: the user provided data size for the transaction
  * @error: enum binder_driver_return_protocol returned to sender
+ *
+ * Note that t->buffer is not safe to access here, as it may have been
+ * released (or not yet allocated). Callers should guarantee all the
+ * transaction items used here are safe to access.
  */
 static void binder_netlink_report(struct binder_proc *proc,
 				  struct binder_transaction *t,
···
 			goto err_dead_proc_or_thread;
 		}
 	} else {
+		/*
+		 * Make a transaction copy. It is not safe to access 't' after
+		 * binder_proc_transaction() reported a pending frozen. The
+		 * target could thaw and consume the transaction at any point.
+		 * Instead, use a safe 't_copy' for binder_netlink_report().
+		 */
+		struct binder_transaction t_copy = *t;
+
 		BUG_ON(target_node == NULL);
 		BUG_ON(t->buffer->async_transaction != 1);
 		return_error = binder_proc_transaction(t, target_proc, NULL);
···
 	 */
 	if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
 		tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
-		binder_netlink_report(proc, t, tr->data_size,
+		binder_netlink_report(proc, &t_copy, tr->data_size,
 				      return_error);
 	}
 	binder_enqueue_thread_work(thread, tcomplete);
···
 	return;

 err_dead_proc_or_thread:
-	binder_txn_error("%d:%d dead process or thread\n",
-		thread->pid, proc->pid);
+	binder_txn_error("%d:%d %s process or thread\n",
+		proc->pid, thread->pid,
+		return_error == BR_FROZEN_REPLY ? "frozen" : "dead");
 	return_error_line = __LINE__;
 	binder_dequeue_work(proc, tcomplete);
 err_translate_failed:
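
The binder.c changes address two of the fixes listed above: the transaction is
snapshotted on the stack before binder_proc_transaction() may hand it to a
frozen target that could thaw and consume it (the UAF fix), and the
err_dead_proc_or_thread path now reports "frozen" rather than "dead" for
BR_FROZEN_REPLY and corrects the order of the logged pids. A rough sketch of
the copy-before-hand-off pattern, with hypothetical names rather than the
driver's code:

  /* All names here are illustrative; only the pattern mirrors the fix. */
  #include <stddef.h>

  struct txn {
  	int debug_id;
  	/* ... payload owned by whoever consumes the transaction ... */
  };

  struct report_args {
  	int debug_id;
  	size_t data_size;
  };

  /* Hands 't' to its target; 't' may already be freed when this returns. */
  extern int hand_off_to_target(struct txn *t);
  extern void report_pending(const struct report_args *args);

  static int queue_and_report(struct txn *t, size_t data_size)
  {
  	/* Snapshot the fields the report needs while 't' is still ours. */
  	struct report_args args = {
  		.debug_id = t->debug_id,
  		.data_size = data_size,
  	};
  	int ret = hand_off_to_target(t);	/* do not touch 't' past this point */

  	if (ret)
  		report_pending(&args);		/* safe: uses only the local copy */
  	return ret;
  }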

drivers/android/binder/rust_binderfs.c (+4 -4)
···
 	mutex_lock(&binderfs_minors_mutex);
 	if (++info->device_count <= info->mount_opts.max)
 		minor = ida_alloc_max(&binderfs_minors,
-				      use_reserve ? BINDERFS_MAX_MINOR :
-				      BINDERFS_MAX_MINOR_CAPPED,
+				      use_reserve ? BINDERFS_MAX_MINOR - 1 :
+				      BINDERFS_MAX_MINOR_CAPPED - 1,
 				      GFP_KERNEL);
 	else
 		minor = -ENOSPC;
···
 	/* Reserve a new minor number for the new device. */
 	mutex_lock(&binderfs_minors_mutex);
 	minor = ida_alloc_max(&binderfs_minors,
-			      use_reserve ? BINDERFS_MAX_MINOR :
-			      BINDERFS_MAX_MINOR_CAPPED,
+			      use_reserve ? BINDERFS_MAX_MINOR - 1 :
+			      BINDERFS_MAX_MINOR_CAPPED - 1,
 			      GFP_KERNEL);
 	mutex_unlock(&binderfs_minors_mutex);
 	if (minor < 0) {

drivers/android/binder/thread.rs (+70 -39)
···
     sync::atomic::{AtomicU32, Ordering},
 };

+fn is_aligned(value: usize, to: usize) -> bool {
+    value % to == 0
+}
+
 /// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
 /// call and is discarded when it returns.
 struct ScatterGatherState {
···
 }

 /// This entry specifies that a fixup should happen at `target_offset` of the
-/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
-/// and is applied later. Otherwise if `skip` is zero, then the size of the
-/// fixup is `sizeof::<u64>()` and `pointer_value` is written to the buffer.
-struct PointerFixupEntry {
-    /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
-    skip: usize,
-    /// The translated pointer to write when `skip` is zero.
-    pointer_value: u64,
-    /// The offset at which the value should be written. The offset is relative
-    /// to the original buffer.
-    target_offset: usize,
+/// buffer.
+enum PointerFixupEntry {
+    /// A fixup for a `binder_buffer_object`.
+    Fixup {
+        /// The translated pointer to write.
+        pointer_value: u64,
+        /// The offset at which the value should be written. The offset is relative
+        /// to the original buffer.
+        target_offset: usize,
+    },
+    /// A skip for a `binder_fd_array_object`.
+    Skip {
+        /// The number of bytes to skip.
+        skip: usize,
+        /// The offset at which the skip should happen. The offset is relative
+        /// to the original buffer.
+        target_offset: usize,
+    },
 }

 /// Return type of `apply_and_validate_fixup_in_parent`.
···

     parent_entry.fixup_min_offset = info.new_min_offset;
     parent_entry.pointer_fixups.push(
-        PointerFixupEntry {
-            skip: 0,
+        PointerFixupEntry::Fixup {
             pointer_value: buffer_ptr_in_user_space,
             target_offset: info.target_offset,
         },
···
     let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
     let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;

+    if !is_aligned(parent_offset, size_of::<u32>()) {
+        return Err(EINVAL.into());
+    }
+
     let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
     view.alloc.info_add_fd_reserve(num_fds)?;
···
         }
     };

+    if !is_aligned(parent_entry.sender_uaddr, size_of::<u32>()) {
+        return Err(EINVAL.into());
+    }
+
     parent_entry.fixup_min_offset = info.new_min_offset;
     parent_entry
         .pointer_fixups
         .push(
-            PointerFixupEntry {
+            PointerFixupEntry::Skip {
                 skip: fds_len,
-                pointer_value: 0,
                 target_offset: info.target_offset,
             },
             GFP_KERNEL,
···
         .sender_uaddr
         .checked_add(parent_offset)
         .ok_or(EINVAL)?;
+
     let mut fda_bytes = KVec::new();
     UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
         .read_all(&mut fda_bytes, GFP_KERNEL)?;
···
     let mut reader =
         UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
     for fixup in &mut sg_entry.pointer_fixups {
-        let fixup_len = if fixup.skip == 0 {
-            size_of::<u64>()
-        } else {
-            fixup.skip
+        let (fixup_len, fixup_offset) = match fixup {
+            PointerFixupEntry::Fixup { target_offset, .. } => {
+                (size_of::<u64>(), *target_offset)
+            }
+            PointerFixupEntry::Skip {
+                skip,
+                target_offset,
+            } => (*skip, *target_offset),
         };

-        let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
-        if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
+        let target_offset_end = fixup_offset.checked_add(fixup_len).ok_or(EINVAL)?;
+        if fixup_offset < end_of_previous_fixup || offset_end < target_offset_end {
             pr_warn!(
                 "Fixups oob {} {} {} {}",
-                fixup.target_offset,
+                fixup_offset,
                 end_of_previous_fixup,
                 offset_end,
                 target_offset_end
···
         }

         let copy_off = end_of_previous_fixup;
-        let copy_len = fixup.target_offset - end_of_previous_fixup;
+        let copy_len = fixup_offset - end_of_previous_fixup;
         if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
             pr_warn!("Failed copying into alloc: {:?}", err);
             return Err(err.into());
         }
-        if fixup.skip == 0 {
-            let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
+        if let PointerFixupEntry::Fixup { pointer_value, .. } = fixup {
+            let res = alloc.write::<u64>(fixup_offset, pointer_value);
             if let Err(err) = res {
                 pr_warn!("Failed copying ptr into alloc: {:?}", err);
                 return Err(err.into());
···

     let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
     let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
-    let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
-    let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
-    let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
-    let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
+    let offsets_size: usize = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
+    let buffers_size: usize = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
     let aligned_secctx_size = match secctx.as_ref() {
         Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
         None => 0,
     };

+    if !is_aligned(offsets_size, size_of::<u64>()) {
+        return Err(EINVAL.into());
+    }
+    if !is_aligned(buffers_size, size_of::<u64>()) {
+        return Err(EINVAL.into());
+    }
+
     // This guarantees that at least `sizeof(usize)` bytes will be allocated.
     let len = usize::max(
         aligned_data_size
-            .checked_add(aligned_offsets_size)
-            .and_then(|sum| sum.checked_add(aligned_buffers_size))
+            .checked_add(offsets_size)
+            .and_then(|sum| sum.checked_add(buffers_size))
             .and_then(|sum| sum.checked_add(aligned_secctx_size))
             .ok_or(ENOMEM)?,
-        size_of::<usize>(),
+        size_of::<u64>(),
     );
-    let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
+    let secctx_off = aligned_data_size + offsets_size + buffers_size;
     let mut alloc =
         match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
             Ok(alloc) => alloc,
···
     }

     let offsets_start = aligned_data_size;
-    let offsets_end = aligned_data_size + aligned_offsets_size;
+    let offsets_end = aligned_data_size + offsets_size;

     // This state is used for BINDER_TYPE_PTR objects.
     let sg_state = sg_state.insert(ScatterGatherState {
         unused_buffer_space: UnusedBufferSpace {
             offset: offsets_end,
-            limit: len,
+            limit: offsets_end + buffers_size,
         },
         sg_entries: KVec::new(),
         ancestors: KVec::new(),
···
     // Traverse the objects specified.
     let mut view = AllocationView::new(&mut alloc, data_size);
     for (index, index_offset) in (offsets_start..offsets_end)
-        .step_by(size_of::<usize>())
+        .step_by(size_of::<u64>())
         .enumerate()
     {
-        let offset = view.alloc.read(index_offset)?;
+        let offset: usize = view
+            .alloc
+            .read::<u64>(index_offset)?
+            .try_into()
+            .map_err(|_| EINVAL)?;

-        if offset < end_of_previous_object {
+        if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
             pr_warn!("Got transaction with invalid offset.");
             return Err(EINVAL.into());
         }
···
     }

     // Update the indexes containing objects to clean up.
-    let offset_after_object = index_offset + size_of::<usize>();
+    let offset_after_object = index_offset + size_of::<u64>();
     view.alloc
         .set_info_offsets(offsets_start..offset_after_object);
 }
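
The thread.rs diff makes the Rust driver reject misaligned user input (offset
and buffer sizes must be multiples of 8 bytes, buffer offsets multiples of 4)
and replaces the old PointerFixupEntry struct, which used `skip == 0` as the
marker for a pointer fixup, with an explicit enum. With the struct encoding, a
binder_fd_array_object containing zero fds produced a skip length of 0 and was
misinterpreted as a pointer fixup; the enum makes the two cases unambiguous.
As an illustrative C sketch of the added alignment validation (not the
driver's exact code):

  #include <linux/align.h>
  #include <linux/errno.h>
  #include <linux/types.h>

  /* Reject user-supplied sizes/offsets before using them to index the
   * transaction buffer: sizes must be u64-aligned, offsets u32-aligned. */
  static int check_sg_sizes(u64 offsets_size, u64 buffers_size, u64 buffer_offset)
  {
  	if (!IS_ALIGNED(offsets_size, sizeof(u64)) ||
  	    !IS_ALIGNED(buffers_size, sizeof(u64)))
  		return -EINVAL;
  	if (!IS_ALIGNED(buffer_offset, sizeof(u32)))
  		return -EINVAL;
  	return 0;
  }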

drivers/android/binderfs.c (+4 -4)
···
 	mutex_lock(&binderfs_minors_mutex);
 	if (++info->device_count <= info->mount_opts.max)
 		minor = ida_alloc_max(&binderfs_minors,
-				      use_reserve ? BINDERFS_MAX_MINOR :
-				      BINDERFS_MAX_MINOR_CAPPED,
+				      use_reserve ? BINDERFS_MAX_MINOR - 1 :
+				      BINDERFS_MAX_MINOR_CAPPED - 1,
 				      GFP_KERNEL);
 	else
 		minor = -ENOSPC;
···
 	/* Reserve a new minor number for the new device. */
 	mutex_lock(&binderfs_minors_mutex);
 	minor = ida_alloc_max(&binderfs_minors,
-			      use_reserve ? BINDERFS_MAX_MINOR :
-			      BINDERFS_MAX_MINOR_CAPPED,
+			      use_reserve ? BINDERFS_MAX_MINOR - 1 :
+			      BINDERFS_MAX_MINOR_CAPPED - 1,
 			      GFP_KERNEL);
 	mutex_unlock(&binderfs_minors_mutex);
 	if (minor < 0) {
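
The binderfs change is identical in drivers/android/binderfs.c and
drivers/android/binder/rust_binderfs.c: ida_alloc_max() allocates from the
inclusive range [0, max], so passing BINDERFS_MAX_MINOR and
BINDERFS_MAX_MINOR_CAPPED unmodified allowed one more minor than intended;
subtracting one restores the limit. An illustrative sketch of the off-by-one,
using made-up values rather than the binderfs macros:

  #include <linux/idr.h>
  #include <linux/gfp.h>

  #define EXAMPLE_MINOR_COUNT 1024	/* valid minors are 0..1023 */

  static DEFINE_IDA(example_minors);

  static int example_alloc_minor(void)
  {
  	/* Passing EXAMPLE_MINOR_COUNT itself could also return minor 1024,
  	 * because ida_alloc_max() allocates in the inclusive range [0, max]. */
  	return ida_alloc_max(&example_minors, EXAMPLE_MINOR_COUNT - 1, GFP_KERNEL);
  }

  static void example_free_minor(int minor)
  {
  	ida_free(&example_minors, minor);
  }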