Merge tag 'char-misc-6.19-final' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull binder fixes from Greg KH:
"Here are some small, last-minute binder C and Rust driver fixes for
reported issues. They include a number of fixes for reported crashes
and other problems.

All of these have been in linux-next this week, some for longer"

* tag 'char-misc-6.19-final' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
binderfs: fix ida_alloc_max() upper bound
rust_binderfs: fix ida_alloc_max() upper bound
binder: fix BR_FROZEN_REPLY error log
rust_binder: add additional alignment checks
binder: fix UAF in binder_netlink_report()
rust_binder: correctly handle FDA objects of length zero

 4 files changed, 94 insertions(+), 50 deletions(-)

drivers/android/binder.c (+16 -3)
···
  * @t: the binder transaction that failed
  * @data_size: the user provided data size for the transaction
  * @error: enum binder_driver_return_protocol returned to sender
+ *
+ * Note that t->buffer is not safe to access here, as it may have been
+ * released (or not yet allocated). Callers should guarantee all the
+ * transaction items used here are safe to access.
  */
 static void binder_netlink_report(struct binder_proc *proc,
                                   struct binder_transaction *t,
···
                         goto err_dead_proc_or_thread;
                 }
         } else {
+                /*
+                 * Make a transaction copy. It is not safe to access 't' after
+                 * binder_proc_transaction() reported a pending frozen. The
+                 * target could thaw and consume the transaction at any point.
+                 * Instead, use a safe 't_copy' for binder_netlink_report().
+                 */
+                struct binder_transaction t_copy = *t;
+
                 BUG_ON(target_node == NULL);
                 BUG_ON(t->buffer->async_transaction != 1);
                 return_error = binder_proc_transaction(t, target_proc, NULL);
···
                  */
                 if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
                         tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
-                        binder_netlink_report(proc, t, tr->data_size,
+                        binder_netlink_report(proc, &t_copy, tr->data_size,
                                               return_error);
                 }
                 binder_enqueue_thread_work(thread, tcomplete);
···
         return;

 err_dead_proc_or_thread:
-        binder_txn_error("%d:%d dead process or thread\n",
-                thread->pid, proc->pid);
+        binder_txn_error("%d:%d %s process or thread\n",
+                proc->pid, thread->pid,
+                return_error == BR_FROZEN_REPLY ? "frozen" : "dead");
         return_error_line = __LINE__;
         binder_dequeue_work(proc, tcomplete);
 err_translate_failed:
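
The binder.c change fixes a use-after-free in the BR_TRANSACTION_PENDING_FROZEN path: once binder_proc_transaction() has queued the transaction, the frozen target may thaw and consume (and free) 't' at any point, so binder_netlink_report() must read from a stack copy instead. A minimal userspace sketch of that copy-before-handoff pattern (struct work, queue_push() and report() are illustrative names, not binder's):

/*
 * Userspace sketch of the ownership rule behind the UAF fix: once a work
 * item is handed to another owner it may be freed at any time, so anything
 * needed for later reporting is copied to the stack first.
 */
#include <stdio.h>
#include <stdlib.h>

struct work {
        int debug_id;
        size_t data_size;
};

/* Stand-in consumer: takes ownership and may free the item immediately. */
static void queue_push(struct work *w)
{
        free(w);
}

static void report(const struct work *w, int error)
{
        fprintf(stderr, "work %d (size %zu) failed: %d\n",
                w->debug_id, w->data_size, error);
}

int main(void)
{
        struct work *w = malloc(sizeof(*w));

        if (!w)
                return 1;
        w->debug_id = 42;
        w->data_size = 128;

        struct work w_copy = *w;        /* copy before losing ownership */

        queue_push(w);                  /* 'w' may already be freed here */
        report(&w_copy, -1);            /* safe: reads the copy, not 'w' */
        return 0;
}

The rule generalizes: once ownership of an object passes to another thread, anything still needed afterwards, even just for an error log, has to be captured before the handoff.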

drivers/android/binder/rust_binderfs.c (+4 -4)
···
         mutex_lock(&binderfs_minors_mutex);
         if (++info->device_count <= info->mount_opts.max)
                 minor = ida_alloc_max(&binderfs_minors,
-                                      use_reserve ? BINDERFS_MAX_MINOR :
-                                      BINDERFS_MAX_MINOR_CAPPED,
+                                      use_reserve ? BINDERFS_MAX_MINOR - 1 :
+                                      BINDERFS_MAX_MINOR_CAPPED - 1,
                                       GFP_KERNEL);
         else
                 minor = -ENOSPC;
···
         /* Reserve a new minor number for the new device. */
         mutex_lock(&binderfs_minors_mutex);
         minor = ida_alloc_max(&binderfs_minors,
-                              use_reserve ? BINDERFS_MAX_MINOR :
-                              BINDERFS_MAX_MINOR_CAPPED,
+                              use_reserve ? BINDERFS_MAX_MINOR - 1 :
+                              BINDERFS_MAX_MINOR_CAPPED - 1,
                               GFP_KERNEL);
         mutex_unlock(&binderfs_minors_mutex);
         if (minor < 0) {
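
Both binderfs fixes correct the same off-by-one: ida_alloc_max() treats its bound as inclusive, returning an ID in [0, max], so passing the minor count BINDERFS_MAX_MINOR allowed one minor too many. The fix passes count - 1. A userspace sketch of the inclusive contract (toy_alloc_max() and MAX_MINOR are illustrative stand-ins, not the kernel API):

/*
 * Sketch of the off-by-one: toy_alloc_max() mimics ida_alloc_max()'s
 * *inclusive* upper bound, so a count must be passed as count - 1.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_MINOR 4     /* stand-in for BINDERFS_MAX_MINOR: IDs 0..3 wanted */

static bool used[MAX_MINOR + 1];

/* Return the lowest free ID in [0, max], or -1 if none (inclusive bound). */
static int toy_alloc_max(int max)
{
        for (int id = 0; id <= max; id++) {
                if (!used[id]) {
                        used[id] = true;
                        return id;
                }
        }
        return -1;
}

int main(void)
{
        /* Buggy call: max == MAX_MINOR hands out IDs 0..4, i.e. 5 minors. */
        for (int i = 0; i < 6; i++)
                printf("alloc -> %d\n", toy_alloc_max(MAX_MINOR));
        /* Fixed call: max == MAX_MINOR - 1 would stop after IDs 0..3. */
        return 0;
}

With max = MAX_MINOR the program hands out five IDs (0 through 4) before failing; with max = MAX_MINOR - 1 it would stop after exactly four. The identical change lands in binderfs.c below.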

drivers/android/binder/thread.rs (+70 -39)
···
     sync::atomic::{AtomicU32, Ordering},
 };

+fn is_aligned(value: usize, to: usize) -> bool {
+    value % to == 0
+}
+
 /// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
 /// call and is discarded when it returns.
 struct ScatterGatherState {
···
 }

 /// This entry specifies that a fixup should happen at `target_offset` of the
-/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
-/// and is applied later. Otherwise if `skip` is zero, then the size of the
-/// fixup is `sizeof::<u64>()` and `pointer_value` is written to the buffer.
-struct PointerFixupEntry {
-    /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
-    skip: usize,
-    /// The translated pointer to write when `skip` is zero.
-    pointer_value: u64,
-    /// The offset at which the value should be written. The offset is relative
-    /// to the original buffer.
-    target_offset: usize,
+/// buffer.
+enum PointerFixupEntry {
+    /// A fixup for a `binder_buffer_object`.
+    Fixup {
+        /// The translated pointer to write.
+        pointer_value: u64,
+        /// The offset at which the value should be written. The offset is relative
+        /// to the original buffer.
+        target_offset: usize,
+    },
+    /// A skip for a `binder_fd_array_object`.
+    Skip {
+        /// The number of bytes to skip.
+        skip: usize,
+        /// The offset at which the skip should happen. The offset is relative
+        /// to the original buffer.
+        target_offset: usize,
+    },
 }

 /// Return type of `apply_and_validate_fixup_in_parent`.
···

         parent_entry.fixup_min_offset = info.new_min_offset;
         parent_entry.pointer_fixups.push(
-            PointerFixupEntry {
-                skip: 0,
+            PointerFixupEntry::Fixup {
                 pointer_value: buffer_ptr_in_user_space,
                 target_offset: info.target_offset,
             },
···
         let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
         let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;

+        if !is_aligned(parent_offset, size_of::<u32>()) {
+            return Err(EINVAL.into());
+        }
+
         let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
         view.alloc.info_add_fd_reserve(num_fds)?;
···
             }
         };

+        if !is_aligned(parent_entry.sender_uaddr, size_of::<u32>()) {
+            return Err(EINVAL.into());
+        }
+
         parent_entry.fixup_min_offset = info.new_min_offset;
         parent_entry
             .pointer_fixups
             .push(
-                PointerFixupEntry {
+                PointerFixupEntry::Skip {
                     skip: fds_len,
-                    pointer_value: 0,
                     target_offset: info.target_offset,
                 },
                 GFP_KERNEL,
···
             .sender_uaddr
             .checked_add(parent_offset)
             .ok_or(EINVAL)?;
+
         let mut fda_bytes = KVec::new();
         UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
             .read_all(&mut fda_bytes, GFP_KERNEL)?;
···
         let mut reader =
             UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
         for fixup in &mut sg_entry.pointer_fixups {
-            let fixup_len = if fixup.skip == 0 {
-                size_of::<u64>()
-            } else {
-                fixup.skip
+            let (fixup_len, fixup_offset) = match fixup {
+                PointerFixupEntry::Fixup { target_offset, .. } => {
+                    (size_of::<u64>(), *target_offset)
+                }
+                PointerFixupEntry::Skip {
+                    skip,
+                    target_offset,
+                } => (*skip, *target_offset),
             };

-            let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
-            if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
+            let target_offset_end = fixup_offset.checked_add(fixup_len).ok_or(EINVAL)?;
+            if fixup_offset < end_of_previous_fixup || offset_end < target_offset_end {
                 pr_warn!(
                     "Fixups oob {} {} {} {}",
-                    fixup.target_offset,
+                    fixup_offset,
                     end_of_previous_fixup,
                     offset_end,
                     target_offset_end
···
             }

             let copy_off = end_of_previous_fixup;
-            let copy_len = fixup.target_offset - end_of_previous_fixup;
+            let copy_len = fixup_offset - end_of_previous_fixup;
             if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
                 pr_warn!("Failed copying into alloc: {:?}", err);
                 return Err(err.into());
             }
-            if fixup.skip == 0 {
-                let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
+            if let PointerFixupEntry::Fixup { pointer_value, .. } = fixup {
+                let res = alloc.write::<u64>(fixup_offset, pointer_value);
                 if let Err(err) = res {
                     pr_warn!("Failed copying ptr into alloc: {:?}", err);
                     return Err(err.into());
···

         let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
         let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
-        let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
-        let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
-        let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
-        let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
+        let offsets_size: usize = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
+        let buffers_size: usize = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
         let aligned_secctx_size = match secctx.as_ref() {
             Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
             None => 0,
         };

+        if !is_aligned(offsets_size, size_of::<u64>()) {
+            return Err(EINVAL.into());
+        }
+        if !is_aligned(buffers_size, size_of::<u64>()) {
+            return Err(EINVAL.into());
+        }
+
         // This guarantees that at least `sizeof(usize)` bytes will be allocated.
         let len = usize::max(
             aligned_data_size
-                .checked_add(aligned_offsets_size)
-                .and_then(|sum| sum.checked_add(aligned_buffers_size))
+                .checked_add(offsets_size)
+                .and_then(|sum| sum.checked_add(buffers_size))
                 .and_then(|sum| sum.checked_add(aligned_secctx_size))
                 .ok_or(ENOMEM)?,
-            size_of::<usize>(),
+            size_of::<u64>(),
         );
-        let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
+        let secctx_off = aligned_data_size + offsets_size + buffers_size;
         let mut alloc =
             match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
                 Ok(alloc) => alloc,
···
         }

         let offsets_start = aligned_data_size;
-        let offsets_end = aligned_data_size + aligned_offsets_size;
+        let offsets_end = aligned_data_size + offsets_size;

         // This state is used for BINDER_TYPE_PTR objects.
         let sg_state = sg_state.insert(ScatterGatherState {
             unused_buffer_space: UnusedBufferSpace {
                 offset: offsets_end,
-                limit: len,
+                limit: offsets_end + buffers_size,
             },
             sg_entries: KVec::new(),
             ancestors: KVec::new(),
···
         // Traverse the objects specified.
         let mut view = AllocationView::new(&mut alloc, data_size);
         for (index, index_offset) in (offsets_start..offsets_end)
-            .step_by(size_of::<usize>())
+            .step_by(size_of::<u64>())
             .enumerate()
         {
-            let offset = view.alloc.read(index_offset)?;
+            let offset: usize = view
+                .alloc
+                .read::<u64>(index_offset)?
+                .try_into()
+                .map_err(|_| EINVAL)?;

-            if offset < end_of_previous_object {
+            if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
                 pr_warn!("Got transaction with invalid offset.");
                 return Err(EINVAL.into());
             }
···
             }

             // Update the indexes containing objects to clean up.
-            let offset_after_object = index_offset + size_of::<usize>();
+            let offset_after_object = index_offset + size_of::<u64>();
             view.alloc
                 .set_info_offsets(offsets_start..offset_after_object);
         }
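
The alignment changes above make the Rust driver read the offsets array as u64 on every ABI (the entries are 64-bit in the binder UAPI, not pointer-sized) and reject transactions whose sizes or offsets are misaligned. A standalone C sketch of that validation, simplified to only require non-decreasing offsets where the driver tracks the end of the previous object (is_aligned() and validate_offsets() here are illustrative, not the driver's code):

/*
 * Sketch of the added checks: offsets_size must be a u64-multiple, each
 * entry is read as a fixed-width u64, and each offset must be 4-byte
 * aligned and non-decreasing.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool is_aligned(uint64_t value, uint64_t to)
{
        return value % to == 0;
}

/* Validate a transaction's offsets array; false on malformed input. */
static bool validate_offsets(const uint8_t *buf, size_t offsets_size)
{
        uint64_t prev = 0;

        if (!is_aligned(offsets_size, sizeof(uint64_t)))
                return false;

        for (size_t i = 0; i < offsets_size; i += sizeof(uint64_t)) {
                uint64_t off;

                memcpy(&off, buf + i, sizeof(off));     /* always a u64 */
                if (off < prev || !is_aligned(off, sizeof(uint32_t)))
                        return false;
                prev = off;
        }
        return true;
}

int main(void)
{
        uint64_t offsets[2] = { 0, 6 };         /* 6 is not 4-byte aligned */

        printf("%d\n", validate_offsets((const uint8_t *)offsets,
                                        sizeof(offsets)));      /* 0 */
        return 0;
}

Reading each entry as a fixed-width u64 keeps 32-bit and 64-bit kernels in agreement about the layout, and the 4-byte check mirrors the new is_aligned(offset, size_of::<u32>()) guard above.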

drivers/android/binderfs.c (+4 -4)
···
         mutex_lock(&binderfs_minors_mutex);
         if (++info->device_count <= info->mount_opts.max)
                 minor = ida_alloc_max(&binderfs_minors,
-                                      use_reserve ? BINDERFS_MAX_MINOR :
-                                      BINDERFS_MAX_MINOR_CAPPED,
+                                      use_reserve ? BINDERFS_MAX_MINOR - 1 :
+                                      BINDERFS_MAX_MINOR_CAPPED - 1,
                                       GFP_KERNEL);
         else
                 minor = -ENOSPC;
···
         /* Reserve a new minor number for the new device. */
         mutex_lock(&binderfs_minors_mutex);
         minor = ida_alloc_max(&binderfs_minors,
-                              use_reserve ? BINDERFS_MAX_MINOR :
-                              BINDERFS_MAX_MINOR_CAPPED,
+                              use_reserve ? BINDERFS_MAX_MINOR - 1 :
+                              BINDERFS_MAX_MINOR_CAPPED - 1,
                               GFP_KERNEL);
         mutex_unlock(&binderfs_minors_mutex);
         if (minor < 0) {