rust_binder: add additional alignment checks

This adds some alignment checks to match C Binder more closely. This
causes the driver to reject more transactions. I don't think any of the
transactions in question are harmful, but it's still a bug because it's
the wrong uapi to accept them.

In the cases where usize is replaced with u64, the change only affects
32-bit kernels (where usize is 32 bits, unlike the fixed-width u64 used
by the binder uapi).

Cc: stable@vger.kernel.org
Fixes: eafedbc7c050 ("rust_binder: add Rust Binder driver")
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Acked-by: Carlos Llamas <cmllamas@google.com>
Link: https://patch.msgid.link/20260123-binder-alignment-more-checks-v1-1-7e1cea77411d@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by Alice Ryhl and committed by Greg Kroah-Hartman d0472481 5e8a3d01

+36 -14
+36 -14
drivers/android/binder/thread.rs
··· 39 39 sync::atomic::{AtomicU32, Ordering}, 40 40 }; 41 41 42 + fn is_aligned(value: usize, to: usize) -> bool { 43 + value % to == 0 44 + } 45 + 42 46 /// Stores the layout of the scatter-gather entries. This is used during the `translate_objects` 43 47 /// call and is discarded when it returns. 44 48 struct ScatterGatherState { ··· 799 795 let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?; 800 796 let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?; 801 797 798 + if !is_aligned(parent_offset, size_of::<u32>()) { 799 + return Err(EINVAL.into()); 800 + } 801 + 802 802 let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?; 803 803 view.alloc.info_add_fd_reserve(num_fds)?; 804 804 ··· 816 808 return Err(EINVAL.into()); 817 809 } 818 810 }; 811 + 812 + if !is_aligned(parent_entry.sender_uaddr, size_of::<u32>()) { 813 + return Err(EINVAL.into()); 814 + } 819 815 820 816 parent_entry.fixup_min_offset = info.new_min_offset; 821 817 parent_entry ··· 837 825 .sender_uaddr 838 826 .checked_add(parent_offset) 839 827 .ok_or(EINVAL)?; 828 + 840 829 let mut fda_bytes = KVec::new(); 841 830 UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len) 842 831 .read_all(&mut fda_bytes, GFP_KERNEL)?; ··· 971 958 972 959 let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?; 973 960 let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?; 974 - let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?; 975 - let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?; 976 - let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?; 977 - let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?; 961 + let offsets_size: usize = trd.offsets_size.try_into().map_err(|_| EINVAL)?; 962 + let buffers_size: usize = tr.buffers_size.try_into().map_err(|_| EINVAL)?; 978 963 let aligned_secctx_size = match secctx.as_ref() { 979 964 Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?, 
980 965 None => 0, 981 966 }; 982 967 968 + if !is_aligned(offsets_size, size_of::<u64>()) { 969 + return Err(EINVAL.into()); 970 + } 971 + if !is_aligned(buffers_size, size_of::<u64>()) { 972 + return Err(EINVAL.into()); 973 + } 974 + 983 975 // This guarantees that at least `sizeof(usize)` bytes will be allocated. 984 976 let len = usize::max( 985 977 aligned_data_size 986 - .checked_add(aligned_offsets_size) 987 - .and_then(|sum| sum.checked_add(aligned_buffers_size)) 978 + .checked_add(offsets_size) 979 + .and_then(|sum| sum.checked_add(buffers_size)) 988 980 .and_then(|sum| sum.checked_add(aligned_secctx_size)) 989 981 .ok_or(ENOMEM)?, 990 - size_of::<usize>(), 982 + size_of::<u64>(), 991 983 ); 992 - let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size; 984 + let secctx_off = aligned_data_size + offsets_size + buffers_size; 993 985 let mut alloc = 994 986 match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) { 995 987 Ok(alloc) => alloc, ··· 1026 1008 } 1027 1009 1028 1010 let offsets_start = aligned_data_size; 1029 - let offsets_end = aligned_data_size + aligned_offsets_size; 1011 + let offsets_end = aligned_data_size + offsets_size; 1030 1012 1031 1013 // This state is used for BINDER_TYPE_PTR objects. 1032 1014 let sg_state = sg_state.insert(ScatterGatherState { 1033 1015 unused_buffer_space: UnusedBufferSpace { 1034 1016 offset: offsets_end, 1035 - limit: len, 1017 + limit: offsets_end + buffers_size, 1036 1018 }, 1037 1019 sg_entries: KVec::new(), 1038 1020 ancestors: KVec::new(), ··· 1041 1023 // Traverse the objects specified. 1042 1024 let mut view = AllocationView::new(&mut alloc, data_size); 1043 1025 for (index, index_offset) in (offsets_start..offsets_end) 1044 - .step_by(size_of::<usize>()) 1026 + .step_by(size_of::<u64>()) 1045 1027 .enumerate() 1046 1028 { 1047 - let offset = view.alloc.read(index_offset)?; 1029 + let offset: usize = view 1030 + .alloc 1031 + .read::<u64>(index_offset)? 
1032 + .try_into() 1033 + .map_err(|_| EINVAL)?; 1048 1034 1049 - if offset < end_of_previous_object { 1035 + if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) { 1050 1036 pr_warn!("Got transaction with invalid offset."); 1051 1037 return Err(EINVAL.into()); 1052 1038 } ··· 1082 1060 } 1083 1061 1084 1062 // Update the indexes containing objects to clean up. 1085 - let offset_after_object = index_offset + size_of::<usize>(); 1063 + let offset_after_object = index_offset + size_of::<u64>(); 1086 1064 view.alloc 1087 1065 .set_info_offsets(offsets_start..offset_after_object); 1088 1066 }