Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vfio/type1: sanitize for overflow using check_*_overflow()

Adopt check_*_overflow() functions to clearly express overflow check
intent.

Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Fixes: 73fa0d10d077 ("vfio: Type1 IOMMU implementation")
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Alex Mastro <amastro@fb.com>
Link: https://lore.kernel.org/r/20251028-fix-unmap-v6-1-2542b96bcc8e@fb.com
Signed-off-by: Alex Williamson <alex@shazbot.org>

Authored by Alex Mastro and committed by Alex Williamson
6012379e dcb6fa37

+63 -23
drivers/vfio/vfio_iommu_type1.c
··· 38 38 #include <linux/workqueue.h> 39 39 #include <linux/notifier.h> 40 40 #include <linux/mm_inline.h> 41 + #include <linux/overflow.h> 41 42 #include "vfio.h" 42 43 43 44 #define DRIVER_VERSION "0.2" ··· 183 182 } 184 183 185 184 static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu, 186 - dma_addr_t start, u64 size) 185 + dma_addr_t start, size_t size) 187 186 { 188 187 struct rb_node *res = NULL; 189 188 struct rb_node *node = iommu->dma_list.rb_node; ··· 896 895 unsigned long remote_vaddr; 897 896 struct vfio_dma *dma; 898 897 bool do_accounting; 898 + dma_addr_t iova_end; 899 + size_t iova_size; 899 900 900 - if (!iommu || !pages) 901 + if (!iommu || !pages || npage <= 0) 901 902 return -EINVAL; 902 903 903 904 /* Supported for v2 version only */ 904 905 if (!iommu->v2) 905 906 return -EACCES; 907 + 908 + if (check_mul_overflow(npage, PAGE_SIZE, &iova_size) || 909 + check_add_overflow(user_iova, iova_size - 1, &iova_end)) 910 + return -EOVERFLOW; 906 911 907 912 mutex_lock(&iommu->lock); 908 913 ··· 1015 1008 { 1016 1009 struct vfio_iommu *iommu = iommu_data; 1017 1010 bool do_accounting; 1011 + dma_addr_t iova_end; 1012 + size_t iova_size; 1018 1013 int i; 1019 1014 1020 1015 /* Supported for v2 version only */ 1021 1016 if (WARN_ON(!iommu->v2)) 1017 + return; 1018 + 1019 + if (WARN_ON(npage <= 0)) 1020 + return; 1021 + 1022 + if (WARN_ON(check_mul_overflow(npage, PAGE_SIZE, &iova_size) || 1023 + check_add_overflow(user_iova, iova_size - 1, &iova_end))) 1022 1024 return; 1023 1025 1024 1026 mutex_lock(&iommu->lock); ··· 1390 1374 int ret = -EINVAL, retries = 0; 1391 1375 unsigned long pgshift; 1392 1376 dma_addr_t iova = unmap->iova; 1393 - u64 size = unmap->size; 1377 + dma_addr_t iova_end; 1378 + size_t size = unmap->size; 1394 1379 bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL; 1395 1380 bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR; 1396 1381 struct rb_node *n, *first_n; ··· 1404 1387 goto unlock; 1405 
1388 } 1406 1389 1390 + if (iova != unmap->iova || size != unmap->size) { 1391 + ret = -EOVERFLOW; 1392 + goto unlock; 1393 + } 1394 + 1407 1395 pgshift = __ffs(iommu->pgsize_bitmap); 1408 1396 pgsize = (size_t)1 << pgshift; 1409 1397 ··· 1418 1396 if (unmap_all) { 1419 1397 if (iova || size) 1420 1398 goto unlock; 1421 - size = U64_MAX; 1422 - } else if (!size || size & (pgsize - 1) || 1423 - iova + size - 1 < iova || size > SIZE_MAX) { 1424 - goto unlock; 1399 + size = SIZE_MAX; 1400 + } else { 1401 + if (!size || size & (pgsize - 1)) 1402 + goto unlock; 1403 + 1404 + if (check_add_overflow(iova, size - 1, &iova_end)) { 1405 + ret = -EOVERFLOW; 1406 + goto unlock; 1407 + } 1425 1408 } 1426 1409 1427 1410 /* When dirty tracking is enabled, allow only min supported pgsize */ ··· 1473 1446 if (dma && dma->iova != iova) 1474 1447 goto unlock; 1475 1448 1476 - dma = vfio_find_dma(iommu, iova + size - 1, 0); 1449 + dma = vfio_find_dma(iommu, iova_end, 0); 1477 1450 if (dma && dma->iova + dma->size != iova + size) 1478 1451 goto unlock; 1479 1452 } ··· 1675 1648 { 1676 1649 bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR; 1677 1650 dma_addr_t iova = map->iova; 1651 + dma_addr_t iova_end; 1678 1652 unsigned long vaddr = map->vaddr; 1653 + unsigned long vaddr_end; 1679 1654 size_t size = map->size; 1680 1655 int ret = 0, prot = 0; 1681 1656 size_t pgsize; ··· 1685 1656 1686 1657 /* Verify that none of our __u64 fields overflow */ 1687 1658 if (map->size != size || map->vaddr != vaddr || map->iova != iova) 1659 + return -EOVERFLOW; 1660 + 1661 + if (!size) 1688 1662 return -EINVAL; 1663 + 1664 + if (check_add_overflow(iova, size - 1, &iova_end) || 1665 + check_add_overflow(vaddr, size - 1, &vaddr_end)) 1666 + return -EOVERFLOW; 1689 1667 1690 1668 /* READ/WRITE from device perspective */ 1691 1669 if (map->flags & VFIO_DMA_MAP_FLAG_WRITE) ··· 1709 1673 1710 1674 WARN_ON((pgsize - 1) & PAGE_MASK); 1711 1675 1712 - if (!size || (size | iova | vaddr) & (pgsize - 1)) { 
1713 - ret = -EINVAL; 1714 - goto out_unlock; 1715 - } 1716 - 1717 - /* Don't allow IOVA or virtual address wrap */ 1718 - if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) { 1676 + if ((size | iova | vaddr) & (pgsize - 1)) { 1719 1677 ret = -EINVAL; 1720 1678 goto out_unlock; 1721 1679 } ··· 1740 1710 goto out_unlock; 1741 1711 } 1742 1712 1743 - if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) { 1713 + if (!vfio_iommu_iova_dma_valid(iommu, iova, iova_end)) { 1744 1714 ret = -EINVAL; 1745 1715 goto out_unlock; 1746 1716 } ··· 3007 2977 struct vfio_iommu_type1_dirty_bitmap_get range; 3008 2978 unsigned long pgshift; 3009 2979 size_t data_size = dirty.argsz - minsz; 3010 - size_t iommu_pgsize; 2980 + size_t size, iommu_pgsize; 2981 + dma_addr_t iova, iova_end; 3011 2982 3012 2983 if (!data_size || data_size < sizeof(range)) 3013 2984 return -EINVAL; ··· 3017 2986 sizeof(range))) 3018 2987 return -EFAULT; 3019 2988 3020 - if (range.iova + range.size < range.iova) 2989 + iova = range.iova; 2990 + size = range.size; 2991 + 2992 + if (iova != range.iova || size != range.size) 2993 + return -EOVERFLOW; 2994 + 2995 + if (!size) 3021 2996 return -EINVAL; 2997 + 2998 + if (check_add_overflow(iova, size - 1, &iova_end)) 2999 + return -EOVERFLOW; 3000 + 3022 3001 if (!access_ok((void __user *)range.bitmap.data, 3023 3002 range.bitmap.size)) 3024 3003 return -EINVAL; 3025 3004 3026 3005 pgshift = __ffs(range.bitmap.pgsize); 3027 - ret = verify_bitmap_size(range.size >> pgshift, 3006 + ret = verify_bitmap_size(size >> pgshift, 3028 3007 range.bitmap.size); 3029 3008 if (ret) 3030 3009 return ret; ··· 3048 3007 ret = -EINVAL; 3049 3008 goto out_unlock; 3050 3009 } 3051 - if (range.iova & (iommu_pgsize - 1)) { 3010 + if (iova & (iommu_pgsize - 1)) { 3052 3011 ret = -EINVAL; 3053 3012 goto out_unlock; 3054 3013 } 3055 - if (!range.size || range.size & (iommu_pgsize - 1)) { 3014 + if (size & (iommu_pgsize - 1)) { 3056 3015 ret = -EINVAL; 3057 3016 goto 
out_unlock; 3058 3017 } 3059 3018 3060 3019 if (iommu->dirty_page_tracking) 3061 3020 ret = vfio_iova_dirty_bitmap(range.bitmap.data, 3062 - iommu, range.iova, 3063 - range.size, 3021 + iommu, iova, size, 3064 3022 range.bitmap.pgsize); 3065 3023 else 3066 3024 ret = -EINVAL;