Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd

Pull iommufd fixes from Jason Gunthorpe:

- Fix dirty tracking bitmap collection when using reporting bitmaps
that are not neatly aligned to u64's, or that do not match the IO page
table radix tree layout.

- Add self tests to cover the cases that were found to be broken.

- Add missing enforcement of invalidation type in the uapi.

- Fix selftest config generation.

* tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd:
selftests/iommu: fix the config fragment
iommufd: Reject non-zero data_type if no data_len is provided
iommufd/iova_bitmap: Consider page offset for the pages to be pinned
iommufd/selftest: Add mock IO hugepages tests
iommufd/selftest: Hugepage mock domain support
iommufd/selftest: Refactor mock_domain_read_and_clear_dirty()
iommufd/selftest: Refactor dirty bitmap tests
iommufd/iova_bitmap: Handle recording beyond the mapped pages
iommufd/selftest: Test u64 unaligned bitmaps
iommufd/iova_bitmap: Switch iova_bitmap::bitmap to an u8 array
iommufd/iova_bitmap: Bounds check mapped::pages access

+210 -63
+2 -1
drivers/iommu/iommufd/hw_pagetable.c
··· 263 263 264 264 if (cmd->__reserved) 265 265 return -EOPNOTSUPP; 266 - if (cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) 266 + if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) || 267 + (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len)) 267 268 return -EINVAL; 268 269 269 270 idev = iommufd_get_device(ucmd, cmd->dev_id);
+1
drivers/iommu/iommufd/iommufd_test.h
··· 45 45 46 46 enum { 47 47 MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0, 48 + MOCK_FLAGS_DEVICE_HUGE_IOVA = 1 << 1, 48 49 }; 49 50 50 51 enum {
+58 -10
drivers/iommu/iommufd/iova_bitmap.c
··· 100 100 struct iova_bitmap_map mapped; 101 101 102 102 /* userspace address of the bitmap */ 103 - u64 __user *bitmap; 103 + u8 __user *bitmap; 104 104 105 105 /* u64 index that @mapped points to */ 106 106 unsigned long mapped_base_index; ··· 113 113 114 114 /* length of the IOVA range for the whole bitmap */ 115 115 size_t length; 116 + 117 + /* length of the IOVA range set ahead the pinned pages */ 118 + unsigned long set_ahead_length; 116 119 }; 117 120 118 121 /* ··· 165 162 { 166 163 struct iova_bitmap_map *mapped = &bitmap->mapped; 167 164 unsigned long npages; 168 - u64 __user *addr; 165 + u8 __user *addr; 169 166 long ret; 170 167 171 168 /* ··· 179 176 sizeof(*bitmap->bitmap), PAGE_SIZE); 180 177 181 178 /* 182 - * We always cap at max number of 'struct page' a base page can fit. 183 - * This is, for example, on x86 means 2M of bitmap data max. 184 - */ 185 - npages = min(npages, PAGE_SIZE / sizeof(struct page *)); 186 - 187 - /* 188 179 * Bitmap address to be pinned is calculated via pointer arithmetic 189 180 * with bitmap u64 word index. 190 181 */ 191 182 addr = bitmap->bitmap + bitmap->mapped_base_index; 183 + 184 + /* 185 + * We always cap at max number of 'struct page' a base page can fit. 186 + * This is, for example, on x86 means 2M of bitmap data max. 
187 + */ 188 + npages = min(npages + !!offset_in_page(addr), 189 + PAGE_SIZE / sizeof(struct page *)); 192 190 193 191 ret = pin_user_pages_fast((unsigned long)addr, npages, 194 192 FOLL_WRITE, mapped->pages); ··· 251 247 252 248 mapped = &bitmap->mapped; 253 249 mapped->pgshift = __ffs(page_size); 254 - bitmap->bitmap = data; 250 + bitmap->bitmap = (u8 __user *)data; 255 251 bitmap->mapped_total_index = 256 252 iova_bitmap_offset_to_index(bitmap, length - 1) + 1; 257 253 bitmap->iova = iova; ··· 308 304 309 305 remaining = bitmap->mapped_total_index - bitmap->mapped_base_index; 310 306 remaining = min_t(unsigned long, remaining, 311 - bytes / sizeof(*bitmap->bitmap)); 307 + DIV_ROUND_UP(bytes, sizeof(*bitmap->bitmap))); 312 308 313 309 return remaining; 314 310 } ··· 345 341 return bitmap->mapped_base_index >= bitmap->mapped_total_index; 346 342 } 347 343 344 + static int iova_bitmap_set_ahead(struct iova_bitmap *bitmap, 345 + size_t set_ahead_length) 346 + { 347 + int ret = 0; 348 + 349 + while (set_ahead_length > 0 && !iova_bitmap_done(bitmap)) { 350 + unsigned long length = iova_bitmap_mapped_length(bitmap); 351 + unsigned long iova = iova_bitmap_mapped_iova(bitmap); 352 + 353 + ret = iova_bitmap_get(bitmap); 354 + if (ret) 355 + break; 356 + 357 + length = min(length, set_ahead_length); 358 + iova_bitmap_set(bitmap, iova, length); 359 + 360 + set_ahead_length -= length; 361 + bitmap->mapped_base_index += 362 + iova_bitmap_offset_to_index(bitmap, length - 1) + 1; 363 + iova_bitmap_put(bitmap); 364 + } 365 + 366 + bitmap->set_ahead_length = 0; 367 + return ret; 368 + } 369 + 348 370 /* 349 371 * Advances to the next range, releases the current pinned 350 372 * pages and pins the next set of bitmap pages. 
··· 386 356 iova_bitmap_put(bitmap); 387 357 if (iova_bitmap_done(bitmap)) 388 358 return 0; 359 + 360 + /* Iterate, set and skip any bits requested for next iteration */ 361 + if (bitmap->set_ahead_length) { 362 + int ret; 363 + 364 + ret = iova_bitmap_set_ahead(bitmap, bitmap->set_ahead_length); 365 + if (ret) 366 + return ret; 367 + } 389 368 390 369 /* When advancing the index we pin the next set of bitmap pages */ 391 370 return iova_bitmap_get(bitmap); ··· 448 409 mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; 449 410 unsigned long last_bit = (((iova + length - 1) - mapped->iova) >> 450 411 mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; 412 + unsigned long last_page_idx = mapped->npages - 1; 451 413 452 414 do { 453 415 unsigned int page_idx = cur_bit / BITS_PER_PAGE; ··· 457 417 last_bit - cur_bit + 1); 458 418 void *kaddr; 459 419 420 + if (unlikely(page_idx > last_page_idx)) 421 + break; 422 + 460 423 kaddr = kmap_local_page(mapped->pages[page_idx]); 461 424 bitmap_set(kaddr, offset, nbits); 462 425 kunmap_local(kaddr); 463 426 cur_bit += nbits; 464 427 } while (cur_bit <= last_bit); 428 + 429 + if (unlikely(cur_bit <= last_bit)) { 430 + bitmap->set_ahead_length = 431 + ((last_bit - cur_bit + 1) << bitmap->mapped.pgshift); 432 + } 465 433 } 466 434 EXPORT_SYMBOL_NS_GPL(iova_bitmap_set, IOMMUFD);
+58 -21
drivers/iommu/iommufd/selftest.c
··· 41 41 enum { 42 42 MOCK_DIRTY_TRACK = 1, 43 43 MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2, 44 + MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE, 44 45 45 46 /* 46 47 * Like a real page table alignment requires the low bits of the address ··· 54 53 MOCK_PFN_START_IOVA = _MOCK_PFN_START, 55 54 MOCK_PFN_LAST_IOVA = _MOCK_PFN_START, 56 55 MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1, 56 + MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2, 57 57 }; 58 58 59 59 /* ··· 193 191 return 0; 194 192 } 195 193 194 + static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock, 195 + unsigned long iova, size_t page_size, 196 + unsigned long flags) 197 + { 198 + unsigned long cur, end = iova + page_size - 1; 199 + bool dirty = false; 200 + void *ent, *old; 201 + 202 + for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) { 203 + ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE); 204 + if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) 205 + continue; 206 + 207 + dirty = true; 208 + /* Clear dirty */ 209 + if (!(flags & IOMMU_DIRTY_NO_CLEAR)) { 210 + unsigned long val; 211 + 212 + val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA; 213 + old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE, 214 + xa_mk_value(val), GFP_KERNEL); 215 + WARN_ON_ONCE(ent != old); 216 + } 217 + } 218 + 219 + return dirty; 220 + } 221 + 196 222 static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain, 197 223 unsigned long iova, size_t size, 198 224 unsigned long flags, ··· 228 198 { 229 199 struct mock_iommu_domain *mock = 230 200 container_of(domain, struct mock_iommu_domain, domain); 231 - unsigned long i, max = size / MOCK_IO_PAGE_SIZE; 232 - void *ent, *old; 201 + unsigned long end = iova + size; 202 + void *ent; 233 203 234 204 if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap) 235 205 return -EINVAL; 236 206 237 - for (i = 0; i < max; i++) { 238 - unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE; 207 + do { 208 + unsigned long pgsize = MOCK_IO_PAGE_SIZE; 209 + unsigned long head; 239 
210 240 - ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE); 241 - if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) { 242 - /* Clear dirty */ 243 - if (!(flags & IOMMU_DIRTY_NO_CLEAR)) { 244 - unsigned long val; 245 - 246 - val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA; 247 - old = xa_store(&mock->pfns, 248 - cur / MOCK_IO_PAGE_SIZE, 249 - xa_mk_value(val), GFP_KERNEL); 250 - WARN_ON_ONCE(ent != old); 251 - } 252 - iommu_dirty_bitmap_record(dirty, cur, 253 - MOCK_IO_PAGE_SIZE); 211 + ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE); 212 + if (!ent) { 213 + iova += pgsize; 214 + continue; 254 215 } 255 - } 216 + 217 + if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA) 218 + pgsize = MOCK_HUGE_PAGE_SIZE; 219 + head = iova & ~(pgsize - 1); 220 + 221 + /* Clear dirty */ 222 + if (mock_test_and_clear_dirty(mock, head, pgsize, flags)) 223 + iommu_dirty_bitmap_record(dirty, head, pgsize); 224 + iova = head + pgsize; 225 + } while (iova < end); 256 226 257 227 return 0; 258 228 } ··· 264 234 265 235 static struct iommu_domain *mock_domain_alloc_paging(struct device *dev) 266 236 { 237 + struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); 267 238 struct mock_iommu_domain *mock; 268 239 269 240 mock = kzalloc(sizeof(*mock), GFP_KERNEL); ··· 273 242 mock->domain.geometry.aperture_start = MOCK_APERTURE_START; 274 243 mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST; 275 244 mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE; 245 + if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA) 246 + mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE; 276 247 mock->domain.ops = mock_ops.default_domain_ops; 277 248 mock->domain.type = IOMMU_DOMAIN_UNMANAGED; 278 249 xa_init(&mock->pfns); ··· 320 287 return ERR_PTR(-EOPNOTSUPP); 321 288 if (user_data || (has_dirty_flag && no_dirty_ops)) 322 289 return ERR_PTR(-EOPNOTSUPP); 323 - domain = mock_domain_alloc_paging(NULL); 290 + domain = mock_domain_alloc_paging(dev); 324 291 if (!domain) 325 292 return ERR_PTR(-ENOMEM); 326 
293 if (has_dirty_flag) ··· 383 350 384 351 if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize) 385 352 flags = MOCK_PFN_LAST_IOVA; 353 + if (pgsize != MOCK_IO_PAGE_SIZE) { 354 + flags |= MOCK_PFN_HUGE_IOVA; 355 + } 386 356 old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE, 387 357 xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) | 388 358 flags), ··· 640 604 struct mock_dev *mdev; 641 605 int rc; 642 606 643 - if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY)) 607 + if (dev_flags & 608 + ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA)) 644 609 return ERR_PTR(-EINVAL); 645 610 646 611 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+3 -2
tools/testing/selftests/iommu/config
··· 1 - CONFIG_IOMMUFD 2 - CONFIG_IOMMUFD_TEST 1 + CONFIG_IOMMUFD=y 2 + CONFIG_FAULT_INJECTION=y 3 + CONFIG_IOMMUFD_TEST=y
+63 -15
tools/testing/selftests/iommu/iommufd.c
··· 12 12 static unsigned long HUGEPAGE_SIZE; 13 13 14 14 #define MOCK_PAGE_SIZE (PAGE_SIZE / 2) 15 + #define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE) 15 16 16 17 static unsigned long get_huge_page_size(void) 17 18 { ··· 1717 1716 FIXTURE_VARIANT(iommufd_dirty_tracking) 1718 1717 { 1719 1718 unsigned long buffer_size; 1719 + bool hugepages; 1720 1720 }; 1721 1721 1722 1722 FIXTURE_SETUP(iommufd_dirty_tracking) 1723 1723 { 1724 + int mmap_flags; 1724 1725 void *vrc; 1725 1726 int rc; 1726 1727 ··· 1735 1732 variant->buffer_size, rc); 1736 1733 } 1737 1734 1735 + mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED; 1736 + if (variant->hugepages) { 1737 + /* 1738 + * MAP_POPULATE will cause the kernel to fail mmap if THPs are 1739 + * not available. 1740 + */ 1741 + mmap_flags |= MAP_HUGETLB | MAP_POPULATE; 1742 + } 1738 1743 assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0); 1739 1744 vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE, 1740 - MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0); 1745 + mmap_flags, -1, 0); 1741 1746 assert(vrc == self->buffer); 1742 1747 1743 1748 self->page_size = MOCK_PAGE_SIZE; 1744 1749 self->bitmap_size = 1745 1750 variant->buffer_size / self->page_size / BITS_PER_BYTE; 1746 1751 1747 - /* Provision with an extra (MOCK_PAGE_SIZE) for the unaligned case */ 1752 + /* Provision with an extra (PAGE_SIZE) for the unaligned case */ 1748 1753 rc = posix_memalign(&self->bitmap, PAGE_SIZE, 1749 - self->bitmap_size + MOCK_PAGE_SIZE); 1754 + self->bitmap_size + PAGE_SIZE); 1750 1755 assert(!rc); 1751 1756 assert(self->bitmap); 1752 1757 assert((uintptr_t)self->bitmap % PAGE_SIZE == 0); 1753 1758 1754 1759 test_ioctl_ioas_alloc(&self->ioas_id); 1755 - test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id, 1756 - &self->idev_id); 1760 + /* Enable 1M mock IOMMU hugepages */ 1761 + if (variant->hugepages) { 1762 + test_cmd_mock_domain_flags(self->ioas_id, 1763 + MOCK_FLAGS_DEVICE_HUGE_IOVA, 1764 + 
&self->stdev_id, &self->hwpt_id, 1765 + &self->idev_id); 1766 + } else { 1767 + test_cmd_mock_domain(self->ioas_id, &self->stdev_id, 1768 + &self->hwpt_id, &self->idev_id); 1769 + } 1757 1770 } 1758 1771 1759 1772 FIXTURE_TEARDOWN(iommufd_dirty_tracking) ··· 1803 1784 .buffer_size = 128UL * 1024UL * 1024UL, 1804 1785 }; 1805 1786 1787 + FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge) 1788 + { 1789 + /* 4K bitmap (128M IOVA range) */ 1790 + .buffer_size = 128UL * 1024UL * 1024UL, 1791 + .hugepages = true, 1792 + }; 1793 + 1806 1794 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M) 1807 1795 { 1808 1796 /* 8K bitmap (256M IOVA range) */ 1809 1797 .buffer_size = 256UL * 1024UL * 1024UL, 1798 + }; 1799 + 1800 + FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M_huge) 1801 + { 1802 + /* 8K bitmap (256M IOVA range) */ 1803 + .buffer_size = 256UL * 1024UL * 1024UL, 1804 + .hugepages = true, 1810 1805 }; 1811 1806 1812 1807 TEST_F(iommufd_dirty_tracking, enforce_dirty) ··· 1882 1849 1883 1850 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap) 1884 1851 { 1885 - uint32_t stddev_id; 1852 + uint32_t page_size = MOCK_PAGE_SIZE; 1886 1853 uint32_t hwpt_id; 1887 1854 uint32_t ioas_id; 1855 + 1856 + if (variant->hugepages) 1857 + page_size = MOCK_HUGE_PAGE_SIZE; 1888 1858 1889 1859 test_ioctl_ioas_alloc(&ioas_id); 1890 1860 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer, ··· 1895 1859 1896 1860 test_cmd_hwpt_alloc(self->idev_id, ioas_id, 1897 1861 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); 1898 - test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); 1899 1862 1900 1863 test_cmd_set_dirty_tracking(hwpt_id, true); 1901 1864 1902 1865 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, 1903 - MOCK_APERTURE_START, self->page_size, 1866 + MOCK_APERTURE_START, self->page_size, page_size, 1904 1867 self->bitmap, self->bitmap_size, 0, _metadata); 1905 1868 1906 1869 /* PAGE_SIZE unaligned bitmap */ 1907 1870 test_mock_dirty_bitmaps(hwpt_id, 
variant->buffer_size, 1908 - MOCK_APERTURE_START, self->page_size, 1871 + MOCK_APERTURE_START, self->page_size, page_size, 1909 1872 self->bitmap + MOCK_PAGE_SIZE, 1910 1873 self->bitmap_size, 0, _metadata); 1911 1874 1912 - test_ioctl_destroy(stddev_id); 1875 + /* u64 unaligned bitmap */ 1876 + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, 1877 + MOCK_APERTURE_START, self->page_size, page_size, 1878 + self->bitmap + 0xff1, self->bitmap_size, 0, 1879 + _metadata); 1880 + 1913 1881 test_ioctl_destroy(hwpt_id); 1914 1882 } 1915 1883 1916 1884 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear) 1917 1885 { 1918 - uint32_t stddev_id; 1886 + uint32_t page_size = MOCK_PAGE_SIZE; 1919 1887 uint32_t hwpt_id; 1920 1888 uint32_t ioas_id; 1889 + 1890 + if (variant->hugepages) 1891 + page_size = MOCK_HUGE_PAGE_SIZE; 1921 1892 1922 1893 test_ioctl_ioas_alloc(&ioas_id); 1923 1894 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer, ··· 1932 1889 1933 1890 test_cmd_hwpt_alloc(self->idev_id, ioas_id, 1934 1891 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); 1935 - test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); 1936 1892 1937 1893 test_cmd_set_dirty_tracking(hwpt_id, true); 1938 1894 1939 1895 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, 1940 - MOCK_APERTURE_START, self->page_size, 1896 + MOCK_APERTURE_START, self->page_size, page_size, 1941 1897 self->bitmap, self->bitmap_size, 1942 1898 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR, 1943 1899 _metadata); 1944 1900 1945 1901 /* Unaligned bitmap */ 1946 1902 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, 1947 - MOCK_APERTURE_START, self->page_size, 1903 + MOCK_APERTURE_START, self->page_size, page_size, 1948 1904 self->bitmap + MOCK_PAGE_SIZE, 1949 1905 self->bitmap_size, 1950 1906 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR, 1951 1907 _metadata); 1952 1908 1953 - test_ioctl_destroy(stddev_id); 1909 + /* u64 unaligned bitmap */ 1910 + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, 1911 + 
MOCK_APERTURE_START, self->page_size, page_size, 1912 + self->bitmap + 0xff1, self->bitmap_size, 1913 + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR, 1914 + _metadata); 1915 + 1954 1916 test_ioctl_destroy(hwpt_id); 1955 1917 } 1956 1918
+25 -14
tools/testing/selftests/iommu/iommufd_utils.h
··· 344 344 page_size, bitmap, nr)) 345 345 346 346 static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length, 347 - __u64 iova, size_t page_size, __u64 *bitmap, 347 + __u64 iova, size_t page_size, 348 + size_t pte_page_size, __u64 *bitmap, 348 349 __u64 bitmap_size, __u32 flags, 349 350 struct __test_metadata *_metadata) 350 351 { 351 - unsigned long i, nbits = bitmap_size * BITS_PER_BYTE; 352 - unsigned long nr = nbits / 2; 352 + unsigned long npte = pte_page_size / page_size, pteset = 2 * npte; 353 + unsigned long nbits = bitmap_size * BITS_PER_BYTE; 354 + unsigned long j, i, nr = nbits / pteset ?: 1; 353 355 __u64 out_dirty = 0; 354 356 355 357 /* Mark all even bits as dirty in the mock domain */ 356 - for (i = 0; i < nbits; i += 2) 358 + memset(bitmap, 0, bitmap_size); 359 + for (i = 0; i < nbits; i += pteset) 357 360 set_bit(i, (unsigned long *)bitmap); 358 361 359 362 test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, ··· 368 365 test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap, 369 366 flags); 370 367 /* Beware ASSERT_EQ() is two statements -- braces are not redundant! 
*/ 371 - for (i = 0; i < nbits; i++) { 372 - ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *)bitmap)); 368 + for (i = 0; i < nbits; i += pteset) { 369 + for (j = 0; j < pteset; j++) { 370 + ASSERT_EQ(j < npte, 371 + test_bit(i + j, (unsigned long *)bitmap)); 372 + } 373 + ASSERT_EQ(!(i % pteset), test_bit(i, (unsigned long *)bitmap)); 373 374 } 374 375 375 376 memset(bitmap, 0, bitmap_size); ··· 381 374 flags); 382 375 383 376 /* It as read already -- expect all zeroes */ 384 - for (i = 0; i < nbits; i++) { 385 - ASSERT_EQ(!(i % 2) && (flags & 386 - IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR), 387 - test_bit(i, (unsigned long *)bitmap)); 377 + for (i = 0; i < nbits; i += pteset) { 378 + for (j = 0; j < pteset; j++) { 379 + ASSERT_EQ( 380 + (j < npte) && 381 + (flags & 382 + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR), 383 + test_bit(i + j, (unsigned long *)bitmap)); 384 + } 388 385 } 389 386 390 387 return 0; 391 388 } 392 - #define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, bitmap, \ 393 - bitmap_size, flags, _metadata) \ 389 + #define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, pte_size,\ 390 + bitmap, bitmap_size, flags, _metadata) \ 394 391 ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \ 395 - page_size, bitmap, bitmap_size, \ 396 - flags, _metadata)) 392 + page_size, pte_size, bitmap, \ 393 + bitmap_size, flags, _metadata)) 397 394 398 395 static int _test_cmd_create_access(int fd, unsigned int ioas_id, 399 396 __u32 *access_id, unsigned int flags)