Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen: swiotlb: Switch to physical address mapping callbacks

Combine the resource and page mapping routines into one function
and remove the .map_resource/.unmap_resource callbacks completely.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251015-remove-map-page-v5-5-3bbfe3a25cdf@kernel.org

Authored by Leon Romanovsky and committed by Marek Szyprowski
af85de5a 50b149be

+29 -34
+29 -34
drivers/xen/swiotlb-xen.c
··· 200 200 * physical address to use is returned. 201 201 * 202 202 * Once the device is given the dma address, the device owns this memory until 203 - * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed. 203 + * either xen_swiotlb_unmap_phys or xen_swiotlb_dma_sync_single is performed. 204 204 */ 205 - static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, 206 - unsigned long offset, size_t size, 207 - enum dma_data_direction dir, 205 + static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys, 206 + size_t size, enum dma_data_direction dir, 208 207 unsigned long attrs) 209 208 { 210 - phys_addr_t map, phys = page_to_phys(page) + offset; 211 - dma_addr_t dev_addr = xen_phys_to_dma(dev, phys); 209 + dma_addr_t dev_addr; 210 + phys_addr_t map; 212 211 213 212 BUG_ON(dir == DMA_NONE); 213 + 214 + if (attrs & DMA_ATTR_MMIO) { 215 + if (unlikely(!dma_capable(dev, phys, size, false))) { 216 + dev_err_once( 217 + dev, 218 + "DMA addr %pa+%zu overflow (mask %llx, bus limit %llx).\n", 219 + &phys, size, *dev->dma_mask, 220 + dev->bus_dma_limit); 221 + WARN_ON_ONCE(1); 222 + return DMA_MAPPING_ERROR; 223 + } 224 + return phys; 225 + } 226 + 227 + dev_addr = xen_phys_to_dma(dev, phys); 228 + 214 229 /* 215 230 * If the address happens to be in the device's DMA window, 216 231 * we can safely return the device addr and not worry about bounce ··· 272 257 273 258 /* 274 259 * Unmap a single streaming mode DMA translation. The dma_addr and size must 275 - * match what was provided for in a previous xen_swiotlb_map_page call. All 260 + * match what was provided for in a previous xen_swiotlb_map_phys call. All 276 261 * other usages are undefined. 277 262 * 278 263 * After this call, reads by the cpu to the buffer are guaranteed to see 279 264 * whatever the device wrote there. 
280 265 */ 281 - static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, 266 + static void xen_swiotlb_unmap_phys(struct device *hwdev, dma_addr_t dev_addr, 282 267 size_t size, enum dma_data_direction dir, unsigned long attrs) 283 268 { 284 269 phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr); ··· 340 325 341 326 /* 342 327 * Unmap a set of streaming mode DMA translations. Again, cpu read rules 343 - * concerning calls here are the same as for swiotlb_unmap_page() above. 328 + * concerning calls here are the same as for swiotlb_unmap_phys() above. 344 329 */ 345 330 static void 346 331 xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, ··· 352 337 BUG_ON(dir == DMA_NONE); 353 338 354 339 for_each_sg(sgl, sg, nelems, i) 355 - xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg), 340 + xen_swiotlb_unmap_phys(hwdev, sg->dma_address, sg_dma_len(sg), 356 341 dir, attrs); 357 342 358 343 } ··· 367 352 BUG_ON(dir == DMA_NONE); 368 353 369 354 for_each_sg(sgl, sg, nelems, i) { 370 - sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg), 371 - sg->offset, sg->length, dir, attrs); 355 + sg->dma_address = xen_swiotlb_map_phys(dev, sg_phys(sg), 356 + sg->length, dir, attrs); 372 357 if (sg->dma_address == DMA_MAPPING_ERROR) 373 358 goto out_unmap; 374 359 sg_dma_len(sg) = sg->length; ··· 407 392 } 408 393 } 409 394 410 - static dma_addr_t xen_swiotlb_direct_map_resource(struct device *dev, 411 - phys_addr_t paddr, 412 - size_t size, 413 - enum dma_data_direction dir, 414 - unsigned long attrs) 415 - { 416 - dma_addr_t dma_addr = paddr; 417 - 418 - if (unlikely(!dma_capable(dev, dma_addr, size, false))) { 419 - dev_err_once(dev, 420 - "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n", 421 - &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); 422 - WARN_ON_ONCE(1); 423 - return DMA_MAPPING_ERROR; 424 - } 425 - 426 - return dma_addr; 427 - } 428 - 429 395 /* 430 396 * Return whether the given device 
DMA address mask can be supported 431 397 * properly. For example, if your device can only drive the low 24-bits ··· 433 437 .sync_sg_for_device = xen_swiotlb_sync_sg_for_device, 434 438 .map_sg = xen_swiotlb_map_sg, 435 439 .unmap_sg = xen_swiotlb_unmap_sg, 436 - .map_page = xen_swiotlb_map_page, 437 - .unmap_page = xen_swiotlb_unmap_page, 440 + .map_phys = xen_swiotlb_map_phys, 441 + .unmap_phys = xen_swiotlb_unmap_phys, 438 442 .dma_supported = xen_swiotlb_dma_supported, 439 443 .mmap = dma_common_mmap, 440 444 .get_sgtable = dma_common_get_sgtable, 441 445 .alloc_pages_op = dma_common_alloc_pages, 442 446 .free_pages = dma_common_free_pages, 443 447 .max_mapping_size = swiotlb_max_mapping_size, 444 - .map_resource = xen_swiotlb_direct_map_resource, 445 448 };