Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen/virtio: Optimize the setup of "xen-grant-dma" devices

This is needed to avoid having to parse the same device-tree
several times for a given device.

For this to work we need to install the xen_virtio_restricted_mem_acc
callback in Arm's xen_guest_init(), which is the same callback that x86's
PV and HVM modes already use, and remove the manual assignment in
xen_setup_dma_ops(). Also we need to split the code that initializes
backend_domid into a separate function.

Prior to the current patch we parsed the device-tree three times:
1. xen_setup_dma_ops()->...->xen_is_dt_grant_dma_device()
2. xen_setup_dma_ops()->...->xen_dt_grant_init_backend_domid()
3. xen_virtio_mem_acc()->...->xen_is_dt_grant_dma_device()

With the current patch we parse the device-tree only once, in
xen_virtio_restricted_mem_acc()->...->xen_dt_grant_init_backend_domid()

Other benefits are:
- Not diverge from x86 when setting up Xen grant DMA ops
- Drop several global functions

Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Reviewed-by: Xenia Ragiadakou <burzalodowa@gmail.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Link: https://lore.kernel.org/r/20221025162004.8501-2-olekstysh@gmail.com
Signed-off-by: Juergen Gross <jgross@suse.com>

Authored by Oleksandr Tyshchenko and committed by Juergen Gross
(commit 035e3a43, parent 76dcd734)

+30 -69
+1 -1
arch/arm/xen/enlighten.c
··· 445 445 return 0; 446 446 447 447 if (IS_ENABLED(CONFIG_XEN_VIRTIO)) 448 - virtio_set_mem_acc_cb(xen_virtio_mem_acc); 448 + virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc); 449 449 450 450 if (!acpi_disabled) 451 451 xen_acpi_guest_init();
+28 -49
drivers/xen/grant-dma-ops.c
··· 292 292 .dma_supported = xen_grant_dma_supported, 293 293 }; 294 294 295 - static bool xen_is_dt_grant_dma_device(struct device *dev) 296 - { 297 - struct device_node *iommu_np; 298 - bool has_iommu; 299 - 300 - iommu_np = of_parse_phandle(dev->of_node, "iommus", 0); 301 - has_iommu = iommu_np && 302 - of_device_is_compatible(iommu_np, "xen,grant-dma"); 303 - of_node_put(iommu_np); 304 - 305 - return has_iommu; 306 - } 307 - 308 - bool xen_is_grant_dma_device(struct device *dev) 309 - { 310 - /* XXX Handle only DT devices for now */ 311 - if (dev->of_node) 312 - return xen_is_dt_grant_dma_device(dev); 313 - 314 - return false; 315 - } 316 - 317 - bool xen_virtio_mem_acc(struct virtio_device *dev) 318 - { 319 - if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) 320 - return true; 321 - 322 - return xen_is_grant_dma_device(dev->dev.parent); 323 - } 324 - 325 295 static int xen_dt_grant_init_backend_domid(struct device *dev, 326 - struct xen_grant_dma_data *data) 296 + domid_t *backend_domid) 327 297 { 328 298 struct of_phandle_args iommu_spec; 329 299 330 300 if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells", 331 301 0, &iommu_spec)) { 332 - dev_err(dev, "Cannot parse iommus property\n"); 302 + dev_dbg(dev, "Cannot parse iommus property\n"); 333 303 return -ESRCH; 334 304 } 335 305 336 306 if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") || 337 307 iommu_spec.args_count != 1) { 338 - dev_err(dev, "Incompatible IOMMU node\n"); 308 + dev_dbg(dev, "Incompatible IOMMU node\n"); 339 309 of_node_put(iommu_spec.np); 340 310 return -ESRCH; 341 311 } ··· 316 346 * The endpoint ID here means the ID of the domain where the 317 347 * corresponding backend is running 318 348 */ 319 - data->backend_domid = iommu_spec.args[0]; 349 + *backend_domid = iommu_spec.args[0]; 320 350 321 351 return 0; 322 352 } 323 353 324 - void xen_grant_setup_dma_ops(struct device *dev) 354 + static int xen_grant_init_backend_domid(struct device *dev, 
355 + domid_t *backend_domid) 356 + { 357 + int ret = -ENODEV; 358 + 359 + if (dev->of_node) { 360 + ret = xen_dt_grant_init_backend_domid(dev, backend_domid); 361 + } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) { 362 + dev_info(dev, "Using dom0 as backend\n"); 363 + *backend_domid = 0; 364 + ret = 0; 365 + } 366 + 367 + return ret; 368 + } 369 + 370 + static void xen_grant_setup_dma_ops(struct device *dev, domid_t backend_domid) 325 371 { 326 372 struct xen_grant_dma_data *data; 327 373 ··· 351 365 if (!data) 352 366 goto err; 353 367 354 - if (dev->of_node) { 355 - if (xen_dt_grant_init_backend_domid(dev, data)) 356 - goto err; 357 - } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) { 358 - dev_info(dev, "Using dom0 as backend\n"); 359 - data->backend_domid = 0; 360 - } else { 361 - /* XXX ACPI device unsupported for now */ 362 - goto err; 363 - } 368 + data->backend_domid = backend_domid; 364 369 365 370 if (store_xen_grant_dma_data(dev, data)) { 366 371 dev_err(dev, "Cannot store Xen grant DMA data\n"); ··· 369 392 370 393 bool xen_virtio_restricted_mem_acc(struct virtio_device *dev) 371 394 { 372 - bool ret = xen_virtio_mem_acc(dev); 395 + domid_t backend_domid; 373 396 374 - if (ret) 375 - xen_grant_setup_dma_ops(dev->dev.parent); 397 + if (!xen_grant_init_backend_domid(dev->dev.parent, &backend_domid)) { 398 + xen_grant_setup_dma_ops(dev->dev.parent, backend_domid); 399 + return true; 400 + } 376 401 377 - return ret; 402 + return false; 378 403 } 379 404 380 405 MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
+1 -3
include/xen/arm/xen-ops.h
··· 8 8 static inline void xen_setup_dma_ops(struct device *dev) 9 9 { 10 10 #ifdef CONFIG_XEN 11 - if (xen_is_grant_dma_device(dev)) 12 - xen_grant_setup_dma_ops(dev); 13 - else if (xen_swiotlb_detect()) 11 + if (xen_swiotlb_detect()) 14 12 dev->dma_ops = &xen_swiotlb_dma_ops; 15 13 #endif 16 14 }
-16
include/xen/xen-ops.h
··· 216 216 #endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */ 217 217 218 218 #ifdef CONFIG_XEN_GRANT_DMA_OPS 219 - void xen_grant_setup_dma_ops(struct device *dev); 220 - bool xen_is_grant_dma_device(struct device *dev); 221 - bool xen_virtio_mem_acc(struct virtio_device *dev); 222 219 bool xen_virtio_restricted_mem_acc(struct virtio_device *dev); 223 220 #else 224 - static inline void xen_grant_setup_dma_ops(struct device *dev) 225 - { 226 - } 227 - static inline bool xen_is_grant_dma_device(struct device *dev) 228 - { 229 - return false; 230 - } 231 - 232 221 struct virtio_device; 233 - 234 - static inline bool xen_virtio_mem_acc(struct virtio_device *dev) 235 - { 236 - return false; 237 - } 238 222 239 223 static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev) 240 224 {