Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xen-front: Remove CMA support

It turns out this was only needed to paper over a bug in the CMA
helpers, which was addressed in

commit 998fb1a0f478b83492220ff79583bf9ad538bdd8
Author: Liviu Dudau <Liviu.Dudau@arm.com>
Date: Fri Nov 10 13:33:10 2017 +0000

drm: gem_cma_helper.c: Allow importing of contiguous scatterlists with nents > 1

Without this the following pipeline didn't work:

domU:
1. xen-front allocates a non-contig buffer
2. creates grants out of it

dom0:
3. converts the grants into a dma-buf. Since they're non-contig, the
scatter-list is huge.
4. imports it into rcar-du, which requires dma-contig memory for
scanout.

-> On this particular platform there's an IOMMU, so in theory this should
work. But in practice this failed, because of the huge number of sg
entries, even though the IOMMU driver mapped it all into a dma-contig
range.

With a guest-contig buffer allocated in step 1, this problem doesn't
exist. But there's technically no reason to require guest-contig
memory for xen buffer sharing using grants.

Given all that, the xen-front CMA support is not needed and should be
removed.

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Suggested-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180417074012.21311-1-andr2000@gmail.com

+21 -241
-12
Documentation/gpu/xen-front.rst
··· 18 18 .. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h 19 19 :doc: Buffers allocated by the frontend driver 20 20 21 - With GEM CMA helpers 22 - ~~~~~~~~~~~~~~~~~~~~ 23 - 24 - .. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h 25 - :doc: With GEM CMA helpers 26 - 27 - Without GEM CMA helpers 28 - ~~~~~~~~~~~~~~~~~~~~~~~ 29 - 30 - .. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h 31 - :doc: Without GEM CMA helpers 32 - 33 21 Buffers allocated by the backend 34 22 -------------------------------- 35 23
-13
drivers/gpu/drm/xen/Kconfig
··· 15 15 help 16 16 Choose this option if you want to enable a para-virtualized 17 17 frontend DRM/KMS driver for Xen guest OSes. 18 - 19 - config DRM_XEN_FRONTEND_CMA 20 - bool "Use DRM CMA to allocate dumb buffers" 21 - depends on DRM_XEN_FRONTEND 22 - select DRM_KMS_CMA_HELPER 23 - select DRM_GEM_CMA_HELPER 24 - help 25 - Use DRM CMA helpers to allocate display buffers. 26 - This is useful for the use-cases when guest driver needs to 27 - share or export buffers to other drivers which only expect 28 - contiguous buffers. 29 - Note: in this mode driver cannot use buffers allocated 30 - by the backend.
+2 -7
drivers/gpu/drm/xen/Makefile
··· 5 5 xen_drm_front_conn.o \ 6 6 xen_drm_front_evtchnl.o \ 7 7 xen_drm_front_shbuf.o \ 8 - xen_drm_front_cfg.o 9 - 10 - ifeq ($(CONFIG_DRM_XEN_FRONTEND_CMA),y) 11 - drm_xen_front-objs += xen_drm_front_gem_cma.o 12 - else 13 - drm_xen_front-objs += xen_drm_front_gem.o 14 - endif 8 + xen_drm_front_cfg.o \ 9 + xen_drm_front_gem.o 15 10 16 11 obj-$(CONFIG_DRM_XEN_FRONTEND) += drm_xen_front.o
+10 -52
drivers/gpu/drm/xen/xen_drm_front.c
··· 12 12 #include <drm/drm_atomic_helper.h> 13 13 #include <drm/drm_crtc_helper.h> 14 14 #include <drm/drm_gem.h> 15 - #include <drm/drm_gem_cma_helper.h> 16 15 17 16 #include <linux/of_device.h> 18 17 ··· 166 167 return ret; 167 168 } 168 169 169 - static int be_dbuf_create_int(struct xen_drm_front_info *front_info, 170 + int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, 170 171 u64 dbuf_cookie, u32 width, u32 height, 171 - u32 bpp, u64 size, struct page **pages, 172 - struct sg_table *sgt) 172 + u32 bpp, u64 size, struct page **pages) 173 173 { 174 174 struct xen_drm_front_evtchnl *evtchnl; 175 175 struct xen_drm_front_shbuf *shbuf; ··· 185 187 buf_cfg.xb_dev = front_info->xb_dev; 186 188 buf_cfg.pages = pages; 187 189 buf_cfg.size = size; 188 - buf_cfg.sgt = sgt; 189 190 buf_cfg.be_alloc = front_info->cfg.be_alloc; 190 191 191 192 shbuf = xen_drm_front_shbuf_alloc(&buf_cfg); ··· 232 235 mutex_unlock(&evtchnl->u.req.req_io_lock); 233 236 dbuf_free(&front_info->dbuf_list, dbuf_cookie); 234 237 return ret; 235 - } 236 - 237 - int xen_drm_front_dbuf_create_from_sgt(struct xen_drm_front_info *front_info, 238 - u64 dbuf_cookie, u32 width, u32 height, 239 - u32 bpp, u64 size, struct sg_table *sgt) 240 - { 241 - return be_dbuf_create_int(front_info, dbuf_cookie, width, height, 242 - bpp, size, NULL, sgt); 243 - } 244 - 245 - int xen_drm_front_dbuf_create_from_pages(struct xen_drm_front_info *front_info, 246 - u64 dbuf_cookie, u32 width, u32 height, 247 - u32 bpp, u64 size, struct page **pages) 248 - { 249 - return be_dbuf_create_int(front_info, dbuf_cookie, width, height, 250 - bpp, size, pages, NULL); 251 238 } 252 239 253 240 static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info, ··· 415 434 goto fail; 416 435 } 417 436 418 - /* 419 - * In case of CONFIG_DRM_XEN_FRONTEND_CMA gem_obj is constructed 420 - * via DRM CMA helpers and doesn't have ->pages allocated 421 - * (xendrm_gem_get_pages will return NULL), but instead can 
provide 422 - * sg table 423 - */ 424 - if (xen_drm_front_gem_get_pages(obj)) 425 - ret = xen_drm_front_dbuf_create_from_pages(drm_info->front_info, 426 - xen_drm_front_dbuf_to_cookie(obj), 427 - args->width, args->height, args->bpp, 428 - args->size, 429 - xen_drm_front_gem_get_pages(obj)); 430 - else 431 - ret = xen_drm_front_dbuf_create_from_sgt(drm_info->front_info, 432 - xen_drm_front_dbuf_to_cookie(obj), 433 - args->width, args->height, args->bpp, 434 - args->size, 435 - xen_drm_front_gem_get_sg_table(obj)); 437 + ret = xen_drm_front_dbuf_create(drm_info->front_info, 438 + xen_drm_front_dbuf_to_cookie(obj), 439 + args->width, args->height, args->bpp, 440 + args->size, 441 + xen_drm_front_gem_get_pages(obj)); 436 442 if (ret) 437 443 goto fail_backend; 438 444 ··· 491 523 .poll = drm_poll, 492 524 .read = drm_read, 493 525 .llseek = no_llseek, 494 - #ifdef CONFIG_DRM_XEN_FRONTEND_CMA 495 - .mmap = drm_gem_cma_mmap, 496 - #else 497 526 .mmap = xen_drm_front_gem_mmap, 498 - #endif 499 527 }; 500 528 501 529 static const struct vm_operations_struct xen_drm_drv_vm_ops = { ··· 511 547 .gem_prime_export = drm_gem_prime_export, 512 548 .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table, 513 549 .gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table, 550 + .gem_prime_vmap = xen_drm_front_gem_prime_vmap, 551 + .gem_prime_vunmap = xen_drm_front_gem_prime_vunmap, 552 + .gem_prime_mmap = xen_drm_front_gem_prime_mmap, 514 553 .dumb_create = xen_drm_drv_dumb_create, 515 554 .fops = &xen_drm_dev_fops, 516 555 .name = "xendrm-du", ··· 522 555 .major = 1, 523 556 .minor = 0, 524 557 525 - #ifdef CONFIG_DRM_XEN_FRONTEND_CMA 526 - .gem_prime_vmap = drm_gem_cma_prime_vmap, 527 - .gem_prime_vunmap = drm_gem_cma_prime_vunmap, 528 - .gem_prime_mmap = drm_gem_cma_prime_mmap, 529 - #else 530 - .gem_prime_vmap = xen_drm_front_gem_prime_vmap, 531 - .gem_prime_vunmap = xen_drm_front_gem_prime_vunmap, 532 - .gem_prime_mmap = xen_drm_front_gem_prime_mmap, 533 - #endif 534 558 
}; 535 559 536 560 static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
+6 -36
drivers/gpu/drm/xen/xen_drm_front.h
··· 23 23 * 24 24 * Depending on the requirements for the para-virtualized environment, namely 25 25 * requirements dictated by the accompanying DRM/(v)GPU drivers running in both 26 - * host and guest environments, number of operating modes of para-virtualized 27 - * display driver are supported: 28 - * 29 - * - display buffers can be allocated by either frontend driver or backend 30 - * - display buffers can be allocated to be contiguous in memory or not 31 - * 32 - * Note! Frontend driver itself has no dependency on contiguous memory for 33 - * its operation. 26 + * host and guest environments, display buffers can be allocated by either 27 + * frontend driver or backend. 34 28 */ 35 29 36 30 /** 37 31 * DOC: Buffers allocated by the frontend driver 38 32 * 39 - * The below modes of operation are configured at compile-time via 40 - * frontend driver's kernel configuration: 41 - */ 42 - 43 - /** 44 - * DOC: With GEM CMA helpers 45 - * 46 - * This use-case is useful when used with accompanying DRM/vGPU driver in 47 - * guest domain which was designed to only work with contiguous buffers, 48 - * e.g. DRM driver based on GEM CMA helpers: such drivers can only import 49 - * contiguous PRIME buffers, thus requiring frontend driver to provide 50 - * such. In order to implement this mode of operation para-virtualized 51 - * frontend driver can be configured to use GEM CMA helpers. 52 - */ 53 - 54 - /** 55 - * DOC: Without GEM CMA helpers 56 - * 57 - * If accompanying drivers can cope with non-contiguous memory then, to 58 - * lower pressure on CMA subsystem of the kernel, driver can allocate 59 - * buffers from system memory. 33 + * In this mode of operation driver allocates buffers from system memory. 60 34 * 61 35 * Note! 
If used with accompanying DRM/(v)GPU drivers this mode of operation 62 36 * may require IOMMU support on the platform, so accompanying DRM/vGPU ··· 138 164 u32 x, u32 y, u32 width, u32 height, 139 165 u32 bpp, u64 fb_cookie); 140 166 141 - int xen_drm_front_dbuf_create_from_sgt(struct xen_drm_front_info *front_info, 142 - u64 dbuf_cookie, u32 width, u32 height, 143 - u32 bpp, u64 size, struct sg_table *sgt); 144 - 145 - int xen_drm_front_dbuf_create_from_pages(struct xen_drm_front_info *front_info, 146 - u64 dbuf_cookie, u32 width, u32 height, 147 - u32 bpp, u64 size, struct page **pages); 167 + int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, 168 + u64 dbuf_cookie, u32 width, u32 height, 169 + u32 bpp, u64 size, struct page **pages); 148 170 149 171 int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info, 150 172 u64 dbuf_cookie, u64 fb_cookie, u32 width,
+3 -9
drivers/gpu/drm/xen/xen_drm_front_gem.c
··· 210 210 if (ret < 0) 211 211 return ERR_PTR(ret); 212 212 213 - /* 214 - * N.B. Although we have an API to create display buffer from sgt 215 - * we use pages API, because we still need those for GEM handling, 216 - * e.g. for mapping etc. 217 - */ 218 - ret = xen_drm_front_dbuf_create_from_pages(drm_info->front_info, 219 - xen_drm_front_dbuf_to_cookie(&xen_obj->base), 220 - 0, 0, 0, size, 221 - xen_obj->pages); 213 + ret = xen_drm_front_dbuf_create(drm_info->front_info, 214 + xen_drm_front_dbuf_to_cookie(&xen_obj->base), 215 + 0, 0, 0, size, xen_obj->pages); 222 216 if (ret < 0) 223 217 return ERR_PTR(ret); 224 218
-3
drivers/gpu/drm/xen/xen_drm_front_gem.h
··· 27 27 28 28 void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj); 29 29 30 - #ifndef CONFIG_DRM_XEN_FRONTEND_CMA 31 - 32 30 int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma); 33 31 34 32 void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj); ··· 36 38 37 39 int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj, 38 40 struct vm_area_struct *vma); 39 - #endif 40 41 41 42 #endif /* __XEN_DRM_FRONT_GEM_H */
-79
drivers/gpu/drm/xen/xen_drm_front_gem_cma.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 OR MIT 2 - 3 - /* 4 - * Xen para-virtual DRM device 5 - * 6 - * Copyright (C) 2016-2018 EPAM Systems Inc. 7 - * 8 - * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> 9 - */ 10 - 11 - #include <drm/drmP.h> 12 - #include <drm/drm_gem.h> 13 - #include <drm/drm_fb_cma_helper.h> 14 - #include <drm/drm_gem_cma_helper.h> 15 - 16 - #include "xen_drm_front.h" 17 - #include "xen_drm_front_gem.h" 18 - 19 - struct drm_gem_object * 20 - xen_drm_front_gem_import_sg_table(struct drm_device *dev, 21 - struct dma_buf_attachment *attach, 22 - struct sg_table *sgt) 23 - { 24 - struct xen_drm_front_drm_info *drm_info = dev->dev_private; 25 - struct drm_gem_object *gem_obj; 26 - struct drm_gem_cma_object *cma_obj; 27 - int ret; 28 - 29 - gem_obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt); 30 - if (IS_ERR_OR_NULL(gem_obj)) 31 - return gem_obj; 32 - 33 - cma_obj = to_drm_gem_cma_obj(gem_obj); 34 - 35 - ret = xen_drm_front_dbuf_create_from_sgt(drm_info->front_info, 36 - xen_drm_front_dbuf_to_cookie(gem_obj), 37 - 0, 0, 0, gem_obj->size, 38 - drm_gem_cma_prime_get_sg_table(gem_obj)); 39 - if (ret < 0) 40 - return ERR_PTR(ret); 41 - 42 - DRM_DEBUG("Imported CMA buffer of size %zu\n", gem_obj->size); 43 - 44 - return gem_obj; 45 - } 46 - 47 - struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj) 48 - { 49 - return drm_gem_cma_prime_get_sg_table(gem_obj); 50 - } 51 - 52 - struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev, 53 - size_t size) 54 - { 55 - struct xen_drm_front_drm_info *drm_info = dev->dev_private; 56 - struct drm_gem_cma_object *cma_obj; 57 - 58 - if (drm_info->front_info->cfg.be_alloc) { 59 - /* This use-case is not yet supported and probably won't be */ 60 - DRM_ERROR("Backend allocated buffers and CMA helpers are not supported at the same time\n"); 61 - return ERR_PTR(-EINVAL); 62 - } 63 - 64 - cma_obj = drm_gem_cma_create(dev, size); 65 - if 
(IS_ERR_OR_NULL(cma_obj)) 66 - return ERR_CAST(cma_obj); 67 - 68 - return &cma_obj->base; 69 - } 70 - 71 - void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj) 72 - { 73 - drm_gem_cma_free_object(gem_obj); 74 - } 75 - 76 - struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj) 77 - { 78 - return NULL; 79 - }
-22
drivers/gpu/drm/xen/xen_drm_front_shbuf.c
··· 89 89 } 90 90 kfree(buf->grefs); 91 91 kfree(buf->directory); 92 - if (buf->sgt) { 93 - sg_free_table(buf->sgt); 94 - kvfree(buf->pages); 95 - } 96 92 kfree(buf); 97 93 } 98 94 ··· 346 350 347 351 static int alloc_storage(struct xen_drm_front_shbuf *buf) 348 352 { 349 - if (buf->sgt) { 350 - buf->pages = kvmalloc_array(buf->num_pages, 351 - sizeof(struct page *), GFP_KERNEL); 352 - if (!buf->pages) 353 - return -ENOMEM; 354 - 355 - if (drm_prime_sg_to_page_addr_arrays(buf->sgt, buf->pages, 356 - NULL, buf->num_pages) < 0) 357 - return -EINVAL; 358 - } 359 - 360 353 buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL); 361 354 if (!buf->grefs) 362 355 return -ENOMEM; ··· 381 396 struct xen_drm_front_shbuf *buf; 382 397 int ret; 383 398 384 - /* either pages or sgt, not both */ 385 - if (unlikely(cfg->pages && cfg->sgt)) { 386 - DRM_ERROR("Cannot handle buffer allocation with both pages and sg table provided\n"); 387 - return NULL; 388 - } 389 - 390 399 buf = kzalloc(sizeof(*buf), GFP_KERNEL); 391 400 if (!buf) 392 401 return NULL; ··· 392 413 393 414 buf->xb_dev = cfg->xb_dev; 394 415 buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE); 395 - buf->sgt = cfg->sgt; 396 416 buf->pages = cfg->pages; 397 417 398 418 buf->ops->calc_num_grefs(buf);
-8
drivers/gpu/drm/xen/xen_drm_front_shbuf.h
··· 29 29 grant_ref_t *grefs; 30 30 unsigned char *directory; 31 31 32 - /* 33 - * there are 2 ways to provide backing storage for this shared buffer: 34 - * either pages or sgt. if buffer created from sgt then we own 35 - * the pages and must free those ourselves on closure 36 - */ 37 32 int num_pages; 38 33 struct page **pages; 39 - 40 - struct sg_table *sgt; 41 34 42 35 struct xenbus_device *xb_dev; 43 36 ··· 45 52 struct xenbus_device *xb_dev; 46 53 size_t size; 47 54 struct page **pages; 48 - struct sg_table *sgt; 49 55 bool be_alloc; 50 56 }; 51 57