Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-buf, drm, ion: Propagate error code from dma_buf_start_cpu_access()

Drivers, especially i915.ko, can fail during the initial migration of a
dma-buf for CPU access. However, the error code from the driver was not
being propagated back to ioctl and so userspace was blissfully ignorant
of the failure. Rendering corruption ensues.

Whilst fixing the ioctl to return the error code from
dma_buf_start_cpu_access(), also do the same for
dma_buf_end_cpu_access(). For most drivers, dma_buf_end_cpu_access()
cannot fail. i915.ko however, as most drivers would, wants to avoid being
uninterruptible (as would be required to guarantee no failure when
flushing the buffer to the device). As userspace already has to handle
errors from the SYNC_IOCTL, take advantage of this to be able to restart
the syscall across signals.

This fixes a coherency issue for i915.ko as well as reducing the
uninterruptible hold upon its BKL, the struct_mutex.

Fixes commit c11e391da2a8fe973c3c2398452000bed505851e
Author: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu Feb 11 20:04:51 2016 -0200

dma-buf: Add ioctls to allow userspace to flush

Testcase: igt/gem_concurrent_blit/*dmabuf*interruptible
Testcase: igt/prime_mmap_coherency/ioctl-errors
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tiago Vignatti <tiago.vignatti@intel.com>
Cc: Stéphane Marchesin <marcheu@chromium.org>
Cc: David Herrmann <dh.herrmann@gmail.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Daniel Vetter <daniel.vetter@intel.com>
CC: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: intel-gfx@lists.freedesktop.org
Cc: devel@driverdev.osuosl.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1458331359-2634-1-git-send-email-chris@chris-wilson.co.uk

authored by

Chris Wilson and committed by
Daniel Vetter
18b862dc b47ff7e6

+28 -25
+11 -6
drivers/dma-buf/dma-buf.c
··· 259 259 struct dma_buf *dmabuf; 260 260 struct dma_buf_sync sync; 261 261 enum dma_data_direction direction; 262 + int ret; 262 263 263 264 dmabuf = file->private_data; 264 265 ··· 286 285 } 287 286 288 287 if (sync.flags & DMA_BUF_SYNC_END) 289 - dma_buf_end_cpu_access(dmabuf, direction); 288 + ret = dma_buf_end_cpu_access(dmabuf, direction); 290 289 else 291 - dma_buf_begin_cpu_access(dmabuf, direction); 290 + ret = dma_buf_begin_cpu_access(dmabuf, direction); 292 291 293 - return 0; 292 + return ret; 294 293 default: 295 294 return -ENOTTY; 296 295 } ··· 614 613 * 615 614 * This call must always succeed. 616 615 */ 617 - void dma_buf_end_cpu_access(struct dma_buf *dmabuf, 618 - enum dma_data_direction direction) 616 + int dma_buf_end_cpu_access(struct dma_buf *dmabuf, 617 + enum dma_data_direction direction) 619 618 { 619 + int ret = 0; 620 + 620 621 WARN_ON(!dmabuf); 621 622 622 623 if (dmabuf->ops->end_cpu_access) 623 - dmabuf->ops->end_cpu_access(dmabuf, direction); 624 + ret = dmabuf->ops->end_cpu_access(dmabuf, direction); 625 + 626 + return ret; 624 627 } 625 628 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); 626 629
+5 -10
drivers/gpu/drm/i915/i915_gem_dmabuf.c
··· 228 228 return ret; 229 229 } 230 230 231 - static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) 231 + static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) 232 232 { 233 233 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); 234 234 struct drm_device *dev = obj->base.dev; 235 - struct drm_i915_private *dev_priv = to_i915(dev); 236 - bool was_interruptible; 237 235 int ret; 238 236 239 - mutex_lock(&dev->struct_mutex); 240 - was_interruptible = dev_priv->mm.interruptible; 241 - dev_priv->mm.interruptible = false; 237 + ret = i915_mutex_lock_interruptible(dev); 238 + if (ret) 239 + return ret; 242 240 243 241 ret = i915_gem_object_set_to_gtt_domain(obj, false); 244 - 245 - dev_priv->mm.interruptible = was_interruptible; 246 242 mutex_unlock(&dev->struct_mutex); 247 243 248 - if (unlikely(ret)) 249 - DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n"); 244 + return ret; 250 245 } 251 246 252 247 static const struct dma_buf_ops i915_dmabuf_ops = {
+3 -2
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
··· 93 93 return omap_gem_get_pages(obj, &pages, true); 94 94 } 95 95 96 - static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer, 97 - enum dma_data_direction dir) 96 + static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer, 97 + enum dma_data_direction dir) 98 98 { 99 99 struct drm_gem_object *obj = buffer->priv; 100 100 omap_gem_put_pages(obj); 101 + return 0; 101 102 } 102 103 103 104
+2 -2
drivers/gpu/drm/udl/udl_fb.c
··· 423 423 } 424 424 425 425 if (ufb->obj->base.import_attach) { 426 - dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf, 427 - DMA_FROM_DEVICE); 426 + ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf, 427 + DMA_FROM_DEVICE); 428 428 } 429 429 430 430 unlock:
+4 -2
drivers/staging/android/ion/ion.c
··· 1075 1075 return PTR_ERR_OR_ZERO(vaddr); 1076 1076 } 1077 1077 1078 - static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, 1079 - enum dma_data_direction direction) 1078 + static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, 1079 + enum dma_data_direction direction) 1080 1080 { 1081 1081 struct ion_buffer *buffer = dmabuf->priv; 1082 1082 1083 1083 mutex_lock(&buffer->lock); 1084 1084 ion_buffer_kmap_put(buffer); 1085 1085 mutex_unlock(&buffer->lock); 1086 + 1087 + return 0; 1086 1088 } 1087 1089 1088 1090 static struct dma_buf_ops dma_buf_ops = {
+3 -3
include/linux/dma-buf.h
··· 94 94 void (*release)(struct dma_buf *); 95 95 96 96 int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); 97 - void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); 97 + int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); 98 98 void *(*kmap_atomic)(struct dma_buf *, unsigned long); 99 99 void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); 100 100 void *(*kmap)(struct dma_buf *, unsigned long); ··· 224 224 enum dma_data_direction); 225 225 int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, 226 226 enum dma_data_direction dir); 227 - void dma_buf_end_cpu_access(struct dma_buf *dma_buf, 228 - enum dma_data_direction dir); 227 + int dma_buf_end_cpu_access(struct dma_buf *dma_buf, 228 + enum dma_data_direction dir); 229 229 void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long); 230 230 void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); 231 231 void *dma_buf_kmap(struct dma_buf *, unsigned long);