Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'topic/drm-misc-2016-11-10' of git://anongit.freedesktop.org/drm-intel into drm-next

- better atomic state debugging from Rob
- fence prep from Gustavo
- Sumit flushed out his backlog of pending dma-buf/fence patches from
various people
- drm_mm leak debugging plus trying to appease Kconfig (Chris)
- a few misc things all over

* tag 'topic/drm-misc-2016-11-10' of git://anongit.freedesktop.org/drm-intel: (35 commits)
drm: Make DRM_DEBUG_MM depend on STACKTRACE_SUPPORT
drm/i915: Restrict DRM_DEBUG_MM automatic selection
drm: Restrict stackdepot usage to builtin drm.ko
drm/msm: module param to dump state on error irq
drm/msm/mdp5: add atomic_print_state support
drm/atomic: add debugfs file to dump out atomic state
drm/atomic: add new drm_debug bit to dump atomic state
drm: add helpers to go from plane state to drm_rect
drm: add helper for printing to log or seq_file
drm: helper macros to print composite types
reservation: revert "wait only with non-zero timeout specified (v3)" v2
drm/ttm: fix ttm_bo_wait
dma-buf/fence: revert "don't wait when specified timeout is zero" (v2)
dma-buf/fence: make timeout handling in fence_default_wait consistent (v2)
drm/amdgpu: add the interface of waiting multiple fences (v4)
dma-buf: return index of the first signaled fence (v2)
MAINTAINERS: update Sync File Framework files
dma-buf/sw_sync: put fence reference from the fence creation
dma-buf/sw_sync: mark sync_timeline_create() static
drm: Add stackdepot include for DRM_DEBUG_MM
...

+969 -171
+17
Documentation/gpu/drm-internals.rst
··· 350 350 .. kernel-doc:: drivers/gpu/drm/drm_ioctl.c 351 351 :export: 352 352 353 + 354 + Misc Utilities 355 + ============== 356 + 357 + Printer 358 + ------- 359 + 360 + .. kernel-doc:: include/drm/drm_print.h 361 + :doc: print 362 + 363 + .. kernel-doc:: include/drm/drm_print.h 364 + :internal: 365 + 366 + .. kernel-doc:: include/drm/drm_print.h 367 + :export: 368 + 369 + 353 370 Legacy Support Code 354 371 =================== 355 372
+3 -1
MAINTAINERS
··· 3919 3919 S: Maintained 3920 3920 L: linux-media@vger.kernel.org 3921 3921 L: dri-devel@lists.freedesktop.org 3922 - F: drivers/dma-buf/sync_file.c 3922 + F: drivers/dma-buf/sync_* 3923 + F: drivers/dma-buf/sw_sync.c 3923 3924 F: include/linux/sync_file.h 3925 + F: include/uapi/linux/sync_file.h 3924 3926 F: Documentation/sync_file.txt 3925 3927 T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git 3926 3928
+21 -11
drivers/dma-buf/dma-fence.c
··· 161 161 if (WARN_ON(timeout < 0)) 162 162 return -EINVAL; 163 163 164 - if (timeout == 0) 165 - return dma_fence_is_signaled(fence); 166 - 167 164 trace_dma_fence_wait_start(fence); 168 165 ret = fence->ops->wait(fence, intr, timeout); 169 166 trace_dma_fence_wait_end(fence); ··· 336 339 * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT 337 340 * 338 341 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the 339 - * remaining timeout in jiffies on success. 342 + * remaining timeout in jiffies on success. If timeout is zero the value one is 343 + * returned if the fence is already signaled for consistency with other 344 + * functions taking a jiffies timeout. 340 345 */ 341 346 signed long 342 347 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) 343 348 { 344 349 struct default_wait_cb cb; 345 350 unsigned long flags; 346 - signed long ret = timeout; 351 + signed long ret = timeout ? timeout : 1; 347 352 bool was_set; 348 353 349 354 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 350 - return timeout; 355 + return ret; 351 356 352 357 spin_lock_irqsave(fence->lock, flags); 353 358 ··· 402 403 EXPORT_SYMBOL(dma_fence_default_wait); 403 404 404 405 static bool 405 - dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count) 406 + dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count, 407 + uint32_t *idx) 406 408 { 407 409 int i; 408 410 409 411 for (i = 0; i < count; ++i) { 410 412 struct dma_fence *fence = fences[i]; 411 - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 413 + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { 414 + if (idx) 415 + *idx = i; 412 416 return true; 417 + } 413 418 } 414 419 return false; 415 420 } ··· 425 422 * @count: [in] number of fences to wait on 426 423 * @intr: [in] if true, do an interruptible wait 427 424 * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT 425 + * @idx: [out] the first 
signaled fence index, meaningful only on 426 + * positive return 428 427 * 429 428 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if 430 429 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies ··· 438 433 */ 439 434 signed long 440 435 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, 441 - bool intr, signed long timeout) 436 + bool intr, signed long timeout, uint32_t *idx) 442 437 { 443 438 struct default_wait_cb *cb; 444 439 signed long ret = timeout; ··· 449 444 450 445 if (timeout == 0) { 451 446 for (i = 0; i < count; ++i) 452 - if (dma_fence_is_signaled(fences[i])) 447 + if (dma_fence_is_signaled(fences[i])) { 448 + if (idx) 449 + *idx = i; 453 450 return 1; 451 + } 454 452 455 453 return 0; 456 454 } ··· 476 468 if (dma_fence_add_callback(fence, &cb[i].base, 477 469 dma_fence_default_wait_cb)) { 478 470 /* This fence is already signaled */ 471 + if (idx) 472 + *idx = i; 479 473 goto fence_rm_cb; 480 474 } 481 475 } ··· 488 478 else 489 479 set_current_state(TASK_UNINTERRUPTIBLE); 490 480 491 - if (dma_fence_test_signaled_any(fences, count)) 481 + if (dma_fence_test_signaled_any(fences, count, idx)) 492 482 break; 493 483 494 484 ret = schedule_timeout(ret);
+1 -4
drivers/dma-buf/reservation.c
··· 370 370 { 371 371 struct dma_fence *fence; 372 372 unsigned seq, shared_count, i = 0; 373 - long ret = timeout; 374 - 375 - if (!timeout) 376 - return reservation_object_test_signaled_rcu(obj, wait_all); 373 + long ret = timeout ? timeout : 1; 377 374 378 375 retry: 379 376 fence = NULL;
+2 -2
drivers/dma-buf/sw_sync.c
··· 84 84 * Creates a new sync_timeline. Returns the sync_timeline object or NULL in 85 85 * case of error. 86 86 */ 87 - struct sync_timeline *sync_timeline_create(const char *name) 87 + static struct sync_timeline *sync_timeline_create(const char *name) 88 88 { 89 89 struct sync_timeline *obj; 90 90 ··· 316 316 } 317 317 318 318 sync_file = sync_file_create(&pt->base); 319 + dma_fence_put(&pt->base); 319 320 if (!sync_file) { 320 - dma_fence_put(&pt->base); 321 321 err = -ENOMEM; 322 322 goto err; 323 323 }
+14
drivers/gpu/drm/Kconfig
··· 33 33 read and write values to arbitrary DPCD registers on the DP aux 34 34 channel. 35 35 36 + config DRM_DEBUG_MM 37 + bool "Insert extra checks and debug info into the DRM range managers" 38 + default n 39 + depends on DRM=y 40 + depends on STACKTRACE_SUPPORT 41 + select STACKDEPOT 42 + help 43 + Enable allocation tracking of memory manager and leak detection on 44 + shutdown. 45 + 46 + Recommended for driver developers only. 47 + 48 + If in doubt, say "N". 49 + 36 50 config DRM_KMS_HELPER 37 51 tristate 38 52 depends on DRM
+1 -1
drivers/gpu/drm/Makefile
··· 15 15 drm_modeset_lock.o drm_atomic.o drm_bridge.o \ 16 16 drm_framebuffer.o drm_connector.o drm_blend.o \ 17 17 drm_encoder.o drm_mode_object.o drm_property.o \ 18 - drm_plane.o drm_color_mgmt.o 18 + drm_plane.o drm_color_mgmt.o drm_print.o 19 19 20 20 drm-$(CONFIG_COMPAT) += drm_ioc32.o 21 21 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+2
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1212 1212 struct drm_file *filp); 1213 1213 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 1214 1214 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 1215 + int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, 1216 + struct drm_file *filp); 1215 1217 1216 1218 int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, 1217 1219 struct drm_file *filp);
+174
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 1141 1141 } 1142 1142 1143 1143 /** 1144 + * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence 1145 + * 1146 + * @adev: amdgpu device 1147 + * @filp: file private 1148 + * @user: drm_amdgpu_fence copied from user space 1149 + */ 1150 + static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev, 1151 + struct drm_file *filp, 1152 + struct drm_amdgpu_fence *user) 1153 + { 1154 + struct amdgpu_ring *ring; 1155 + struct amdgpu_ctx *ctx; 1156 + struct dma_fence *fence; 1157 + int r; 1158 + 1159 + r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance, 1160 + user->ring, &ring); 1161 + if (r) 1162 + return ERR_PTR(r); 1163 + 1164 + ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id); 1165 + if (ctx == NULL) 1166 + return ERR_PTR(-EINVAL); 1167 + 1168 + fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no); 1169 + amdgpu_ctx_put(ctx); 1170 + 1171 + return fence; 1172 + } 1173 + 1174 + /** 1175 + * amdgpu_cs_wait_all_fence - wait on all fences to signal 1176 + * 1177 + * @adev: amdgpu device 1178 + * @filp: file private 1179 + * @wait: wait parameters 1180 + * @fences: array of drm_amdgpu_fence 1181 + */ 1182 + static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev, 1183 + struct drm_file *filp, 1184 + union drm_amdgpu_wait_fences *wait, 1185 + struct drm_amdgpu_fence *fences) 1186 + { 1187 + uint32_t fence_count = wait->in.fence_count; 1188 + unsigned int i; 1189 + long r = 1; 1190 + 1191 + for (i = 0; i < fence_count; i++) { 1192 + struct dma_fence *fence; 1193 + unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns); 1194 + 1195 + fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); 1196 + if (IS_ERR(fence)) 1197 + return PTR_ERR(fence); 1198 + else if (!fence) 1199 + continue; 1200 + 1201 + r = dma_fence_wait_timeout(fence, true, timeout); 1202 + if (r < 0) 1203 + return r; 1204 + 1205 + if (r == 0) 1206 + break; 1207 + } 1208 + 1209 + memset(wait, 0, sizeof(*wait)); 1210 + wait->out.status = (r > 0); 1211 + 
1212 + return 0; 1213 + } 1214 + 1215 + /** 1216 + * amdgpu_cs_wait_any_fence - wait on any fence to signal 1217 + * 1218 + * @adev: amdgpu device 1219 + * @filp: file private 1220 + * @wait: wait parameters 1221 + * @fences: array of drm_amdgpu_fence 1222 + */ 1223 + static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev, 1224 + struct drm_file *filp, 1225 + union drm_amdgpu_wait_fences *wait, 1226 + struct drm_amdgpu_fence *fences) 1227 + { 1228 + unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns); 1229 + uint32_t fence_count = wait->in.fence_count; 1230 + uint32_t first = ~0; 1231 + struct dma_fence **array; 1232 + unsigned int i; 1233 + long r; 1234 + 1235 + /* Prepare the fence array */ 1236 + array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL); 1237 + 1238 + if (array == NULL) 1239 + return -ENOMEM; 1240 + 1241 + for (i = 0; i < fence_count; i++) { 1242 + struct dma_fence *fence; 1243 + 1244 + fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); 1245 + if (IS_ERR(fence)) { 1246 + r = PTR_ERR(fence); 1247 + goto err_free_fence_array; 1248 + } else if (fence) { 1249 + array[i] = fence; 1250 + } else { /* NULL, the fence has been already signaled */ 1251 + r = 1; 1252 + goto out; 1253 + } 1254 + } 1255 + 1256 + r = dma_fence_wait_any_timeout(array, fence_count, true, timeout, 1257 + &first); 1258 + if (r < 0) 1259 + goto err_free_fence_array; 1260 + 1261 + out: 1262 + memset(wait, 0, sizeof(*wait)); 1263 + wait->out.status = (r > 0); 1264 + wait->out.first_signaled = first; 1265 + /* set return value 0 to indicate success */ 1266 + r = 0; 1267 + 1268 + err_free_fence_array: 1269 + for (i = 0; i < fence_count; i++) 1270 + dma_fence_put(array[i]); 1271 + kfree(array); 1272 + 1273 + return r; 1274 + } 1275 + 1276 + /** 1277 + * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish 1278 + * 1279 + * @dev: drm device 1280 + * @data: data from userspace 1281 + * @filp: file private 1282 + */ 1283 + int 
amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, 1284 + struct drm_file *filp) 1285 + { 1286 + struct amdgpu_device *adev = dev->dev_private; 1287 + union drm_amdgpu_wait_fences *wait = data; 1288 + uint32_t fence_count = wait->in.fence_count; 1289 + struct drm_amdgpu_fence *fences_user; 1290 + struct drm_amdgpu_fence *fences; 1291 + int r; 1292 + 1293 + /* Get the fences from userspace */ 1294 + fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence), 1295 + GFP_KERNEL); 1296 + if (fences == NULL) 1297 + return -ENOMEM; 1298 + 1299 + fences_user = (void __user *)(unsigned long)(wait->in.fences); 1300 + if (copy_from_user(fences, fences_user, 1301 + sizeof(struct drm_amdgpu_fence) * fence_count)) { 1302 + r = -EFAULT; 1303 + goto err_free_fences; 1304 + } 1305 + 1306 + if (wait->in.wait_all) 1307 + r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences); 1308 + else 1309 + r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences); 1310 + 1311 + err_free_fences: 1312 + kfree(fences); 1313 + 1314 + return r; 1315 + } 1316 + 1317 + /** 1144 1318 * amdgpu_cs_find_bo_va - find bo_va for VM address 1145 1319 * 1146 1320 * @parser: command submission parser context
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 823 823 DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 824 824 DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 825 825 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 826 + DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 826 827 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 827 828 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 828 829 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
··· 361 361 if (count) { 362 362 spin_unlock(&sa_manager->wq.lock); 363 363 t = dma_fence_wait_any_timeout(fences, count, false, 364 - MAX_SCHEDULE_TIMEOUT); 364 + MAX_SCHEDULE_TIMEOUT, 365 + NULL); 365 366 for (i = 0; i < count; ++i) 366 367 dma_fence_put(fences[i]); 367 368
-2
drivers/gpu/drm/arc/arcpgu_drv.c
··· 65 65 .open = drm_open, 66 66 .release = drm_release, 67 67 .unlocked_ioctl = drm_ioctl, 68 - #ifdef CONFIG_COMPAT 69 68 .compat_ioctl = drm_compat_ioctl, 70 - #endif 71 69 .poll = drm_poll, 72 70 .read = drm_read, 73 71 .llseek = no_llseek,
-2
drivers/gpu/drm/arm/hdlcd_drv.c
··· 268 268 .open = drm_open, 269 269 .release = drm_release, 270 270 .unlocked_ioctl = drm_ioctl, 271 - #ifdef CONFIG_COMPAT 272 271 .compat_ioctl = drm_compat_ioctl, 273 - #endif 274 272 .poll = drm_poll, 275 273 .read = drm_read, 276 274 .llseek = noop_llseek,
-2
drivers/gpu/drm/arm/malidp_drv.c
··· 197 197 .open = drm_open, 198 198 .release = drm_release, 199 199 .unlocked_ioctl = drm_ioctl, 200 - #ifdef CONFIG_COMPAT 201 200 .compat_ioctl = drm_compat_ioctl, 202 - #endif 203 201 .poll = drm_poll, 204 202 .read = drm_read, 205 203 .llseek = noop_llseek,
-2
drivers/gpu/drm/ast/ast_drv.c
··· 188 188 .unlocked_ioctl = drm_ioctl, 189 189 .mmap = ast_mmap, 190 190 .poll = drm_poll, 191 - #ifdef CONFIG_COMPAT 192 191 .compat_ioctl = drm_compat_ioctl, 193 - #endif 194 192 .read = drm_read, 195 193 }; 196 194
-2
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
··· 749 749 .open = drm_open, 750 750 .release = drm_release, 751 751 .unlocked_ioctl = drm_ioctl, 752 - #ifdef CONFIG_COMPAT 753 752 .compat_ioctl = drm_compat_ioctl, 754 - #endif 755 753 .poll = drm_poll, 756 754 .read = drm_read, 757 755 .llseek = no_llseek,
-2
drivers/gpu/drm/bochs/bochs_drv.c
··· 70 70 .open = drm_open, 71 71 .release = drm_release, 72 72 .unlocked_ioctl = drm_ioctl, 73 - #ifdef CONFIG_COMPAT 74 73 .compat_ioctl = drm_compat_ioctl, 75 - #endif 76 74 .poll = drm_poll, 77 75 .read = drm_read, 78 76 .llseek = no_llseek,
-2
drivers/gpu/drm/cirrus/cirrus_drv.c
··· 126 126 .unlocked_ioctl = drm_ioctl, 127 127 .mmap = cirrus_mmap, 128 128 .poll = drm_poll, 129 - #ifdef CONFIG_COMPAT 130 129 .compat_ioctl = drm_compat_ioctl, 131 - #endif 132 130 }; 133 131 static struct drm_driver driver = { 134 132 .driver_features = DRIVER_MODESET | DRIVER_GEM,
+186
drivers/gpu/drm/drm_atomic.c
··· 30 30 #include <drm/drm_atomic.h> 31 31 #include <drm/drm_mode.h> 32 32 #include <drm/drm_plane_helper.h> 33 + #include <drm/drm_print.h> 33 34 34 35 #include "drm_crtc_internal.h" 35 36 ··· 606 605 return 0; 607 606 } 608 607 608 + static void drm_atomic_crtc_print_state(struct drm_printer *p, 609 + const struct drm_crtc_state *state) 610 + { 611 + struct drm_crtc *crtc = state->crtc; 612 + 613 + drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name); 614 + drm_printf(p, "\tenable=%d\n", state->enable); 615 + drm_printf(p, "\tactive=%d\n", state->active); 616 + drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed); 617 + drm_printf(p, "\tmode_changed=%d\n", state->mode_changed); 618 + drm_printf(p, "\tactive_changed=%d\n", state->active_changed); 619 + drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed); 620 + drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); 621 + drm_printf(p, "\tplane_mask=%x\n", state->plane_mask); 622 + drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask); 623 + drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask); 624 + drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode)); 625 + 626 + if (crtc->funcs->atomic_print_state) 627 + crtc->funcs->atomic_print_state(p, state); 628 + } 629 + 609 630 /** 610 631 * drm_atomic_get_plane_state - get plane state 611 632 * @state: global atomic state object ··· 904 881 return 0; 905 882 } 906 883 884 + static void drm_atomic_plane_print_state(struct drm_printer *p, 885 + const struct drm_plane_state *state) 886 + { 887 + struct drm_plane *plane = state->plane; 888 + struct drm_rect src = drm_plane_state_src(state); 889 + struct drm_rect dest = drm_plane_state_dest(state); 890 + 891 + drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name); 892 + drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); 893 + drm_printf(p, "\tfb=%u\n", state->fb ? 
state->fb->base.id : 0); 894 + if (state->fb) { 895 + struct drm_framebuffer *fb = state->fb; 896 + int i, n = drm_format_num_planes(fb->pixel_format); 897 + 898 + drm_printf(p, "\t\tformat=%s\n", 899 + drm_get_format_name(fb->pixel_format)); 900 + drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height); 901 + drm_printf(p, "\t\tlayers:\n"); 902 + for (i = 0; i < n; i++) { 903 + drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]); 904 + drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]); 905 + drm_printf(p, "\t\t\tmodifier[%d]=0x%llx\n", i, fb->modifier[i]); 906 + } 907 + } 908 + drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest)); 909 + drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src)); 910 + drm_printf(p, "\trotation=%x\n", state->rotation); 911 + 912 + if (plane->funcs->atomic_print_state) 913 + plane->funcs->atomic_print_state(p, state); 914 + } 915 + 907 916 /** 908 917 * drm_atomic_get_connector_state - get connector state 909 918 * @state: global atomic state object ··· 1050 995 } 1051 996 } 1052 997 EXPORT_SYMBOL(drm_atomic_connector_set_property); 998 + 999 + static void drm_atomic_connector_print_state(struct drm_printer *p, 1000 + const struct drm_connector_state *state) 1001 + { 1002 + struct drm_connector *connector = state->connector; 1003 + 1004 + drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name); 1005 + drm_printf(p, "\tcrtc=%s\n", state->crtc ? 
state->crtc->name : "(null)"); 1006 + 1007 + if (connector->funcs->atomic_print_state) 1008 + connector->funcs->atomic_print_state(p, state); 1009 + } 1053 1010 1054 1011 /** 1055 1012 * drm_atomic_connector_get_property - get property value from connector state ··· 1215 1148 plane_state); 1216 1149 } 1217 1150 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); 1151 + 1152 + /** 1153 + * drm_atomic_set_fence_for_plane - set fence for plane 1154 + * @plane_state: atomic state object for the plane 1155 + * @fence: dma_fence to use for the plane 1156 + * 1157 + * Helper to setup the plane_state fence in case it is not set yet. 1158 + * By using this, drivers don't need to worry if the user chose 1159 + * implicit or explicit fencing. 1160 + * 1161 + * This function will not set the fence to the state if it was set 1162 + * via explicit fencing interfaces on the atomic ioctl. It will 1163 + * instead drop the reference to the fence as we are not storing it 1164 + * anywhere. 1165 + * 1166 + * Otherwise, if plane_state->fence is not set this function will 1167 + * just set it with the received implicit fence. 
1168 + */ 1169 + void 1170 + drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state, 1171 + struct dma_fence *fence) 1172 + { 1173 + if (plane_state->fence) { 1174 + dma_fence_put(fence); 1175 + return; 1176 + } 1177 + 1178 + plane_state->fence = fence; 1179 + } 1180 + EXPORT_SYMBOL(drm_atomic_set_fence_for_plane); 1218 1181 1219 1182 /** 1220 1183 * drm_atomic_set_crtc_for_connector - set crtc for connector ··· 1557 1460 } 1558 1461 EXPORT_SYMBOL(drm_atomic_nonblocking_commit); 1559 1462 1463 + static void drm_atomic_print_state(const struct drm_atomic_state *state) 1464 + { 1465 + struct drm_printer p = drm_info_printer(state->dev->dev); 1466 + struct drm_plane *plane; 1467 + struct drm_plane_state *plane_state; 1468 + struct drm_crtc *crtc; 1469 + struct drm_crtc_state *crtc_state; 1470 + struct drm_connector *connector; 1471 + struct drm_connector_state *connector_state; 1472 + int i; 1473 + 1474 + DRM_DEBUG_ATOMIC("checking %p\n", state); 1475 + 1476 + for_each_plane_in_state(state, plane, plane_state, i) 1477 + drm_atomic_plane_print_state(&p, plane_state); 1478 + 1479 + for_each_crtc_in_state(state, crtc, crtc_state, i) 1480 + drm_atomic_crtc_print_state(&p, crtc_state); 1481 + 1482 + for_each_connector_in_state(state, connector, connector_state, i) 1483 + drm_atomic_connector_print_state(&p, connector_state); 1484 + } 1485 + 1486 + /** 1487 + * drm_state_dump - dump entire device atomic state 1488 + * @dev: the drm device 1489 + * @p: where to print the state to 1490 + * 1491 + * Just for debugging. Drivers might want an option to dump state 1492 + * to dmesg in case of error irq's. (Hint, you probably want to 1493 + * ratelimit this!) 1494 + * 1495 + * The caller must drm_modeset_lock_all(), or if this is called 1496 + * from error irq handler, it should not be enabled by default. 1497 + * (Ie. if you are debugging errors you might not care that this 1498 + * is racey. 
But calling this without all modeset locks held is 1499 + * not inherently safe.) 1500 + */ 1501 + void drm_state_dump(struct drm_device *dev, struct drm_printer *p) 1502 + { 1503 + struct drm_mode_config *config = &dev->mode_config; 1504 + struct drm_plane *plane; 1505 + struct drm_crtc *crtc; 1506 + struct drm_connector *connector; 1507 + 1508 + if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) 1509 + return; 1510 + 1511 + list_for_each_entry(plane, &config->plane_list, head) 1512 + drm_atomic_plane_print_state(p, plane->state); 1513 + 1514 + list_for_each_entry(crtc, &config->crtc_list, head) 1515 + drm_atomic_crtc_print_state(p, crtc->state); 1516 + 1517 + list_for_each_entry(connector, &config->connector_list, head) 1518 + drm_atomic_connector_print_state(p, connector->state); 1519 + } 1520 + EXPORT_SYMBOL(drm_state_dump); 1521 + 1522 + #ifdef CONFIG_DEBUG_FS 1523 + static int drm_state_info(struct seq_file *m, void *data) 1524 + { 1525 + struct drm_info_node *node = (struct drm_info_node *) m->private; 1526 + struct drm_device *dev = node->minor->dev; 1527 + struct drm_printer p = drm_seq_file_printer(m); 1528 + 1529 + drm_modeset_lock_all(dev); 1530 + drm_state_dump(dev, &p); 1531 + drm_modeset_unlock_all(dev); 1532 + 1533 + return 0; 1534 + } 1535 + 1536 + /* any use in debugfs files to dump individual planes/crtc/etc? 
*/ 1537 + static const struct drm_info_list drm_atomic_debugfs_list[] = { 1538 + {"state", drm_state_info, 0}, 1539 + }; 1540 + 1541 + int drm_atomic_debugfs_init(struct drm_minor *minor) 1542 + { 1543 + return drm_debugfs_create_files(drm_atomic_debugfs_list, 1544 + ARRAY_SIZE(drm_atomic_debugfs_list), 1545 + minor->debugfs_root, minor); 1546 + } 1547 + #endif 1548 + 1560 1549 /* 1561 1550 * The big monstor ioctl 1562 1551 */ ··· 1932 1749 } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { 1933 1750 ret = drm_atomic_nonblocking_commit(state); 1934 1751 } else { 1752 + if (unlikely(drm_debug & DRM_UT_STATE)) 1753 + drm_atomic_print_state(state); 1754 + 1935 1755 ret = drm_atomic_commit(state); 1936 1756 } 1937 1757
+2 -2
drivers/gpu/drm/drm_crtc.c
··· 229 229 230 230 crtc->primary = primary; 231 231 crtc->cursor = cursor; 232 - if (primary) 232 + if (primary && !primary->possible_crtcs) 233 233 primary->possible_crtcs = 1 << drm_crtc_index(crtc); 234 - if (cursor) 234 + if (cursor && !cursor->possible_crtcs) 235 235 cursor->possible_crtcs = 1 << drm_crtc_index(crtc); 236 236 237 237 ret = drm_crtc_crc_init(crtc);
+9
drivers/gpu/drm/drm_debugfs.c
··· 36 36 #include <linux/export.h> 37 37 #include <drm/drmP.h> 38 38 #include <drm/drm_edid.h> 39 + #include <drm/drm_atomic.h> 39 40 #include "drm_internal.h" 40 41 41 42 #if defined(CONFIG_DEBUG_FS) ··· 162 161 minor->debugfs_root = NULL; 163 162 DRM_ERROR("Failed to create core drm debugfs files\n"); 164 163 return ret; 164 + } 165 + 166 + if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { 167 + ret = drm_atomic_debugfs_init(minor); 168 + if (ret) { 169 + DRM_ERROR("Failed to create atomic debugfs files\n"); 170 + return ret; 171 + } 165 172 } 166 173 167 174 if (dev->driver->debugfs_init) {
+2 -2
drivers/gpu/drm/drm_edid.c
··· 957 957 798, 858, 0, 480, 489, 495, 525, 0, 958 958 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 959 959 .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, 960 - /* 58 - 720(1440)x480i@240 */ 960 + /* 58 - 720(1440)x480i@240Hz */ 961 961 { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739, 962 962 801, 858, 0, 480, 488, 494, 525, 0, 963 963 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 964 964 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), 965 965 .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, }, 966 - /* 59 - 720(1440)x480i@240 */ 966 + /* 59 - 720(1440)x480i@240Hz */ 967 967 { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739, 968 968 801, 858, 0, 480, 488, 494, 525, 0, 969 969 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+6 -7
drivers/gpu/drm/drm_fops.c
··· 51 51 * Drivers must define the file operations structure that forms the DRM 52 52 * userspace API entry point, even though most of those operations are 53 53 * implemented in the DRM core. The mandatory functions are drm_open(), 54 - * drm_read(), drm_ioctl() and drm_compat_ioctl if CONFIG_COMPAT is enabled. 55 - * Drivers which implement private ioctls that require 32/64 bit compatibility 56 - * support must provided their onw .compat_ioctl() handler that processes 57 - * private ioctls and calls drm_compat_ioctl() for core ioctls. 54 + * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled 55 + * (note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n). Drivers which 56 + * implement private ioctls that require 32/64 bit compatibility support must 57 + * provide their own .compat_ioctl() handler that processes private ioctls and 58 + * calls drm_compat_ioctl() for core ioctls. 58 59 * 59 60 * In addition drm_read() and drm_poll() provide support for DRM events. DRM 60 61 * events are a generic and extensible means to send asynchronous events to ··· 76 75 * .open = drm_open, 77 76 * .release = drm_release, 78 77 * .unlocked_ioctl = drm_ioctl, 79 - * #ifdef CONFIG_COMPAT 80 - * .compat_ioctl = drm_compat_ioctl, 81 - * #endif 78 + * .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n 82 79 * .poll = drm_poll, 83 80 * .read = drm_read, 84 81 * .llseek = no_llseek,
+5
drivers/gpu/drm/drm_framebuffer.c
··· 673 673 * those used for fbdev. Note that the caller must hold a reference of it's own, 674 674 * i.e. the object may not be destroyed through this call (since it'll lead to a 675 675 * locking inversion). 676 + * 677 + * NOTE: This function is deprecated. For driver-private framebuffers it is not 678 + * recommended to embed a framebuffer struct info fbdev struct, instead, a 679 + * framebuffer pointer is preferred and drm_framebuffer_unreference() should be 680 + * called when the framebuffer is to be cleaned up. 676 681 */ 677 682 void drm_framebuffer_unregister_private(struct drm_framebuffer *fb) 678 683 {
+73 -3
drivers/gpu/drm/drm_mm.c
··· 104 104 u64 end, 105 105 enum drm_mm_search_flags flags); 106 106 107 + #ifdef CONFIG_DRM_DEBUG_MM 108 + #include <linux/stackdepot.h> 109 + 110 + #define STACKDEPTH 32 111 + #define BUFSZ 4096 112 + 113 + static noinline void save_stack(struct drm_mm_node *node) 114 + { 115 + unsigned long entries[STACKDEPTH]; 116 + struct stack_trace trace = { 117 + .entries = entries, 118 + .max_entries = STACKDEPTH, 119 + .skip = 1 120 + }; 121 + 122 + save_stack_trace(&trace); 123 + if (trace.nr_entries != 0 && 124 + trace.entries[trace.nr_entries-1] == ULONG_MAX) 125 + trace.nr_entries--; 126 + 127 + /* May be called under spinlock, so avoid sleeping */ 128 + node->stack = depot_save_stack(&trace, GFP_NOWAIT); 129 + } 130 + 131 + static void show_leaks(struct drm_mm *mm) 132 + { 133 + struct drm_mm_node *node; 134 + unsigned long entries[STACKDEPTH]; 135 + char *buf; 136 + 137 + buf = kmalloc(BUFSZ, GFP_KERNEL); 138 + if (!buf) 139 + return; 140 + 141 + list_for_each_entry(node, &mm->head_node.node_list, node_list) { 142 + struct stack_trace trace = { 143 + .entries = entries, 144 + .max_entries = STACKDEPTH 145 + }; 146 + 147 + if (!node->stack) { 148 + DRM_ERROR("node [%08llx + %08llx]: unknown owner\n", 149 + node->start, node->size); 150 + continue; 151 + } 152 + 153 + depot_fetch_stack(node->stack, &trace); 154 + snprint_stack_trace(buf, BUFSZ, &trace, 0); 155 + DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s", 156 + node->start, node->size, buf); 157 + } 158 + 159 + kfree(buf); 160 + } 161 + 162 + #undef STACKDEPTH 163 + #undef BUFSZ 164 + #else 165 + static void save_stack(struct drm_mm_node *node) { } 166 + static void show_leaks(struct drm_mm *mm) { } 167 + #endif 168 + 107 169 #define START(node) ((node)->start) 108 170 #define LAST(node) ((node)->start + (node)->size - 1) 109 171 ··· 290 228 list_add(&node->hole_stack, &mm->hole_stack); 291 229 node->hole_follows = 1; 292 230 } 231 + 232 + save_stack(node); 293 233 } 294 234 295 235 /** ··· 356 292 
list_add(&node->hole_stack, &mm->hole_stack); 357 293 node->hole_follows = 1; 358 294 } 295 + 296 + save_stack(node); 359 297 360 298 return 0; 361 299 } ··· 463 397 list_add(&node->hole_stack, &mm->hole_stack); 464 398 node->hole_follows = 1; 465 399 } 400 + 401 + save_stack(node); 466 402 } 467 403 468 404 /** ··· 929 861 * Note that it is a bug to call this function on an allocator which is not 930 862 * clean. 931 863 */ 932 - void drm_mm_takedown(struct drm_mm * mm) 864 + void drm_mm_takedown(struct drm_mm *mm) 933 865 { 934 - WARN(!list_empty(&mm->head_node.node_list), 935 - "Memory manager not clean during takedown.\n"); 866 + if (WARN(!list_empty(&mm->head_node.node_list), 867 + "Memory manager not clean during takedown.\n")) 868 + show_leaks(mm); 869 + 936 870 } 937 871 EXPORT_SYMBOL(drm_mm_takedown); 938 872
+1 -7
drivers/gpu/drm/drm_modes.c
··· 49 49 */ 50 50 void drm_mode_debug_printmodeline(const struct drm_display_mode *mode) 51 51 { 52 - DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d " 53 - "0x%x 0x%x\n", 54 - mode->base.id, mode->name, mode->vrefresh, mode->clock, 55 - mode->hdisplay, mode->hsync_start, 56 - mode->hsync_end, mode->htotal, 57 - mode->vdisplay, mode->vsync_start, 58 - mode->vsync_end, mode->vtotal, mode->type, mode->flags); 52 + DRM_DEBUG_KMS("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); 59 53 } 60 54 EXPORT_SYMBOL(drm_mode_debug_printmodeline); 61 55
+2 -9
drivers/gpu/drm/drm_plane_helper.c
··· 130 130 unsigned int rotation = state->rotation; 131 131 int hscale, vscale; 132 132 133 - src->x1 = state->src_x; 134 - src->y1 = state->src_y; 135 - src->x2 = state->src_x + state->src_w; 136 - src->y2 = state->src_y + state->src_h; 137 - 138 - dst->x1 = state->crtc_x; 139 - dst->y1 = state->crtc_y; 140 - dst->x2 = state->crtc_x + state->crtc_w; 141 - dst->y2 = state->crtc_y + state->crtc_h; 133 + *src = drm_plane_state_src(state); 134 + *dst = drm_plane_state_dest(state); 142 135 143 136 if (!fb) { 144 137 state->visible = false;
+54
drivers/gpu/drm/drm_print.c
··· 1 + /* 2 + * Copyright (C) 2016 Red Hat 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + * Authors: 23 + * Rob Clark <robdclark@gmail.com> 24 + */ 25 + 26 + #include <stdarg.h> 27 + #include <linux/seq_file.h> 28 + #include <drm/drmP.h> 29 + #include <drm/drm_print.h> 30 + 31 + void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf) 32 + { 33 + seq_printf(p->arg, "%pV", vaf); 34 + } 35 + EXPORT_SYMBOL(__drm_printfn_seq_file); 36 + 37 + void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf) 38 + { 39 + dev_printk(KERN_INFO, p->arg, "[" DRM_NAME "] %pV", vaf); 40 + } 41 + EXPORT_SYMBOL(__drm_printfn_info); 42 + 43 + void drm_printf(struct drm_printer *p, const char *f, ...) 
44 + { 45 + struct va_format vaf; 46 + va_list args; 47 + 48 + va_start(args, f); 49 + vaf.fmt = f; 50 + vaf.va = &args; 51 + p->printfn(p, &vaf); 52 + va_end(args); 53 + } 54 + EXPORT_SYMBOL(drm_printf);
+2 -9
drivers/gpu/drm/drm_rect.c
··· 281 281 */ 282 282 void drm_rect_debug_print(const char *prefix, const struct drm_rect *r, bool fixed_point) 283 283 { 284 - int w = drm_rect_width(r); 285 - int h = drm_rect_height(r); 286 - 287 284 if (fixed_point) 288 - DRM_DEBUG_KMS("%s%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", prefix, 289 - w >> 16, ((w & 0xffff) * 15625) >> 10, 290 - h >> 16, ((h & 0xffff) * 15625) >> 10, 291 - r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10, 292 - r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10); 285 + DRM_DEBUG_KMS("%s" DRM_RECT_FP_FMT "\n", prefix, DRM_RECT_FP_ARG(r)); 293 286 else 294 - DRM_DEBUG_KMS("%s%dx%d%+d%+d\n", prefix, w, h, r->x1, r->y1); 287 + DRM_DEBUG_KMS("%s" DRM_RECT_FMT "\n", prefix, DRM_RECT_ARG(r)); 295 288 } 296 289 EXPORT_SYMBOL(drm_rect_debug_print); 297 290
-2
drivers/gpu/drm/etnaviv/etnaviv_drv.c
··· 479 479 .open = drm_open, 480 480 .release = drm_release, 481 481 .unlocked_ioctl = drm_ioctl, 482 - #ifdef CONFIG_COMPAT 483 482 .compat_ioctl = drm_compat_ioctl, 484 - #endif 485 483 .poll = drm_poll, 486 484 .read = drm_read, 487 485 .llseek = no_llseek,
-2
drivers/gpu/drm/exynos/exynos_drm_drv.c
··· 366 366 .poll = drm_poll, 367 367 .read = drm_read, 368 368 .unlocked_ioctl = drm_ioctl, 369 - #ifdef CONFIG_COMPAT 370 369 .compat_ioctl = drm_compat_ioctl, 371 - #endif 372 370 .release = drm_release, 373 371 }; 374 372
-2
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
··· 180 180 .open = drm_open, 181 181 .release = drm_release, 182 182 .unlocked_ioctl = drm_ioctl, 183 - #ifdef CONFIG_COMPAT 184 183 .compat_ioctl = drm_compat_ioctl, 185 - #endif 186 184 .poll = drm_poll, 187 185 .read = drm_read, 188 186 .llseek = no_llseek,
+1 -1
drivers/gpu/drm/gma500/gtt.c
··· 131 131 * page table entries with the dummy page. This is protected via the gtt 132 132 * mutex which the caller must hold. 133 133 */ 134 - void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r) 134 + static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r) 135 135 { 136 136 struct drm_psb_private *dev_priv = dev->dev_private; 137 137 u32 __iomem *gtt_slot;
+1
drivers/gpu/drm/gma500/psb_drv.c
··· 473 473 .open = drm_open, 474 474 .release = drm_release, 475 475 .unlocked_ioctl = psb_unlocked_ioctl, 476 + .compat_ioctl = drm_compat_ioctl, 476 477 .mmap = drm_gem_mmap, 477 478 .poll = drm_poll, 478 479 .read = drm_read,
-4
drivers/gpu/drm/gma500/psb_drv.h
··· 753 753 extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, 754 754 uint32_t handle, uint64_t *offset); 755 755 extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 756 - extern int psb_gem_create_ioctl(struct drm_device *dev, void *data, 757 - struct drm_file *file); 758 - extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data, 759 - struct drm_file *file); 760 756 761 757 /* psb_device.c */ 762 758 extern const struct psb_ops psb_chip_ops;
-2
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
··· 152 152 .open = drm_open, 153 153 .release = drm_release, 154 154 .unlocked_ioctl = drm_ioctl, 155 - #ifdef CONFIG_COMPAT 156 155 .compat_ioctl = drm_compat_ioctl, 157 - #endif 158 156 .poll = drm_poll, 159 157 .read = drm_read, 160 158 .llseek = no_llseek,
-2
drivers/gpu/drm/i810/i810_dma.c
··· 113 113 .release = drm_release, 114 114 .unlocked_ioctl = drm_ioctl, 115 115 .mmap = i810_mmap_buffers, 116 - #ifdef CONFIG_COMPAT 117 116 .compat_ioctl = drm_compat_ioctl, 118 - #endif 119 117 .llseek = noop_llseek, 120 118 }; 121 119
-2
drivers/gpu/drm/i810/i810_drv.c
··· 49 49 .unlocked_ioctl = drm_ioctl, 50 50 .mmap = drm_legacy_mmap, 51 51 .poll = drm_poll, 52 - #ifdef CONFIG_COMPAT 53 52 .compat_ioctl = drm_compat_ioctl, 54 - #endif 55 53 .llseek = noop_llseek, 56 54 }; 57 55
+1
drivers/gpu/drm/i915/Kconfig.debug
··· 21 21 select PREEMPT_COUNT 22 22 select X86_MSR # used by igt/pm_rpm 23 23 select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) 24 + select DRM_DEBUG_MM if DRM=y 24 25 default n 25 26 help 26 27 Choose this option to turn on extra driver debugging that may affect
-2
drivers/gpu/drm/i915/i915_drv.c
··· 2497 2497 .mmap = drm_gem_mmap, 2498 2498 .poll = drm_poll, 2499 2499 .read = drm_read, 2500 - #ifdef CONFIG_COMPAT 2501 2500 .compat_ioctl = i915_compat_ioctl, 2502 - #endif 2503 2501 .llseek = noop_llseek, 2504 2502 }; 2505 2503
+2
drivers/gpu/drm/i915/i915_drv.h
··· 3001 3001 #ifdef CONFIG_COMPAT 3002 3002 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 3003 3003 unsigned long arg); 3004 + #else 3005 + #define i915_compat_ioctl NULL 3004 3006 #endif 3005 3007 extern const struct dev_pm_ops i915_pm_ops; 3006 3008
+2 -8
drivers/gpu/drm/i915/intel_display.c
··· 2821 2821 plane_state->crtc_w = fb->width; 2822 2822 plane_state->crtc_h = fb->height; 2823 2823 2824 - intel_state->base.src.x1 = plane_state->src_x; 2825 - intel_state->base.src.y1 = plane_state->src_y; 2826 - intel_state->base.src.x2 = plane_state->src_x + plane_state->src_w; 2827 - intel_state->base.src.y2 = plane_state->src_y + plane_state->src_h; 2828 - intel_state->base.dst.x1 = plane_state->crtc_x; 2829 - intel_state->base.dst.y1 = plane_state->crtc_y; 2830 - intel_state->base.dst.x2 = plane_state->crtc_x + plane_state->crtc_w; 2831 - intel_state->base.dst.y2 = plane_state->crtc_y + plane_state->crtc_h; 2824 + intel_state->base.src = drm_plane_state_src(plane_state); 2825 + intel_state->base.dst = drm_plane_state_dest(plane_state); 2832 2826 2833 2827 obj = intel_fb_obj(fb); 2834 2828 if (i915_gem_object_is_tiled(obj))
+2 -9
drivers/gpu/drm/i915/intel_sprite.c
··· 773 773 bool can_scale; 774 774 int ret; 775 775 776 - src->x1 = state->base.src_x; 777 - src->y1 = state->base.src_y; 778 - src->x2 = state->base.src_x + state->base.src_w; 779 - src->y2 = state->base.src_y + state->base.src_h; 780 - 781 - dst->x1 = state->base.crtc_x; 782 - dst->y1 = state->base.crtc_y; 783 - dst->x2 = state->base.crtc_x + state->base.crtc_w; 784 - dst->y2 = state->base.crtc_y + state->base.crtc_h; 776 + *src = drm_plane_state_src(&state->base); 777 + *dst = drm_plane_state_dest(&state->base); 785 778 786 779 if (!fb) { 787 780 state->base.visible = false;
+4 -2
drivers/gpu/drm/imx/imx-drm-core.c
··· 158 158 struct drm_plane_state *plane_state; 159 159 struct drm_plane *plane; 160 160 struct dma_buf *dma_buf; 161 + struct dma_fence *fence; 161 162 int i; 162 163 163 164 /* ··· 171 170 0)->base.dma_buf; 172 171 if (!dma_buf) 173 172 continue; 174 - plane_state->fence = 175 - reservation_object_get_excl_rcu(dma_buf->resv); 173 + fence = reservation_object_get_excl_rcu(dma_buf->resv); 174 + 175 + drm_atomic_set_fence_for_plane(plane_state, fence); 176 176 } 177 177 } 178 178
-2
drivers/gpu/drm/mediatek/mtk_drm_drv.c
··· 249 249 .mmap = mtk_drm_gem_mmap, 250 250 .poll = drm_poll, 251 251 .read = drm_read, 252 - #ifdef CONFIG_COMPAT 253 252 .compat_ioctl = drm_compat_ioctl, 254 - #endif 255 253 }; 256 254 257 255 static struct drm_driver mtk_drm_driver = {
-2
drivers/gpu/drm/mgag200/mgag200_drv.c
··· 82 82 .unlocked_ioctl = drm_ioctl, 83 83 .mmap = mgag200_mmap, 84 84 .poll = drm_poll, 85 - #ifdef CONFIG_COMPAT 86 85 .compat_ioctl = drm_compat_ioctl, 87 - #endif 88 86 .read = drm_read, 89 87 }; 90 88
+10
drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
··· 15 15 * this program. If not, see <http://www.gnu.org/licenses/>. 16 16 */ 17 17 18 + #include <drm/drm_print.h> 18 19 19 20 #include "msm_drv.h" 20 21 #include "mdp4_kms.h" ··· 30 29 31 30 static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) 32 31 { 32 + struct mdp4_kms *mdp4_kms = container_of(irq, struct mdp4_kms, error_handler); 33 + static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1); 34 + extern bool dumpstate; 35 + 33 36 DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus); 37 + 38 + if (dumpstate && __ratelimit(&rs)) { 39 + struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev); 40 + drm_state_dump(mdp4_kms->dev, &p); 41 + } 34 42 } 35 43 36 44 void mdp4_irq_preinstall(struct msm_kms *kms)
+11
drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
··· 17 17 18 18 #include <linux/irq.h> 19 19 20 + #include <drm/drm_print.h> 21 + 20 22 #include "msm_drv.h" 21 23 #include "mdp5_kms.h" 22 24 ··· 32 30 33 31 static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) 34 32 { 33 + struct mdp5_kms *mdp5_kms = container_of(irq, struct mdp5_kms, error_handler); 34 + static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1); 35 + extern bool dumpstate; 36 + 35 37 DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus); 38 + 39 + if (dumpstate && __ratelimit(&rs)) { 40 + struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev); 41 + drm_state_dump(mdp5_kms->dev, &p); 42 + } 36 43 } 37 44 38 45 void mdp5_irq_preinstall(struct msm_kms *kms)
+12
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
··· 114 114 return msm_readl(mdp5_kms->mmio + reg); 115 115 } 116 116 117 + static inline const char *stage2name(enum mdp_mixer_stage_id stage) 118 + { 119 + static const char *names[] = { 120 + #define NAME(n) [n] = #n 121 + NAME(STAGE_UNUSED), NAME(STAGE_BASE), 122 + NAME(STAGE0), NAME(STAGE1), NAME(STAGE2), 123 + NAME(STAGE3), NAME(STAGE4), NAME(STAGE6), 124 + #undef NAME 125 + }; 126 + return names[stage]; 127 + } 128 + 117 129 static inline const char *pipe2name(enum mdp5_pipe pipe) 118 130 { 119 131 static const char *names[] = {
+17 -1
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
··· 16 16 * this program. If not, see <http://www.gnu.org/licenses/>. 17 17 */ 18 18 19 + #include <drm/drm_print.h> 19 20 #include "mdp5_kms.h" 20 21 21 22 struct mdp5_plane { ··· 182 181 #undef SET_PROPERTY 183 182 } 184 183 184 + static void 185 + mdp5_plane_atomic_print_state(struct drm_printer *p, 186 + const struct drm_plane_state *state) 187 + { 188 + struct mdp5_plane_state *pstate = to_mdp5_plane_state(state); 189 + 190 + drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied); 191 + drm_printf(p, "\tzpos=%u\n", pstate->zpos); 192 + drm_printf(p, "\talpha=%u\n", pstate->alpha); 193 + drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); 194 + drm_printf(p, "\tmode_changed=%u\n", pstate->mode_changed); 195 + drm_printf(p, "\tpending=%u\n", pstate->pending); 196 + } 197 + 185 198 static void mdp5_plane_reset(struct drm_plane *plane) 186 199 { 187 200 struct mdp5_plane_state *mdp5_state; ··· 259 244 .reset = mdp5_plane_reset, 260 245 .atomic_duplicate_state = mdp5_plane_duplicate_state, 261 246 .atomic_destroy_state = mdp5_plane_destroy_state, 247 + .atomic_print_state = mdp5_plane_atomic_print_state, 262 248 }; 263 249 264 250 static int mdp5_plane_prepare_fb(struct drm_plane *plane, ··· 929 913 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; 930 914 ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, 931 915 mdp5_plane->formats, mdp5_plane->nformats, 932 - type, NULL); 916 + type, "%s", mdp5_plane->name); 933 917 if (ret) 934 918 goto fail; 935 919
+2 -1
drivers/gpu/drm/msm/msm_atomic.c
··· 217 217 if ((plane->state->fb != plane_state->fb) && plane_state->fb) { 218 218 struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0); 219 219 struct msm_gem_object *msm_obj = to_msm_bo(obj); 220 + struct dma_fence *fence = reservation_object_get_excl_rcu(msm_obj->resv); 220 221 221 - plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv); 222 + drm_atomic_set_fence_for_plane(plane_state, fence); 222 223 } 223 224 } 224 225
+4 -2
drivers/gpu/drm/msm/msm_drv.c
··· 79 79 MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)"); 80 80 module_param(vram, charp, 0); 81 81 82 + bool dumpstate = false; 83 + MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors"); 84 + module_param(dumpstate, bool, 0600); 85 + 82 86 /* 83 87 * Util/helpers: 84 88 */ ··· 772 768 .open = drm_open, 773 769 .release = drm_release, 774 770 .unlocked_ioctl = drm_ioctl, 775 - #ifdef CONFIG_COMPAT 776 771 .compat_ioctl = drm_compat_ioctl, 777 - #endif 778 772 .poll = drm_poll, 779 773 .read = drm_read, 780 774 .llseek = no_llseek,
-2
drivers/gpu/drm/rcar-du/rcar_du_drv.c
··· 201 201 .open = drm_open, 202 202 .release = drm_release, 203 203 .unlocked_ioctl = drm_ioctl, 204 - #ifdef CONFIG_COMPAT 205 204 .compat_ioctl = drm_compat_ioctl, 206 - #endif 207 205 .poll = drm_poll, 208 206 .read = drm_read, 209 207 .llseek = no_llseek,
-2
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
··· 275 275 .poll = drm_poll, 276 276 .read = drm_read, 277 277 .unlocked_ioctl = drm_ioctl, 278 - #ifdef CONFIG_COMPAT 279 278 .compat_ioctl = drm_compat_ioctl, 280 - #endif 281 279 .release = drm_release, 282 280 }; 283 281
-2
drivers/gpu/drm/savage/savage_drv.c
··· 42 42 .unlocked_ioctl = drm_ioctl, 43 43 .mmap = drm_legacy_mmap, 44 44 .poll = drm_poll, 45 - #ifdef CONFIG_COMPAT 46 45 .compat_ioctl = drm_compat_ioctl, 47 - #endif 48 46 .llseek = noop_llseek, 49 47 }; 50 48
-2
drivers/gpu/drm/shmobile/shmob_drm_drv.c
··· 245 245 .open = drm_open, 246 246 .release = drm_release, 247 247 .unlocked_ioctl = drm_ioctl, 248 - #ifdef CONFIG_COMPAT 249 248 .compat_ioctl = drm_compat_ioctl, 250 - #endif 251 249 .poll = drm_poll, 252 250 .read = drm_read, 253 251 .llseek = no_llseek,
-2
drivers/gpu/drm/sis/sis_drv.c
··· 72 72 .unlocked_ioctl = drm_ioctl, 73 73 .mmap = drm_legacy_mmap, 74 74 .poll = drm_poll, 75 - #ifdef CONFIG_COMPAT 76 75 .compat_ioctl = drm_compat_ioctl, 77 - #endif 78 76 .llseek = noop_llseek, 79 77 }; 80 78
-2
drivers/gpu/drm/sti/sti_drv.c
··· 297 297 .poll = drm_poll, 298 298 .read = drm_read, 299 299 .unlocked_ioctl = drm_ioctl, 300 - #ifdef CONFIG_COMPAT 301 300 .compat_ioctl = drm_compat_ioctl, 302 - #endif 303 301 .release = drm_release, 304 302 }; 305 303
-2
drivers/gpu/drm/sun4i/sun4i_drv.c
··· 53 53 .open = drm_open, 54 54 .release = drm_release, 55 55 .unlocked_ioctl = drm_ioctl, 56 - #ifdef CONFIG_COMPAT 57 56 .compat_ioctl = drm_compat_ioctl, 58 - #endif 59 57 .poll = drm_poll, 60 58 .read = drm_read, 61 59 .llseek = no_llseek,
-2
drivers/gpu/drm/tdfx/tdfx_drv.c
··· 49 49 .unlocked_ioctl = drm_ioctl, 50 50 .mmap = drm_legacy_mmap, 51 51 .poll = drm_poll, 52 - #ifdef CONFIG_COMPAT 53 52 .compat_ioctl = drm_compat_ioctl, 54 - #endif 55 53 .llseek = noop_llseek, 56 54 }; 57 55
-2
drivers/gpu/drm/tegra/drm.c
··· 802 802 .mmap = tegra_drm_mmap, 803 803 .poll = drm_poll, 804 804 .read = drm_read, 805 - #ifdef CONFIG_COMPAT 806 805 .compat_ioctl = drm_compat_ioctl, 807 - #endif 808 806 .llseek = noop_llseek, 809 807 }; 810 808
-2
drivers/gpu/drm/tilcdc/tilcdc_drv.c
··· 573 573 .open = drm_open, 574 574 .release = drm_release, 575 575 .unlocked_ioctl = drm_ioctl, 576 - #ifdef CONFIG_COMPAT 577 576 .compat_ioctl = drm_compat_ioctl, 578 - #endif 579 577 .poll = drm_poll, 580 578 .read = drm_read, 581 579 .llseek = no_llseek,
+8 -1
drivers/gpu/drm/ttm/ttm_bo.c
··· 1611 1611 int ttm_bo_wait(struct ttm_buffer_object *bo, 1612 1612 bool interruptible, bool no_wait) 1613 1613 { 1614 - long timeout = no_wait ? 0 : 15 * HZ; 1614 + long timeout = 15 * HZ; 1615 + 1616 + if (no_wait) { 1617 + if (reservation_object_test_signaled_rcu(bo->resv, true)) 1618 + return 0; 1619 + else 1620 + return -EBUSY; 1621 + } 1615 1622 1616 1623 timeout = reservation_object_wait_timeout_rcu(bo->resv, true, 1617 1624 interruptible, timeout);
-2
drivers/gpu/drm/udl/udl_drv.c
··· 44 44 .read = drm_read, 45 45 .unlocked_ioctl = drm_ioctl, 46 46 .release = drm_release, 47 - #ifdef CONFIG_COMPAT 48 47 .compat_ioctl = drm_compat_ioctl, 49 - #endif 50 48 .llseek = noop_llseek, 51 49 }; 52 50
-2
drivers/gpu/drm/vc4/vc4_drv.c
··· 103 103 .mmap = vc4_mmap, 104 104 .poll = drm_poll, 105 105 .read = drm_read, 106 - #ifdef CONFIG_COMPAT 107 106 .compat_ioctl = drm_compat_ioctl, 108 - #endif 109 107 .llseek = noop_llseek, 110 108 }; 111 109
-2
drivers/gpu/drm/via/via_drv.c
··· 64 64 .unlocked_ioctl = drm_ioctl, 65 65 .mmap = drm_legacy_mmap, 66 66 .poll = drm_poll, 67 - #ifdef CONFIG_COMPAT 68 67 .compat_ioctl = drm_compat_ioctl, 69 - #endif 70 68 .llseek = noop_llseek, 71 69 }; 72 70
-2
drivers/gpu/drm/virtio/virtgpu_drv.c
··· 108 108 .read = drm_read, 109 109 .unlocked_ioctl = drm_ioctl, 110 110 .release = drm_release, 111 - #ifdef CONFIG_COMPAT 112 111 .compat_ioctl = drm_compat_ioctl, 113 - #endif 114 112 .llseek = noop_llseek, 115 113 }; 116 114
+27
include/drm/drmP.h
··· 135 135 #define DRM_UT_PRIME 0x08 136 136 #define DRM_UT_ATOMIC 0x10 137 137 #define DRM_UT_VBL 0x20 138 + #define DRM_UT_STATE 0x40 138 139 139 140 extern __printf(6, 7) 140 141 void drm_dev_printk(const struct device *dev, const char *level, ··· 306 305 _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args) 307 306 #define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \ 308 307 DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args) 308 + 309 + /* Format strings and argument splitters to simplify printing 310 + * various "complex" objects 311 + */ 312 + #define DRM_MODE_FMT "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x" 313 + #define DRM_MODE_ARG(m) \ 314 + (m)->base.id, (m)->name, (m)->vrefresh, (m)->clock, \ 315 + (m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \ 316 + (m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \ 317 + (m)->type, (m)->flags 318 + 319 + #define DRM_RECT_FMT "%dx%d%+d%+d" 320 + #define DRM_RECT_ARG(r) drm_rect_width(r), drm_rect_height(r), (r)->x1, (r)->y1 321 + 322 + /* for rect's in fixed-point format: */ 323 + #define DRM_RECT_FP_FMT "%d.%06ux%d.%06u%+d.%06u%+d.%06u" 324 + #define DRM_RECT_FP_ARG(r) \ 325 + drm_rect_width(r) >> 16, ((drm_rect_width(r) & 0xffff) * 15625) >> 10, \ 326 + drm_rect_height(r) >> 16, ((drm_rect_height(r) & 0xffff) * 15625) >> 10, \ 327 + (r)->x1 >> 16, (((r)->x1 & 0xffff) * 15625) >> 10, \ 328 + (r)->y1 >> 16, (((r)->y1 & 0xffff) * 15625) >> 10 309 329 310 330 /*@}*/ 311 331 ··· 963 941 extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv); 964 942 extern long drm_ioctl(struct file *filp, 965 943 unsigned int cmd, unsigned long arg); 944 + #ifdef CONFIG_COMPAT 966 945 extern long drm_compat_ioctl(struct file *filp, 967 946 unsigned int cmd, unsigned long arg); 947 + #else 948 + /* Let drm_compat_ioctl be assigned to .compat_ioctl unconditionally */ 949 + #define drm_compat_ioctl NULL 950 + #endif 968 951 extern bool drm_ioctl_flags(unsigned int nr, unsigned int 
*flags); 969 952 970 953 /* File Operations (drm_fops.c) */
+9
include/drm/drm_atomic.h
··· 345 345 struct drm_crtc *crtc); 346 346 void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, 347 347 struct drm_framebuffer *fb); 348 + void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state, 349 + struct dma_fence *fence); 348 350 int __must_check 349 351 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, 350 352 struct drm_crtc *crtc); ··· 365 363 int __must_check drm_atomic_check_only(struct drm_atomic_state *state); 366 364 int __must_check drm_atomic_commit(struct drm_atomic_state *state); 367 365 int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); 366 + 367 + void drm_state_dump(struct drm_device *dev, struct drm_printer *p); 368 + 369 + #ifdef CONFIG_DEBUG_FS 370 + struct drm_minor; 371 + int drm_atomic_debugfs_init(struct drm_minor *minor); 372 + #endif 368 373 369 374 #define for_each_connector_in_state(__state, connector, connector_state, __i) \ 370 375 for ((__i) = 0; \
+13
include/drm/drm_connector.h
··· 37 37 struct drm_encoder; 38 38 struct drm_property; 39 39 struct drm_property_blob; 40 + struct drm_printer; 40 41 struct edid; 41 42 42 43 enum drm_connector_force { ··· 482 481 const struct drm_connector_state *state, 483 482 struct drm_property *property, 484 483 uint64_t *val); 484 + 485 + /** 486 + * @atomic_print_state: 487 + * 488 + * If driver subclasses struct &drm_connector_state, it should implement 489 + * this optional hook for printing additional driver specific state. 490 + * 491 + * Do not call this directly, use drm_atomic_connector_print_state() 492 + * instead. 493 + */ 494 + void (*atomic_print_state)(struct drm_printer *p, 495 + const struct drm_connector_state *state); 485 496 }; 486 497 487 498 /* mode specified on the command line */
+13
include/drm/drm_crtc.h
··· 53 53 struct drm_mode_set; 54 54 struct drm_file; 55 55 struct drm_clip_rect; 56 + struct drm_printer; 56 57 struct device_node; 57 58 struct dma_fence; 58 59 struct edid; ··· 595 594 */ 596 595 int (*set_crc_source)(struct drm_crtc *crtc, const char *source, 597 596 size_t *values_cnt); 597 + 598 + /** 599 + * @atomic_print_state: 600 + * 601 + * If driver subclasses struct &drm_crtc_state, it should implement 602 + * this optional hook for printing additional driver specific state. 603 + * 604 + * Do not call this directly, use drm_atomic_crtc_print_state() 605 + * instead. 606 + */ 607 + void (*atomic_print_state)(struct drm_printer *p, 608 + const struct drm_crtc_state *state); 598 609 }; 599 610 600 611 /**
+6
include/drm/drm_mm.h
··· 44 44 #ifdef CONFIG_DEBUG_FS 45 45 #include <linux/seq_file.h> 46 46 #endif 47 + #ifdef CONFIG_DRM_DEBUG_MM 48 + #include <linux/stackdepot.h> 49 + #endif 47 50 48 51 enum drm_mm_search_flags { 49 52 DRM_MM_SEARCH_DEFAULT = 0, ··· 77 74 u64 size; 78 75 u64 __subtree_last; 79 76 struct drm_mm *mm; 77 + #ifdef CONFIG_DRM_DEBUG_MM 78 + depot_stack_handle_t stack; 79 + #endif 80 80 }; 81 81 82 82 struct drm_mm {
+8 -8
include/drm/drm_modeset_helper_vtables.h
··· 361 361 * 362 362 * Note that the power state of the display pipe when this function is 363 363 * called depends upon the exact helpers and calling sequence the driver 364 - * has picked. See drm_atomic_commit_planes() for a discussion of the 365 - * tradeoffs and variants of plane commit helpers. 364 + * has picked. See drm_atomic_helper_commit_planes() for a discussion of 365 + * the tradeoffs and variants of plane commit helpers. 366 366 * 367 367 * This callback is used by the atomic modeset helpers and by the 368 368 * transitional plane helpers, but it is optional. ··· 385 385 * 386 386 * Note that the power state of the display pipe when this function is 387 387 * called depends upon the exact helpers and calling sequence the driver 388 - * has picked. See drm_atomic_commit_planes() for a discussion of the 389 - * tradeoffs and variants of plane commit helpers. 388 + * has picked. See drm_atomic_helper_commit_planes() for a discussion of 389 + * the tradeoffs and variants of plane commit helpers. 390 390 * 391 391 * This callback is used by the atomic modeset helpers and by the 392 392 * transitional plane helpers, but it is optional. ··· 940 940 * 941 941 * Note that the power state of the display pipe when this function is 942 942 * called depends upon the exact helpers and calling sequence the driver 943 - * has picked. See drm_atomic_commit_planes() for a discussion of the 944 - * tradeoffs and variants of plane commit helpers. 943 + * has picked. See drm_atomic_helper_commit_planes() for a discussion of 944 + * the tradeoffs and variants of plane commit helpers. 945 945 * 946 946 * This callback is used by the atomic modeset helpers and by the 947 947 * transitional plane helpers, but it is optional. ··· 963 963 * 964 964 * Note that the power state of the display pipe when this function is 965 965 * called depends upon the exact helpers and calling sequence the driver 966 - * has picked. 
See drm_atomic_commit_planes() for a discussion of the 967 - * tradeoffs and variants of plane commit helpers. 966 + * has picked. See drm_atomic_helper_commit_planes() for a discussion of 967 + * the tradeoffs and variants of plane commit helpers. 968 968 * 969 969 * This callback is used by the atomic modeset helpers and by the 970 970 * transitional plane helpers, but it is optional.
+80 -13
include/drm/drm_plane.h
··· 28 28 #include <drm/drm_mode_object.h> 29 29 30 30 struct drm_crtc; 31 + struct drm_printer; 31 32 32 33 /** 33 34 * struct drm_plane_state - mutable plane state 34 35 * @plane: backpointer to the plane 35 - * @crtc: currently bound CRTC, NULL if disabled 36 - * @fb: currently bound framebuffer 37 - * @fence: optional fence to wait for before scanning out @fb 38 - * @crtc_x: left position of visible portion of plane on crtc 39 - * @crtc_y: upper position of visible portion of plane on crtc 40 36 * @crtc_w: width of visible portion of plane on crtc 41 37 * @crtc_h: height of visible portion of plane on crtc 42 38 * @src_x: left position of visible portion of plane within ··· 53 57 * it can be trusted. 54 58 * @src: clipped source coordinates of the plane (in 16.16) 55 59 * @dst: clipped destination coordinates of the plane 56 - * @visible: visibility of the plane 57 60 * @state: backpointer to global drm_atomic_state 58 61 */ 59 62 struct drm_plane_state { 60 63 struct drm_plane *plane; 61 64 62 - struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */ 63 - struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */ 65 + /** 66 + * @crtc: 67 + * 68 + * Currently bound CRTC, NULL if disabled. Do not this write directly, 69 + * use drm_atomic_set_crtc_for_plane() 70 + */ 71 + struct drm_crtc *crtc; 72 + 73 + /** 74 + * @fb: 75 + * 76 + * Currently bound framebuffer. Do not write this directly, use 77 + * drm_atomic_set_fb_for_plane() 78 + */ 79 + struct drm_framebuffer *fb; 80 + 81 + /** 82 + * @fence: 83 + * 84 + * Optional fence to wait for before scanning out @fb. 
Do not write this 85 + * directly, use drm_atomic_set_fence_for_plane() 86 + */ 64 87 struct dma_fence *fence; 65 88 66 - /* Signed dest location allows it to be partially off screen */ 67 - int32_t crtc_x, crtc_y; 89 + /** 90 + * @crtc_x: 91 + * 92 + * Left position of visible portion of plane on crtc, signed dest 93 + * location allows it to be partially off screen. 94 + */ 95 + 96 + int32_t crtc_x; 97 + /** 98 + * @crtc_y: 99 + * 100 + * Upper position of visible portion of plane on crtc, signed dest 101 + * location allows it to be partially off screen. 102 + */ 103 + int32_t crtc_y; 104 + 68 105 uint32_t crtc_w, crtc_h; 69 106 70 107 /* Source values are 16.16 fixed point */ ··· 114 85 /* Clipped coordinates */ 115 86 struct drm_rect src, dst; 116 87 117 - /* 118 - * Is the plane actually visible? Can be false even 119 - * if fb!=NULL and crtc!=NULL, due to clipping. 88 + /** 89 + * @visible: 90 + * 91 + * Visibility of the plane. This can be false even if fb!=NULL and 92 + * crtc!=NULL, due to clipping. 120 93 */ 121 94 bool visible; 122 95 123 96 struct drm_atomic_state *state; 124 97 }; 98 + 99 + static inline struct drm_rect 100 + drm_plane_state_src(const struct drm_plane_state *state) 101 + { 102 + struct drm_rect src = { 103 + .x1 = state->src_x, 104 + .y1 = state->src_y, 105 + .x2 = state->src_x + state->src_w, 106 + .y2 = state->src_y + state->src_h, 107 + }; 108 + return src; 109 + } 110 + 111 + static inline struct drm_rect 112 + drm_plane_state_dest(const struct drm_plane_state *state) 113 + { 114 + struct drm_rect dest = { 115 + .x1 = state->crtc_x, 116 + .y1 = state->crtc_y, 117 + .x2 = state->crtc_x + state->crtc_w, 118 + .y2 = state->crtc_y + state->crtc_h, 119 + }; 120 + return dest; 121 + } 125 122 126 123 /** 127 124 * struct drm_plane_funcs - driver plane control functions ··· 377 322 * before data structures are torndown. 
378 323 */ 379 324 void (*early_unregister)(struct drm_plane *plane); 325 + 326 + /** 327 + * @atomic_print_state: 328 + * 329 + * If driver subclasses struct &drm_plane_state, it should implement 330 + * this optional hook for printing additional driver specific state. 331 + * 332 + * Do not call this directly, use drm_atomic_plane_print_state() 333 + * instead. 334 + */ 335 + void (*atomic_print_state)(struct drm_printer *p, 336 + const struct drm_plane_state *state); 380 337 }; 381 338 382 339 /**
+117
include/drm/drm_print.h
··· 1 + /* 2 + * Copyright (C) 2016 Red Hat 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + * Authors: 23 + * Rob Clark <robdclark@gmail.com> 24 + */ 25 + 26 + #ifndef DRM_PRINT_H_ 27 + #define DRM_PRINT_H_ 28 + 29 + #include <linux/seq_file.h> 30 + #include <linux/device.h> 31 + 32 + /** 33 + * DOC: print 34 + * 35 + * A simple wrapper for dev_printk(), seq_printf(), etc. Allows same 36 + * debug code to be used for both debugfs and printk logging. 37 + * 38 + * For example:: 39 + * 40 + * void log_some_info(struct drm_printer *p) 41 + * { 42 + * drm_printf(p, "foo=%d\n", foo); 43 + * drm_printf(p, "bar=%d\n", bar); 44 + * } 45 + * 46 + * #ifdef CONFIG_DEBUG_FS 47 + * void debugfs_show(struct seq_file *f) 48 + * { 49 + * struct drm_printer p = drm_seq_file_printer(f); 50 + * log_some_info(&p); 51 + * } 52 + * #endif 53 + * 54 + * void some_other_function(...) 
55 + * { 56 + * struct drm_printer p = drm_info_printer(drm->dev); 57 + * log_some_info(&p); 58 + * } 59 + */ 60 + 61 + /** 62 + * struct drm_printer - drm output "stream" 63 + * @printfn: actual output fxn 64 + * @arg: output fxn specific data 65 + * 66 + * Do not use struct members directly. Use drm_seq_file_printer(), 67 + * drm_info_printer(), etc to initialize. And drm_printf() for output. 68 + */ 69 + struct drm_printer { 70 + void (*printfn)(struct drm_printer *p, struct va_format *vaf); 71 + void *arg; 72 + }; 73 + 74 + void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf); 75 + void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf); 76 + 77 + /** 78 + * drm_printf - print to a &drm_printer stream 79 + * @p: the &drm_printer 80 + * @f: format string 81 + */ 82 + void drm_printf(struct drm_printer *p, const char *f, ...); 83 + 84 + 85 + /** 86 + * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file 87 + * @f: the struct &seq_file to output to 88 + * 89 + * RETURNS: 90 + * The &drm_printer object 91 + */ 92 + static inline struct drm_printer drm_seq_file_printer(struct seq_file *f) 93 + { 94 + struct drm_printer p = { 95 + .printfn = __drm_printfn_seq_file, 96 + .arg = f, 97 + }; 98 + return p; 99 + } 100 + 101 + /** 102 + * drm_info_printer - construct a &drm_printer that outputs to dev_printk() 103 + * @dev: the struct &device pointer 104 + * 105 + * RETURNS: 106 + * The &drm_printer object 107 + */ 108 + static inline struct drm_printer drm_info_printer(struct device *dev) 109 + { 110 + struct drm_printer p = { 111 + .printfn = __drm_printfn_info, 112 + .arg = dev, 113 + }; 114 + return p; 115 + } 116 + 117 + #endif /* DRM_PRINT_H_ */
+2 -1
include/linux/dma-fence.h
··· 382 382 bool intr, signed long timeout); 383 383 signed long dma_fence_wait_any_timeout(struct dma_fence **fences, 384 384 uint32_t count, 385 - bool intr, signed long timeout); 385 + bool intr, signed long timeout, 386 + uint32_t *idx); 386 387 387 388 /** 388 389 * dma_fence_wait - sleep until the fence gets signaled
+28
include/uapi/drm/amdgpu_drm.h
··· 50 50 #define DRM_AMDGPU_WAIT_CS 0x09 51 51 #define DRM_AMDGPU_GEM_OP 0x10 52 52 #define DRM_AMDGPU_GEM_USERPTR 0x11 53 + #define DRM_AMDGPU_WAIT_FENCES 0x12 53 54 54 55 #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) 55 56 #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) ··· 64 63 #define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs) 65 64 #define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op) 66 65 #define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr) 66 + #define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences) 67 67 68 68 #define AMDGPU_GEM_DOMAIN_CPU 0x1 69 69 #define AMDGPU_GEM_DOMAIN_GTT 0x2 ··· 307 305 union drm_amdgpu_wait_cs { 308 306 struct drm_amdgpu_wait_cs_in in; 309 307 struct drm_amdgpu_wait_cs_out out; 308 + }; 309 + 310 + struct drm_amdgpu_fence { 311 + __u32 ctx_id; 312 + __u32 ip_type; 313 + __u32 ip_instance; 314 + __u32 ring; 315 + __u64 seq_no; 316 + }; 317 + 318 + struct drm_amdgpu_wait_fences_in { 319 + /** This points to uint64_t * which points to fences */ 320 + __u64 fences; 321 + __u32 fence_count; 322 + __u32 wait_all; 323 + __u64 timeout_ns; 324 + }; 325 + 326 + struct drm_amdgpu_wait_fences_out { 327 + __u32 status; 328 + __u32 first_signaled; 329 + }; 330 + 331 + union drm_amdgpu_wait_fences { 332 + struct drm_amdgpu_wait_fences_in in; 333 + struct drm_amdgpu_wait_fences_out out; 310 334 }; 311 335 312 336 #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
+9 -1
include/uapi/drm/drm_mode.h
··· 47 47 #define DRM_MODE_TYPE_DRIVER (1<<6) 48 48 49 49 /* Video mode flags */ 50 - /* bit compatible with the xorg definitions. */ 50 + /* bit compatible with the xrandr RR_ definitions (bits 0-13) 51 + * 52 + * ABI warning: Existing userspace really expects 53 + * the mode flags to match the xrandr definitions. Any 54 + * changes that don't match the xrandr definitions will 55 + * likely need a new client cap or some other mechanism 56 + * to avoid breaking existing userspace. This includes 57 + * allocating new flags in the previously unused bits! 58 + */ 51 59 #define DRM_MODE_FLAG_PHSYNC (1<<0) 52 60 #define DRM_MODE_FLAG_NHSYNC (1<<1) 53 61 #define DRM_MODE_FLAG_PVSYNC (1<<2)