Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'omapdrm-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba/linux into drm-next

omapdrm changes for 4.11

The main change here is the IRQ code cleanup, which gives us properly working
vblank counts and timestamps. We also get far fewer calls to runtime PM gets &
puts.

* tag 'omapdrm-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba/linux: (26 commits)
drm/omap: panel-sony-acx565akm.c: Add MODULE_ALIAS
drm/omap: dsi: fix compile errors when enabling debug prints
drm: omapdrm: Perform initialization/cleanup at probe/remove time
drm: Move vblank cleanup from unregister to release
drm: omapdrm: Use sizeof(*var) instead of sizeof(type) for structures
drm: omapdrm: Remove global variables
drm: omapdrm: Simplify IRQ wait implementation
drm: omapdrm: Inline the pipe2vbl function
drm: omapdrm: Don't call DISPC power handling in IRQ wait functions
drm: omapdrm: Remove unused parameter from omap_drm_irq handler
drm: omapdrm: Don't expose the omap_irq_(un)register() functions
drm: omapdrm: Keep vblank interrupt enabled while CRTC is active
drm: omapdrm: Use a spinlock to protect the CRTC pending flag
drm: omapdrm: Prevent processing the same event multiple times
drm: omapdrm: Check the CRTC software state at enable/disable time
drm: omapdrm: Let the DRM core skip plane commit on inactive CRTCs
drm: omapdrm: Replace DSS manager state check with omapdrm CRTC state
drm: omapdrm: Handle OCP error IRQ directly
drm: omapdrm: Handle CRTC error IRQs directly
drm: omapdrm: Handle FIFO underflow IRQs internally
...

+446 -484
+2 -2
drivers/gpu/drm/drm_drv.c
··· 598 598 { 599 599 struct drm_device *dev = container_of(ref, struct drm_device, ref); 600 600 601 + drm_vblank_cleanup(dev); 602 + 601 603 if (drm_core_check_feature(dev, DRIVER_GEM)) 602 604 drm_gem_destroy(dev); 603 605 ··· 806 804 807 805 if (dev->agp) 808 806 drm_pci_agp_destroy(dev); 809 - 810 - drm_vblank_cleanup(dev); 811 807 812 808 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) 813 809 drm_legacy_rmmap(dev, r_list->map);
+1 -1
drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
··· 1253 1253 dsicm_hw_reset(ddata); 1254 1254 1255 1255 if (ddata->use_dsi_backlight) { 1256 - memset(&props, 0, sizeof(struct backlight_properties)); 1256 + memset(&props, 0, sizeof(props)); 1257 1257 props.max_brightness = 255; 1258 1258 1259 1259 props.type = BACKLIGHT_RAW;
+1
drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
··· 909 909 910 910 module_spi_driver(acx565akm_driver); 911 911 912 + MODULE_ALIAS("spi:sony,acx565akm"); 912 913 MODULE_AUTHOR("Nokia Corporation"); 913 914 MODULE_DESCRIPTION("acx565akm LCD Driver"); 914 915 MODULE_LICENSE("GPL");
+13 -14
drivers/gpu/drm/omapdrm/dss/dispc.c
··· 620 620 return DISPC_IRQ_FRAMEDONEWB; 621 621 } 622 622 623 + void dispc_mgr_enable(enum omap_channel channel, bool enable) 624 + { 625 + mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable); 626 + /* flush posted write */ 627 + mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); 628 + } 629 + EXPORT_SYMBOL(dispc_mgr_enable); 630 + 631 + static bool dispc_mgr_is_enabled(enum omap_channel channel) 632 + { 633 + return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); 634 + } 635 + 623 636 bool dispc_mgr_go_busy(enum omap_channel channel) 624 637 { 625 638 return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1; ··· 2913 2900 return dss_feat_get_supported_outputs(channel); 2914 2901 } 2915 2902 EXPORT_SYMBOL(dispc_mgr_get_supported_outputs); 2916 - 2917 - void dispc_mgr_enable(enum omap_channel channel, bool enable) 2918 - { 2919 - mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable); 2920 - /* flush posted write */ 2921 - mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); 2922 - } 2923 - EXPORT_SYMBOL(dispc_mgr_enable); 2924 - 2925 - bool dispc_mgr_is_enabled(enum omap_channel channel) 2926 - { 2927 - return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); 2928 - } 2929 - EXPORT_SYMBOL(dispc_mgr_is_enabled); 2930 2903 2931 2904 void dispc_wb_enable(bool enable) 2932 2905 {
+9 -9
drivers/gpu/drm/omapdrm/dss/dsi.c
··· 4336 4336 4337 4337 wc = DIV_ROUND_UP(t->hact * t->bitspp, 8); 4338 4338 pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */ 4339 - bl = t->hss + t->hsa + t->hse + t->hbp + t->hfront_porch; 4339 + bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp; 4340 4340 tot = bl + pps; 4341 4341 4342 4342 #define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk)) ··· 4345 4345 "%u/%u/%u/%u/%u/%u = %u + %u = %u\n", 4346 4346 str, 4347 4347 byteclk, 4348 - t->hss, t->hsa, t->hse, t->hbp, pps, t->hfront_porch, 4348 + t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp, 4349 4349 bl, pps, tot, 4350 4350 TO_DSI_T(t->hss), 4351 4351 TO_DSI_T(t->hsa), 4352 4352 TO_DSI_T(t->hse), 4353 4353 TO_DSI_T(t->hbp), 4354 4354 TO_DSI_T(pps), 4355 - TO_DSI_T(t->hfront_porch), 4355 + TO_DSI_T(t->hfp), 4356 4356 4357 4357 TO_DSI_T(bl), 4358 4358 TO_DSI_T(pps), ··· 4367 4367 int hact, bl, tot; 4368 4368 4369 4369 hact = vm->hactive; 4370 - bl = vm->hsync_len + vm->hbp + vm->hfront_porch; 4370 + bl = vm->hsync_len + vm->hback_porch + vm->hfront_porch; 4371 4371 tot = hact + bl; 4372 4372 4373 4373 #define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck)) ··· 4376 4376 "%u/%u/%u/%u = %u + %u = %u\n", 4377 4377 str, 4378 4378 pck, 4379 - vm->hsync_len, vm->hbp, hact, vm->hfront_porch, 4379 + vm->hsync_len, vm->hback_porch, hact, vm->hfront_porch, 4380 4380 bl, hact, tot, 4381 4381 TO_DISPC_T(vm->hsync_len), 4382 - TO_DISPC_T(vm->hbp), 4382 + TO_DISPC_T(vm->hback_porch), 4383 4383 TO_DISPC_T(hact), 4384 4384 TO_DISPC_T(vm->hfront_porch), 4385 4385 TO_DISPC_T(bl), ··· 4401 4401 dsi_tput = (u64)byteclk * t->ndl * 8; 4402 4402 pck = (u32)div64_u64(dsi_tput, t->bitspp); 4403 4403 dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl); 4404 - dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfront_porch; 4404 + dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp; 4405 4405 4406 4406 vm.pixelclock = pck; 4407 4407 vm.hsync_len = 
div64_u64((u64)(t->hsa + t->hse) * pck, byteclk); 4408 - vm.hbp = div64_u64((u64)t->hbp * pck, byteclk); 4409 - vm.hfront_porch = div64_u64((u64)t->hfront_porch * pck, byteclk); 4408 + vm.hback_porch = div64_u64((u64)t->hbp * pck, byteclk); 4409 + vm.hfront_porch = div64_u64((u64)t->hfp * pck, byteclk); 4410 4410 vm.hactive = t->hact; 4411 4411 4412 4412 print_dispc_vm(str, &vm);
+1 -2
drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
··· 119 119 120 120 static void __init omapdss_add_to_list(struct device_node *node, bool root) 121 121 { 122 - struct dss_conv_node *n = kmalloc(sizeof(struct dss_conv_node), 123 - GFP_KERNEL); 122 + struct dss_conv_node *n = kmalloc(sizeof(*n), GFP_KERNEL); 124 123 if (n) { 125 124 n->node = node; 126 125 n->root = root;
-1
drivers/gpu/drm/omapdrm/dss/omapdss.h
··· 856 856 void dispc_runtime_put(void); 857 857 858 858 void dispc_mgr_enable(enum omap_channel channel, bool enable); 859 - bool dispc_mgr_is_enabled(enum omap_channel channel); 860 859 u32 dispc_mgr_get_vsync_irq(enum omap_channel channel); 861 860 u32 dispc_mgr_get_framedone_irq(enum omap_channel channel); 862 861 u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel);
+2 -4
drivers/gpu/drm/omapdrm/omap_connector.c
··· 162 162 163 163 dssdrv->get_timings(dssdev, &t); 164 164 165 - if (memcmp(&vm, &t, sizeof(struct videomode))) 165 + if (memcmp(&vm, &t, sizeof(vm))) 166 166 r = -EINVAL; 167 167 else 168 168 r = 0; ··· 217 217 218 218 omap_dss_get_device(dssdev); 219 219 220 - omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL); 220 + omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL); 221 221 if (!omap_connector) 222 222 goto fail; 223 223 ··· 239 239 240 240 connector->interlace_allowed = 1; 241 241 connector->doublescan_allowed = 0; 242 - 243 - drm_connector_register(connector); 244 242 245 243 return connector; 246 244
+81 -81
drivers/gpu/drm/omapdrm/omap_crtc.c
··· 36 36 37 37 struct videomode vm; 38 38 39 - struct omap_drm_irq vblank_irq; 40 - struct omap_drm_irq error_irq; 41 - 42 39 bool ignore_digit_sync_lost; 43 40 41 + bool enabled; 44 42 bool pending; 45 43 wait_queue_head_t pending_wait; 44 + struct drm_pending_vblank_event *event; 46 45 }; 47 46 48 47 /* ----------------------------------------------------------------------------- 49 48 * Helper Functions 50 49 */ 51 - 52 - uint32_t pipe2vbl(struct drm_crtc *crtc) 53 - { 54 - struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 55 - 56 - return dispc_mgr_get_vsync_irq(omap_crtc->channel); 57 - } 58 50 59 51 struct videomode *omap_crtc_timings(struct drm_crtc *crtc) 60 52 { ··· 60 68 return omap_crtc->channel; 61 69 } 62 70 71 + static bool omap_crtc_is_pending(struct drm_crtc *crtc) 72 + { 73 + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 74 + unsigned long flags; 75 + bool pending; 76 + 77 + spin_lock_irqsave(&crtc->dev->event_lock, flags); 78 + pending = omap_crtc->pending; 79 + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 80 + 81 + return pending; 82 + } 83 + 63 84 int omap_crtc_wait_pending(struct drm_crtc *crtc) 64 85 { 65 86 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); ··· 82 77 * a single frame refresh even on slower displays. 
83 78 */ 84 79 return wait_event_timeout(omap_crtc->pending_wait, 85 - !omap_crtc->pending, 80 + !omap_crtc_is_pending(crtc), 86 81 msecs_to_jiffies(250)); 87 82 } 88 83 ··· 140 135 u32 framedone_irq, vsync_irq; 141 136 int ret; 142 137 138 + if (WARN_ON(omap_crtc->enabled == enable)) 139 + return; 140 + 143 141 if (omap_crtc_output[channel]->output_type == OMAP_DISPLAY_TYPE_HDMI) { 144 142 dispc_mgr_enable(channel, enable); 143 + omap_crtc->enabled = enable; 145 144 return; 146 145 } 147 - 148 - if (dispc_mgr_is_enabled(channel) == enable) 149 - return; 150 146 151 147 if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) { 152 148 /* ··· 179 173 } 180 174 181 175 dispc_mgr_enable(channel, enable); 176 + omap_crtc->enabled = enable; 182 177 183 178 ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100)); 184 179 if (ret) { ··· 266 259 * Setup, Flush and Page Flip 267 260 */ 268 261 269 - static void omap_crtc_complete_page_flip(struct drm_crtc *crtc) 262 + void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus) 270 263 { 271 - struct drm_pending_vblank_event *event; 272 - struct drm_device *dev = crtc->dev; 273 - unsigned long flags; 274 - 275 - event = crtc->state->event; 276 - 277 - if (!event) 278 - return; 279 - 280 - spin_lock_irqsave(&dev->event_lock, flags); 281 - drm_crtc_send_vblank_event(crtc, event); 282 - spin_unlock_irqrestore(&dev->event_lock, flags); 283 - } 284 - 285 - static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus) 286 - { 287 - struct omap_crtc *omap_crtc = 288 - container_of(irq, struct omap_crtc, error_irq); 264 + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 289 265 290 266 if (omap_crtc->ignore_digit_sync_lost) { 291 267 irqstatus &= ~DISPC_IRQ_SYNC_LOST_DIGIT; ··· 279 289 DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_crtc->name, irqstatus); 280 290 } 281 291 282 - static void omap_crtc_vblank_irq(struct omap_drm_irq *irq, uint32_t irqstatus) 292 + void omap_crtc_vblank_irq(struct drm_crtc *crtc) 283 
293 { 284 - struct omap_crtc *omap_crtc = 285 - container_of(irq, struct omap_crtc, vblank_irq); 286 - struct drm_device *dev = omap_crtc->base.dev; 294 + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 295 + bool pending; 287 296 288 - if (dispc_mgr_go_busy(omap_crtc->channel)) 297 + spin_lock(&crtc->dev->event_lock); 298 + /* 299 + * If the dispc is busy we're racing the flush operation. Try again on 300 + * the next vblank interrupt. 301 + */ 302 + if (dispc_mgr_go_busy(omap_crtc->channel)) { 303 + spin_unlock(&crtc->dev->event_lock); 289 304 return; 305 + } 306 + 307 + /* Send the vblank event if one has been requested. */ 308 + if (omap_crtc->event) { 309 + drm_crtc_send_vblank_event(crtc, omap_crtc->event); 310 + omap_crtc->event = NULL; 311 + } 312 + 313 + pending = omap_crtc->pending; 314 + omap_crtc->pending = false; 315 + spin_unlock(&crtc->dev->event_lock); 316 + 317 + if (pending) 318 + drm_crtc_vblank_put(crtc); 319 + 320 + /* Wake up omap_atomic_complete. */ 321 + wake_up(&omap_crtc->pending_wait); 290 322 291 323 DBG("%s: apply done", omap_crtc->name); 292 - 293 - __omap_irq_unregister(dev, &omap_crtc->vblank_irq); 294 - 295 - rmb(); 296 - WARN_ON(!omap_crtc->pending); 297 - omap_crtc->pending = false; 298 - wmb(); 299 - 300 - /* wake up userspace */ 301 - omap_crtc_complete_page_flip(&omap_crtc->base); 302 - 303 - /* wake up omap_atomic_complete */ 304 - wake_up(&omap_crtc->pending_wait); 305 324 } 306 325 307 326 /* ----------------------------------------------------------------------------- ··· 323 324 324 325 DBG("%s", omap_crtc->name); 325 326 326 - WARN_ON(omap_crtc->vblank_irq.registered); 327 - omap_irq_unregister(crtc->dev, &omap_crtc->error_irq); 328 - 329 327 drm_crtc_cleanup(crtc); 330 328 331 329 kfree(omap_crtc); ··· 331 335 static void omap_crtc_enable(struct drm_crtc *crtc) 332 336 { 333 337 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 338 + int ret; 334 339 335 340 DBG("%s", omap_crtc->name); 336 341 337 - rmb(); 342 + 
spin_lock_irq(&crtc->dev->event_lock); 343 + drm_crtc_vblank_on(crtc); 344 + ret = drm_crtc_vblank_get(crtc); 345 + WARN_ON(ret != 0); 346 + 338 347 WARN_ON(omap_crtc->pending); 339 348 omap_crtc->pending = true; 340 - wmb(); 341 - 342 - omap_irq_register(crtc->dev, &omap_crtc->vblank_irq); 343 - 344 - drm_crtc_vblank_on(crtc); 349 + spin_unlock_irq(&crtc->dev->event_lock); 345 350 } 346 351 347 352 static void omap_crtc_disable(struct drm_crtc *crtc) ··· 387 390 } 388 391 389 392 static void omap_crtc_atomic_begin(struct drm_crtc *crtc, 390 - struct drm_crtc_state *old_crtc_state) 393 + struct drm_crtc_state *old_crtc_state) 391 394 { 392 395 } 393 396 394 397 static void omap_crtc_atomic_flush(struct drm_crtc *crtc, 395 - struct drm_crtc_state *old_crtc_state) 398 + struct drm_crtc_state *old_crtc_state) 396 399 { 397 400 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 398 - 399 - WARN_ON(omap_crtc->vblank_irq.registered); 401 + int ret; 400 402 401 403 if (crtc->state->color_mgmt_changed) { 402 404 struct drm_color_lut *lut = NULL; ··· 410 414 dispc_mgr_set_gamma(omap_crtc->channel, lut, length); 411 415 } 412 416 413 - if (dispc_mgr_is_enabled(omap_crtc->channel)) { 417 + /* 418 + * Only flush the CRTC if it is currently enabled. CRTCs that require a 419 + * mode set are disabled prior plane updates and enabled afterwards. 420 + * They are thus not active (regardless of what their CRTC core state 421 + * reports) and the DRM core could thus call this function even though 422 + * the CRTC is currently disabled. Do nothing in that case. 
423 + */ 424 + if (!omap_crtc->enabled) 425 + return; 414 426 415 - DBG("%s: GO", omap_crtc->name); 427 + DBG("%s: GO", omap_crtc->name); 416 428 417 - rmb(); 418 - WARN_ON(omap_crtc->pending); 419 - omap_crtc->pending = true; 420 - wmb(); 429 + ret = drm_crtc_vblank_get(crtc); 430 + WARN_ON(ret != 0); 421 431 422 - dispc_mgr_go(omap_crtc->channel); 423 - omap_irq_register(crtc->dev, &omap_crtc->vblank_irq); 424 - } 432 + spin_lock_irq(&crtc->dev->event_lock); 433 + dispc_mgr_go(omap_crtc->channel); 434 + 435 + WARN_ON(omap_crtc->pending); 436 + omap_crtc->pending = true; 437 + 438 + if (crtc->state->event) 439 + omap_crtc->event = crtc->state->event; 440 + spin_unlock_irq(&crtc->dev->event_lock); 425 441 } 426 442 427 443 static bool omap_crtc_is_plane_prop(struct drm_crtc *crtc, ··· 553 545 554 546 omap_crtc->channel = channel; 555 547 omap_crtc->name = channel_names[channel]; 556 - 557 - omap_crtc->vblank_irq.irqmask = pipe2vbl(crtc); 558 - omap_crtc->vblank_irq.irq = omap_crtc_vblank_irq; 559 - 560 - omap_crtc->error_irq.irqmask = 561 - dispc_mgr_get_sync_lost_irq(channel); 562 - omap_crtc->error_irq.irq = omap_crtc_error_irq; 563 - omap_irq_register(dev, &omap_crtc->error_irq); 564 548 565 549 ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL, 566 550 &omap_crtc_funcs, NULL);
+2 -2
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
··· 224 224 int rows = (1 + area->y1 - area->y0); 225 225 int i = columns*rows; 226 226 227 - pat = alloc_dma(txn, sizeof(struct pat), &pat_pa); 227 + pat = alloc_dma(txn, sizeof(*pat), &pat_pa); 228 228 229 229 if (txn->last_pat) 230 230 txn->last_pat->next_pa = (uint32_t)pat_pa; ··· 735 735 736 736 /* alloc engines */ 737 737 omap_dmm->engines = kcalloc(omap_dmm->num_engines, 738 - sizeof(struct refill_engine), GFP_KERNEL); 738 + sizeof(*omap_dmm->engines), GFP_KERNEL); 739 739 if (!omap_dmm->engines) { 740 740 ret = -ENOMEM; 741 741 goto fail;
+109 -107
drivers/gpu/drm/omapdrm/omap_drv.c
··· 96 96 dispc_runtime_get(); 97 97 98 98 drm_atomic_helper_commit_modeset_disables(dev, old_state); 99 - drm_atomic_helper_commit_planes(dev, old_state, 0); 99 + drm_atomic_helper_commit_planes(dev, old_state, 100 + DRM_PLANE_COMMIT_ACTIVE_ONLY); 100 101 drm_atomic_helper_commit_modeset_enables(dev, old_state); 101 102 102 103 omap_atomic_wait_for_completion(dev, old_state); ··· 316 315 317 316 drm_mode_config_init(dev); 318 317 319 - omap_drm_irq_install(dev); 320 - 321 318 ret = omap_modeset_init_properties(dev); 322 319 if (ret < 0) 323 320 return ret; ··· 488 489 489 490 drm_mode_config_reset(dev); 490 491 491 - return 0; 492 - } 492 + omap_drm_irq_install(dev); 493 493 494 - static void omap_modeset_free(struct drm_device *dev) 495 - { 496 - drm_mode_config_cleanup(dev); 494 + return 0; 497 495 } 498 496 499 497 /* ··· 628 632 * drm driver funcs 629 633 */ 630 634 631 - /** 632 - * load - setup chip and create an initial config 633 - * @dev: DRM device 634 - * @flags: startup flags 635 - * 636 - * The driver load routine has to do several things: 637 - * - initialize the memory manager 638 - * - allocate initial config memory 639 - * - setup the DRM framebuffer with the allocated memory 640 - */ 641 - static int dev_load(struct drm_device *dev, unsigned long flags) 642 - { 643 - struct omap_drm_platform_data *pdata = dev->dev->platform_data; 644 - struct omap_drm_private *priv; 645 - unsigned int i; 646 - int ret; 647 - 648 - DBG("load: dev=%p", dev); 649 - 650 - priv = kzalloc(sizeof(*priv), GFP_KERNEL); 651 - if (!priv) 652 - return -ENOMEM; 653 - 654 - priv->omaprev = pdata->omaprev; 655 - 656 - dev->dev_private = priv; 657 - 658 - priv->wq = alloc_ordered_workqueue("omapdrm", 0); 659 - init_waitqueue_head(&priv->commit.wait); 660 - spin_lock_init(&priv->commit.lock); 661 - 662 - spin_lock_init(&priv->list_lock); 663 - INIT_LIST_HEAD(&priv->obj_list); 664 - 665 - omap_gem_init(dev); 666 - 667 - ret = omap_modeset_init(dev); 668 - if (ret) { 669 - 
dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret); 670 - dev->dev_private = NULL; 671 - kfree(priv); 672 - return ret; 673 - } 674 - 675 - /* Initialize vblank handling, start with all CRTCs disabled. */ 676 - ret = drm_vblank_init(dev, priv->num_crtcs); 677 - if (ret) 678 - dev_warn(dev->dev, "could not init vblank\n"); 679 - 680 - for (i = 0; i < priv->num_crtcs; i++) 681 - drm_crtc_vblank_off(priv->crtcs[i]); 682 - 683 - priv->fbdev = omap_fbdev_init(dev); 684 - 685 - /* store off drm_device for use in pm ops */ 686 - dev_set_drvdata(dev->dev, dev); 687 - 688 - drm_kms_helper_poll_init(dev); 689 - 690 - return 0; 691 - } 692 - 693 - static void dev_unload(struct drm_device *dev) 694 - { 695 - struct omap_drm_private *priv = dev->dev_private; 696 - 697 - DBG("unload: dev=%p", dev); 698 - 699 - drm_kms_helper_poll_fini(dev); 700 - 701 - if (priv->fbdev) 702 - omap_fbdev_free(dev); 703 - 704 - omap_modeset_free(dev); 705 - omap_gem_deinit(dev); 706 - 707 - destroy_workqueue(priv->wq); 708 - 709 - drm_vblank_cleanup(dev); 710 - omap_drm_irq_uninstall(dev); 711 - 712 - kfree(dev->dev_private); 713 - dev->dev_private = NULL; 714 - 715 - dev_set_drvdata(dev->dev, NULL); 716 - } 717 - 718 635 static int dev_open(struct drm_device *dev, struct drm_file *file) 719 636 { 720 637 file->driver_priv = NULL; ··· 712 803 static struct drm_driver omap_drm_driver = { 713 804 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | 714 805 DRIVER_ATOMIC, 715 - .load = dev_load, 716 - .unload = dev_unload, 717 806 .open = dev_open, 718 807 .lastclose = dev_lastclose, 719 808 .get_vblank_counter = drm_vblank_no_hw_counter, ··· 741 834 .patchlevel = DRIVER_PATCHLEVEL, 742 835 }; 743 836 744 - static int pdev_probe(struct platform_device *device) 837 + static int pdev_probe(struct platform_device *pdev) 745 838 { 746 - int r; 839 + struct omap_drm_platform_data *pdata = pdev->dev.platform_data; 840 + struct omap_drm_private *priv; 841 + struct drm_device *ddev; 842 + 
unsigned int i; 843 + int ret; 844 + 845 + DBG("%s", pdev->name); 747 846 748 847 if (omapdss_is_initialized() == false) 749 848 return -EPROBE_DEFER; 750 849 751 850 omap_crtc_pre_init(); 752 851 753 - r = omap_connect_dssdevs(); 754 - if (r) { 755 - omap_crtc_pre_uninit(); 756 - return r; 852 + ret = omap_connect_dssdevs(); 853 + if (ret) 854 + goto err_crtc_uninit; 855 + 856 + /* Allocate and initialize the driver private structure. */ 857 + priv = kzalloc(sizeof(*priv), GFP_KERNEL); 858 + if (!priv) { 859 + ret = -ENOMEM; 860 + goto err_disconnect_dssdevs; 757 861 } 758 862 759 - DBG("%s", device->name); 760 - return drm_platform_init(&omap_drm_driver, device); 863 + priv->omaprev = pdata->omaprev; 864 + priv->wq = alloc_ordered_workqueue("omapdrm", 0); 865 + 866 + init_waitqueue_head(&priv->commit.wait); 867 + spin_lock_init(&priv->commit.lock); 868 + spin_lock_init(&priv->list_lock); 869 + INIT_LIST_HEAD(&priv->obj_list); 870 + 871 + /* Allocate and initialize the DRM device. */ 872 + ddev = drm_dev_alloc(&omap_drm_driver, &pdev->dev); 873 + if (IS_ERR(ddev)) { 874 + ret = PTR_ERR(ddev); 875 + goto err_free_priv; 876 + } 877 + 878 + ddev->dev_private = priv; 879 + platform_set_drvdata(pdev, ddev); 880 + 881 + omap_gem_init(ddev); 882 + 883 + ret = omap_modeset_init(ddev); 884 + if (ret) { 885 + dev_err(&pdev->dev, "omap_modeset_init failed: ret=%d\n", ret); 886 + goto err_free_drm_dev; 887 + } 888 + 889 + /* Initialize vblank handling, start with all CRTCs disabled. */ 890 + ret = drm_vblank_init(ddev, priv->num_crtcs); 891 + if (ret) { 892 + dev_err(&pdev->dev, "could not init vblank\n"); 893 + goto err_cleanup_modeset; 894 + } 895 + 896 + for (i = 0; i < priv->num_crtcs; i++) 897 + drm_crtc_vblank_off(priv->crtcs[i]); 898 + 899 + priv->fbdev = omap_fbdev_init(ddev); 900 + 901 + drm_kms_helper_poll_init(ddev); 902 + 903 + /* 904 + * Register the DRM device with the core and the connectors with 905 + * sysfs. 
906 + */ 907 + ret = drm_dev_register(ddev, 0); 908 + if (ret) 909 + goto err_cleanup_helpers; 910 + 911 + return 0; 912 + 913 + err_cleanup_helpers: 914 + drm_kms_helper_poll_fini(ddev); 915 + if (priv->fbdev) 916 + omap_fbdev_free(ddev); 917 + err_cleanup_modeset: 918 + drm_mode_config_cleanup(ddev); 919 + omap_drm_irq_uninstall(ddev); 920 + err_free_drm_dev: 921 + omap_gem_deinit(ddev); 922 + drm_dev_unref(ddev); 923 + err_free_priv: 924 + destroy_workqueue(priv->wq); 925 + kfree(priv); 926 + err_disconnect_dssdevs: 927 + omap_disconnect_dssdevs(); 928 + err_crtc_uninit: 929 + omap_crtc_pre_uninit(); 930 + return ret; 761 931 } 762 932 763 - static int pdev_remove(struct platform_device *device) 933 + static int pdev_remove(struct platform_device *pdev) 764 934 { 935 + struct drm_device *ddev = platform_get_drvdata(pdev); 936 + struct omap_drm_private *priv = ddev->dev_private; 937 + 765 938 DBG(""); 766 939 767 - drm_put_dev(platform_get_drvdata(device)); 940 + drm_dev_unregister(ddev); 941 + 942 + drm_kms_helper_poll_fini(ddev); 943 + 944 + if (priv->fbdev) 945 + omap_fbdev_free(ddev); 946 + 947 + drm_mode_config_cleanup(ddev); 948 + 949 + omap_drm_irq_uninstall(ddev); 950 + omap_gem_deinit(ddev); 951 + 952 + drm_dev_unref(ddev); 953 + 954 + destroy_workqueue(priv->wq); 955 + kfree(priv); 768 956 769 957 omap_disconnect_dssdevs(); 770 958 omap_crtc_pre_uninit();
+5 -46
drivers/gpu/drm/omapdrm/omap_drv.h
··· 48 48 uint32_t src_w, src_h; 49 49 }; 50 50 51 - /* For transiently registering for different DSS irqs that various parts 52 - * of the KMS code need during setup/configuration. We these are not 53 - * necessarily the same as what drm_vblank_get/put() are requesting, and 54 - * the hysteresis in drm_vblank_put() is not necessarily desirable for 55 - * internal housekeeping related irq usage. 56 - */ 57 - struct omap_drm_irq { 58 - struct list_head node; 59 - uint32_t irqmask; 60 - bool registered; 61 - void (*irq)(struct omap_drm_irq *irq, uint32_t irqstatus); 62 - }; 63 - 64 51 /* For KMS code that needs to wait for a certain # of IRQs: 65 52 */ 66 53 struct omap_irq_wait; ··· 88 101 struct drm_property *zorder_prop; 89 102 90 103 /* irq handling: */ 91 - struct list_head irq_list; /* list of omap_drm_irq */ 92 - uint32_t vblank_mask; /* irq bits set for userspace vblank */ 93 - struct omap_drm_irq error_handler; 104 + spinlock_t wait_lock; /* protects the wait_list */ 105 + struct list_head wait_list; /* list of omap_irq_wait */ 106 + uint32_t irq_mask; /* enabled irqs in addition to wait_list */ 94 107 95 108 /* atomic commit */ 96 109 struct { ··· 115 128 116 129 int omap_irq_enable_vblank(struct drm_device *dev, unsigned int pipe); 117 130 void omap_irq_disable_vblank(struct drm_device *dev, unsigned int pipe); 118 - void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq); 119 - void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq); 120 - void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq); 121 - void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq); 122 131 void omap_drm_irq_uninstall(struct drm_device *dev); 123 132 int omap_drm_irq_install(struct drm_device *dev); 124 133 ··· 138 155 struct drm_crtc *omap_crtc_init(struct drm_device *dev, 139 156 struct drm_plane *plane, enum omap_channel channel, int id); 140 157 int omap_crtc_wait_pending(struct drm_crtc *crtc); 158 + 
void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus); 159 + void omap_crtc_vblank_irq(struct drm_crtc *crtc); 141 160 142 161 struct drm_plane *omap_plane_init(struct drm_device *dev, 143 162 int id, enum drm_plane_type type, ··· 218 233 struct dma_buf *buffer); 219 234 220 235 /* map crtc to vblank mask */ 221 - uint32_t pipe2vbl(struct drm_crtc *crtc); 222 236 struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder); 223 - 224 - /* should these be made into common util helpers? 225 - */ 226 - 227 - static inline int objects_lookup( 228 - struct drm_file *filp, uint32_t pixel_format, 229 - struct drm_gem_object **bos, const uint32_t *handles) 230 - { 231 - int i, n = drm_format_num_planes(pixel_format); 232 - 233 - for (i = 0; i < n; i++) { 234 - bos[i] = drm_gem_object_lookup(filp, handles[i]); 235 - if (!bos[i]) 236 - goto fail; 237 - 238 - } 239 - 240 - return 0; 241 - 242 - fail: 243 - while (--i > 0) 244 - drm_gem_object_unreference_unlocked(bos[i]); 245 - 246 - return -ENOENT; 247 - } 248 237 249 238 #endif /* __OMAP_DRV_H__ */
+1 -1
drivers/gpu/drm/omapdrm/omap_encoder.c
··· 117 117 118 118 dssdrv->get_timings(dssdev, &t); 119 119 120 - if (memcmp(vm, &t, sizeof(struct videomode))) 120 + if (memcmp(vm, &t, sizeof(*vm))) 121 121 ret = -EINVAL; 122 122 else 123 123 ret = 0;
+85 -81
drivers/gpu/drm/omapdrm/omap_fb.c
··· 29 29 * framebuffer funcs 30 30 */ 31 31 32 - /* per-format info: */ 33 - struct format { 32 + /* DSS to DRM formats mapping */ 33 + static const struct { 34 34 enum omap_color_mode dss_format; 35 35 uint32_t pixel_format; 36 - struct { 37 - int stride_bpp; /* this times width is stride */ 38 - int sub_y; /* sub-sample in y dimension */ 39 - } planes[4]; 40 - bool yuv; 41 - }; 42 - 43 - static const struct format formats[] = { 36 + } formats[] = { 44 37 /* 16bpp [A]RGB: */ 45 - { OMAP_DSS_COLOR_RGB16, DRM_FORMAT_RGB565, {{2, 1}}, false }, /* RGB16-565 */ 46 - { OMAP_DSS_COLOR_RGB12U, DRM_FORMAT_RGBX4444, {{2, 1}}, false }, /* RGB12x-4444 */ 47 - { OMAP_DSS_COLOR_RGBX16, DRM_FORMAT_XRGB4444, {{2, 1}}, false }, /* xRGB12-4444 */ 48 - { OMAP_DSS_COLOR_RGBA16, DRM_FORMAT_RGBA4444, {{2, 1}}, false }, /* RGBA12-4444 */ 49 - { OMAP_DSS_COLOR_ARGB16, DRM_FORMAT_ARGB4444, {{2, 1}}, false }, /* ARGB16-4444 */ 50 - { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555, {{2, 1}}, false }, /* xRGB15-1555 */ 51 - { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555, {{2, 1}}, false }, /* ARGB16-1555 */ 38 + { OMAP_DSS_COLOR_RGB16, DRM_FORMAT_RGB565 }, /* RGB16-565 */ 39 + { OMAP_DSS_COLOR_RGB12U, DRM_FORMAT_RGBX4444 }, /* RGB12x-4444 */ 40 + { OMAP_DSS_COLOR_RGBX16, DRM_FORMAT_XRGB4444 }, /* xRGB12-4444 */ 41 + { OMAP_DSS_COLOR_RGBA16, DRM_FORMAT_RGBA4444 }, /* RGBA12-4444 */ 42 + { OMAP_DSS_COLOR_ARGB16, DRM_FORMAT_ARGB4444 }, /* ARGB16-4444 */ 43 + { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555 }, /* xRGB15-1555 */ 44 + { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555 }, /* ARGB16-1555 */ 52 45 /* 24bpp RGB: */ 53 - { OMAP_DSS_COLOR_RGB24P, DRM_FORMAT_RGB888, {{3, 1}}, false }, /* RGB24-888 */ 46 + { OMAP_DSS_COLOR_RGB24P, DRM_FORMAT_RGB888 }, /* RGB24-888 */ 54 47 /* 32bpp [A]RGB: */ 55 - { OMAP_DSS_COLOR_RGBX32, DRM_FORMAT_RGBX8888, {{4, 1}}, false }, /* RGBx24-8888 */ 56 - { OMAP_DSS_COLOR_RGB24U, DRM_FORMAT_XRGB8888, {{4, 1}}, false }, /* xRGB24-8888 */ 57 - { 
OMAP_DSS_COLOR_RGBA32, DRM_FORMAT_RGBA8888, {{4, 1}}, false }, /* RGBA32-8888 */ 58 - { OMAP_DSS_COLOR_ARGB32, DRM_FORMAT_ARGB8888, {{4, 1}}, false }, /* ARGB32-8888 */ 48 + { OMAP_DSS_COLOR_RGBX32, DRM_FORMAT_RGBX8888 }, /* RGBx24-8888 */ 49 + { OMAP_DSS_COLOR_RGB24U, DRM_FORMAT_XRGB8888 }, /* xRGB24-8888 */ 50 + { OMAP_DSS_COLOR_RGBA32, DRM_FORMAT_RGBA8888 }, /* RGBA32-8888 */ 51 + { OMAP_DSS_COLOR_ARGB32, DRM_FORMAT_ARGB8888 }, /* ARGB32-8888 */ 59 52 /* YUV: */ 60 - { OMAP_DSS_COLOR_NV12, DRM_FORMAT_NV12, {{1, 1}, {1, 2}}, true }, 61 - { OMAP_DSS_COLOR_YUV2, DRM_FORMAT_YUYV, {{2, 1}}, true }, 62 - { OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY, {{2, 1}}, true }, 53 + { OMAP_DSS_COLOR_NV12, DRM_FORMAT_NV12 }, 54 + { OMAP_DSS_COLOR_YUV2, DRM_FORMAT_YUYV }, 55 + { OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY }, 63 56 }; 64 57 65 58 /* convert from overlay's pixel formats bitmask to an array of fourcc's */ ··· 82 89 struct omap_framebuffer { 83 90 struct drm_framebuffer base; 84 91 int pin_count; 85 - const struct format *format; 86 - struct plane planes[4]; 92 + const struct drm_format_info *format; 93 + enum omap_color_mode dss_format; 94 + struct plane planes[2]; 87 95 /* lock for pinning (pin_count and planes.paddr) */ 88 96 struct mutex lock; 89 97 }; ··· 122 128 }; 123 129 124 130 static uint32_t get_linear_addr(struct plane *plane, 125 - const struct format *format, int n, int x, int y) 131 + const struct drm_format_info *format, int n, int x, int y) 126 132 { 127 133 uint32_t offset; 128 134 129 - offset = plane->offset + 130 - (x * format->planes[n].stride_bpp) + 131 - (y * plane->pitch / format->planes[n].sub_y); 135 + offset = plane->offset 136 + + (x * format->cpp[n] / (n == 0 ? 1 : format->hsub)) 137 + + (y * plane->pitch / (n == 0 ? 
1 : format->vsub)); 132 138 133 139 return plane->paddr + offset; 134 140 } ··· 147 153 struct omap_drm_window *win, struct omap_overlay_info *info) 148 154 { 149 155 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); 150 - const struct format *format = omap_fb->format; 156 + const struct drm_format_info *format = omap_fb->format; 151 157 struct plane *plane = &omap_fb->planes[0]; 152 158 uint32_t x, y, orient = 0; 153 159 154 - info->color_mode = format->dss_format; 160 + info->color_mode = omap_fb->dss_format; 155 161 156 162 info->pos_x = win->crtc_x; 157 163 info->pos_y = win->crtc_y; ··· 225 231 } 226 232 227 233 /* convert to pixels: */ 228 - info->screen_width /= format->planes[0].stride_bpp; 234 + info->screen_width /= format->cpp[0]; 229 235 230 - if (format->dss_format == OMAP_DSS_COLOR_NV12) { 236 + if (omap_fb->dss_format == OMAP_DSS_COLOR_NV12) { 231 237 plane = &omap_fb->planes[1]; 232 238 233 239 if (info->rotation_type == OMAP_DSS_ROT_TILER) { ··· 354 360 struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, 355 361 struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd) 356 362 { 363 + unsigned int num_planes = drm_format_num_planes(mode_cmd->pixel_format); 357 364 struct drm_gem_object *bos[4]; 358 365 struct drm_framebuffer *fb; 359 - int ret; 366 + int i; 360 367 361 - ret = objects_lookup(file, mode_cmd->pixel_format, 362 - bos, mode_cmd->handles); 363 - if (ret) 364 - return ERR_PTR(ret); 368 + for (i = 0; i < num_planes; i++) { 369 + bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]); 370 + if (!bos[i]) { 371 + fb = ERR_PTR(-ENOENT); 372 + goto error; 373 + } 374 + } 365 375 366 376 fb = omap_framebuffer_init(dev, mode_cmd, bos); 367 - if (IS_ERR(fb)) { 368 - int i, n = drm_format_num_planes(mode_cmd->pixel_format); 369 - for (i = 0; i < n; i++) 370 - drm_gem_object_unreference_unlocked(bos[i]); 371 - return fb; 372 - } 377 + if (IS_ERR(fb)) 378 + goto error; 379 + 380 + return fb; 381 + 382 + error: 
383 + while (--i > 0) 384 + drm_gem_object_unreference_unlocked(bos[i]); 385 + 373 386 return fb; 374 387 } 375 388 376 389 struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, 377 390 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) 378 391 { 392 + const struct drm_format_info *format = NULL; 379 393 struct omap_framebuffer *omap_fb = NULL; 380 394 struct drm_framebuffer *fb = NULL; 381 - const struct format *format = NULL; 382 - int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format); 395 + enum omap_color_mode dss_format = 0; 396 + unsigned int pitch = mode_cmd->pitches[0]; 397 + int ret, i; 383 398 384 399 DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)", 385 400 dev, mode_cmd, mode_cmd->width, mode_cmd->height, 386 401 (char *)&mode_cmd->pixel_format); 387 402 403 + format = drm_format_info(mode_cmd->pixel_format); 404 + 388 405 for (i = 0; i < ARRAY_SIZE(formats); i++) { 389 406 if (formats[i].pixel_format == mode_cmd->pixel_format) { 390 - format = &formats[i]; 407 + dss_format = formats[i].dss_format; 391 408 break; 392 409 } 393 410 } 394 411 395 - if (!format) { 396 - dev_err(dev->dev, "unsupported pixel format: %4.4s\n", 397 - (char *)&mode_cmd->pixel_format); 412 + if (!format || !dss_format) { 413 + dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n", 414 + (char *)&mode_cmd->pixel_format); 398 415 ret = -EINVAL; 399 416 goto fail; 400 417 } ··· 418 413 419 414 fb = &omap_fb->base; 420 415 omap_fb->format = format; 416 + omap_fb->dss_format = dss_format; 421 417 mutex_init(&omap_fb->lock); 422 418 423 - for (i = 0; i < n; i++) { 419 + /* 420 + * The code below assumes that no format use more than two planes, and 421 + * that the two planes of multiplane formats need the same number of 422 + * bytes per pixel. 
423 + */ 424 + if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) { 425 + dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n"); 426 + ret = -EINVAL; 427 + goto fail; 428 + } 429 + 430 + if (pitch % format->cpp[0]) { 431 + dev_dbg(dev->dev, 432 + "buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n", 433 + pitch, format->cpp[0]); 434 + ret = -EINVAL; 435 + goto fail; 436 + } 437 + 438 + for (i = 0; i < format->num_planes; i++) { 424 439 struct plane *plane = &omap_fb->planes[i]; 425 - int size, pitch = mode_cmd->pitches[i]; 440 + unsigned int vsub = i == 0 ? 1 : format->vsub; 441 + unsigned int size; 426 442 427 - if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) { 428 - dev_err(dev->dev, "provided buffer pitch is too small! %d < %d\n", 429 - pitch, mode_cmd->width * format->planes[i].stride_bpp); 430 - ret = -EINVAL; 431 - goto fail; 432 - } 443 + size = pitch * mode_cmd->height / vsub; 433 444 434 - if (pitch % format->planes[i].stride_bpp != 0) { 435 - dev_err(dev->dev, 436 - "buffer pitch (%d bytes) is not a multiple of pixel size (%d bytes)\n", 437 - pitch, format->planes[i].stride_bpp); 438 - ret = -EINVAL; 439 - goto fail; 440 - } 441 - 442 - size = pitch * mode_cmd->height / format->planes[i].sub_y; 443 - 444 - if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) { 445 - dev_err(dev->dev, "provided buffer object is too small! %d < %d\n", 446 - bos[i]->size - mode_cmd->offsets[i], size); 447 - ret = -EINVAL; 448 - goto fail; 449 - } 450 - 451 - if (i > 0 && pitch != mode_cmd->pitches[i - 1]) { 452 - dev_err(dev->dev, 453 - "pitches are not the same between framebuffer planes %d != %d\n", 454 - pitch, mode_cmd->pitches[i - 1]); 445 + if (size > omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i]) { 446 + dev_dbg(dev->dev, 447 + "provided buffer object is too small! %d < %d\n", 448 + bos[i]->size - mode_cmd->offsets[i], size); 455 449 ret = -EINVAL; 456 450 goto fail; 457 451 }
+133 -109
drivers/gpu/drm/omapdrm/omap_irq.c
··· 19 19 20 20 #include "omap_drv.h" 21 21 22 - static DEFINE_SPINLOCK(list_lock); 22 + struct omap_irq_wait { 23 + struct list_head node; 24 + wait_queue_head_t wq; 25 + uint32_t irqmask; 26 + int count; 27 + }; 23 28 24 - static void omap_irq_error_handler(struct omap_drm_irq *irq, 25 - uint32_t irqstatus) 26 - { 27 - DRM_ERROR("errors: %08x\n", irqstatus); 28 - } 29 - 30 - /* call with list_lock and dispc runtime held */ 29 + /* call with wait_lock and dispc runtime held */ 31 30 static void omap_irq_update(struct drm_device *dev) 32 31 { 33 32 struct omap_drm_private *priv = dev->dev_private; 34 - struct omap_drm_irq *irq; 35 - uint32_t irqmask = priv->vblank_mask; 33 + struct omap_irq_wait *wait; 34 + uint32_t irqmask = priv->irq_mask; 36 35 37 - assert_spin_locked(&list_lock); 36 + assert_spin_locked(&priv->wait_lock); 38 37 39 - list_for_each_entry(irq, &priv->irq_list, node) 40 - irqmask |= irq->irqmask; 38 + list_for_each_entry(wait, &priv->wait_list, node) 39 + irqmask |= wait->irqmask; 41 40 42 41 DBG("irqmask=%08x", irqmask); 43 42 ··· 44 45 dispc_read_irqenable(); /* flush posted write */ 45 46 } 46 47 47 - void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq) 48 + static void omap_irq_wait_handler(struct omap_irq_wait *wait) 48 49 { 49 - struct omap_drm_private *priv = dev->dev_private; 50 - unsigned long flags; 51 - 52 - spin_lock_irqsave(&list_lock, flags); 53 - 54 - if (!WARN_ON(irq->registered)) { 55 - irq->registered = true; 56 - list_add(&irq->node, &priv->irq_list); 57 - omap_irq_update(dev); 58 - } 59 - 60 - spin_unlock_irqrestore(&list_lock, flags); 61 - } 62 - 63 - void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq) 64 - { 65 - dispc_runtime_get(); 66 - 67 - __omap_irq_register(dev, irq); 68 - 69 - dispc_runtime_put(); 70 - } 71 - 72 - void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq) 73 - { 74 - unsigned long flags; 75 - 76 - spin_lock_irqsave(&list_lock, flags); 77 - 78 
- if (!WARN_ON(!irq->registered)) { 79 - irq->registered = false; 80 - list_del(&irq->node); 81 - omap_irq_update(dev); 82 - } 83 - 84 - spin_unlock_irqrestore(&list_lock, flags); 85 - } 86 - 87 - void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq) 88 - { 89 - dispc_runtime_get(); 90 - 91 - __omap_irq_unregister(dev, irq); 92 - 93 - dispc_runtime_put(); 94 - } 95 - 96 - struct omap_irq_wait { 97 - struct omap_drm_irq irq; 98 - int count; 99 - }; 100 - 101 - static DECLARE_WAIT_QUEUE_HEAD(wait_event); 102 - 103 - static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus) 104 - { 105 - struct omap_irq_wait *wait = 106 - container_of(irq, struct omap_irq_wait, irq); 107 50 wait->count--; 108 - wake_up_all(&wait_event); 51 + wake_up(&wait->wq); 109 52 } 110 53 111 54 struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev, 112 55 uint32_t irqmask, int count) 113 56 { 57 + struct omap_drm_private *priv = dev->dev_private; 114 58 struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL); 115 - wait->irq.irq = wait_irq; 116 - wait->irq.irqmask = irqmask; 59 + unsigned long flags; 60 + 61 + init_waitqueue_head(&wait->wq); 62 + wait->irqmask = irqmask; 117 63 wait->count = count; 118 - omap_irq_register(dev, &wait->irq); 64 + 65 + spin_lock_irqsave(&priv->wait_lock, flags); 66 + list_add(&wait->node, &priv->wait_list); 67 + omap_irq_update(dev); 68 + spin_unlock_irqrestore(&priv->wait_lock, flags); 69 + 119 70 return wait; 120 71 } 121 72 122 73 int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait, 123 74 unsigned long timeout) 124 75 { 125 - int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout); 126 - omap_irq_unregister(dev, &wait->irq); 76 + struct omap_drm_private *priv = dev->dev_private; 77 + unsigned long flags; 78 + int ret; 79 + 80 + ret = wait_event_timeout(wait->wq, (wait->count <= 0), timeout); 81 + 82 + spin_lock_irqsave(&priv->wait_lock, flags); 83 + list_del(&wait->node); 84 + 
omap_irq_update(dev); 85 + spin_unlock_irqrestore(&priv->wait_lock, flags); 86 + 127 87 kfree(wait); 128 - if (ret == 0) 129 - return -1; 130 - return 0; 88 + 89 + return ret == 0 ? -1 : 0; 131 90 } 132 91 133 92 /** ··· 109 152 110 153 DBG("dev=%p, crtc=%u", dev, pipe); 111 154 112 - spin_lock_irqsave(&list_lock, flags); 113 - priv->vblank_mask |= pipe2vbl(crtc); 155 + spin_lock_irqsave(&priv->wait_lock, flags); 156 + priv->irq_mask |= dispc_mgr_get_vsync_irq(omap_crtc_channel(crtc)); 114 157 omap_irq_update(dev); 115 - spin_unlock_irqrestore(&list_lock, flags); 158 + spin_unlock_irqrestore(&priv->wait_lock, flags); 116 159 117 160 return 0; 118 161 } ··· 134 177 135 178 DBG("dev=%p, crtc=%u", dev, pipe); 136 179 137 - spin_lock_irqsave(&list_lock, flags); 138 - priv->vblank_mask &= ~pipe2vbl(crtc); 180 + spin_lock_irqsave(&priv->wait_lock, flags); 181 + priv->irq_mask &= ~dispc_mgr_get_vsync_irq(omap_crtc_channel(crtc)); 139 182 omap_irq_update(dev); 140 - spin_unlock_irqrestore(&list_lock, flags); 183 + spin_unlock_irqrestore(&priv->wait_lock, flags); 184 + } 185 + 186 + static void omap_irq_fifo_underflow(struct omap_drm_private *priv, 187 + u32 irqstatus) 188 + { 189 + static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, 190 + DEFAULT_RATELIMIT_BURST); 191 + static const struct { 192 + const char *name; 193 + u32 mask; 194 + } sources[] = { 195 + { "gfx", DISPC_IRQ_GFX_FIFO_UNDERFLOW }, 196 + { "vid1", DISPC_IRQ_VID1_FIFO_UNDERFLOW }, 197 + { "vid2", DISPC_IRQ_VID2_FIFO_UNDERFLOW }, 198 + { "vid3", DISPC_IRQ_VID3_FIFO_UNDERFLOW }, 199 + }; 200 + 201 + const u32 mask = DISPC_IRQ_GFX_FIFO_UNDERFLOW 202 + | DISPC_IRQ_VID1_FIFO_UNDERFLOW 203 + | DISPC_IRQ_VID2_FIFO_UNDERFLOW 204 + | DISPC_IRQ_VID3_FIFO_UNDERFLOW; 205 + unsigned int i; 206 + 207 + spin_lock(&priv->wait_lock); 208 + irqstatus &= priv->irq_mask & mask; 209 + spin_unlock(&priv->wait_lock); 210 + 211 + if (!irqstatus) 212 + return; 213 + 214 + if (!__ratelimit(&_rs)) 215 + return; 216 + 217 + 
DRM_ERROR("FIFO underflow on "); 218 + 219 + for (i = 0; i < ARRAY_SIZE(sources); ++i) { 220 + if (sources[i].mask & irqstatus) 221 + pr_cont("%s ", sources[i].name); 222 + } 223 + 224 + pr_cont("(0x%08x)\n", irqstatus); 225 + } 226 + 227 + static void omap_irq_ocp_error_handler(u32 irqstatus) 228 + { 229 + if (!(irqstatus & DISPC_IRQ_OCP_ERR)) 230 + return; 231 + 232 + DRM_ERROR("OCP error\n"); 141 233 } 142 234 143 235 static irqreturn_t omap_irq_handler(int irq, void *arg) 144 236 { 145 237 struct drm_device *dev = (struct drm_device *) arg; 146 238 struct omap_drm_private *priv = dev->dev_private; 147 - struct omap_drm_irq *handler, *n; 239 + struct omap_irq_wait *wait, *n; 148 240 unsigned long flags; 149 241 unsigned int id; 150 242 u32 irqstatus; ··· 206 200 207 201 for (id = 0; id < priv->num_crtcs; id++) { 208 202 struct drm_crtc *crtc = priv->crtcs[id]; 203 + enum omap_channel channel = omap_crtc_channel(crtc); 209 204 210 - if (irqstatus & pipe2vbl(crtc)) 205 + if (irqstatus & dispc_mgr_get_vsync_irq(channel)) { 211 206 drm_handle_vblank(dev, id); 207 + omap_crtc_vblank_irq(crtc); 208 + } 209 + 210 + if (irqstatus & dispc_mgr_get_sync_lost_irq(channel)) 211 + omap_crtc_error_irq(crtc, irqstatus); 212 212 } 213 213 214 - spin_lock_irqsave(&list_lock, flags); 215 - list_for_each_entry_safe(handler, n, &priv->irq_list, node) { 216 - if (handler->irqmask & irqstatus) { 217 - spin_unlock_irqrestore(&list_lock, flags); 218 - handler->irq(handler, handler->irqmask & irqstatus); 219 - spin_lock_irqsave(&list_lock, flags); 220 - } 214 + omap_irq_ocp_error_handler(irqstatus); 215 + omap_irq_fifo_underflow(priv, irqstatus); 216 + 217 + spin_lock_irqsave(&priv->wait_lock, flags); 218 + list_for_each_entry_safe(wait, n, &priv->wait_list, node) { 219 + if (wait->irqmask & irqstatus) 220 + omap_irq_wait_handler(wait); 221 221 } 222 - spin_unlock_irqrestore(&list_lock, flags); 222 + spin_unlock_irqrestore(&priv->wait_lock, flags); 223 223 224 224 return IRQ_HANDLED; 225 
225 } 226 + 227 + static const u32 omap_underflow_irqs[] = { 228 + [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW, 229 + [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW, 230 + [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW, 231 + [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW, 232 + }; 226 233 227 234 /* 228 235 * We need a special version, instead of just using drm_irq_install(), ··· 247 228 int omap_drm_irq_install(struct drm_device *dev) 248 229 { 249 230 struct omap_drm_private *priv = dev->dev_private; 250 - struct omap_drm_irq *error_handler = &priv->error_handler; 231 + unsigned int num_mgrs = dss_feat_get_num_mgrs(); 232 + unsigned int max_planes; 233 + unsigned int i; 251 234 int ret; 252 235 253 - INIT_LIST_HEAD(&priv->irq_list); 236 + spin_lock_init(&priv->wait_lock); 237 + INIT_LIST_HEAD(&priv->wait_list); 238 + 239 + priv->irq_mask = DISPC_IRQ_OCP_ERR; 240 + 241 + max_planes = min(ARRAY_SIZE(priv->planes), 242 + ARRAY_SIZE(omap_underflow_irqs)); 243 + for (i = 0; i < max_planes; ++i) { 244 + if (priv->planes[i]) 245 + priv->irq_mask |= omap_underflow_irqs[i]; 246 + } 247 + 248 + for (i = 0; i < num_mgrs; ++i) 249 + priv->irq_mask |= dispc_mgr_get_sync_lost_irq(i); 254 250 255 251 dispc_runtime_get(); 256 252 dispc_clear_irqstatus(0xffffffff); ··· 274 240 ret = dispc_request_irq(omap_irq_handler, dev); 275 241 if (ret < 0) 276 242 return ret; 277 - 278 - error_handler->irq = omap_irq_error_handler; 279 - error_handler->irqmask = DISPC_IRQ_OCP_ERR; 280 - 281 - /* for now ignore DISPC_IRQ_SYNC_LOST_DIGIT.. really I think 282 - * we just need to ignore it while enabling tv-out 283 - */ 284 - error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT; 285 - 286 - omap_irq_register(dev, error_handler); 287 243 288 244 dev->irq_enabled = true; 289 245
-24
drivers/gpu/drm/omapdrm/omap_plane.c
··· 43 43 44 44 uint32_t nformats; 45 45 uint32_t formats[32]; 46 - 47 - struct omap_drm_irq error_irq; 48 46 }; 49 47 50 48 struct omap_plane_state { ··· 202 204 203 205 DBG("%s", omap_plane->name); 204 206 205 - omap_irq_unregister(plane->dev, &omap_plane->error_irq); 206 - 207 207 drm_plane_cleanup(plane); 208 208 209 209 kfree(omap_plane); ··· 328 332 .atomic_get_property = omap_plane_atomic_get_property, 329 333 }; 330 334 331 - static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus) 332 - { 333 - struct omap_plane *omap_plane = 334 - container_of(irq, struct omap_plane, error_irq); 335 - DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_plane->name, 336 - irqstatus); 337 - } 338 - 339 335 static const char *plane_names[] = { 340 336 [OMAP_DSS_GFX] = "gfx", 341 337 [OMAP_DSS_VIDEO1] = "vid1", 342 338 [OMAP_DSS_VIDEO2] = "vid2", 343 339 [OMAP_DSS_VIDEO3] = "vid3", 344 - }; 345 - 346 - static const uint32_t error_irqs[] = { 347 - [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW, 348 - [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW, 349 - [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW, 350 - [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW, 351 340 }; 352 341 353 342 /* initialize plane */ ··· 358 377 359 378 plane = &omap_plane->base; 360 379 361 - omap_plane->error_irq.irqmask = error_irqs[id]; 362 - omap_plane->error_irq.irq = omap_plane_error_irq; 363 - omap_irq_register(dev, &omap_plane->error_irq); 364 - 365 380 ret = drm_universal_plane_init(dev, plane, possible_crtcs, 366 381 &omap_plane_funcs, omap_plane->formats, 367 382 omap_plane->nformats, type, NULL); ··· 371 394 return plane; 372 395 373 396 error: 374 - omap_irq_unregister(plane->dev, &omap_plane->error_irq); 375 397 kfree(omap_plane); 376 398 return NULL; 377 399 }
+1
include/uapi/drm/Kbuild
··· 9 9 header-y += i915_drm.h 10 10 header-y += mga_drm.h 11 11 header-y += nouveau_drm.h 12 + header-y += omap_drm.h 12 13 header-y += qxl_drm.h 13 14 header-y += r128_drm.h 14 15 header-y += radeon_drm.h