Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2021-08-12' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v5.15:

UAPI Changes:

Cross-subsystem Changes:
- Add lockdep_assert(once) helpers.

Core Changes:
- Add lockdep assert to drm_is_current_master_locked.
- Fix typos in dma-buf documentation.
- Mark drm irq midlayer as legacy only.
- Fix GPF in udmabuf_create.
- Rename member to correct value in drm_edid.h

Driver Changes:
- Build fix to make nouveau build with NOUVEAU_BACKLIGHT.
- Add MI101AIT-ICP1, LTTD800480070-L6WWH-RT panels.
- Assorted fixes to bridge/it66121, anx7625.
- Add custom crtc_state to simple helpers, and use it to
convert pll handling in mgag200 to atomic.
- Convert drivers to use offset-adjusted framebuffer bo mappings.
- Assorted small fixes and a fix for a use-after-free in vmwgfx.
- Convert remaining callers of non-legacy drivers to use linux irqs directly.
- Small cleanup in ingenic.
- Small fixes to virtio and ti-sn65dsi86.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1cf2d7fc-402d-1852-574a-21cbbd2eaebf@linux.intel.com

+2000 -1363
+2 -2
drivers/dma-buf/udmabuf.c
··· 227 227 if (!hpage) { 228 228 hpage = find_get_page_flags(mapping, pgoff, 229 229 FGP_ACCESSED); 230 - if (IS_ERR(hpage)) { 231 - ret = PTR_ERR(hpage); 230 + if (!hpage) { 231 + ret = -EINVAL; 232 232 goto err; 233 233 } 234 234 }
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 1786 1786 .open = amdgpu_driver_open_kms, 1787 1787 .postclose = amdgpu_driver_postclose_kms, 1788 1788 .lastclose = amdgpu_driver_lastclose_kms, 1789 - .irq_handler = amdgpu_irq_handler, 1790 1789 .ioctls = amdgpu_ioctls_kms, 1791 1790 .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms), 1792 1791 .dumb_create = amdgpu_mode_dumb_create,
+14 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
··· 46 46 #include <linux/pci.h> 47 47 48 48 #include <drm/drm_crtc_helper.h> 49 - #include <drm/drm_irq.h> 50 49 #include <drm/drm_vblank.h> 51 50 #include <drm/amdgpu_drm.h> 52 51 #include <drm/drm_drv.h> ··· 183 184 * Returns: 184 185 * result of handling the IRQ, as defined by &irqreturn_t 185 186 */ 186 - irqreturn_t amdgpu_irq_handler(int irq, void *arg) 187 + static irqreturn_t amdgpu_irq_handler(int irq, void *arg) 187 188 { 188 189 struct drm_device *dev = (struct drm_device *) arg; 189 190 struct amdgpu_device *adev = drm_to_adev(dev); ··· 306 307 int amdgpu_irq_init(struct amdgpu_device *adev) 307 308 { 308 309 int r = 0; 310 + unsigned int irq; 309 311 310 312 spin_lock_init(&adev->irq.lock); 311 313 ··· 349 349 INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2); 350 350 INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft); 351 351 352 - adev->irq.installed = true; 353 - /* Use vector 0 for MSI-X */ 354 - r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0)); 352 + /* Use vector 0 for MSI-X. */ 353 + r = pci_irq_vector(adev->pdev, 0); 354 + if (r < 0) 355 + return r; 356 + irq = r; 357 + 358 + /* PCI devices require shared interrupts. */ 359 + r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name, 360 + adev_to_drm(adev)); 355 361 if (r) { 356 - adev->irq.installed = false; 357 362 if (!amdgpu_device_has_dc_support(adev)) 358 363 flush_work(&adev->hotplug_work); 359 364 return r; 360 365 } 366 + adev->irq.installed = true; 367 + adev->irq.irq = irq; 361 368 adev_to_drm(adev)->max_vblank_count = 0x00ffffff; 362 369 363 370 DRM_DEBUG("amdgpu: irq initialized.\n"); ··· 375 368 void amdgpu_irq_fini_hw(struct amdgpu_device *adev) 376 369 { 377 370 if (adev->irq.installed) { 378 - drm_irq_uninstall(&adev->ddev); 371 + free_irq(adev->irq.irq, adev_to_drm(adev)); 379 372 adev->irq.installed = false; 380 373 if (adev->irq.msi_enabled) 381 374 pci_free_irq_vectors(adev->pdev);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
··· 80 80 81 81 struct amdgpu_irq { 82 82 bool installed; 83 + unsigned int irq; 83 84 spinlock_t lock; 84 85 /* interrupt sources */ 85 86 struct amdgpu_irq_client client[AMDGPU_IRQ_CLIENTID_MAX]; ··· 101 100 }; 102 101 103 102 void amdgpu_irq_disable_all(struct amdgpu_device *adev); 104 - irqreturn_t amdgpu_irq_handler(int irq, void *arg); 105 103 106 104 int amdgpu_irq_init(struct amdgpu_device *adev); 107 105 void amdgpu_irq_fini_sw(struct amdgpu_device *adev);
+96 -78
drivers/gpu/drm/arm/hdlcd_drv.c
··· 29 29 #include <drm/drm_fb_helper.h> 30 30 #include <drm/drm_gem_cma_helper.h> 31 31 #include <drm/drm_gem_framebuffer_helper.h> 32 - #include <drm/drm_irq.h> 33 32 #include <drm/drm_modeset_helper.h> 34 33 #include <drm/drm_of.h> 35 34 #include <drm/drm_probe_helper.h> ··· 36 37 37 38 #include "hdlcd_drv.h" 38 39 #include "hdlcd_regs.h" 40 + 41 + static irqreturn_t hdlcd_irq(int irq, void *arg) 42 + { 43 + struct drm_device *drm = arg; 44 + struct hdlcd_drm_private *hdlcd = drm->dev_private; 45 + unsigned long irq_status; 46 + 47 + irq_status = hdlcd_read(hdlcd, HDLCD_REG_INT_STATUS); 48 + 49 + #ifdef CONFIG_DEBUG_FS 50 + if (irq_status & HDLCD_INTERRUPT_UNDERRUN) 51 + atomic_inc(&hdlcd->buffer_underrun_count); 52 + 53 + if (irq_status & HDLCD_INTERRUPT_DMA_END) 54 + atomic_inc(&hdlcd->dma_end_count); 55 + 56 + if (irq_status & HDLCD_INTERRUPT_BUS_ERROR) 57 + atomic_inc(&hdlcd->bus_error_count); 58 + 59 + if (irq_status & HDLCD_INTERRUPT_VSYNC) 60 + atomic_inc(&hdlcd->vsync_count); 61 + 62 + #endif 63 + if (irq_status & HDLCD_INTERRUPT_VSYNC) 64 + drm_crtc_handle_vblank(&hdlcd->crtc); 65 + 66 + /* acknowledge interrupt(s) */ 67 + hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status); 68 + 69 + return IRQ_HANDLED; 70 + } 71 + 72 + static void hdlcd_irq_preinstall(struct drm_device *drm) 73 + { 74 + struct hdlcd_drm_private *hdlcd = drm->dev_private; 75 + /* Ensure interrupts are disabled */ 76 + hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0); 77 + hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, ~0); 78 + } 79 + 80 + static void hdlcd_irq_postinstall(struct drm_device *drm) 81 + { 82 + #ifdef CONFIG_DEBUG_FS 83 + struct hdlcd_drm_private *hdlcd = drm->dev_private; 84 + unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK); 85 + 86 + /* enable debug interrupts */ 87 + irq_mask |= HDLCD_DEBUG_INT_MASK; 88 + 89 + hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask); 90 + #endif 91 + } 92 + 93 + static int hdlcd_irq_install(struct drm_device *drm, int irq) 94 + { 95 + int 
ret; 96 + 97 + if (irq == IRQ_NOTCONNECTED) 98 + return -ENOTCONN; 99 + 100 + hdlcd_irq_preinstall(drm); 101 + 102 + ret = request_irq(irq, hdlcd_irq, 0, drm->driver->name, drm); 103 + if (ret) 104 + return ret; 105 + 106 + hdlcd_irq_postinstall(drm); 107 + 108 + return 0; 109 + } 110 + 111 + static void hdlcd_irq_uninstall(struct drm_device *drm) 112 + { 113 + struct hdlcd_drm_private *hdlcd = drm->dev_private; 114 + /* disable all the interrupts that we might have enabled */ 115 + unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK); 116 + 117 + #ifdef CONFIG_DEBUG_FS 118 + /* disable debug interrupts */ 119 + irq_mask &= ~HDLCD_DEBUG_INT_MASK; 120 + #endif 121 + 122 + /* disable vsync interrupts */ 123 + irq_mask &= ~HDLCD_INTERRUPT_VSYNC; 124 + hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask); 125 + 126 + free_irq(hdlcd->irq, drm); 127 + } 39 128 40 129 static int hdlcd_load(struct drm_device *drm, unsigned long flags) 41 130 { ··· 177 90 goto setup_fail; 178 91 } 179 92 180 - ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); 93 + ret = platform_get_irq(pdev, 0); 94 + if (ret < 0) 95 + goto irq_fail; 96 + hdlcd->irq = ret; 97 + 98 + ret = hdlcd_irq_install(drm, hdlcd->irq); 181 99 if (ret < 0) { 182 100 DRM_ERROR("failed to install IRQ handler\n"); 183 101 goto irq_fail; ··· 212 120 drm->mode_config.max_width = HDLCD_MAX_XRES; 213 121 drm->mode_config.max_height = HDLCD_MAX_YRES; 214 122 drm->mode_config.funcs = &hdlcd_mode_config_funcs; 215 - } 216 - 217 - static irqreturn_t hdlcd_irq(int irq, void *arg) 218 - { 219 - struct drm_device *drm = arg; 220 - struct hdlcd_drm_private *hdlcd = drm->dev_private; 221 - unsigned long irq_status; 222 - 223 - irq_status = hdlcd_read(hdlcd, HDLCD_REG_INT_STATUS); 224 - 225 - #ifdef CONFIG_DEBUG_FS 226 - if (irq_status & HDLCD_INTERRUPT_UNDERRUN) 227 - atomic_inc(&hdlcd->buffer_underrun_count); 228 - 229 - if (irq_status & HDLCD_INTERRUPT_DMA_END) 230 - atomic_inc(&hdlcd->dma_end_count); 231 - 232 - if 
(irq_status & HDLCD_INTERRUPT_BUS_ERROR) 233 - atomic_inc(&hdlcd->bus_error_count); 234 - 235 - if (irq_status & HDLCD_INTERRUPT_VSYNC) 236 - atomic_inc(&hdlcd->vsync_count); 237 - 238 - #endif 239 - if (irq_status & HDLCD_INTERRUPT_VSYNC) 240 - drm_crtc_handle_vblank(&hdlcd->crtc); 241 - 242 - /* acknowledge interrupt(s) */ 243 - hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status); 244 - 245 - return IRQ_HANDLED; 246 - } 247 - 248 - static void hdlcd_irq_preinstall(struct drm_device *drm) 249 - { 250 - struct hdlcd_drm_private *hdlcd = drm->dev_private; 251 - /* Ensure interrupts are disabled */ 252 - hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0); 253 - hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, ~0); 254 - } 255 - 256 - static int hdlcd_irq_postinstall(struct drm_device *drm) 257 - { 258 - #ifdef CONFIG_DEBUG_FS 259 - struct hdlcd_drm_private *hdlcd = drm->dev_private; 260 - unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK); 261 - 262 - /* enable debug interrupts */ 263 - irq_mask |= HDLCD_DEBUG_INT_MASK; 264 - 265 - hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask); 266 - #endif 267 - return 0; 268 - } 269 - 270 - static void hdlcd_irq_uninstall(struct drm_device *drm) 271 - { 272 - struct hdlcd_drm_private *hdlcd = drm->dev_private; 273 - /* disable all the interrupts that we might have enabled */ 274 - unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK); 275 - 276 - #ifdef CONFIG_DEBUG_FS 277 - /* disable debug interrupts */ 278 - irq_mask &= ~HDLCD_DEBUG_INT_MASK; 279 - #endif 280 - 281 - /* disable vsync interrupts */ 282 - irq_mask &= ~HDLCD_INTERRUPT_VSYNC; 283 - 284 - hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask); 285 123 } 286 124 287 125 #ifdef CONFIG_DEBUG_FS ··· 258 236 259 237 static const struct drm_driver hdlcd_driver = { 260 238 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 261 - .irq_handler = hdlcd_irq, 262 - .irq_preinstall = hdlcd_irq_preinstall, 263 - .irq_postinstall = hdlcd_irq_postinstall, 264 - 
.irq_uninstall = hdlcd_irq_uninstall, 265 239 DRM_GEM_CMA_DRIVER_OPS, 266 240 #ifdef CONFIG_DEBUG_FS 267 241 .debugfs_init = hdlcd_debugfs_init, ··· 334 316 err_unload: 335 317 of_node_put(hdlcd->crtc.port); 336 318 hdlcd->crtc.port = NULL; 337 - drm_irq_uninstall(drm); 319 + hdlcd_irq_uninstall(drm); 338 320 of_reserved_mem_device_release(drm->dev); 339 321 err_free: 340 322 drm_mode_config_cleanup(drm); ··· 356 338 hdlcd->crtc.port = NULL; 357 339 pm_runtime_get_sync(dev); 358 340 drm_atomic_helper_shutdown(drm); 359 - drm_irq_uninstall(drm); 341 + hdlcd_irq_uninstall(drm); 360 342 pm_runtime_put(dev); 361 343 if (pm_runtime_enabled(dev)) 362 344 pm_runtime_disable(dev);
+1
drivers/gpu/drm/arm/hdlcd_drv.h
··· 11 11 struct clk *clk; 12 12 struct drm_crtc crtc; 13 13 struct drm_plane *plane; 14 + unsigned int irq; 14 15 #ifdef CONFIG_DEBUG_FS 15 16 atomic_t buffer_underrun_count; 16 17 atomic_t bus_error_count;
+1 -1
drivers/gpu/drm/ast/ast_mode.c
··· 808 808 ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].map; 809 809 u64 dst_off = 810 810 ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].off; 811 - struct dma_buf_map src_map = shadow_plane_state->map[0]; 811 + struct dma_buf_map src_map = shadow_plane_state->data[0]; 812 812 unsigned int offset_x, offset_y; 813 813 u16 x, y; 814 814 u8 x_offset, y_offset;
+47 -33
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
··· 22 22 #include <drm/drm_fb_helper.h> 23 23 #include <drm/drm_gem_cma_helper.h> 24 24 #include <drm/drm_gem_framebuffer_helper.h> 25 - #include <drm/drm_irq.h> 26 25 #include <drm/drm_probe_helper.h> 27 26 #include <drm/drm_vblank.h> 28 27 ··· 556 557 return IRQ_HANDLED; 557 558 } 558 559 560 + static void atmel_hlcdc_dc_irq_postinstall(struct drm_device *dev) 561 + { 562 + struct atmel_hlcdc_dc *dc = dev->dev_private; 563 + unsigned int cfg = 0; 564 + int i; 565 + 566 + /* Enable interrupts on activated layers */ 567 + for (i = 0; i < ATMEL_HLCDC_MAX_LAYERS; i++) { 568 + if (dc->layers[i]) 569 + cfg |= ATMEL_HLCDC_LAYER_STATUS(i); 570 + } 571 + 572 + regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, cfg); 573 + } 574 + 575 + static void atmel_hlcdc_dc_irq_disable(struct drm_device *dev) 576 + { 577 + struct atmel_hlcdc_dc *dc = dev->dev_private; 578 + unsigned int isr; 579 + 580 + regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IDR, 0xffffffff); 581 + regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr); 582 + } 583 + 584 + static int atmel_hlcdc_dc_irq_install(struct drm_device *dev, unsigned int irq) 585 + { 586 + int ret; 587 + 588 + atmel_hlcdc_dc_irq_disable(dev); 589 + 590 + ret = devm_request_irq(dev->dev, irq, atmel_hlcdc_dc_irq_handler, 0, 591 + dev->driver->name, dev); 592 + if (ret) 593 + return ret; 594 + 595 + atmel_hlcdc_dc_irq_postinstall(dev); 596 + 597 + return 0; 598 + } 599 + 600 + static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev) 601 + { 602 + atmel_hlcdc_dc_irq_disable(dev); 603 + } 604 + 559 605 static const struct drm_mode_config_funcs mode_config_funcs = { 560 606 .fb_create = drm_gem_fb_create, 561 607 .atomic_check = drm_atomic_helper_check, ··· 691 647 drm_mode_config_reset(dev); 692 648 693 649 pm_runtime_get_sync(dev->dev); 694 - ret = drm_irq_install(dev, dc->hlcdc->irq); 650 + ret = atmel_hlcdc_dc_irq_install(dev, dc->hlcdc->irq); 695 651 pm_runtime_put_sync(dev->dev); 696 652 if (ret < 0) { 697 653 dev_err(dev->dev, 
"failed to install IRQ handler\n"); ··· 720 676 drm_mode_config_cleanup(dev); 721 677 722 678 pm_runtime_get_sync(dev->dev); 723 - drm_irq_uninstall(dev); 679 + atmel_hlcdc_dc_irq_uninstall(dev); 724 680 pm_runtime_put_sync(dev->dev); 725 681 726 682 dev->dev_private = NULL; ··· 729 685 clk_disable_unprepare(dc->hlcdc->periph_clk); 730 686 } 731 687 732 - static int atmel_hlcdc_dc_irq_postinstall(struct drm_device *dev) 733 - { 734 - struct atmel_hlcdc_dc *dc = dev->dev_private; 735 - unsigned int cfg = 0; 736 - int i; 737 - 738 - /* Enable interrupts on activated layers */ 739 - for (i = 0; i < ATMEL_HLCDC_MAX_LAYERS; i++) { 740 - if (dc->layers[i]) 741 - cfg |= ATMEL_HLCDC_LAYER_STATUS(i); 742 - } 743 - 744 - regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, cfg); 745 - 746 - return 0; 747 - } 748 - 749 - static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev) 750 - { 751 - struct atmel_hlcdc_dc *dc = dev->dev_private; 752 - unsigned int isr; 753 - 754 - regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IDR, 0xffffffff); 755 - regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr); 756 - } 757 - 758 688 DEFINE_DRM_GEM_CMA_FOPS(fops); 759 689 760 690 static const struct drm_driver atmel_hlcdc_dc_driver = { 761 691 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 762 - .irq_handler = atmel_hlcdc_dc_irq_handler, 763 - .irq_preinstall = atmel_hlcdc_dc_irq_uninstall, 764 - .irq_postinstall = atmel_hlcdc_dc_irq_postinstall, 765 - .irq_uninstall = atmel_hlcdc_dc_irq_uninstall, 766 692 DRM_GEM_CMA_DRIVER_OPS, 767 693 .fops = &fops, 768 694 .name = "atmel-hlcdc",
+21 -3
drivers/gpu/drm/bridge/analogix/anx7625.c
··· 384 384 return ret; 385 385 } 386 386 387 + /* 388 + * The MIPI source video data exist large variation (e.g. 59Hz ~ 61Hz), 389 + * anx7625 defined K ratio for matching MIPI input video clock and 390 + * DP output video clock. Increase K value can match bigger video data 391 + * variation. IVO panel has small variation than DP CTS spec, need 392 + * decrease the K value. 393 + */ 394 + static int anx7625_set_k_value(struct anx7625_data *ctx) 395 + { 396 + struct edid *edid = (struct edid *)ctx->slimport_edid_p.edid_raw_data; 397 + 398 + if (edid->mfg_id[0] == IVO_MID0 && edid->mfg_id[1] == IVO_MID1) 399 + return anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, 400 + MIPI_DIGITAL_ADJ_1, 0x3B); 401 + 402 + return anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, 403 + MIPI_DIGITAL_ADJ_1, 0x3D); 404 + } 405 + 387 406 static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx) 388 407 { 389 408 struct device *dev = &ctx->client->dev; ··· 489 470 MIPI_PLL_N_NUM_15_8, (n >> 8) & 0xff); 490 471 ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_PLL_N_NUM_7_0, 491 472 (n & 0xff)); 492 - /* Diff */ 493 - ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, 494 - MIPI_DIGITAL_ADJ_1, 0x3D); 473 + 474 + anx7625_set_k_value(ctx); 495 475 496 476 ret |= anx7625_odfc_config(ctx, post_divider - 1); 497 477
+3 -1
drivers/gpu/drm/bridge/analogix/anx7625.h
··· 210 210 #define MIPI_VIDEO_STABLE_CNT 0x0A 211 211 212 212 #define MIPI_LANE_CTRL_10 0x0F 213 - #define MIPI_DIGITAL_ADJ_1 0x1B 213 + #define MIPI_DIGITAL_ADJ_1 0x1B 214 + #define IVO_MID0 0x26 215 + #define IVO_MID1 0xCF 214 216 215 217 #define MIPI_PLL_M_NUM_23_16 0x1E 216 218 #define MIPI_PLL_M_NUM_15_8 0x1F
+2
drivers/gpu/drm/bridge/ite-it66121.c
··· 536 536 return -EINVAL; 537 537 538 538 ret = drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags); 539 + if (ret) 540 + return ret; 539 541 540 542 ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, 541 543 IT66121_CLK_BANK_PWROFF_RCLK, 0);
+3
drivers/gpu/drm/bridge/ti-sn65dsi86.c
··· 739 739 } 740 740 pdata->dsi = dsi; 741 741 742 + /* We never want the next bridge to *also* create a connector: */ 743 + flags |= DRM_BRIDGE_ATTACH_NO_CONNECTOR; 744 + 742 745 /* Attach the next bridge */ 743 746 ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge, 744 747 &pdata->bridge, flags);
+3 -3
drivers/gpu/drm/drm_auth.c
··· 63 63 64 64 static bool drm_is_current_master_locked(struct drm_file *fpriv) 65 65 { 66 - /* Either drm_device.master_mutex or drm_file.master_lookup_lock 67 - * should be held here. 68 - */ 66 + lockdep_assert_once(lockdep_is_held(&fpriv->master_lookup_lock) || 67 + lockdep_is_held(&fpriv->minor->dev->master_mutex)); 68 + 69 69 return fpriv->is_master && drm_lease_owner(fpriv->master) == fpriv->minor->dev->master; 70 70 } 71 71
+1 -1
drivers/gpu/drm/drm_gem_atomic_helper.c
··· 339 339 if (ret) 340 340 return ret; 341 341 342 - return drm_gem_fb_vmap(fb, shadow_plane_state->map); 342 + return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data); 343 343 } 344 344 EXPORT_SYMBOL(drm_gem_prepare_shadow_fb); 345 345
+16 -1
drivers/gpu/drm/drm_gem_framebuffer_helper.c
··· 315 315 * drm_gem_fb_vmap - maps all framebuffer BOs into kernel address space 316 316 * @fb: the framebuffer 317 317 * @map: returns the mapping's address for each BO 318 + * @data: returns the data address for each BO, can be NULL 318 319 * 319 320 * This function maps all buffer objects of the given framebuffer into 320 321 * kernel address space and stores them in struct dma_buf_map. If the 321 322 * mapping operation fails for one of the BOs, the function unmaps the 322 323 * already established mappings automatically. 324 + * 325 + * Callers that want to access a BO's stored data should pass @data. 326 + * The argument returns the addresses of the data stored in each BO. This 327 + * is different from @map if the framebuffer's offsets field is non-zero. 323 328 * 324 329 * See drm_gem_fb_vunmap() for unmapping. 325 330 * ··· 332 327 * 0 on success, or a negative errno code otherwise. 333 328 */ 334 329 int drm_gem_fb_vmap(struct drm_framebuffer *fb, 335 - struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES]) 330 + struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES], 331 + struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]) 336 332 { 337 333 struct drm_gem_object *obj; 338 334 unsigned int i; ··· 348 342 ret = drm_gem_vmap(obj, &map[i]); 349 343 if (ret) 350 344 goto err_drm_gem_vunmap; 345 + } 346 + 347 + if (data) { 348 + for (i = 0; i < DRM_FORMAT_MAX_PLANES; ++i) { 349 + memcpy(&data[i], &map[i], sizeof(data[i])); 350 + if (dma_buf_map_is_null(&data[i])) 351 + continue; 352 + dma_buf_map_incr(&data[i], fb->offsets[i]); 353 + } 351 354 } 352 355 353 356 return 0;
+7 -88
drivers/gpu/drm/drm_irq.c
··· 60 60 #include <drm/drm.h> 61 61 #include <drm/drm_device.h> 62 62 #include <drm/drm_drv.h> 63 - #include <drm/drm_irq.h> 63 + #include <drm/drm_legacy.h> 64 64 #include <drm/drm_print.h> 65 65 #include <drm/drm_vblank.h> 66 66 67 67 #include "drm_internal.h" 68 68 69 - /** 70 - * DOC: irq helpers 71 - * 72 - * The DRM core provides very simple support helpers to enable IRQ handling on a 73 - * device through the drm_irq_install() and drm_irq_uninstall() functions. This 74 - * only supports devices with a single interrupt on the main device stored in 75 - * &drm_device.dev and set as the device parameter in drm_dev_alloc(). 76 - * 77 - * These IRQ helpers are strictly optional. Since these helpers don't automatically 78 - * clean up the requested interrupt like e.g. devm_request_irq() they're not really 79 - * recommended. 80 - */ 81 - 82 - /** 83 - * drm_irq_install - install IRQ handler 84 - * @dev: DRM device 85 - * @irq: IRQ number to install the handler for 86 - * 87 - * Initializes the IRQ related data. Installs the handler, calling the driver 88 - * &drm_driver.irq_preinstall and &drm_driver.irq_postinstall functions before 89 - * and after the installation. 90 - * 91 - * This is the simplified helper interface provided for drivers with no special 92 - * needs. 93 - * 94 - * @irq must match the interrupt number that would be passed to request_irq(), 95 - * if called directly instead of using this helper function. 96 - * 97 - * &drm_driver.irq_handler is called to handle the registered interrupt. 98 - * 99 - * Returns: 100 - * Zero on success or a negative error code on failure. 
101 - */ 102 - int drm_irq_install(struct drm_device *dev, int irq) 69 + #if IS_ENABLED(CONFIG_DRM_LEGACY) 70 + static int drm_legacy_irq_install(struct drm_device *dev, int irq) 103 71 { 104 72 int ret; 105 73 unsigned long sh_flags = 0; ··· 112 144 113 145 return ret; 114 146 } 115 - EXPORT_SYMBOL(drm_irq_install); 116 147 117 - /** 118 - * drm_irq_uninstall - uninstall the IRQ handler 119 - * @dev: DRM device 120 - * 121 - * Calls the driver's &drm_driver.irq_uninstall function and unregisters the IRQ 122 - * handler. This should only be called by drivers which used drm_irq_install() 123 - * to set up their interrupt handler. 124 - * 125 - * Note that for kernel modesetting drivers it is a bug if this function fails. 126 - * The sanity checks are only to catch buggy user modesetting drivers which call 127 - * the same function through an ioctl. 128 - * 129 - * Returns: 130 - * Zero on success or a negative error code on failure. 131 - */ 132 - int drm_irq_uninstall(struct drm_device *dev) 148 + int drm_legacy_irq_uninstall(struct drm_device *dev) 133 149 { 134 150 unsigned long irqflags; 135 151 bool irq_enabled; ··· 159 207 160 208 return 0; 161 209 } 162 - EXPORT_SYMBOL(drm_irq_uninstall); 210 + EXPORT_SYMBOL(drm_legacy_irq_uninstall); 163 211 164 - static void devm_drm_irq_uninstall(void *data) 165 - { 166 - drm_irq_uninstall(data); 167 - } 168 - 169 - /** 170 - * devm_drm_irq_install - install IRQ handler 171 - * @dev: DRM device 172 - * @irq: IRQ number to install the handler for 173 - * 174 - * devm_drm_irq_install is a help function of drm_irq_install. 175 - * 176 - * if the driver uses devm_drm_irq_install, there is no need 177 - * to call drm_irq_uninstall when the drm module get unloaded, 178 - * as this will done automagically. 179 - * 180 - * Returns: 181 - * Zero on success or a negative error code on failure. 
182 - */ 183 - int devm_drm_irq_install(struct drm_device *dev, int irq) 184 - { 185 - int ret; 186 - 187 - ret = drm_irq_install(dev, irq); 188 - if (ret) 189 - return ret; 190 - 191 - return devm_add_action_or_reset(dev->dev, 192 - devm_drm_irq_uninstall, dev); 193 - } 194 - EXPORT_SYMBOL(devm_drm_irq_install); 195 - 196 - #if IS_ENABLED(CONFIG_DRM_LEGACY) 197 212 int drm_legacy_irq_control(struct drm_device *dev, void *data, 198 213 struct drm_file *file_priv) 199 214 { ··· 189 270 ctl->irq != irq) 190 271 return -EINVAL; 191 272 mutex_lock(&dev->struct_mutex); 192 - ret = drm_irq_install(dev, irq); 273 + ret = drm_legacy_irq_install(dev, irq); 193 274 mutex_unlock(&dev->struct_mutex); 194 275 195 276 return ret; 196 277 case DRM_UNINST_HANDLER: 197 278 mutex_lock(&dev->struct_mutex); 198 - ret = drm_irq_uninstall(dev); 279 + ret = drm_legacy_irq_uninstall(dev); 199 280 mutex_unlock(&dev->struct_mutex); 200 281 201 282 return ret;
+1 -2
drivers/gpu/drm/drm_legacy_misc.c
··· 35 35 36 36 #include <drm/drm_device.h> 37 37 #include <drm/drm_drv.h> 38 - #include <drm/drm_irq.h> 39 38 #include <drm/drm_print.h> 40 39 41 40 #include "drm_internal.h" ··· 77 78 void drm_legacy_dev_reinit(struct drm_device *dev) 78 79 { 79 80 if (dev->irq_enabled) 80 - drm_irq_uninstall(dev); 81 + drm_legacy_irq_uninstall(dev); 81 82 82 83 mutex_lock(&dev->struct_mutex); 83 84
+36 -3
drivers/gpu/drm/drm_simple_kms_helper.c
··· 145 145 .atomic_disable = drm_simple_kms_crtc_disable, 146 146 }; 147 147 148 + static void drm_simple_kms_crtc_reset(struct drm_crtc *crtc) 149 + { 150 + struct drm_simple_display_pipe *pipe; 151 + 152 + pipe = container_of(crtc, struct drm_simple_display_pipe, crtc); 153 + if (!pipe->funcs || !pipe->funcs->reset_crtc) 154 + return drm_atomic_helper_crtc_reset(crtc); 155 + 156 + return pipe->funcs->reset_crtc(pipe); 157 + } 158 + 159 + static struct drm_crtc_state *drm_simple_kms_crtc_duplicate_state(struct drm_crtc *crtc) 160 + { 161 + struct drm_simple_display_pipe *pipe; 162 + 163 + pipe = container_of(crtc, struct drm_simple_display_pipe, crtc); 164 + if (!pipe->funcs || !pipe->funcs->duplicate_crtc_state) 165 + return drm_atomic_helper_crtc_duplicate_state(crtc); 166 + 167 + return pipe->funcs->duplicate_crtc_state(pipe); 168 + } 169 + 170 + static void drm_simple_kms_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) 171 + { 172 + struct drm_simple_display_pipe *pipe; 173 + 174 + pipe = container_of(crtc, struct drm_simple_display_pipe, crtc); 175 + if (!pipe->funcs || !pipe->funcs->destroy_crtc_state) 176 + drm_atomic_helper_crtc_destroy_state(crtc, state); 177 + else 178 + pipe->funcs->destroy_crtc_state(pipe, state); 179 + } 180 + 148 181 static int drm_simple_kms_crtc_enable_vblank(struct drm_crtc *crtc) 149 182 { 150 183 struct drm_simple_display_pipe *pipe; ··· 201 168 } 202 169 203 170 static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = { 204 - .reset = drm_atomic_helper_crtc_reset, 171 + .reset = drm_simple_kms_crtc_reset, 205 172 .destroy = drm_crtc_cleanup, 206 173 .set_config = drm_atomic_helper_set_config, 207 174 .page_flip = drm_atomic_helper_page_flip, 208 - .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 209 - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 175 + .atomic_duplicate_state = drm_simple_kms_crtc_duplicate_state, 176 + .atomic_destroy_state = 
drm_simple_kms_crtc_destroy_state, 210 177 .enable_vblank = drm_simple_kms_crtc_enable_vblank, 211 178 .disable_vblank = drm_simple_kms_crtc_disable_vblank, 212 179 };
+4 -4
drivers/gpu/drm/drm_vblank.c
··· 1739 1739 1740 1740 static bool drm_wait_vblank_supported(struct drm_device *dev) 1741 1741 { 1742 - if (IS_ENABLED(CONFIG_DRM_LEGACY)) { 1743 - if (unlikely(drm_core_check_feature(dev, DRIVER_LEGACY))) 1744 - return dev->irq_enabled; 1745 - } 1742 + #if IS_ENABLED(CONFIG_DRM_LEGACY) 1743 + if (unlikely(drm_core_check_feature(dev, DRIVER_LEGACY))) 1744 + return dev->irq_enabled; 1745 + #endif 1746 1746 return drm_dev_has_vblank(dev); 1747 1747 } 1748 1748
+71 -57
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
··· 23 23 #include <drm/drm_fb_cma_helper.h> 24 24 #include <drm/drm_fb_helper.h> 25 25 #include <drm/drm_gem_cma_helper.h> 26 - #include <drm/drm_irq.h> 27 26 #include <drm/drm_modeset_helper.h> 28 27 #include <drm/drm_probe_helper.h> 29 28 #include <drm/drm_vblank.h> ··· 50 51 .volatile_reg = fsl_dcu_drm_is_volatile_reg, 51 52 }; 52 53 53 - static void fsl_dcu_irq_uninstall(struct drm_device *dev) 54 + static void fsl_dcu_irq_reset(struct drm_device *dev) 54 55 { 55 56 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 56 57 57 58 regmap_write(fsl_dev->regmap, DCU_INT_STATUS, ~0); 58 59 regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0); 59 - } 60 - 61 - static int fsl_dcu_load(struct drm_device *dev, unsigned long flags) 62 - { 63 - struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 64 - int ret; 65 - 66 - ret = fsl_dcu_drm_modeset_init(fsl_dev); 67 - if (ret < 0) { 68 - dev_err(dev->dev, "failed to initialize mode setting\n"); 69 - return ret; 70 - } 71 - 72 - ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 73 - if (ret < 0) { 74 - dev_err(dev->dev, "failed to initialize vblank\n"); 75 - goto done; 76 - } 77 - 78 - ret = drm_irq_install(dev, fsl_dev->irq); 79 - if (ret < 0) { 80 - dev_err(dev->dev, "failed to install IRQ handler\n"); 81 - goto done; 82 - } 83 - 84 - if (legacyfb_depth != 16 && legacyfb_depth != 24 && 85 - legacyfb_depth != 32) { 86 - dev_warn(dev->dev, 87 - "Invalid legacyfb_depth. 
Defaulting to 24bpp\n"); 88 - legacyfb_depth = 24; 89 - } 90 - 91 - return 0; 92 - done: 93 - drm_kms_helper_poll_fini(dev); 94 - 95 - drm_mode_config_cleanup(dev); 96 - drm_irq_uninstall(dev); 97 - dev->dev_private = NULL; 98 - 99 - return ret; 100 - } 101 - 102 - static void fsl_dcu_unload(struct drm_device *dev) 103 - { 104 - drm_atomic_helper_shutdown(dev); 105 - drm_kms_helper_poll_fini(dev); 106 - 107 - drm_mode_config_cleanup(dev); 108 - drm_irq_uninstall(dev); 109 - 110 - dev->dev_private = NULL; 111 60 } 112 61 113 62 static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg) ··· 79 132 return IRQ_HANDLED; 80 133 } 81 134 135 + static int fsl_dcu_irq_install(struct drm_device *dev, unsigned int irq) 136 + { 137 + if (irq == IRQ_NOTCONNECTED) 138 + return -ENOTCONN; 139 + 140 + fsl_dcu_irq_reset(dev); 141 + 142 + return request_irq(irq, fsl_dcu_drm_irq, 0, dev->driver->name, dev); 143 + } 144 + 145 + static void fsl_dcu_irq_uninstall(struct drm_device *dev) 146 + { 147 + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 148 + 149 + fsl_dcu_irq_reset(dev); 150 + free_irq(fsl_dev->irq, dev); 151 + } 152 + 153 + static int fsl_dcu_load(struct drm_device *dev, unsigned long flags) 154 + { 155 + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 156 + int ret; 157 + 158 + ret = fsl_dcu_drm_modeset_init(fsl_dev); 159 + if (ret < 0) { 160 + dev_err(dev->dev, "failed to initialize mode setting\n"); 161 + return ret; 162 + } 163 + 164 + ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 165 + if (ret < 0) { 166 + dev_err(dev->dev, "failed to initialize vblank\n"); 167 + goto done_vblank; 168 + } 169 + 170 + ret = fsl_dcu_irq_install(dev, fsl_dev->irq); 171 + if (ret < 0) { 172 + dev_err(dev->dev, "failed to install IRQ handler\n"); 173 + goto done_irq; 174 + } 175 + 176 + if (legacyfb_depth != 16 && legacyfb_depth != 24 && 177 + legacyfb_depth != 32) { 178 + dev_warn(dev->dev, 179 + "Invalid legacyfb_depth. 
Defaulting to 24bpp\n"); 180 + legacyfb_depth = 24; 181 + } 182 + 183 + return 0; 184 + done_irq: 185 + drm_kms_helper_poll_fini(dev); 186 + 187 + drm_mode_config_cleanup(dev); 188 + done_vblank: 189 + dev->dev_private = NULL; 190 + 191 + return ret; 192 + } 193 + 194 + static void fsl_dcu_unload(struct drm_device *dev) 195 + { 196 + drm_atomic_helper_shutdown(dev); 197 + drm_kms_helper_poll_fini(dev); 198 + 199 + drm_mode_config_cleanup(dev); 200 + fsl_dcu_irq_uninstall(dev); 201 + 202 + dev->dev_private = NULL; 203 + } 204 + 82 205 DEFINE_DRM_GEM_CMA_FOPS(fsl_dcu_drm_fops); 83 206 84 207 static const struct drm_driver fsl_dcu_drm_driver = { 85 208 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 86 209 .load = fsl_dcu_load, 87 210 .unload = fsl_dcu_unload, 88 - .irq_handler = fsl_dcu_drm_irq, 89 - .irq_preinstall = fsl_dcu_irq_uninstall, 90 - .irq_uninstall = fsl_dcu_irq_uninstall, 91 211 DRM_GEM_CMA_DRIVER_OPS, 92 212 .fops = &fsl_dcu_drm_fops, 93 213 .name = "fsl-dcu-drm",
+1
drivers/gpu/drm/gma500/power.c
··· 32 32 #include "psb_drv.h" 33 33 #include "psb_reg.h" 34 34 #include "psb_intel_reg.h" 35 + #include "psb_irq.h" 35 36 #include <linux/mutex.h> 36 37 #include <linux/pm_runtime.h> 37 38
+2 -6
drivers/gpu/drm/gma500/psb_drv.c
··· 23 23 #include <drm/drm_fb_helper.h> 24 24 #include <drm/drm_file.h> 25 25 #include <drm/drm_ioctl.h> 26 - #include <drm/drm_irq.h> 27 26 #include <drm/drm_pciids.h> 28 27 #include <drm/drm_vblank.h> 29 28 ··· 32 33 #include "power.h" 33 34 #include "psb_drv.h" 34 35 #include "psb_intel_reg.h" 36 + #include "psb_irq.h" 35 37 #include "psb_reg.h" 36 38 37 39 static const struct drm_driver driver; ··· 380 380 PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R); 381 381 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 382 382 383 - drm_irq_install(dev, pdev->irq); 383 + psb_irq_install(dev, pdev->irq); 384 384 385 385 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 386 386 ··· 515 515 .lastclose = drm_fb_helper_lastclose, 516 516 517 517 .num_ioctls = ARRAY_SIZE(psb_ioctls), 518 - .irq_preinstall = psb_irq_preinstall, 519 - .irq_postinstall = psb_irq_postinstall, 520 - .irq_uninstall = psb_irq_uninstall, 521 - .irq_handler = psb_irq_handler, 522 518 523 519 .dumb_create = psb_gem_dumb_create, 524 520 .ioctls = psb_ioctls,
-5
drivers/gpu/drm/gma500/psb_drv.h
··· 624 624 } 625 625 626 626 /* psb_irq.c */ 627 - extern irqreturn_t psb_irq_handler(int irq, void *arg); 628 - extern void psb_irq_preinstall(struct drm_device *dev); 629 - extern int psb_irq_postinstall(struct drm_device *dev); 630 - extern void psb_irq_uninstall(struct drm_device *dev); 631 - 632 627 extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands); 633 628 extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence); 634 629 extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+24 -2
drivers/gpu/drm/gma500/psb_irq.c
··· 8 8 * 9 9 **************************************************************************/ 10 10 11 + #include <drm/drm_drv.h> 11 12 #include <drm/drm_vblank.h> 12 13 13 14 #include "power.h" ··· 223 222 PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2); 224 223 } 225 224 226 - irqreturn_t psb_irq_handler(int irq, void *arg) 225 + static irqreturn_t psb_irq_handler(int irq, void *arg) 227 226 { 228 227 struct drm_device *dev = arg; 229 228 struct drm_psb_private *dev_priv = dev->dev_private; ··· 305 304 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 306 305 } 307 306 308 - int psb_irq_postinstall(struct drm_device *dev) 307 + void psb_irq_postinstall(struct drm_device *dev) 309 308 { 310 309 struct drm_psb_private *dev_priv = dev->dev_private; 311 310 unsigned long irqflags; ··· 333 332 dev_priv->ops->hotplug_enable(dev, true); 334 333 335 334 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 335 + } 336 + 337 + int psb_irq_install(struct drm_device *dev, unsigned int irq) 338 + { 339 + int ret; 340 + 341 + if (irq == IRQ_NOTCONNECTED) 342 + return -ENOTCONN; 343 + 344 + psb_irq_preinstall(dev); 345 + 346 + /* PCI devices require shared interrupts. */ 347 + ret = request_irq(irq, psb_irq_handler, IRQF_SHARED, dev->driver->name, dev); 348 + if (ret) 349 + return ret; 350 + 351 + psb_irq_postinstall(dev); 352 + 336 353 return 0; 337 354 } 338 355 339 356 void psb_irq_uninstall(struct drm_device *dev) 340 357 { 341 358 struct drm_psb_private *dev_priv = dev->dev_private; 359 + struct pci_dev *pdev = to_pci_dev(dev->dev); 342 360 unsigned long irqflags; 343 361 unsigned int i; 344 362 ··· 386 366 /* This register is safe even if display island is off */ 387 367 PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R); 388 368 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 369 + 370 + free_irq(pdev->irq, dev); 389 371 } 390 372 391 373 /*
+2 -2
drivers/gpu/drm/gma500/psb_irq.h
··· 19 19 void sysirq_uninit(struct drm_device *dev); 20 20 21 21 void psb_irq_preinstall(struct drm_device *dev); 22 - int psb_irq_postinstall(struct drm_device *dev); 22 + void psb_irq_postinstall(struct drm_device *dev); 23 + int psb_irq_install(struct drm_device *dev, unsigned int irq); 23 24 void psb_irq_uninstall(struct drm_device *dev); 24 - irqreturn_t psb_irq_handler(int irq, void *arg); 25 25 26 26 int psb_enable_vblank(struct drm_crtc *crtc); 27 27 void psb_disable_vblank(struct drm_crtc *crtc);
+3 -2
drivers/gpu/drm/gud/gud_pipe.c
··· 153 153 struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach; 154 154 u8 compression = gdrm->compression; 155 155 struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; 156 + struct dma_buf_map map_data[DRM_FORMAT_MAX_PLANES]; 156 157 void *vaddr, *buf; 157 158 size_t pitch, len; 158 159 int ret = 0; ··· 163 162 if (len > gdrm->bulk_len) 164 163 return -E2BIG; 165 164 166 - ret = drm_gem_fb_vmap(fb, map); 165 + ret = drm_gem_fb_vmap(fb, map, map_data); 167 166 if (ret) 168 167 return ret; 169 168 170 - vaddr = map[0].vaddr + fb->offsets[0]; 169 + vaddr = map_data[0].vaddr; 171 170 172 171 ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); 173 172 if (ret)
+2 -2
drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
··· 105 105 crtc_state->mode.hdisplay, 106 106 crtc_state->mode.vdisplay, 107 107 plane_state->fb->pitches[0]); 108 - hyperv_blit_to_vram_fullscreen(plane_state->fb, &shadow_plane_state->map[0]); 108 + hyperv_blit_to_vram_fullscreen(plane_state->fb, &shadow_plane_state->data[0]); 109 109 } 110 110 111 111 static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe, ··· 133 133 struct drm_rect rect; 134 134 135 135 if (drm_atomic_helper_damage_merged(old_state, state, &rect)) { 136 - hyperv_blit_to_vram_rect(state->fb, &shadow_plane_state->map[0], &rect); 136 + hyperv_blit_to_vram_rect(state->fb, &shadow_plane_state->data[0], &rect); 137 137 hyperv_update_dirt(hv->hdev, &rect); 138 138 } 139 139 }
+1 -2
drivers/gpu/drm/i810/i810_dma.c
··· 38 38 #include <drm/drm_drv.h> 39 39 #include <drm/drm_file.h> 40 40 #include <drm/drm_ioctl.h> 41 - #include <drm/drm_irq.h> 42 41 #include <drm/drm_print.h> 43 42 #include <drm/i810_drm.h> 44 43 ··· 208 209 * is freed, it's too late. 209 210 */ 210 211 if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled) 211 - drm_irq_uninstall(dev); 212 + drm_legacy_irq_uninstall(dev); 212 213 213 214 if (dev->dev_private) { 214 215 int i;
+4 -27
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
··· 701 701 } 702 702 } 703 703 704 - static void ingenic_drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state) 705 - { 706 - /* 707 - * Just your regular drm_atomic_helper_commit_tail(), but only calls 708 - * drm_atomic_helper_wait_for_vblanks() if priv->no_vblank. 709 - */ 710 - struct drm_device *dev = old_state->dev; 711 - struct ingenic_drm *priv = drm_device_get_priv(dev); 712 - 713 - drm_atomic_helper_commit_modeset_disables(dev, old_state); 714 - 715 - drm_atomic_helper_commit_planes(dev, old_state, 0); 716 - 717 - drm_atomic_helper_commit_modeset_enables(dev, old_state); 718 - 719 - drm_atomic_helper_commit_hw_done(old_state); 720 - 721 - if (!priv->no_vblank) 722 - drm_atomic_helper_wait_for_vblanks(dev, old_state); 723 - 724 - drm_atomic_helper_cleanup_planes(dev, old_state); 725 - } 726 - 727 704 static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg) 728 705 { 729 706 struct ingenic_drm *priv = drm_device_get_priv(arg); ··· 720 743 static int ingenic_drm_enable_vblank(struct drm_crtc *crtc) 721 744 { 722 745 struct ingenic_drm *priv = drm_crtc_get_priv(crtc); 746 + 747 + if (priv->no_vblank) 748 + return -EINVAL; 723 749 724 750 regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, 725 751 JZ_LCD_CTRL_EOF_IRQ, JZ_LCD_CTRL_EOF_IRQ); ··· 831 851 }; 832 852 833 853 static struct drm_mode_config_helper_funcs ingenic_drm_mode_config_helpers = { 834 - .atomic_commit_tail = ingenic_drm_atomic_helper_commit_tail, 854 + .atomic_commit_tail = drm_atomic_helper_commit_tail, 835 855 }; 836 856 837 857 static void ingenic_drm_unbind_all(void *d) ··· 963 983 + offsetof(struct ingenic_dma_hwdescs, palette); 964 984 priv->dma_hwdescs->hwdesc_pal.cmd = JZ_LCD_CMD_ENABLE_PAL 965 985 | (sizeof(priv->dma_hwdescs->palette) / 4); 966 - 967 - if (soc_info->has_osd) 968 - priv->ipu_plane = drm_plane_from_index(drm, 0); 969 986 970 987 primary = priv->soc_info->has_osd ? &priv->f1 : &priv->f0; 971 988
+20 -6
drivers/gpu/drm/kmb/kmb_drv.c
··· 17 17 #include <drm/drm_drv.h> 18 18 #include <drm/drm_gem_cma_helper.h> 19 19 #include <drm/drm_gem_framebuffer_helper.h> 20 - #include <drm/drm_irq.h> 21 20 #include <drm/drm_probe_helper.h> 22 21 #include <drm/drm_vblank.h> 23 22 ··· 398 399 kmb_write_lcd(to_kmb(drm), LCD_INT_ENABLE, 0); 399 400 } 400 401 402 + static int kmb_irq_install(struct drm_device *drm, unsigned int irq) 403 + { 404 + if (irq == IRQ_NOTCONNECTED) 405 + return -ENOTCONN; 406 + 407 + kmb_irq_reset(drm); 408 + 409 + return request_irq(irq, kmb_isr, 0, drm->driver->name, drm); 410 + } 411 + 412 + static void kmb_irq_uninstall(struct drm_device *drm) 413 + { 414 + struct kmb_drm_private *kmb = to_kmb(drm); 415 + 416 + kmb_irq_reset(drm); 417 + free_irq(kmb->irq_lcd, drm); 418 + } 419 + 401 420 DEFINE_DRM_GEM_CMA_FOPS(fops); 402 421 403 422 static const struct drm_driver kmb_driver = { 404 423 .driver_features = DRIVER_GEM | 405 424 DRIVER_MODESET | DRIVER_ATOMIC, 406 - .irq_handler = kmb_isr, 407 - .irq_preinstall = kmb_irq_reset, 408 - .irq_uninstall = kmb_irq_reset, 409 425 /* GEM Operations */ 410 426 .fops = &fops, 411 427 DRM_GEM_CMA_DRIVER_OPS_VMAP, ··· 442 428 of_node_put(kmb->crtc.port); 443 429 kmb->crtc.port = NULL; 444 430 pm_runtime_get_sync(drm->dev); 445 - drm_irq_uninstall(drm); 431 + kmb_irq_uninstall(drm); 446 432 pm_runtime_put_sync(drm->dev); 447 433 pm_runtime_disable(drm->dev); 448 434 ··· 532 518 if (ret) 533 519 goto err_free; 534 520 535 - ret = drm_irq_install(&kmb->drm, kmb->irq_lcd); 521 + ret = kmb_irq_install(&kmb->drm, kmb->irq_lcd); 536 522 if (ret < 0) { 537 523 drm_err(&kmb->drm, "failed to install IRQ handler\n"); 538 524 goto err_irq;
+1 -1
drivers/gpu/drm/mga/mga_dma.c
··· 949 949 * is freed, it's too late. 950 950 */ 951 951 if (dev->irq_enabled) 952 - drm_irq_uninstall(dev); 952 + drm_legacy_irq_uninstall(dev); 953 953 954 954 if (dev->dev_private) { 955 955 drm_mga_private_t *dev_priv = dev->dev_private;
-1
drivers/gpu/drm/mga/mga_drv.h
··· 38 38 #include <drm/drm_device.h> 39 39 #include <drm/drm_file.h> 40 40 #include <drm/drm_ioctl.h> 41 - #include <drm/drm_irq.h> 42 41 #include <drm/drm_legacy.h> 43 42 #include <drm/drm_print.h> 44 43 #include <drm/drm_sarea.h>
+1 -1
drivers/gpu/drm/mgag200/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 - mgag200-y := mgag200_drv.o mgag200_i2c.o mgag200_mm.o mgag200_mode.o 2 + mgag200-y := mgag200_drv.o mgag200_i2c.o mgag200_mm.o mgag200_mode.o mgag200_pll.o 3 3 4 4 obj-$(CONFIG_DRM_MGAG200) += mgag200.o
+62 -1
drivers/gpu/drm/mgag200/mgag200_drv.h
··· 43 43 #define ATTR_INDEX 0x1fc0 44 44 #define ATTR_DATA 0x1fc1 45 45 46 + #define WREG_MISC(v) \ 47 + WREG8(MGA_MISC_OUT, v) 48 + 49 + #define RREG_MISC(v) \ 50 + ((v) = RREG8(MGA_MISC_IN)) 51 + 52 + #define WREG_MISC_MASKED(v, mask) \ 53 + do { \ 54 + u8 misc_; \ 55 + u8 mask_ = (mask); \ 56 + RREG_MISC(misc_); \ 57 + misc_ &= ~mask_; \ 58 + misc_ |= ((v) & mask_); \ 59 + WREG_MISC(misc_); \ 60 + } while (0) 61 + 46 62 #define WREG_ATTR(reg, v) \ 47 63 do { \ 48 64 RREG8(0x1fda); \ ··· 125 109 126 110 #define MGAG200_MAX_FB_HEIGHT 4096 127 111 #define MGAG200_MAX_FB_WIDTH 4096 112 + 113 + struct mga_device; 114 + struct mgag200_pll; 115 + 116 + /* 117 + * Stores parameters for programming the PLLs 118 + * 119 + * Fref: reference frequency (A: 25.175 Mhz, B: 28.361, C: XX Mhz) 120 + * Fo: output frequency 121 + * Fvco = Fref * (N / M) 122 + * Fo = Fvco / P 123 + * 124 + * S = [0..3] 125 + */ 126 + struct mgag200_pll_values { 127 + unsigned int m; 128 + unsigned int n; 129 + unsigned int p; 130 + unsigned int s; 131 + }; 132 + 133 + struct mgag200_pll_funcs { 134 + int (*compute)(struct mgag200_pll *pll, long clock, struct mgag200_pll_values *pllc); 135 + void (*update)(struct mgag200_pll *pll, const struct mgag200_pll_values *pllc); 136 + }; 137 + 138 + struct mgag200_pll { 139 + struct mga_device *mdev; 140 + 141 + const struct mgag200_pll_funcs *funcs; 142 + }; 143 + 144 + struct mgag200_crtc_state { 145 + struct drm_crtc_state base; 146 + 147 + struct mgag200_pll_values pixpllc; 148 + }; 149 + 150 + static inline struct mgag200_crtc_state *to_mgag200_crtc_state(struct drm_crtc_state *base) 151 + { 152 + return container_of(base, struct mgag200_crtc_state, base); 153 + } 128 154 129 155 #define to_mga_connector(x) container_of(x, struct mga_connector, base) 130 156 ··· 238 180 } g200se; 239 181 } model; 240 182 241 - 242 183 struct mga_connector connector; 184 + struct mgag200_pll pixpll; 243 185 struct drm_simple_display_pipe display_pipe; 244 186 }; 245 187 
··· 257 199 258 200 /* mgag200_mm.c */ 259 201 int mgag200_mm_init(struct mga_device *mdev); 202 + 203 + /* mgag200_pll.c */ 204 + int mgag200_pixpll_init(struct mgag200_pll *pixpll, struct mga_device *mdev); 260 205 261 206 #endif /* __MGAG200_DRV_H__ */
+72 -709
drivers/gpu/drm/mgag200/mgag200_mode.c
··· 110 110 } while ((status & 0x01) && time_before(jiffies, timeout)); 111 111 } 112 112 113 - /* 114 - * PLL setup 115 - */ 116 - 117 - static int mgag200_g200_set_plls(struct mga_device *mdev, long clock) 118 - { 119 - struct drm_device *dev = &mdev->base; 120 - const int post_div_max = 7; 121 - const int in_div_min = 1; 122 - const int in_div_max = 6; 123 - const int feed_div_min = 7; 124 - const int feed_div_max = 127; 125 - u8 testm, testn; 126 - u8 n = 0, m = 0, p, s; 127 - long f_vco; 128 - long computed; 129 - long delta, tmp_delta; 130 - long ref_clk = mdev->model.g200.ref_clk; 131 - long p_clk_min = mdev->model.g200.pclk_min; 132 - long p_clk_max = mdev->model.g200.pclk_max; 133 - 134 - if (clock > p_clk_max) { 135 - drm_err(dev, "Pixel Clock %ld too high\n", clock); 136 - return 1; 137 - } 138 - 139 - if (clock < p_clk_min >> 3) 140 - clock = p_clk_min >> 3; 141 - 142 - f_vco = clock; 143 - for (p = 0; 144 - p <= post_div_max && f_vco < p_clk_min; 145 - p = (p << 1) + 1, f_vco <<= 1) 146 - ; 147 - 148 - delta = clock; 149 - 150 - for (testm = in_div_min; testm <= in_div_max; testm++) { 151 - for (testn = feed_div_min; testn <= feed_div_max; testn++) { 152 - computed = ref_clk * (testn + 1) / (testm + 1); 153 - if (computed < f_vco) 154 - tmp_delta = f_vco - computed; 155 - else 156 - tmp_delta = computed - f_vco; 157 - if (tmp_delta < delta) { 158 - delta = tmp_delta; 159 - m = testm; 160 - n = testn; 161 - } 162 - } 163 - } 164 - f_vco = ref_clk * (n + 1) / (m + 1); 165 - if (f_vco < 100000) 166 - s = 0; 167 - else if (f_vco < 140000) 168 - s = 1; 169 - else if (f_vco < 180000) 170 - s = 2; 171 - else 172 - s = 3; 173 - 174 - drm_dbg_kms(dev, "clock: %ld vco: %ld m: %d n: %d p: %d s: %d\n", 175 - clock, f_vco, m, n, p, s); 176 - 177 - WREG_DAC(MGA1064_PIX_PLLC_M, m); 178 - WREG_DAC(MGA1064_PIX_PLLC_N, n); 179 - WREG_DAC(MGA1064_PIX_PLLC_P, (p | (s << 3))); 180 - 181 - return 0; 182 - } 183 - 184 - #define P_ARRAY_SIZE 9 185 - 186 - static int 
mga_g200se_set_plls(struct mga_device *mdev, long clock) 187 - { 188 - u32 unique_rev_id = mdev->model.g200se.unique_rev_id; 189 - unsigned int vcomax, vcomin, pllreffreq; 190 - unsigned int delta, tmpdelta, permitteddelta; 191 - unsigned int testp, testm, testn; 192 - unsigned int p, m, n; 193 - unsigned int computed; 194 - unsigned int pvalues_e4[P_ARRAY_SIZE] = {16, 14, 12, 10, 8, 6, 4, 2, 1}; 195 - unsigned int fvv; 196 - unsigned int i; 197 - 198 - if (unique_rev_id <= 0x03) { 199 - 200 - m = n = p = 0; 201 - vcomax = 320000; 202 - vcomin = 160000; 203 - pllreffreq = 25000; 204 - 205 - delta = 0xffffffff; 206 - permitteddelta = clock * 5 / 1000; 207 - 208 - for (testp = 8; testp > 0; testp /= 2) { 209 - if (clock * testp > vcomax) 210 - continue; 211 - if (clock * testp < vcomin) 212 - continue; 213 - 214 - for (testn = 17; testn < 256; testn++) { 215 - for (testm = 1; testm < 32; testm++) { 216 - computed = (pllreffreq * testn) / 217 - (testm * testp); 218 - if (computed > clock) 219 - tmpdelta = computed - clock; 220 - else 221 - tmpdelta = clock - computed; 222 - if (tmpdelta < delta) { 223 - delta = tmpdelta; 224 - m = testm - 1; 225 - n = testn - 1; 226 - p = testp - 1; 227 - } 228 - } 229 - } 230 - } 231 - } else { 232 - 233 - 234 - m = n = p = 0; 235 - vcomax = 1600000; 236 - vcomin = 800000; 237 - pllreffreq = 25000; 238 - 239 - if (clock < 25000) 240 - clock = 25000; 241 - 242 - clock = clock * 2; 243 - 244 - delta = 0xFFFFFFFF; 245 - /* Permited delta is 0.5% as VESA Specification */ 246 - permitteddelta = clock * 5 / 1000; 247 - 248 - for (i = 0 ; i < P_ARRAY_SIZE ; i++) { 249 - testp = pvalues_e4[i]; 250 - 251 - if ((clock * testp) > vcomax) 252 - continue; 253 - if ((clock * testp) < vcomin) 254 - continue; 255 - 256 - for (testn = 50; testn <= 256; testn++) { 257 - for (testm = 1; testm <= 32; testm++) { 258 - computed = (pllreffreq * testn) / 259 - (testm * testp); 260 - if (computed > clock) 261 - tmpdelta = computed - clock; 262 - else 263 - 
tmpdelta = clock - computed; 264 - 265 - if (tmpdelta < delta) { 266 - delta = tmpdelta; 267 - m = testm - 1; 268 - n = testn - 1; 269 - p = testp - 1; 270 - } 271 - } 272 - } 273 - } 274 - 275 - fvv = pllreffreq * (n + 1) / (m + 1); 276 - fvv = (fvv - 800000) / 50000; 277 - 278 - if (fvv > 15) 279 - fvv = 15; 280 - 281 - p |= (fvv << 4); 282 - m |= 0x80; 283 - 284 - clock = clock / 2; 285 - } 286 - 287 - if (delta > permitteddelta) { 288 - pr_warn("PLL delta too large\n"); 289 - return 1; 290 - } 291 - 292 - WREG_DAC(MGA1064_PIX_PLLC_M, m); 293 - WREG_DAC(MGA1064_PIX_PLLC_N, n); 294 - WREG_DAC(MGA1064_PIX_PLLC_P, p); 295 - 296 - if (unique_rev_id >= 0x04) { 297 - WREG_DAC(0x1a, 0x09); 298 - msleep(20); 299 - WREG_DAC(0x1a, 0x01); 300 - 301 - } 302 - 303 - return 0; 304 - } 305 - 306 - static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) 307 - { 308 - unsigned int vcomax, vcomin, pllreffreq; 309 - unsigned int delta, tmpdelta; 310 - unsigned int testp, testm, testn, testp2; 311 - unsigned int p, m, n; 312 - unsigned int computed; 313 - int i, j, tmpcount, vcount; 314 - bool pll_locked = false; 315 - u8 tmp; 316 - 317 - m = n = p = 0; 318 - 319 - delta = 0xffffffff; 320 - 321 - if (mdev->type == G200_EW3) { 322 - 323 - vcomax = 800000; 324 - vcomin = 400000; 325 - pllreffreq = 25000; 326 - 327 - for (testp = 1; testp < 8; testp++) { 328 - for (testp2 = 1; testp2 < 8; testp2++) { 329 - if (testp < testp2) 330 - continue; 331 - if ((clock * testp * testp2) > vcomax) 332 - continue; 333 - if ((clock * testp * testp2) < vcomin) 334 - continue; 335 - for (testm = 1; testm < 26; testm++) { 336 - for (testn = 32; testn < 2048 ; testn++) { 337 - computed = (pllreffreq * testn) / 338 - (testm * testp * testp2); 339 - if (computed > clock) 340 - tmpdelta = computed - clock; 341 - else 342 - tmpdelta = clock - computed; 343 - if (tmpdelta < delta) { 344 - delta = tmpdelta; 345 - m = ((testn & 0x100) >> 1) | 346 - (testm); 347 - n = (testn & 0xFF); 348 - p = 
((testn & 0x600) >> 3) | 349 - (testp2 << 3) | 350 - (testp); 351 - } 352 - } 353 - } 354 - } 355 - } 356 - } else { 357 - 358 - vcomax = 550000; 359 - vcomin = 150000; 360 - pllreffreq = 48000; 361 - 362 - for (testp = 1; testp < 9; testp++) { 363 - if (clock * testp > vcomax) 364 - continue; 365 - if (clock * testp < vcomin) 366 - continue; 367 - 368 - for (testm = 1; testm < 17; testm++) { 369 - for (testn = 1; testn < 151; testn++) { 370 - computed = (pllreffreq * testn) / 371 - (testm * testp); 372 - if (computed > clock) 373 - tmpdelta = computed - clock; 374 - else 375 - tmpdelta = clock - computed; 376 - if (tmpdelta < delta) { 377 - delta = tmpdelta; 378 - n = testn - 1; 379 - m = (testm - 1) | 380 - ((n >> 1) & 0x80); 381 - p = testp - 1; 382 - } 383 - } 384 - } 385 - } 386 - } 387 - 388 - for (i = 0; i <= 32 && pll_locked == false; i++) { 389 - if (i > 0) { 390 - WREG8(MGAREG_CRTC_INDEX, 0x1e); 391 - tmp = RREG8(MGAREG_CRTC_DATA); 392 - if (tmp < 0xff) 393 - WREG8(MGAREG_CRTC_DATA, tmp+1); 394 - } 395 - 396 - /* set pixclkdis to 1 */ 397 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 398 - tmp = RREG8(DAC_DATA); 399 - tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 400 - WREG8(DAC_DATA, tmp); 401 - 402 - WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 403 - tmp = RREG8(DAC_DATA); 404 - tmp |= MGA1064_REMHEADCTL_CLKDIS; 405 - WREG8(DAC_DATA, tmp); 406 - 407 - /* select PLL Set C */ 408 - tmp = RREG8(MGAREG_MEM_MISC_READ); 409 - tmp |= 0x3 << 2; 410 - WREG8(MGAREG_MEM_MISC_WRITE, tmp); 411 - 412 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 413 - tmp = RREG8(DAC_DATA); 414 - tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80; 415 - WREG8(DAC_DATA, tmp); 416 - 417 - udelay(500); 418 - 419 - /* reset the PLL */ 420 - WREG8(DAC_INDEX, MGA1064_VREF_CTL); 421 - tmp = RREG8(DAC_DATA); 422 - tmp &= ~0x04; 423 - WREG8(DAC_DATA, tmp); 424 - 425 - udelay(50); 426 - 427 - /* program pixel pll register */ 428 - WREG_DAC(MGA1064_WB_PIX_PLLC_N, n); 429 - WREG_DAC(MGA1064_WB_PIX_PLLC_M, m); 430 - 
WREG_DAC(MGA1064_WB_PIX_PLLC_P, p); 431 - 432 - udelay(50); 433 - 434 - /* turn pll on */ 435 - WREG8(DAC_INDEX, MGA1064_VREF_CTL); 436 - tmp = RREG8(DAC_DATA); 437 - tmp |= 0x04; 438 - WREG_DAC(MGA1064_VREF_CTL, tmp); 439 - 440 - udelay(500); 441 - 442 - /* select the pixel pll */ 443 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 444 - tmp = RREG8(DAC_DATA); 445 - tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 446 - tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 447 - WREG8(DAC_DATA, tmp); 448 - 449 - WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 450 - tmp = RREG8(DAC_DATA); 451 - tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK; 452 - tmp |= MGA1064_REMHEADCTL_CLKSL_PLL; 453 - WREG8(DAC_DATA, tmp); 454 - 455 - /* reset dotclock rate bit */ 456 - WREG8(MGAREG_SEQ_INDEX, 1); 457 - tmp = RREG8(MGAREG_SEQ_DATA); 458 - tmp &= ~0x8; 459 - WREG8(MGAREG_SEQ_DATA, tmp); 460 - 461 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 462 - tmp = RREG8(DAC_DATA); 463 - tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 464 - WREG8(DAC_DATA, tmp); 465 - 466 - vcount = RREG8(MGAREG_VCOUNT); 467 - 468 - for (j = 0; j < 30 && pll_locked == false; j++) { 469 - tmpcount = RREG8(MGAREG_VCOUNT); 470 - if (tmpcount < vcount) 471 - vcount = 0; 472 - if ((tmpcount - vcount) > 2) 473 - pll_locked = true; 474 - else 475 - udelay(5); 476 - } 477 - } 478 - WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 479 - tmp = RREG8(DAC_DATA); 480 - tmp &= ~MGA1064_REMHEADCTL_CLKDIS; 481 - WREG_DAC(MGA1064_REMHEADCTL, tmp); 482 - return 0; 483 - } 484 - 485 - static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) 486 - { 487 - unsigned int vcomax, vcomin, pllreffreq; 488 - unsigned int delta, tmpdelta; 489 - unsigned int testp, testm, testn; 490 - unsigned int p, m, n; 491 - unsigned int computed; 492 - u8 tmp; 493 - 494 - m = n = p = 0; 495 - vcomax = 550000; 496 - vcomin = 150000; 497 - pllreffreq = 50000; 498 - 499 - delta = 0xffffffff; 500 - 501 - for (testp = 16; testp > 0; testp--) { 502 - if (clock * testp > vcomax) 503 - continue; 504 - if (clock * testp < 
vcomin) 505 - continue; 506 - 507 - for (testn = 1; testn < 257; testn++) { 508 - for (testm = 1; testm < 17; testm++) { 509 - computed = (pllreffreq * testn) / 510 - (testm * testp); 511 - if (computed > clock) 512 - tmpdelta = computed - clock; 513 - else 514 - tmpdelta = clock - computed; 515 - if (tmpdelta < delta) { 516 - delta = tmpdelta; 517 - n = testn - 1; 518 - m = testm - 1; 519 - p = testp - 1; 520 - } 521 - } 522 - } 523 - } 524 - 525 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 526 - tmp = RREG8(DAC_DATA); 527 - tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 528 - WREG8(DAC_DATA, tmp); 529 - 530 - tmp = RREG8(MGAREG_MEM_MISC_READ); 531 - tmp |= 0x3 << 2; 532 - WREG8(MGAREG_MEM_MISC_WRITE, tmp); 533 - 534 - WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); 535 - tmp = RREG8(DAC_DATA); 536 - WREG8(DAC_DATA, tmp & ~0x40); 537 - 538 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 539 - tmp = RREG8(DAC_DATA); 540 - tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 541 - WREG8(DAC_DATA, tmp); 542 - 543 - WREG_DAC(MGA1064_EV_PIX_PLLC_M, m); 544 - WREG_DAC(MGA1064_EV_PIX_PLLC_N, n); 545 - WREG_DAC(MGA1064_EV_PIX_PLLC_P, p); 546 - 547 - udelay(50); 548 - 549 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 550 - tmp = RREG8(DAC_DATA); 551 - tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 552 - WREG8(DAC_DATA, tmp); 553 - 554 - udelay(500); 555 - 556 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 557 - tmp = RREG8(DAC_DATA); 558 - tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 559 - tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 560 - WREG8(DAC_DATA, tmp); 561 - 562 - WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); 563 - tmp = RREG8(DAC_DATA); 564 - WREG8(DAC_DATA, tmp | 0x40); 565 - 566 - tmp = RREG8(MGAREG_MEM_MISC_READ); 567 - tmp |= (0x3 << 2); 568 - WREG8(MGAREG_MEM_MISC_WRITE, tmp); 569 - 570 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 571 - tmp = RREG8(DAC_DATA); 572 - tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 573 - WREG8(DAC_DATA, tmp); 574 - 575 - return 0; 576 - } 577 - 578 - static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) 579 
- { 580 - unsigned int vcomax, vcomin, pllreffreq; 581 - unsigned int delta, tmpdelta; 582 - unsigned int testp, testm, testn; 583 - unsigned int p, m, n; 584 - unsigned int computed; 585 - int i, j, tmpcount, vcount; 586 - u8 tmp; 587 - bool pll_locked = false; 588 - 589 - m = n = p = 0; 590 - 591 - if (mdev->type == G200_EH3) { 592 - vcomax = 3000000; 593 - vcomin = 1500000; 594 - pllreffreq = 25000; 595 - 596 - delta = 0xffffffff; 597 - 598 - testp = 0; 599 - 600 - for (testm = 150; testm >= 6; testm--) { 601 - if (clock * testm > vcomax) 602 - continue; 603 - if (clock * testm < vcomin) 604 - continue; 605 - for (testn = 120; testn >= 60; testn--) { 606 - computed = (pllreffreq * testn) / testm; 607 - if (computed > clock) 608 - tmpdelta = computed - clock; 609 - else 610 - tmpdelta = clock - computed; 611 - if (tmpdelta < delta) { 612 - delta = tmpdelta; 613 - n = testn; 614 - m = testm; 615 - p = testp; 616 - } 617 - if (delta == 0) 618 - break; 619 - } 620 - if (delta == 0) 621 - break; 622 - } 623 - } else { 624 - 625 - vcomax = 800000; 626 - vcomin = 400000; 627 - pllreffreq = 33333; 628 - 629 - delta = 0xffffffff; 630 - 631 - for (testp = 16; testp > 0; testp >>= 1) { 632 - if (clock * testp > vcomax) 633 - continue; 634 - if (clock * testp < vcomin) 635 - continue; 636 - 637 - for (testm = 1; testm < 33; testm++) { 638 - for (testn = 17; testn < 257; testn++) { 639 - computed = (pllreffreq * testn) / 640 - (testm * testp); 641 - if (computed > clock) 642 - tmpdelta = computed - clock; 643 - else 644 - tmpdelta = clock - computed; 645 - if (tmpdelta < delta) { 646 - delta = tmpdelta; 647 - n = testn - 1; 648 - m = (testm - 1); 649 - p = testp - 1; 650 - } 651 - if ((clock * testp) >= 600000) 652 - p |= 0x80; 653 - } 654 - } 655 - } 656 - } 657 - for (i = 0; i <= 32 && pll_locked == false; i++) { 658 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 659 - tmp = RREG8(DAC_DATA); 660 - tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 661 - WREG8(DAC_DATA, tmp); 662 - 663 - tmp = 
RREG8(MGAREG_MEM_MISC_READ); 664 - tmp |= 0x3 << 2; 665 - WREG8(MGAREG_MEM_MISC_WRITE, tmp); 666 - 667 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 668 - tmp = RREG8(DAC_DATA); 669 - tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 670 - WREG8(DAC_DATA, tmp); 671 - 672 - udelay(500); 673 - 674 - WREG_DAC(MGA1064_EH_PIX_PLLC_M, m); 675 - WREG_DAC(MGA1064_EH_PIX_PLLC_N, n); 676 - WREG_DAC(MGA1064_EH_PIX_PLLC_P, p); 677 - 678 - udelay(500); 679 - 680 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 681 - tmp = RREG8(DAC_DATA); 682 - tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 683 - tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 684 - WREG8(DAC_DATA, tmp); 685 - 686 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 687 - tmp = RREG8(DAC_DATA); 688 - tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 689 - tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 690 - WREG8(DAC_DATA, tmp); 691 - 692 - vcount = RREG8(MGAREG_VCOUNT); 693 - 694 - for (j = 0; j < 30 && pll_locked == false; j++) { 695 - tmpcount = RREG8(MGAREG_VCOUNT); 696 - if (tmpcount < vcount) 697 - vcount = 0; 698 - if ((tmpcount - vcount) > 2) 699 - pll_locked = true; 700 - else 701 - udelay(5); 702 - } 703 - } 704 - 705 - return 0; 706 - } 707 - 708 - static int mga_g200er_set_plls(struct mga_device *mdev, long clock) 709 - { 710 - static const unsigned int m_div_val[] = { 1, 2, 4, 8 }; 711 - unsigned int vcomax, vcomin, pllreffreq; 712 - unsigned int delta, tmpdelta; 713 - int testr, testn, testm, testo; 714 - unsigned int p, m, n; 715 - unsigned int computed, vco; 716 - int tmp; 717 - 718 - m = n = p = 0; 719 - vcomax = 1488000; 720 - vcomin = 1056000; 721 - pllreffreq = 48000; 722 - 723 - delta = 0xffffffff; 724 - 725 - for (testr = 0; testr < 4; testr++) { 726 - if (delta == 0) 727 - break; 728 - for (testn = 5; testn < 129; testn++) { 729 - if (delta == 0) 730 - break; 731 - for (testm = 3; testm >= 0; testm--) { 732 - if (delta == 0) 733 - break; 734 - for (testo = 5; testo < 33; testo++) { 735 - vco = pllreffreq * (testn + 1) / 736 - (testr + 1); 737 - if (vco < 
vcomin) 738 - continue; 739 - if (vco > vcomax) 740 - continue; 741 - computed = vco / (m_div_val[testm] * (testo + 1)); 742 - if (computed > clock) 743 - tmpdelta = computed - clock; 744 - else 745 - tmpdelta = clock - computed; 746 - if (tmpdelta < delta) { 747 - delta = tmpdelta; 748 - m = testm | (testo << 3); 749 - n = testn; 750 - p = testr | (testr << 3); 751 - } 752 - } 753 - } 754 - } 755 - } 756 - 757 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 758 - tmp = RREG8(DAC_DATA); 759 - tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 760 - WREG8(DAC_DATA, tmp); 761 - 762 - WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 763 - tmp = RREG8(DAC_DATA); 764 - tmp |= MGA1064_REMHEADCTL_CLKDIS; 765 - WREG8(DAC_DATA, tmp); 766 - 767 - tmp = RREG8(MGAREG_MEM_MISC_READ); 768 - tmp |= (0x3<<2) | 0xc0; 769 - WREG8(MGAREG_MEM_MISC_WRITE, tmp); 770 - 771 - WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 772 - tmp = RREG8(DAC_DATA); 773 - tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 774 - tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 775 - WREG8(DAC_DATA, tmp); 776 - 777 - udelay(500); 778 - 779 - WREG_DAC(MGA1064_ER_PIX_PLLC_N, n); 780 - WREG_DAC(MGA1064_ER_PIX_PLLC_M, m); 781 - WREG_DAC(MGA1064_ER_PIX_PLLC_P, p); 782 - 783 - udelay(50); 784 - 785 - return 0; 786 - } 787 - 788 - static int mgag200_crtc_set_plls(struct mga_device *mdev, long clock) 789 - { 790 - u8 misc; 791 - 792 - switch(mdev->type) { 793 - case G200_PCI: 794 - case G200_AGP: 795 - return mgag200_g200_set_plls(mdev, clock); 796 - case G200_SE_A: 797 - case G200_SE_B: 798 - return mga_g200se_set_plls(mdev, clock); 799 - case G200_WB: 800 - case G200_EW3: 801 - return mga_g200wb_set_plls(mdev, clock); 802 - case G200_EV: 803 - return mga_g200ev_set_plls(mdev, clock); 804 - case G200_EH: 805 - case G200_EH3: 806 - return mga_g200eh_set_plls(mdev, clock); 807 - case G200_ER: 808 - return mga_g200er_set_plls(mdev, clock); 809 - } 810 - 811 - misc = RREG8(MGA_MISC_IN); 812 - misc &= ~MGAREG_MISC_CLK_SEL_MASK; 813 - misc |= MGAREG_MISC_CLK_SEL_MGA_MSK; 814 - 
WREG8(MGA_MISC_OUT, misc); 815 - 816 - return 0; 817 - } 818 - 819 113 static void mgag200_g200wb_hold_bmc(struct mga_device *mdev) 820 114 { 821 115 u8 tmp; ··· 864 1570 struct drm_crtc *crtc = &pipe->crtc; 865 1571 struct drm_device *dev = crtc->dev; 866 1572 struct mga_device *mdev = to_mga_device(dev); 1573 + struct mgag200_pll *pixpll = &mdev->pixpll; 867 1574 struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; 1575 + struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state); 868 1576 struct drm_framebuffer *fb = plane_state->fb; 869 1577 struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); 870 1578 struct drm_rect fullscreen = { ··· 881 1585 882 1586 mgag200_set_format_regs(mdev, fb); 883 1587 mgag200_set_mode_regs(mdev, adjusted_mode); 884 - mgag200_crtc_set_plls(mdev, adjusted_mode->clock); 1588 + 1589 + pixpll->funcs->update(pixpll, &mgag200_crtc_state->pixpllc); 885 1590 886 1591 if (mdev->type == G200_ER) 887 1592 mgag200_g200er_reset_tagfifo(mdev); ··· 898 1601 mga_crtc_load_lut(crtc); 899 1602 mgag200_enable_display(mdev); 900 1603 901 - mgag200_handle_damage(mdev, fb, &fullscreen, &shadow_plane_state->map[0]); 1604 + mgag200_handle_damage(mdev, fb, &fullscreen, &shadow_plane_state->data[0]); 902 1605 } 903 1606 904 1607 static void ··· 916 1619 struct drm_crtc_state *crtc_state) 917 1620 { 918 1621 struct drm_plane *plane = plane_state->plane; 1622 + struct drm_device *dev = plane->dev; 1623 + struct mga_device *mdev = to_mga_device(dev); 1624 + struct mgag200_pll *pixpll = &mdev->pixpll; 1625 + struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state); 919 1626 struct drm_framebuffer *new_fb = plane_state->fb; 920 1627 struct drm_framebuffer *fb = NULL; 1628 + int ret; 921 1629 922 1630 if (!new_fb) 923 1631 return 0; ··· 932 1630 933 1631 if (!fb || (fb->format != new_fb->format)) 934 1632 crtc_state->mode_changed = true; /* update PLL settings */ 
1633 + 1634 + if (crtc_state->mode_changed) { 1635 + ret = pixpll->funcs->compute(pixpll, crtc_state->mode.clock, 1636 + &mgag200_crtc_state->pixpllc); 1637 + if (ret) 1638 + return ret; 1639 + } 935 1640 936 1641 return 0; 937 1642 } ··· 959 1650 return; 960 1651 961 1652 if (drm_atomic_helper_damage_merged(old_state, state, &damage)) 962 - mgag200_handle_damage(mdev, fb, &damage, &shadow_plane_state->map[0]); 1653 + mgag200_handle_damage(mdev, fb, &damage, &shadow_plane_state->data[0]); 1654 + } 1655 + 1656 + static struct drm_crtc_state * 1657 + mgag200_simple_display_pipe_duplicate_crtc_state(struct drm_simple_display_pipe *pipe) 1658 + { 1659 + struct drm_crtc *crtc = &pipe->crtc; 1660 + struct drm_crtc_state *crtc_state = crtc->state; 1661 + struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state); 1662 + struct mgag200_crtc_state *new_mgag200_crtc_state; 1663 + 1664 + if (!crtc_state) 1665 + return NULL; 1666 + 1667 + new_mgag200_crtc_state = kzalloc(sizeof(*new_mgag200_crtc_state), GFP_KERNEL); 1668 + if (!new_mgag200_crtc_state) 1669 + return NULL; 1670 + __drm_atomic_helper_crtc_duplicate_state(crtc, &new_mgag200_crtc_state->base); 1671 + 1672 + memcpy(&new_mgag200_crtc_state->pixpllc, &mgag200_crtc_state->pixpllc, 1673 + sizeof(new_mgag200_crtc_state->pixpllc)); 1674 + 1675 + return &new_mgag200_crtc_state->base; 1676 + } 1677 + 1678 + static void mgag200_simple_display_pipe_destroy_crtc_state(struct drm_simple_display_pipe *pipe, 1679 + struct drm_crtc_state *crtc_state) 1680 + { 1681 + struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state); 1682 + 1683 + __drm_atomic_helper_crtc_destroy_state(&mgag200_crtc_state->base); 1684 + kfree(mgag200_crtc_state); 1685 + } 1686 + 1687 + static void mgag200_simple_display_pipe_reset_crtc(struct drm_simple_display_pipe *pipe) 1688 + { 1689 + struct drm_crtc *crtc = &pipe->crtc; 1690 + struct mgag200_crtc_state *mgag200_crtc_state; 1691 + 1692 + if (crtc->state) { 
1693 + mgag200_simple_display_pipe_destroy_crtc_state(pipe, crtc->state); 1694 + crtc->state = NULL; /* must be set to NULL here */ 1695 + } 1696 + 1697 + mgag200_crtc_state = kzalloc(sizeof(*mgag200_crtc_state), GFP_KERNEL); 1698 + if (!mgag200_crtc_state) 1699 + return; 1700 + __drm_atomic_helper_crtc_reset(crtc, &mgag200_crtc_state->base); 963 1701 } 964 1702 965 1703 static const struct drm_simple_display_pipe_funcs ··· 1016 1660 .disable = mgag200_simple_display_pipe_disable, 1017 1661 .check = mgag200_simple_display_pipe_check, 1018 1662 .update = mgag200_simple_display_pipe_update, 1663 + .reset_crtc = mgag200_simple_display_pipe_reset_crtc, 1664 + .duplicate_crtc_state = mgag200_simple_display_pipe_duplicate_crtc_state, 1665 + .destroy_crtc_state = mgag200_simple_display_pipe_destroy_crtc_state, 1019 1666 DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS, 1020 1667 }; 1021 1668 ··· 1084 1725 ret); 1085 1726 return ret; 1086 1727 } 1728 + 1729 + ret = mgag200_pixpll_init(&mdev->pixpll, mdev); 1730 + if (ret) 1731 + return ret; 1087 1732 1088 1733 ret = drm_simple_display_pipe_init(dev, pipe, 1089 1734 &mgag200_simple_display_pipe_funcs,
+992
drivers/gpu/drm/mgag200/mgag200_pll.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <linux/delay.h> 4 + 5 + #include "mgag200_drv.h" 6 + 7 + /* 8 + * G200 9 + */ 10 + 11 + static int mgag200_pixpll_compute_g200(struct mgag200_pll *pixpll, long clock, 12 + struct mgag200_pll_values *pixpllc) 13 + { 14 + struct mga_device *mdev = pixpll->mdev; 15 + struct drm_device *dev = &mdev->base; 16 + const int post_div_max = 7; 17 + const int in_div_min = 1; 18 + const int in_div_max = 6; 19 + const int feed_div_min = 7; 20 + const int feed_div_max = 127; 21 + u8 testp, testm, testn; 22 + u8 n = 0, m = 0, p, s; 23 + long f_vco; 24 + long computed; 25 + long delta, tmp_delta; 26 + long ref_clk = mdev->model.g200.ref_clk; 27 + long p_clk_min = mdev->model.g200.pclk_min; 28 + long p_clk_max = mdev->model.g200.pclk_max; 29 + 30 + if (clock > p_clk_max) { 31 + drm_err(dev, "Pixel Clock %ld too high\n", clock); 32 + return -EINVAL; 33 + } 34 + 35 + if (clock < p_clk_min >> 3) 36 + clock = p_clk_min >> 3; 37 + 38 + f_vco = clock; 39 + for (testp = 0; 40 + testp <= post_div_max && f_vco < p_clk_min; 41 + testp = (testp << 1) + 1, f_vco <<= 1) 42 + ; 43 + p = testp + 1; 44 + 45 + delta = clock; 46 + 47 + for (testm = in_div_min; testm <= in_div_max; testm++) { 48 + for (testn = feed_div_min; testn <= feed_div_max; testn++) { 49 + computed = ref_clk * (testn + 1) / (testm + 1); 50 + if (computed < f_vco) 51 + tmp_delta = f_vco - computed; 52 + else 53 + tmp_delta = computed - f_vco; 54 + if (tmp_delta < delta) { 55 + delta = tmp_delta; 56 + m = testm + 1; 57 + n = testn + 1; 58 + } 59 + } 60 + } 61 + f_vco = ref_clk * n / m; 62 + if (f_vco < 100000) 63 + s = 0; 64 + else if (f_vco < 140000) 65 + s = 1; 66 + else if (f_vco < 180000) 67 + s = 2; 68 + else 69 + s = 3; 70 + 71 + drm_dbg_kms(dev, "clock: %ld vco: %ld m: %d n: %d p: %d s: %d\n", 72 + clock, f_vco, m, n, p, s); 73 + 74 + pixpllc->m = m; 75 + pixpllc->n = n; 76 + pixpllc->p = p; 77 + pixpllc->s = s; 78 + 79 + return 0; 80 + } 81 + 82 + static 
void 83 + mgag200_pixpll_update_g200(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc) 84 + { 85 + struct mga_device *mdev = pixpll->mdev; 86 + unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs; 87 + u8 xpixpllcm, xpixpllcn, xpixpllcp; 88 + 89 + pixpllcm = pixpllc->m - 1; 90 + pixpllcn = pixpllc->n - 1; 91 + pixpllcp = pixpllc->p - 1; 92 + pixpllcs = pixpllc->s; 93 + 94 + xpixpllcm = pixpllcm; 95 + xpixpllcn = pixpllcn; 96 + xpixpllcp = (pixpllcs << 3) | pixpllcp; 97 + 98 + WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK); 99 + 100 + WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm); 101 + WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn); 102 + WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp); 103 + } 104 + 105 + static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200 = { 106 + .compute = mgag200_pixpll_compute_g200, 107 + .update = mgag200_pixpll_update_g200, 108 + }; 109 + 110 + /* 111 + * G200SE 112 + */ 113 + 114 + static int mgag200_pixpll_compute_g200se_00(struct mgag200_pll *pixpll, long clock, 115 + struct mgag200_pll_values *pixpllc) 116 + { 117 + static const unsigned int vcomax = 320000; 118 + static const unsigned int vcomin = 160000; 119 + static const unsigned int pllreffreq = 25000; 120 + 121 + unsigned int delta, tmpdelta, permitteddelta; 122 + unsigned int testp, testm, testn; 123 + unsigned int p, m, n, s; 124 + unsigned int computed; 125 + 126 + m = n = p = s = 0; 127 + permitteddelta = clock * 5 / 1000; 128 + 129 + for (testp = 8; testp > 0; testp /= 2) { 130 + if (clock * testp > vcomax) 131 + continue; 132 + if (clock * testp < vcomin) 133 + continue; 134 + 135 + for (testn = 17; testn < 256; testn++) { 136 + for (testm = 1; testm < 32; testm++) { 137 + computed = (pllreffreq * testn) / (testm * testp); 138 + if (computed > clock) 139 + tmpdelta = computed - clock; 140 + else 141 + tmpdelta = clock - computed; 142 + if (tmpdelta < delta) { 143 + delta = tmpdelta; 144 + m = testm; 145 + n = testn; 146 + p = testp; 147 + } 148 
+ } 149 + } 150 + } 151 + 152 + if (delta > permitteddelta) { 153 + pr_warn("PLL delta too large\n"); 154 + return -EINVAL; 155 + } 156 + 157 + pixpllc->m = m; 158 + pixpllc->n = n; 159 + pixpllc->p = p; 160 + pixpllc->s = s; 161 + 162 + return 0; 163 + } 164 + 165 + static void mgag200_pixpll_update_g200se_00(struct mgag200_pll *pixpll, 166 + const struct mgag200_pll_values *pixpllc) 167 + { 168 + unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs; 169 + u8 xpixpllcm, xpixpllcn, xpixpllcp; 170 + struct mga_device *mdev = pixpll->mdev; 171 + 172 + pixpllcm = pixpllc->m - 1; 173 + pixpllcn = pixpllc->n - 1; 174 + pixpllcp = pixpllc->p - 1; 175 + pixpllcs = pixpllc->s; 176 + 177 + xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1); 178 + xpixpllcn = pixpllcn; 179 + xpixpllcp = (pixpllcs << 3) | pixpllcp; 180 + 181 + WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK); 182 + 183 + WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm); 184 + WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn); 185 + WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp); 186 + } 187 + 188 + static int mgag200_pixpll_compute_g200se_04(struct mgag200_pll *pixpll, long clock, 189 + struct mgag200_pll_values *pixpllc) 190 + { 191 + static const unsigned int vcomax = 1600000; 192 + static const unsigned int vcomin = 800000; 193 + static const unsigned int pllreffreq = 25000; 194 + static const unsigned int pvalues_e4[] = {16, 14, 12, 10, 8, 6, 4, 2, 1}; 195 + 196 + unsigned int delta, tmpdelta, permitteddelta; 197 + unsigned int testp, testm, testn; 198 + unsigned int p, m, n, s; 199 + unsigned int computed; 200 + unsigned int fvv; 201 + unsigned int i; 202 + 203 + m = n = p = s = 0; 204 + delta = 0xffffffff; 205 + 206 + if (clock < 25000) 207 + clock = 25000; 208 + clock = clock * 2; 209 + 210 + /* Permited delta is 0.5% as VESA Specification */ 211 + permitteddelta = clock * 5 / 1000; 212 + 213 + for (i = 0 ; i < ARRAY_SIZE(pvalues_e4); i++) { 214 + testp = pvalues_e4[i]; 215 + 216 + if ((clock * testp) > vcomax) 
217 + continue; 218 + if ((clock * testp) < vcomin) 219 + continue; 220 + 221 + for (testn = 50; testn <= 256; testn++) { 222 + for (testm = 1; testm <= 32; testm++) { 223 + computed = (pllreffreq * testn) / (testm * testp); 224 + if (computed > clock) 225 + tmpdelta = computed - clock; 226 + else 227 + tmpdelta = clock - computed; 228 + 229 + if (tmpdelta < delta) { 230 + delta = tmpdelta; 231 + m = testm; 232 + n = testn; 233 + p = testp; 234 + } 235 + } 236 + } 237 + } 238 + 239 + fvv = pllreffreq * n / m; 240 + fvv = (fvv - 800000) / 50000; 241 + if (fvv > 15) 242 + fvv = 15; 243 + s = fvv << 1; 244 + 245 + if (delta > permitteddelta) { 246 + pr_warn("PLL delta too large\n"); 247 + return -EINVAL; 248 + } 249 + 250 + pixpllc->m = m; 251 + pixpllc->n = n; 252 + pixpllc->p = p; 253 + pixpllc->s = s; 254 + 255 + return 0; 256 + } 257 + 258 + static void mgag200_pixpll_update_g200se_04(struct mgag200_pll *pixpll, 259 + const struct mgag200_pll_values *pixpllc) 260 + { 261 + unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs; 262 + u8 xpixpllcm, xpixpllcn, xpixpllcp; 263 + struct mga_device *mdev = pixpll->mdev; 264 + 265 + pixpllcm = pixpllc->m - 1; 266 + pixpllcn = pixpllc->n - 1; 267 + pixpllcp = pixpllc->p - 1; 268 + pixpllcs = pixpllc->s; 269 + 270 + xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1); 271 + xpixpllcn = pixpllcn; 272 + xpixpllcp = (pixpllcs << 3) | pixpllcp; 273 + 274 + WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK); 275 + 276 + WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm); 277 + WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn); 278 + WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp); 279 + 280 + WREG_DAC(0x1a, 0x09); 281 + msleep(20); 282 + WREG_DAC(0x1a, 0x01); 283 + } 284 + 285 + static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200se_00 = { 286 + .compute = mgag200_pixpll_compute_g200se_00, 287 + .update = mgag200_pixpll_update_g200se_00, 288 + }; 289 + 290 + static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200se_04 = { 291 + 
.compute = mgag200_pixpll_compute_g200se_04, 292 + .update = mgag200_pixpll_update_g200se_04, 293 + }; 294 + 295 + /* 296 + * G200WB 297 + */ 298 + 299 + static int mgag200_pixpll_compute_g200wb(struct mgag200_pll *pixpll, long clock, 300 + struct mgag200_pll_values *pixpllc) 301 + { 302 + static const unsigned int vcomax = 550000; 303 + static const unsigned int vcomin = 150000; 304 + static const unsigned int pllreffreq = 48000; 305 + 306 + unsigned int delta, tmpdelta; 307 + unsigned int testp, testm, testn; 308 + unsigned int p, m, n, s; 309 + unsigned int computed; 310 + 311 + m = n = p = s = 0; 312 + delta = 0xffffffff; 313 + 314 + for (testp = 1; testp < 9; testp++) { 315 + if (clock * testp > vcomax) 316 + continue; 317 + if (clock * testp < vcomin) 318 + continue; 319 + 320 + for (testm = 1; testm < 17; testm++) { 321 + for (testn = 1; testn < 151; testn++) { 322 + computed = (pllreffreq * testn) / (testm * testp); 323 + if (computed > clock) 324 + tmpdelta = computed - clock; 325 + else 326 + tmpdelta = clock - computed; 327 + if (tmpdelta < delta) { 328 + delta = tmpdelta; 329 + n = testn; 330 + m = testm; 331 + p = testp; 332 + s = 0; 333 + } 334 + } 335 + } 336 + } 337 + 338 + pixpllc->m = m; 339 + pixpllc->n = n; 340 + pixpllc->p = p; 341 + pixpllc->s = s; 342 + 343 + return 0; 344 + } 345 + 346 + static void 347 + mgag200_pixpll_update_g200wb(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc) 348 + { 349 + unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs; 350 + u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp; 351 + int i, j, tmpcount, vcount; 352 + struct mga_device *mdev = pixpll->mdev; 353 + bool pll_locked = false; 354 + 355 + pixpllcm = pixpllc->m - 1; 356 + pixpllcn = pixpllc->n - 1; 357 + pixpllcp = pixpllc->p - 1; 358 + pixpllcs = pixpllc->s; 359 + 360 + xpixpllcm = ((pixpllcn & BIT(8)) >> 1) | pixpllcm; 361 + xpixpllcn = pixpllcn; 362 + xpixpllcp = ((pixpllcn & GENMASK(10, 9)) >> 3) | (pixpllcs << 3) | pixpllcp; 363 + 364 + 
WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK); 365 + 366 + for (i = 0; i <= 32 && pll_locked == false; i++) { 367 + if (i > 0) { 368 + WREG8(MGAREG_CRTC_INDEX, 0x1e); 369 + tmp = RREG8(MGAREG_CRTC_DATA); 370 + if (tmp < 0xff) 371 + WREG8(MGAREG_CRTC_DATA, tmp+1); 372 + } 373 + 374 + /* set pixclkdis to 1 */ 375 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 376 + tmp = RREG8(DAC_DATA); 377 + tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 378 + WREG8(DAC_DATA, tmp); 379 + 380 + WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 381 + tmp = RREG8(DAC_DATA); 382 + tmp |= MGA1064_REMHEADCTL_CLKDIS; 383 + WREG8(DAC_DATA, tmp); 384 + 385 + /* select PLL Set C */ 386 + tmp = RREG8(MGAREG_MEM_MISC_READ); 387 + tmp |= 0x3 << 2; 388 + WREG8(MGAREG_MEM_MISC_WRITE, tmp); 389 + 390 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 391 + tmp = RREG8(DAC_DATA); 392 + tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80; 393 + WREG8(DAC_DATA, tmp); 394 + 395 + udelay(500); 396 + 397 + /* reset the PLL */ 398 + WREG8(DAC_INDEX, MGA1064_VREF_CTL); 399 + tmp = RREG8(DAC_DATA); 400 + tmp &= ~0x04; 401 + WREG8(DAC_DATA, tmp); 402 + 403 + udelay(50); 404 + 405 + /* program pixel pll register */ 406 + WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn); 407 + WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm); 408 + WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp); 409 + 410 + udelay(50); 411 + 412 + /* turn pll on */ 413 + WREG8(DAC_INDEX, MGA1064_VREF_CTL); 414 + tmp = RREG8(DAC_DATA); 415 + tmp |= 0x04; 416 + WREG_DAC(MGA1064_VREF_CTL, tmp); 417 + 418 + udelay(500); 419 + 420 + /* select the pixel pll */ 421 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 422 + tmp = RREG8(DAC_DATA); 423 + tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 424 + tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 425 + WREG8(DAC_DATA, tmp); 426 + 427 + WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 428 + tmp = RREG8(DAC_DATA); 429 + tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK; 430 + tmp |= MGA1064_REMHEADCTL_CLKSL_PLL; 431 + WREG8(DAC_DATA, tmp); 432 + 433 + /* reset dotclock rate bit */ 434 + 
WREG8(MGAREG_SEQ_INDEX, 1); 435 + tmp = RREG8(MGAREG_SEQ_DATA); 436 + tmp &= ~0x8; 437 + WREG8(MGAREG_SEQ_DATA, tmp); 438 + 439 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 440 + tmp = RREG8(DAC_DATA); 441 + tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 442 + WREG8(DAC_DATA, tmp); 443 + 444 + vcount = RREG8(MGAREG_VCOUNT); 445 + 446 + for (j = 0; j < 30 && pll_locked == false; j++) { 447 + tmpcount = RREG8(MGAREG_VCOUNT); 448 + if (tmpcount < vcount) 449 + vcount = 0; 450 + if ((tmpcount - vcount) > 2) 451 + pll_locked = true; 452 + else 453 + udelay(5); 454 + } 455 + } 456 + 457 + WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 458 + tmp = RREG8(DAC_DATA); 459 + tmp &= ~MGA1064_REMHEADCTL_CLKDIS; 460 + WREG_DAC(MGA1064_REMHEADCTL, tmp); 461 + } 462 + 463 + static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200wb = { 464 + .compute = mgag200_pixpll_compute_g200wb, 465 + .update = mgag200_pixpll_update_g200wb, 466 + }; 467 + 468 + /* 469 + * G200EV 470 + */ 471 + 472 + static int mgag200_pixpll_compute_g200ev(struct mgag200_pll *pixpll, long clock, 473 + struct mgag200_pll_values *pixpllc) 474 + { 475 + static const unsigned int vcomax = 550000; 476 + static const unsigned int vcomin = 150000; 477 + static const unsigned int pllreffreq = 50000; 478 + 479 + unsigned int delta, tmpdelta; 480 + unsigned int testp, testm, testn; 481 + unsigned int p, m, n, s; 482 + unsigned int computed; 483 + 484 + m = n = p = s = 0; 485 + delta = 0xffffffff; 486 + 487 + for (testp = 16; testp > 0; testp--) { 488 + if (clock * testp > vcomax) 489 + continue; 490 + if (clock * testp < vcomin) 491 + continue; 492 + 493 + for (testn = 1; testn < 257; testn++) { 494 + for (testm = 1; testm < 17; testm++) { 495 + computed = (pllreffreq * testn) / 496 + (testm * testp); 497 + if (computed > clock) 498 + tmpdelta = computed - clock; 499 + else 500 + tmpdelta = clock - computed; 501 + if (tmpdelta < delta) { 502 + delta = tmpdelta; 503 + n = testn; 504 + m = testm; 505 + p = testp; 506 + } 507 + } 508 + } 
509 + } 510 + 511 + pixpllc->m = m; 512 + pixpllc->n = n; 513 + pixpllc->p = p; 514 + pixpllc->s = s; 515 + 516 + return 0; 517 + } 518 + 519 + static void 520 + mgag200_pixpll_update_g200ev(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc) 521 + { 522 + unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs; 523 + u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp; 524 + struct mga_device *mdev = pixpll->mdev; 525 + 526 + pixpllcm = pixpllc->m - 1; 527 + pixpllcn = pixpllc->n - 1; 528 + pixpllcp = pixpllc->p - 1; 529 + pixpllcs = pixpllc->s; 530 + 531 + xpixpllcm = pixpllcm; 532 + xpixpllcn = pixpllcn; 533 + xpixpllcp = (pixpllcs << 3) | pixpllcp; 534 + 535 + WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK); 536 + 537 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 538 + tmp = RREG8(DAC_DATA); 539 + tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 540 + WREG8(DAC_DATA, tmp); 541 + 542 + tmp = RREG8(MGAREG_MEM_MISC_READ); 543 + tmp |= 0x3 << 2; 544 + WREG8(MGAREG_MEM_MISC_WRITE, tmp); 545 + 546 + WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); 547 + tmp = RREG8(DAC_DATA); 548 + WREG8(DAC_DATA, tmp & ~0x40); 549 + 550 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 551 + tmp = RREG8(DAC_DATA); 552 + tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 553 + WREG8(DAC_DATA, tmp); 554 + 555 + WREG_DAC(MGA1064_EV_PIX_PLLC_M, xpixpllcm); 556 + WREG_DAC(MGA1064_EV_PIX_PLLC_N, xpixpllcn); 557 + WREG_DAC(MGA1064_EV_PIX_PLLC_P, xpixpllcp); 558 + 559 + udelay(50); 560 + 561 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 562 + tmp = RREG8(DAC_DATA); 563 + tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 564 + WREG8(DAC_DATA, tmp); 565 + 566 + udelay(500); 567 + 568 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 569 + tmp = RREG8(DAC_DATA); 570 + tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 571 + tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 572 + WREG8(DAC_DATA, tmp); 573 + 574 + WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); 575 + tmp = RREG8(DAC_DATA); 576 + WREG8(DAC_DATA, tmp | 0x40); 577 + 578 + tmp = RREG8(MGAREG_MEM_MISC_READ); 579 + 
tmp |= (0x3 << 2); 580 + WREG8(MGAREG_MEM_MISC_WRITE, tmp); 581 + 582 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 583 + tmp = RREG8(DAC_DATA); 584 + tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 585 + WREG8(DAC_DATA, tmp); 586 + } 587 + 588 + static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200ev = { 589 + .compute = mgag200_pixpll_compute_g200ev, 590 + .update = mgag200_pixpll_update_g200ev, 591 + }; 592 + 593 + /* 594 + * G200EH 595 + */ 596 + 597 + static int mgag200_pixpll_compute_g200eh(struct mgag200_pll *pixpll, long clock, 598 + struct mgag200_pll_values *pixpllc) 599 + { 600 + static const unsigned int vcomax = 800000; 601 + static const unsigned int vcomin = 400000; 602 + static const unsigned int pllreffreq = 33333; 603 + 604 + unsigned int delta, tmpdelta; 605 + unsigned int testp, testm, testn; 606 + unsigned int p, m, n, s; 607 + unsigned int computed; 608 + 609 + m = n = p = s = 0; 610 + delta = 0xffffffff; 611 + 612 + for (testp = 16; testp > 0; testp >>= 1) { 613 + if (clock * testp > vcomax) 614 + continue; 615 + if (clock * testp < vcomin) 616 + continue; 617 + 618 + for (testm = 1; testm < 33; testm++) { 619 + for (testn = 17; testn < 257; testn++) { 620 + computed = (pllreffreq * testn) / (testm * testp); 621 + if (computed > clock) 622 + tmpdelta = computed - clock; 623 + else 624 + tmpdelta = clock - computed; 625 + if (tmpdelta < delta) { 626 + delta = tmpdelta; 627 + n = testn; 628 + m = testm; 629 + p = testp; 630 + } 631 + } 632 + } 633 + } 634 + 635 + pixpllc->m = m; 636 + pixpllc->n = n; 637 + pixpllc->p = p; 638 + pixpllc->s = s; 639 + 640 + return 0; 641 + } 642 + 643 + static void 644 + mgag200_pixpll_update_g200eh(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc) 645 + { 646 + unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs; 647 + u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp; 648 + int i, j, tmpcount, vcount; 649 + struct mga_device *mdev = pixpll->mdev; 650 + bool pll_locked = false; 651 + 652 + pixpllcm = 
pixpllc->m - 1; 653 + pixpllcn = pixpllc->n - 1; 654 + pixpllcp = pixpllc->p - 1; 655 + pixpllcs = pixpllc->s; 656 + 657 + xpixpllcm = ((pixpllcn & BIT(8)) >> 1) | pixpllcm; 658 + xpixpllcn = pixpllcn; 659 + xpixpllcp = (pixpllcs << 3) | pixpllcp; 660 + 661 + WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK); 662 + 663 + for (i = 0; i <= 32 && pll_locked == false; i++) { 664 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 665 + tmp = RREG8(DAC_DATA); 666 + tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 667 + WREG8(DAC_DATA, tmp); 668 + 669 + tmp = RREG8(MGAREG_MEM_MISC_READ); 670 + tmp |= 0x3 << 2; 671 + WREG8(MGAREG_MEM_MISC_WRITE, tmp); 672 + 673 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 674 + tmp = RREG8(DAC_DATA); 675 + tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 676 + WREG8(DAC_DATA, tmp); 677 + 678 + udelay(500); 679 + 680 + WREG_DAC(MGA1064_EH_PIX_PLLC_M, xpixpllcm); 681 + WREG_DAC(MGA1064_EH_PIX_PLLC_N, xpixpllcn); 682 + WREG_DAC(MGA1064_EH_PIX_PLLC_P, xpixpllcp); 683 + 684 + udelay(500); 685 + 686 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 687 + tmp = RREG8(DAC_DATA); 688 + tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 689 + tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 690 + WREG8(DAC_DATA, tmp); 691 + 692 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 693 + tmp = RREG8(DAC_DATA); 694 + tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 695 + tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 696 + WREG8(DAC_DATA, tmp); 697 + 698 + vcount = RREG8(MGAREG_VCOUNT); 699 + 700 + for (j = 0; j < 30 && pll_locked == false; j++) { 701 + tmpcount = RREG8(MGAREG_VCOUNT); 702 + if (tmpcount < vcount) 703 + vcount = 0; 704 + if ((tmpcount - vcount) > 2) 705 + pll_locked = true; 706 + else 707 + udelay(5); 708 + } 709 + } 710 + } 711 + 712 + static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200eh = { 713 + .compute = mgag200_pixpll_compute_g200eh, 714 + .update = mgag200_pixpll_update_g200eh, 715 + }; 716 + 717 + /* 718 + * G200EH3 719 + */ 720 + 721 + static int mgag200_pixpll_compute_g200eh3(struct mgag200_pll 
*pixpll, long clock, 722 + struct mgag200_pll_values *pixpllc) 723 + { 724 + static const unsigned int vcomax = 3000000; 725 + static const unsigned int vcomin = 1500000; 726 + static const unsigned int pllreffreq = 25000; 727 + 728 + unsigned int delta, tmpdelta; 729 + unsigned int testp, testm, testn; 730 + unsigned int p, m, n, s; 731 + unsigned int computed; 732 + 733 + m = n = p = s = 0; 734 + delta = 0xffffffff; 735 + testp = 0; 736 + 737 + for (testm = 150; testm >= 6; testm--) { 738 + if (clock * testm > vcomax) 739 + continue; 740 + if (clock * testm < vcomin) 741 + continue; 742 + for (testn = 120; testn >= 60; testn--) { 743 + computed = (pllreffreq * testn) / testm; 744 + if (computed > clock) 745 + tmpdelta = computed - clock; 746 + else 747 + tmpdelta = clock - computed; 748 + if (tmpdelta < delta) { 749 + delta = tmpdelta; 750 + n = testn + 1; 751 + m = testm + 1; 752 + p = testp + 1; 753 + } 754 + if (delta == 0) 755 + break; 756 + } 757 + if (delta == 0) 758 + break; 759 + } 760 + 761 + pixpllc->m = m; 762 + pixpllc->n = n; 763 + pixpllc->p = p; 764 + pixpllc->s = s; 765 + 766 + return 0; 767 + } 768 + 769 + static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200eh3 = { 770 + .compute = mgag200_pixpll_compute_g200eh3, 771 + .update = mgag200_pixpll_update_g200eh, // same as G200EH 772 + }; 773 + 774 + /* 775 + * G200ER 776 + */ 777 + 778 + static int mgag200_pixpll_compute_g200er(struct mgag200_pll *pixpll, long clock, 779 + struct mgag200_pll_values *pixpllc) 780 + { 781 + static const unsigned int vcomax = 1488000; 782 + static const unsigned int vcomin = 1056000; 783 + static const unsigned int pllreffreq = 48000; 784 + static const unsigned int m_div_val[] = { 1, 2, 4, 8 }; 785 + 786 + unsigned int delta, tmpdelta; 787 + int testr, testn, testm, testo; 788 + unsigned int p, m, n, s; 789 + unsigned int computed, vco; 790 + 791 + m = n = p = s = 0; 792 + delta = 0xffffffff; 793 + 794 + for (testr = 0; testr < 4; testr++) { 795 + if (delta 
== 0) 796 + break; 797 + for (testn = 5; testn < 129; testn++) { 798 + if (delta == 0) 799 + break; 800 + for (testm = 3; testm >= 0; testm--) { 801 + if (delta == 0) 802 + break; 803 + for (testo = 5; testo < 33; testo++) { 804 + vco = pllreffreq * (testn + 1) / 805 + (testr + 1); 806 + if (vco < vcomin) 807 + continue; 808 + if (vco > vcomax) 809 + continue; 810 + computed = vco / (m_div_val[testm] * (testo + 1)); 811 + if (computed > clock) 812 + tmpdelta = computed - clock; 813 + else 814 + tmpdelta = clock - computed; 815 + if (tmpdelta < delta) { 816 + delta = tmpdelta; 817 + m = (testm | (testo << 3)) + 1; 818 + n = testn + 1; 819 + p = testr + 1; 820 + s = testr; 821 + } 822 + } 823 + } 824 + } 825 + } 826 + 827 + pixpllc->m = m; 828 + pixpllc->n = n; 829 + pixpllc->p = p; 830 + pixpllc->s = s; 831 + 832 + return 0; 833 + } 834 + 835 + static void 836 + mgag200_pixpll_update_g200er(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc) 837 + { 838 + unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs; 839 + u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp; 840 + struct mga_device *mdev = pixpll->mdev; 841 + 842 + pixpllcm = pixpllc->m - 1; 843 + pixpllcn = pixpllc->n - 1; 844 + pixpllcp = pixpllc->p - 1; 845 + pixpllcs = pixpllc->s; 846 + 847 + xpixpllcm = pixpllcm; 848 + xpixpllcn = pixpllcn; 849 + xpixpllcp = (pixpllcs << 3) | pixpllcp; 850 + 851 + WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK); 852 + 853 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 854 + tmp = RREG8(DAC_DATA); 855 + tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 856 + WREG8(DAC_DATA, tmp); 857 + 858 + WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 859 + tmp = RREG8(DAC_DATA); 860 + tmp |= MGA1064_REMHEADCTL_CLKDIS; 861 + WREG8(DAC_DATA, tmp); 862 + 863 + tmp = RREG8(MGAREG_MEM_MISC_READ); 864 + tmp |= (0x3<<2) | 0xc0; 865 + WREG8(MGAREG_MEM_MISC_WRITE, tmp); 866 + 867 + WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 868 + tmp = RREG8(DAC_DATA); 869 + tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 870 + 
tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 871 + WREG8(DAC_DATA, tmp); 872 + 873 + udelay(500); 874 + 875 + WREG_DAC(MGA1064_ER_PIX_PLLC_N, xpixpllcn); 876 + WREG_DAC(MGA1064_ER_PIX_PLLC_M, xpixpllcm); 877 + WREG_DAC(MGA1064_ER_PIX_PLLC_P, xpixpllcp); 878 + 879 + udelay(50); 880 + } 881 + 882 + static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200er = { 883 + .compute = mgag200_pixpll_compute_g200er, 884 + .update = mgag200_pixpll_update_g200er, 885 + }; 886 + 887 + /* 888 + * G200EW3 889 + */ 890 + 891 + static int mgag200_pixpll_compute_g200ew3(struct mgag200_pll *pixpll, long clock, 892 + struct mgag200_pll_values *pixpllc) 893 + { 894 + static const unsigned int vcomax = 800000; 895 + static const unsigned int vcomin = 400000; 896 + static const unsigned int pllreffreq = 25000; 897 + 898 + unsigned int delta, tmpdelta; 899 + unsigned int testp, testm, testn, testp2; 900 + unsigned int p, m, n, s; 901 + unsigned int computed; 902 + 903 + m = n = p = s = 0; 904 + delta = 0xffffffff; 905 + 906 + for (testp = 1; testp < 8; testp++) { 907 + for (testp2 = 1; testp2 < 8; testp2++) { 908 + if (testp < testp2) 909 + continue; 910 + if ((clock * testp * testp2) > vcomax) 911 + continue; 912 + if ((clock * testp * testp2) < vcomin) 913 + continue; 914 + for (testm = 1; testm < 26; testm++) { 915 + for (testn = 32; testn < 2048 ; testn++) { 916 + computed = (pllreffreq * testn) / (testm * testp * testp2); 917 + if (computed > clock) 918 + tmpdelta = computed - clock; 919 + else 920 + tmpdelta = clock - computed; 921 + if (tmpdelta < delta) { 922 + delta = tmpdelta; 923 + m = testm + 1; 924 + n = testn + 1; 925 + p = testp + 1; 926 + s = testp2; 927 + } 928 + } 929 + } 930 + } 931 + } 932 + 933 + pixpllc->m = m; 934 + pixpllc->n = n; 935 + pixpllc->p = p; 936 + pixpllc->s = s; 937 + 938 + return 0; 939 + } 940 + 941 + static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200ew3 = { 942 + .compute = mgag200_pixpll_compute_g200ew3, 943 + .update = 
mgag200_pixpll_update_g200wb, // same as G200WB 944 + }; 945 + 946 + /* 947 + * PLL initialization 948 + */ 949 + 950 + int mgag200_pixpll_init(struct mgag200_pll *pixpll, struct mga_device *mdev) 951 + { 952 + struct drm_device *dev = &mdev->base; 953 + 954 + pixpll->mdev = mdev; 955 + 956 + switch (mdev->type) { 957 + case G200_PCI: 958 + case G200_AGP: 959 + pixpll->funcs = &mgag200_pixpll_funcs_g200; 960 + break; 961 + case G200_SE_A: 962 + case G200_SE_B: 963 + if (mdev->model.g200se.unique_rev_id >= 0x04) 964 + pixpll->funcs = &mgag200_pixpll_funcs_g200se_04; 965 + else 966 + pixpll->funcs = &mgag200_pixpll_funcs_g200se_00; 967 + break; 968 + case G200_WB: 969 + pixpll->funcs = &mgag200_pixpll_funcs_g200wb; 970 + break; 971 + case G200_EV: 972 + pixpll->funcs = &mgag200_pixpll_funcs_g200ev; 973 + break; 974 + case G200_EH: 975 + pixpll->funcs = &mgag200_pixpll_funcs_g200eh; 976 + break; 977 + case G200_EH3: 978 + pixpll->funcs = &mgag200_pixpll_funcs_g200eh3; 979 + break; 980 + case G200_ER: 981 + pixpll->funcs = &mgag200_pixpll_funcs_g200er; 982 + break; 983 + case G200_EW3: 984 + pixpll->funcs = &mgag200_pixpll_funcs_g200ew3; 985 + break; 986 + default: 987 + drm_err(dev, "unknown device type %d\n", mdev->type); 988 + return -ENODEV; 989 + } 990 + 991 + return 0; 992 + }
+4 -5
drivers/gpu/drm/mgag200/mgag200_reg.h
··· 222 222 223 223 #define MGAREG_MISC_IOADSEL (0x1 << 0) 224 224 #define MGAREG_MISC_RAMMAPEN (0x1 << 1) 225 - #define MGAREG_MISC_CLK_SEL_MASK GENMASK(3, 2) 226 - #define MGAREG_MISC_CLK_SEL_VGA25 (0x0 << 2) 227 - #define MGAREG_MISC_CLK_SEL_VGA28 (0x1 << 2) 228 - #define MGAREG_MISC_CLK_SEL_MGA_PIX (0x2 << 2) 229 - #define MGAREG_MISC_CLK_SEL_MGA_MSK (0x3 << 2) 225 + #define MGAREG_MISC_CLKSEL_MASK GENMASK(3, 2) 226 + #define MGAREG_MISC_CLKSEL_VGA25 (0x0 << 2) 227 + #define MGAREG_MISC_CLKSEL_VGA28 (0x1 << 2) 228 + #define MGAREG_MISC_CLKSEL_MGA (0x3 << 2) 230 229 #define MGAREG_MISC_VIDEO_DIS (0x1 << 4) 231 230 #define MGAREG_MISC_HIGH_PG_SEL (0x1 << 5) 232 231 #define MGAREG_MISC_HSYNCPOL BIT(6)
+68 -45
drivers/gpu/drm/msm/msm_drv.c
··· 14 14 #include <drm/drm_drv.h> 15 15 #include <drm/drm_file.h> 16 16 #include <drm/drm_ioctl.h> 17 - #include <drm/drm_irq.h> 18 17 #include <drm/drm_prime.h> 19 18 #include <drm/drm_of.h> 20 19 #include <drm/drm_vblank.h> ··· 200 201 msm_writel(val | or, addr); 201 202 } 202 203 204 + static irqreturn_t msm_irq(int irq, void *arg) 205 + { 206 + struct drm_device *dev = arg; 207 + struct msm_drm_private *priv = dev->dev_private; 208 + struct msm_kms *kms = priv->kms; 209 + 210 + BUG_ON(!kms); 211 + 212 + return kms->funcs->irq(kms); 213 + } 214 + 215 + static void msm_irq_preinstall(struct drm_device *dev) 216 + { 217 + struct msm_drm_private *priv = dev->dev_private; 218 + struct msm_kms *kms = priv->kms; 219 + 220 + BUG_ON(!kms); 221 + 222 + kms->funcs->irq_preinstall(kms); 223 + } 224 + 225 + static int msm_irq_postinstall(struct drm_device *dev) 226 + { 227 + struct msm_drm_private *priv = dev->dev_private; 228 + struct msm_kms *kms = priv->kms; 229 + 230 + BUG_ON(!kms); 231 + 232 + if (kms->funcs->irq_postinstall) 233 + return kms->funcs->irq_postinstall(kms); 234 + 235 + return 0; 236 + } 237 + 238 + static int msm_irq_install(struct drm_device *dev, unsigned int irq) 239 + { 240 + int ret; 241 + 242 + if (irq == IRQ_NOTCONNECTED) 243 + return -ENOTCONN; 244 + 245 + msm_irq_preinstall(dev); 246 + 247 + ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev); 248 + if (ret) 249 + return ret; 250 + 251 + ret = msm_irq_postinstall(dev); 252 + if (ret) { 253 + free_irq(irq, dev); 254 + return ret; 255 + } 256 + 257 + return 0; 258 + } 259 + 260 + static void msm_irq_uninstall(struct drm_device *dev) 261 + { 262 + struct msm_drm_private *priv = dev->dev_private; 263 + struct msm_kms *kms = priv->kms; 264 + 265 + kms->funcs->irq_uninstall(kms); 266 + free_irq(kms->irq, dev); 267 + } 268 + 203 269 struct msm_vblank_work { 204 270 struct work_struct work; 205 271 int crtc_id; ··· 329 265 } 330 266 331 267 /* We must cancel and cleanup any pending vblank 
enable/disable 332 - * work before drm_irq_uninstall() to avoid work re-enabling an 268 + * work before msm_irq_uninstall() to avoid work re-enabling an 333 269 * irq after uninstall has disabled it. 334 270 */ 335 271 ··· 358 294 drm_mode_config_cleanup(ddev); 359 295 360 296 pm_runtime_get_sync(dev); 361 - drm_irq_uninstall(ddev); 297 + msm_irq_uninstall(ddev); 362 298 pm_runtime_put_sync(dev); 363 299 364 300 if (kms && kms->funcs) ··· 617 553 618 554 if (kms) { 619 555 pm_runtime_get_sync(dev); 620 - ret = drm_irq_install(ddev, kms->irq); 556 + ret = msm_irq_install(ddev, kms->irq); 621 557 pm_runtime_put_sync(dev); 622 558 if (ret < 0) { 623 559 DRM_DEV_ERROR(dev, "failed to install IRQ handler\n"); ··· 724 660 mutex_unlock(&dev->struct_mutex); 725 661 726 662 context_close(ctx); 727 - } 728 - 729 - static irqreturn_t msm_irq(int irq, void *arg) 730 - { 731 - struct drm_device *dev = arg; 732 - struct msm_drm_private *priv = dev->dev_private; 733 - struct msm_kms *kms = priv->kms; 734 - BUG_ON(!kms); 735 - return kms->funcs->irq(kms); 736 - } 737 - 738 - static void msm_irq_preinstall(struct drm_device *dev) 739 - { 740 - struct msm_drm_private *priv = dev->dev_private; 741 - struct msm_kms *kms = priv->kms; 742 - BUG_ON(!kms); 743 - kms->funcs->irq_preinstall(kms); 744 - } 745 - 746 - static int msm_irq_postinstall(struct drm_device *dev) 747 - { 748 - struct msm_drm_private *priv = dev->dev_private; 749 - struct msm_kms *kms = priv->kms; 750 - BUG_ON(!kms); 751 - 752 - if (kms->funcs->irq_postinstall) 753 - return kms->funcs->irq_postinstall(kms); 754 - 755 - return 0; 756 - } 757 - 758 - static void msm_irq_uninstall(struct drm_device *dev) 759 - { 760 - struct msm_drm_private *priv = dev->dev_private; 761 - struct msm_kms *kms = priv->kms; 762 - BUG_ON(!kms); 763 - kms->funcs->irq_uninstall(kms); 764 663 } 765 664 766 665 int msm_crtc_enable_vblank(struct drm_crtc *crtc) ··· 1078 1051 .open = msm_open, 1079 1052 .postclose = msm_postclose, 1080 1053 
.lastclose = drm_fb_helper_lastclose, 1081 - .irq_handler = msm_irq, 1082 - .irq_preinstall = msm_irq_preinstall, 1083 - .irq_postinstall = msm_irq_postinstall, 1084 - .irq_uninstall = msm_irq_uninstall, 1085 1054 .dumb_create = msm_gem_dumb_create, 1086 1055 .dumb_map_offset = msm_gem_dumb_map_offset, 1087 1056 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+1 -1
drivers/gpu/drm/msm/msm_kms.h
··· 150 150 const struct msm_kms_funcs *funcs; 151 151 struct drm_device *dev; 152 152 153 - /* irq number to be passed on to drm_irq_install */ 153 + /* irq number to be passed on to msm_irq_install */ 154 154 int irq; 155 155 156 156 /* mapper-id used to request GEM buffer mapped for scanout: */
+50 -31
drivers/gpu/drm/mxsfb/mxsfb_drv.c
··· 24 24 #include <drm/drm_fourcc.h> 25 25 #include <drm/drm_gem_cma_helper.h> 26 26 #include <drm/drm_gem_framebuffer_helper.h> 27 - #include <drm/drm_irq.h> 28 27 #include <drm/drm_mode_config.h> 29 28 #include <drm/drm_of.h> 30 29 #include <drm/drm_probe_helper.h> ··· 152 153 return 0; 153 154 } 154 155 156 + static irqreturn_t mxsfb_irq_handler(int irq, void *data) 157 + { 158 + struct drm_device *drm = data; 159 + struct mxsfb_drm_private *mxsfb = drm->dev_private; 160 + u32 reg; 161 + 162 + reg = readl(mxsfb->base + LCDC_CTRL1); 163 + 164 + if (reg & CTRL1_CUR_FRAME_DONE_IRQ) 165 + drm_crtc_handle_vblank(&mxsfb->crtc); 166 + 167 + writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR); 168 + 169 + return IRQ_HANDLED; 170 + } 171 + 172 + static void mxsfb_irq_disable(struct drm_device *drm) 173 + { 174 + struct mxsfb_drm_private *mxsfb = drm->dev_private; 175 + 176 + mxsfb_enable_axi_clk(mxsfb); 177 + mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc); 178 + mxsfb_disable_axi_clk(mxsfb); 179 + } 180 + 181 + static int mxsfb_irq_install(struct drm_device *dev, int irq) 182 + { 183 + if (irq == IRQ_NOTCONNECTED) 184 + return -ENOTCONN; 185 + 186 + mxsfb_irq_disable(dev); 187 + 188 + return request_irq(irq, mxsfb_irq_handler, 0, dev->driver->name, dev); 189 + } 190 + 191 + static void mxsfb_irq_uninstall(struct drm_device *dev) 192 + { 193 + struct mxsfb_drm_private *mxsfb = dev->dev_private; 194 + 195 + mxsfb_irq_disable(dev); 196 + free_irq(mxsfb->irq, dev); 197 + } 198 + 155 199 static int mxsfb_load(struct drm_device *drm, 156 200 const struct mxsfb_devdata *devdata) 157 201 { ··· 268 226 269 227 drm_mode_config_reset(drm); 270 228 229 + ret = platform_get_irq(pdev, 0); 230 + if (ret < 0) 231 + goto err_vblank; 232 + mxsfb->irq = ret; 233 + 271 234 pm_runtime_get_sync(drm->dev); 272 - ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); 235 + ret = mxsfb_irq_install(drm, mxsfb->irq); 273 236 pm_runtime_put_sync(drm->dev); 274 237 275 238 if (ret 
< 0) { ··· 302 255 drm_mode_config_cleanup(drm); 303 256 304 257 pm_runtime_get_sync(drm->dev); 305 - drm_irq_uninstall(drm); 258 + mxsfb_irq_uninstall(drm); 306 259 pm_runtime_put_sync(drm->dev); 307 260 308 261 drm->dev_private = NULL; ··· 310 263 pm_runtime_disable(drm->dev); 311 264 } 312 265 313 - static void mxsfb_irq_disable(struct drm_device *drm) 314 - { 315 - struct mxsfb_drm_private *mxsfb = drm->dev_private; 316 - 317 - mxsfb_enable_axi_clk(mxsfb); 318 - mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc); 319 - mxsfb_disable_axi_clk(mxsfb); 320 - } 321 - 322 - static irqreturn_t mxsfb_irq_handler(int irq, void *data) 323 - { 324 - struct drm_device *drm = data; 325 - struct mxsfb_drm_private *mxsfb = drm->dev_private; 326 - u32 reg; 327 - 328 - reg = readl(mxsfb->base + LCDC_CTRL1); 329 - 330 - if (reg & CTRL1_CUR_FRAME_DONE_IRQ) 331 - drm_crtc_handle_vblank(&mxsfb->crtc); 332 - 333 - writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR); 334 - 335 - return IRQ_HANDLED; 336 - } 337 - 338 266 DEFINE_DRM_GEM_CMA_FOPS(fops); 339 267 340 268 static const struct drm_driver mxsfb_driver = { 341 269 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 342 - .irq_handler = mxsfb_irq_handler, 343 - .irq_preinstall = mxsfb_irq_disable, 344 - .irq_uninstall = mxsfb_irq_disable, 345 270 DRM_GEM_CMA_DRIVER_OPS, 346 271 .fops = &fops, 347 272 .name = "mxsfb-drm",
+2
drivers/gpu/drm/mxsfb/mxsfb_drv.h
··· 33 33 struct clk *clk_axi; 34 34 struct clk *clk_disp_axi; 35 35 36 + unsigned int irq; 37 + 36 38 struct drm_device *drm; 37 39 struct { 38 40 struct drm_plane primary;
+6 -2
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 1659 1659 nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) 1660 1660 { 1661 1661 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1662 - struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 1663 1662 struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc); 1664 1663 struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder); 1664 + #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT 1665 + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 1665 1666 struct nouveau_backlight *backlight = nv_connector->backlight; 1667 + #endif 1666 1668 struct drm_dp_aux *aux = &nv_connector->aux; 1667 1669 int ret; 1668 1670 u8 pwr; 1669 1671 1672 + #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT 1670 1673 if (backlight && backlight->uses_dpcd) { 1671 1674 ret = drm_edp_backlight_disable(aux, &backlight->edp_info); 1672 1675 if (ret < 0) 1673 1676 NV_ERROR(drm, "Failed to disable backlight on [CONNECTOR:%d:%s]: %d\n", 1674 1677 nv_connector->base.base.id, nv_connector->base.name, ret); 1675 1678 } 1679 + #endif 1676 1680 1677 1681 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { 1678 - int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr); 1682 + ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr); 1679 1683 1680 1684 if (ret == 0) { 1681 1685 pwr &= ~DP_SET_POWER_MASK;
+69
drivers/gpu/drm/panel/panel-simple.c
··· 3126 3126 .connector_type = DRM_MODE_CONNECTOR_LVDS, 3127 3127 }; 3128 3128 3129 + static const struct drm_display_mode logictechno_lttd800480070_l6wh_rt_mode = { 3130 + .clock = 33000, 3131 + .hdisplay = 800, 3132 + .hsync_start = 800 + 154, 3133 + .hsync_end = 800 + 154 + 3, 3134 + .htotal = 800 + 154 + 3 + 43, 3135 + .vdisplay = 480, 3136 + .vsync_start = 480 + 47, 3137 + .vsync_end = 480 + 47 + 3, 3138 + .vtotal = 480 + 47 + 3 + 20, 3139 + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, 3140 + }; 3141 + 3142 + static const struct panel_desc logictechno_lttd800480070_l6wh_rt = { 3143 + .modes = &logictechno_lttd800480070_l6wh_rt_mode, 3144 + .num_modes = 1, 3145 + .bpc = 8, 3146 + .size = { 3147 + .width = 154, 3148 + .height = 86, 3149 + }, 3150 + .delay = { 3151 + .prepare = 45, 3152 + .enable = 100, 3153 + .disable = 100, 3154 + .unprepare = 45 3155 + }, 3156 + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, 3157 + .bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE, 3158 + .connector_type = DRM_MODE_CONNECTOR_DPI, 3159 + }; 3160 + 3129 3161 static const struct drm_display_mode mitsubishi_aa070mc01_mode = { 3130 3162 .clock = 30400, 3131 3163 .hdisplay = 800, ··· 3222 3190 .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 3223 3191 .connector_type = DRM_MODE_CONNECTOR_LVDS, 3224 3192 .bus_flags = DRM_BUS_FLAG_DE_HIGH, 3193 + }; 3194 + 3195 + static const struct display_timing multi_inno_mi1010ait_1cp_timing = { 3196 + .pixelclock = { 68900000, 70000000, 73400000 }, 3197 + .hactive = { 1280, 1280, 1280 }, 3198 + .hfront_porch = { 30, 60, 71 }, 3199 + .hback_porch = { 30, 60, 71 }, 3200 + .hsync_len = { 10, 10, 48 }, 3201 + .vactive = { 800, 800, 800 }, 3202 + .vfront_porch = { 5, 10, 10 }, 3203 + .vback_porch = { 5, 10, 10 }, 3204 + .vsync_len = { 5, 6, 13 }, 3205 + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | 3206 + DISPLAY_FLAGS_DE_HIGH, 3207 + }; 3208 + 3209 + static const struct panel_desc multi_inno_mi1010ait_1cp = { 3210 + .timings = 
&multi_inno_mi1010ait_1cp_timing, 3211 + .num_timings = 1, 3212 + .bpc = 8, 3213 + .size = { 3214 + .width = 217, 3215 + .height = 136, 3216 + }, 3217 + .delay = { 3218 + .enable = 50, 3219 + .disable = 50, 3220 + }, 3221 + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 3222 + .bus_flags = DRM_BUS_FLAG_DE_HIGH, 3223 + .connector_type = DRM_MODE_CONNECTOR_LVDS, 3225 3224 }; 3226 3225 3227 3226 static const struct display_timing nec_nl12880bc20_05_timing = { ··· 4739 4676 .compatible = "logictechno,lt170410-2whc", 4740 4677 .data = &logictechno_lt170410_2whc, 4741 4678 }, { 4679 + .compatible = "logictechno,lttd800480070-l6wh-rt", 4680 + .data = &logictechno_lttd800480070_l6wh_rt, 4681 + }, { 4742 4682 .compatible = "mitsubishi,aa070mc01-ca1", 4743 4683 .data = &mitsubishi_aa070mc01, 4684 + }, { 4685 + .compatible = "multi-inno,mi1010ait-1cp", 4686 + .data = &multi_inno_mi1010ait_1cp, 4744 4687 }, { 4745 4688 .compatible = "nec,nl12880bc20-05", 4746 4689 .data = &nec_nl12880bc20_05,
+1 -2
drivers/gpu/drm/r128/r128_cce.c
··· 39 39 40 40 #include <drm/drm_device.h> 41 41 #include <drm/drm_file.h> 42 - #include <drm/drm_irq.h> 43 42 #include <drm/drm_legacy.h> 44 43 #include <drm/drm_print.h> 45 44 #include <drm/r128_drm.h> ··· 602 603 * is freed, it's too late. 603 604 */ 604 605 if (dev->irq_enabled) 605 - drm_irq_uninstall(dev); 606 + drm_legacy_irq_uninstall(dev); 606 607 607 608 if (dev->dev_private) { 608 609 drm_r128_private_t *dev_priv = dev->dev_private;
-4
drivers/gpu/drm/radeon/radeon_drv.c
··· 607 607 .postclose = radeon_driver_postclose_kms, 608 608 .lastclose = radeon_driver_lastclose_kms, 609 609 .unload = radeon_driver_unload_kms, 610 - .irq_preinstall = radeon_driver_irq_preinstall_kms, 611 - .irq_postinstall = radeon_driver_irq_postinstall_kms, 612 - .irq_uninstall = radeon_driver_irq_uninstall_kms, 613 - .irq_handler = radeon_driver_irq_handler_kms, 614 610 .ioctls = radeon_ioctls_kms, 615 611 .num_ioctls = ARRAY_SIZE(radeon_ioctls_kms), 616 612 .dumb_create = radeon_mode_dumb_create,
+37 -7
drivers/gpu/drm/radeon/radeon_irq_kms.c
··· 31 31 32 32 #include <drm/drm_crtc_helper.h> 33 33 #include <drm/drm_device.h> 34 - #include <drm/drm_irq.h> 34 + #include <drm/drm_drv.h> 35 35 #include <drm/drm_probe_helper.h> 36 36 #include <drm/drm_vblank.h> 37 37 #include <drm/radeon_drm.h> ··· 51 51 * radeon_irq_process is a macro that points to the per-asic 52 52 * irq handler callback. 53 53 */ 54 - irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg) 54 + static irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg) 55 55 { 56 56 struct drm_device *dev = (struct drm_device *) arg; 57 57 struct radeon_device *rdev = dev->dev_private; ··· 118 118 * Gets the hw ready to enable irqs (all asics). 119 119 * This function disables all interrupt sources on the GPU. 120 120 */ 121 - void radeon_driver_irq_preinstall_kms(struct drm_device *dev) 121 + static void radeon_driver_irq_preinstall_kms(struct drm_device *dev) 122 122 { 123 123 struct radeon_device *rdev = dev->dev_private; 124 124 unsigned long irqflags; ··· 150 150 * Handles stuff to be done after enabling irqs (all asics). 151 151 * Returns 0 on success. 152 152 */ 153 - int radeon_driver_irq_postinstall_kms(struct drm_device *dev) 153 + static int radeon_driver_irq_postinstall_kms(struct drm_device *dev) 154 154 { 155 155 struct radeon_device *rdev = dev->dev_private; 156 156 ··· 169 169 * 170 170 * This function disables all interrupt sources on the GPU (all asics). 
171 171 */ 172 - void radeon_driver_irq_uninstall_kms(struct drm_device *dev) 172 + static void radeon_driver_irq_uninstall_kms(struct drm_device *dev) 173 173 { 174 174 struct radeon_device *rdev = dev->dev_private; 175 175 unsigned long irqflags; ··· 192 192 } 193 193 radeon_irq_set(rdev); 194 194 spin_unlock_irqrestore(&rdev->irq.lock, irqflags); 195 + } 196 + 197 + static int radeon_irq_install(struct radeon_device *rdev, int irq) 198 + { 199 + struct drm_device *dev = rdev->ddev; 200 + int ret; 201 + 202 + if (irq == IRQ_NOTCONNECTED) 203 + return -ENOTCONN; 204 + 205 + radeon_driver_irq_preinstall_kms(dev); 206 + 207 + /* PCI devices require shared interrupts. */ 208 + ret = request_irq(irq, radeon_driver_irq_handler_kms, 209 + IRQF_SHARED, dev->driver->name, dev); 210 + if (ret) 211 + return ret; 212 + 213 + radeon_driver_irq_postinstall_kms(dev); 214 + 215 + return 0; 216 + } 217 + 218 + static void radeon_irq_uninstall(struct radeon_device *rdev) 219 + { 220 + struct drm_device *dev = rdev->ddev; 221 + struct pci_dev *pdev = to_pci_dev(dev->dev); 222 + 223 + radeon_driver_irq_uninstall_kms(dev); 224 + free_irq(pdev->irq, dev); 195 225 } 196 226 197 227 /** ··· 344 314 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); 345 315 346 316 rdev->irq.installed = true; 347 - r = drm_irq_install(rdev->ddev, rdev->pdev->irq); 317 + r = radeon_irq_install(rdev, rdev->pdev->irq); 348 318 if (r) { 349 319 rdev->irq.installed = false; 350 320 flush_delayed_work(&rdev->hotplug_work); ··· 365 335 void radeon_irq_kms_fini(struct radeon_device *rdev) 366 336 { 367 337 if (rdev->irq.installed) { 368 - drm_irq_uninstall(rdev->ddev); 338 + radeon_irq_uninstall(rdev); 369 339 rdev->irq.installed = false; 370 340 if (rdev->msi_enabled) 371 341 pci_disable_msi(rdev->pdev);
-4
drivers/gpu/drm/radeon/radeon_kms.h
··· 31 31 u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc); 32 32 int radeon_enable_vblank_kms(struct drm_crtc *crtc); 33 33 void radeon_disable_vblank_kms(struct drm_crtc *crtc); 34 - irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg); 35 - void radeon_driver_irq_preinstall_kms(struct drm_device *dev); 36 - int radeon_driver_irq_postinstall_kms(struct drm_device *dev); 37 - void radeon_driver_irq_uninstall_kms(struct drm_device *dev); 38 34 39 35 #endif /* __RADEON_KMS_H__ */
+5 -10
drivers/gpu/drm/tidss/tidss_drv.c
··· 16 16 #include <drm/drm_drv.h> 17 17 #include <drm/drm_fb_helper.h> 18 18 #include <drm/drm_gem_cma_helper.h> 19 - #include <drm/drm_irq.h> 20 19 #include <drm/drm_managed.h> 21 20 #include <drm/drm_probe_helper.h> 22 21 ··· 117 118 .date = "20180215", 118 119 .major = 1, 119 120 .minor = 0, 120 - 121 - .irq_preinstall = tidss_irq_preinstall, 122 - .irq_postinstall = tidss_irq_postinstall, 123 - .irq_handler = tidss_irq_handler, 124 - .irq_uninstall = tidss_irq_uninstall, 125 121 }; 126 122 127 123 static int tidss_probe(struct platform_device *pdev) ··· 166 172 ret = irq; 167 173 goto err_runtime_suspend; 168 174 } 175 + tidss->irq = irq; 169 176 170 - ret = drm_irq_install(ddev, irq); 177 + ret = tidss_irq_install(ddev, irq); 171 178 if (ret) { 172 - dev_err(dev, "drm_irq_install failed: %d\n", ret); 179 + dev_err(dev, "tidss_irq_install failed: %d\n", ret); 173 180 goto err_runtime_suspend; 174 181 } 175 182 ··· 191 196 return 0; 192 197 193 198 err_irq_uninstall: 194 - drm_irq_uninstall(ddev); 199 + tidss_irq_uninstall(ddev); 195 200 196 201 err_runtime_suspend: 197 202 #ifndef CONFIG_PM ··· 214 219 215 220 drm_atomic_helper_shutdown(ddev); 216 221 217 - drm_irq_uninstall(ddev); 222 + tidss_irq_uninstall(ddev); 218 223 219 224 #ifndef CONFIG_PM 220 225 /* If we don't have PM, we need to call suspend manually */
+2
drivers/gpu/drm/tidss/tidss_drv.h
··· 27 27 unsigned int num_planes; 28 28 struct drm_plane *planes[TIDSS_MAX_PLANES]; 29 29 30 + unsigned int irq; 31 + 30 32 spinlock_t wait_lock; /* protects the irq masks */ 31 33 dispc_irq_t irq_mask; /* enabled irqs in addition to wait_list */ 32 34 };
+24 -3
drivers/gpu/drm/tidss/tidss_irq.c
··· 4 4 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> 5 5 */ 6 6 7 + #include <linux/platform_device.h> 8 + 9 + #include <drm/drm_drv.h> 7 10 #include <drm/drm_print.h> 8 11 9 12 #include "tidss_crtc.h" ··· 53 50 spin_unlock_irqrestore(&tidss->wait_lock, flags); 54 51 } 55 52 56 - irqreturn_t tidss_irq_handler(int irq, void *arg) 53 + static irqreturn_t tidss_irq_handler(int irq, void *arg) 57 54 { 58 55 struct drm_device *ddev = (struct drm_device *)arg; 59 56 struct tidss_device *tidss = to_tidss(ddev); ··· 93 90 spin_unlock_irqrestore(&tidss->wait_lock, flags); 94 91 } 95 92 96 - void tidss_irq_preinstall(struct drm_device *ddev) 93 + static void tidss_irq_preinstall(struct drm_device *ddev) 97 94 { 98 95 struct tidss_device *tidss = to_tidss(ddev); 99 96 ··· 107 104 tidss_runtime_put(tidss); 108 105 } 109 106 110 - int tidss_irq_postinstall(struct drm_device *ddev) 107 + static void tidss_irq_postinstall(struct drm_device *ddev) 111 108 { 112 109 struct tidss_device *tidss = to_tidss(ddev); 113 110 unsigned long flags; ··· 132 129 spin_unlock_irqrestore(&tidss->wait_lock, flags); 133 130 134 131 tidss_runtime_put(tidss); 132 + } 133 + 134 + int tidss_irq_install(struct drm_device *ddev, unsigned int irq) 135 + { 136 + int ret; 137 + 138 + if (irq == IRQ_NOTCONNECTED) 139 + return -ENOTCONN; 140 + 141 + tidss_irq_preinstall(ddev); 142 + 143 + ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev); 144 + if (ret) 145 + return ret; 146 + 147 + tidss_irq_postinstall(ddev); 135 148 136 149 return 0; 137 150 } ··· 159 140 tidss_runtime_get(tidss); 160 141 dispc_set_irqenable(tidss->dispc, 0); 161 142 tidss_runtime_put(tidss); 143 + 144 + free_irq(tidss->irq, ddev); 162 145 }
+1 -3
drivers/gpu/drm/tidss/tidss_irq.h
··· 67 67 void tidss_irq_enable_vblank(struct drm_crtc *crtc); 68 68 void tidss_irq_disable_vblank(struct drm_crtc *crtc); 69 69 70 - void tidss_irq_preinstall(struct drm_device *ddev); 71 - int tidss_irq_postinstall(struct drm_device *ddev); 70 + int tidss_irq_install(struct drm_device *ddev, unsigned int irq); 72 71 void tidss_irq_uninstall(struct drm_device *ddev); 73 - irqreturn_t tidss_irq_handler(int irq, void *arg); 74 72 75 73 void tidss_irq_resume(struct tidss_device *tidss); 76 74
+40 -11
drivers/gpu/drm/tilcdc/tilcdc_drv.c
··· 20 20 #include <drm/drm_fourcc.h> 21 21 #include <drm/drm_gem_cma_helper.h> 22 22 #include <drm/drm_gem_framebuffer_helper.h> 23 - #include <drm/drm_irq.h> 24 23 #include <drm/drm_mm.h> 25 24 #include <drm/drm_probe_helper.h> 26 25 #include <drm/drm_vblank.h> ··· 123 124 } 124 125 #endif 125 126 127 + static irqreturn_t tilcdc_irq(int irq, void *arg) 128 + { 129 + struct drm_device *dev = arg; 130 + struct tilcdc_drm_private *priv = dev->dev_private; 131 + 132 + return tilcdc_crtc_irq(priv->crtc); 133 + } 134 + 135 + static int tilcdc_irq_install(struct drm_device *dev, unsigned int irq) 136 + { 137 + struct tilcdc_drm_private *priv = dev->dev_private; 138 + int ret; 139 + 140 + ret = request_irq(irq, tilcdc_irq, 0, dev->driver->name, dev); 141 + if (ret) 142 + return ret; 143 + 144 + priv->irq_enabled = false; 145 + 146 + return 0; 147 + } 148 + 149 + static void tilcdc_irq_uninstall(struct drm_device *dev) 150 + { 151 + struct tilcdc_drm_private *priv = dev->dev_private; 152 + 153 + if (!priv->irq_enabled) 154 + return; 155 + 156 + free_irq(priv->irq, dev); 157 + priv->irq_enabled = false; 158 + } 159 + 126 160 /* 127 161 * DRM operations: 128 162 */ ··· 177 145 drm_dev_unregister(dev); 178 146 179 147 drm_kms_helper_poll_fini(dev); 180 - drm_irq_uninstall(dev); 148 + tilcdc_irq_uninstall(dev); 181 149 drm_mode_config_cleanup(dev); 182 150 183 151 if (priv->clk) ··· 368 336 goto init_failed; 369 337 } 370 338 371 - ret = drm_irq_install(ddev, platform_get_irq(pdev, 0)); 339 + ret = platform_get_irq(pdev, 0); 340 + if (ret < 0) 341 + goto init_failed; 342 + priv->irq = ret; 343 + 344 + ret = tilcdc_irq_install(ddev, priv->irq); 372 345 if (ret < 0) { 373 346 dev_err(dev, "failed to install IRQ handler\n"); 374 347 goto init_failed; ··· 395 358 tilcdc_fini(ddev); 396 359 397 360 return ret; 398 - } 399 - 400 - static irqreturn_t tilcdc_irq(int irq, void *arg) 401 - { 402 - struct drm_device *dev = arg; 403 - struct tilcdc_drm_private *priv = dev->dev_private; 
404 - return tilcdc_crtc_irq(priv->crtc); 405 361 } 406 362 407 363 #if defined(CONFIG_DEBUG_FS) ··· 484 454 485 455 static const struct drm_driver tilcdc_driver = { 486 456 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 487 - .irq_handler = tilcdc_irq, 488 457 DRM_GEM_CMA_DRIVER_OPS, 489 458 #ifdef CONFIG_DEBUG_FS 490 459 .debugfs_init = tilcdc_debugfs_init,
+3
drivers/gpu/drm/tilcdc/tilcdc_drv.h
··· 46 46 struct clk *clk; /* functional clock */ 47 47 int rev; /* IP revision */ 48 48 49 + unsigned int irq; 50 + 49 51 /* don't attempt resolutions w/ higher W * H * Hz: */ 50 52 uint32_t max_bandwidth; 51 53 /* ··· 84 82 85 83 bool is_registered; 86 84 bool is_componentized; 85 + bool irq_enabled; 87 86 }; 88 87 89 88 /* Sub-module for display. Since we don't know at compile time what panels
+2 -2
drivers/gpu/drm/tiny/cirrus.c
··· 435 435 struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); 436 436 437 437 cirrus_mode_set(cirrus, &crtc_state->mode, plane_state->fb); 438 - cirrus_fb_blit_fullscreen(plane_state->fb, &shadow_plane_state->map[0]); 438 + cirrus_fb_blit_fullscreen(plane_state->fb, &shadow_plane_state->data[0]); 439 439 } 440 440 441 441 static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe, ··· 451 451 cirrus_mode_set(cirrus, &crtc->mode, state->fb); 452 452 453 453 if (drm_atomic_helper_damage_merged(old_state, state, &rect)) 454 - cirrus_fb_blit_rect(state->fb, &shadow_plane_state->map[0], &rect); 454 + cirrus_fb_blit_rect(state->fb, &shadow_plane_state->data[0], &rect); 455 455 } 456 456 457 457 static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
+2 -2
drivers/gpu/drm/tiny/gm12u320.c
··· 554 554 struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); 555 555 556 556 gm12u320->fb_update.draw_status_timeout = FIRST_FRAME_TIMEOUT; 557 - gm12u320_fb_mark_dirty(plane_state->fb, &shadow_plane_state->map[0], &rect); 557 + gm12u320_fb_mark_dirty(plane_state->fb, &shadow_plane_state->data[0], &rect); 558 558 } 559 559 560 560 static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe) ··· 572 572 struct drm_rect rect; 573 573 574 574 if (drm_atomic_helper_damage_merged(old_state, state, &rect)) 575 - gm12u320_fb_mark_dirty(state->fb, &shadow_plane_state->map[0], &rect); 575 + gm12u320_fb_mark_dirty(state->fb, &shadow_plane_state->data[0], &rect); 576 576 } 577 577 578 578 static const struct drm_simple_display_pipe_funcs gm12u320_pipe_funcs = {
+2 -2
drivers/gpu/drm/tiny/simpledrm.c
··· 639 639 struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev); 640 640 struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); 641 641 struct drm_framebuffer *fb = plane_state->fb; 642 - void *vmap = shadow_plane_state->map[0].vaddr; /* TODO: Use mapping abstraction properly */ 642 + void *vmap = shadow_plane_state->data[0].vaddr; /* TODO: Use mapping abstraction */ 643 643 struct drm_device *dev = &sdev->dev; 644 644 int idx; 645 645 ··· 677 677 struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev); 678 678 struct drm_plane_state *plane_state = pipe->plane.state; 679 679 struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); 680 - void *vmap = shadow_plane_state->map[0].vaddr; /* TODO: Use mapping abstraction properly */ 680 + void *vmap = shadow_plane_state->data[0].vaddr; /* TODO: Use mapping abstraction */ 681 681 struct drm_framebuffer *fb = plane_state->fb; 682 682 struct drm_device *dev = &sdev->dev; 683 683 struct drm_rect clip;
+2 -2
drivers/gpu/drm/udl/udl_modeset.c
··· 379 379 380 380 udl->mode_buf_len = wrptr - buf; 381 381 382 - udl_handle_damage(fb, &shadow_plane_state->map[0], 0, 0, fb->width, fb->height); 382 + udl_handle_damage(fb, &shadow_plane_state->data[0], 0, 0, fb->width, fb->height); 383 383 384 384 if (!crtc_state->mode_changed) 385 385 return; ··· 422 422 return; 423 423 424 424 if (drm_atomic_helper_damage_merged(old_plane_state, state, &rect)) 425 - udl_handle_damage(fb, &shadow_plane_state->map[0], rect.x1, rect.y1, 425 + udl_handle_damage(fb, &shadow_plane_state->data[0], rect.x1, rect.y1, 426 426 rect.x2 - rect.x1, rect.y2 - rect.y1); 427 427 } 428 428
+1 -1
drivers/gpu/drm/vboxvideo/vbox_mode.c
··· 398 398 u32 height = new_state->crtc_h; 399 399 struct drm_shadow_plane_state *shadow_plane_state = 400 400 to_drm_shadow_plane_state(new_state); 401 - struct dma_buf_map map = shadow_plane_state->map[0]; 401 + struct dma_buf_map map = shadow_plane_state->data[0]; 402 402 u8 *src = map.vaddr; /* TODO: Use mapping abstraction properly */ 403 403 size_t data_size, mask_size; 404 404 u32 flags;
-4
drivers/gpu/drm/vc4/vc4_drv.c
··· 168 168 DRIVER_SYNCOBJ), 169 169 .open = vc4_open, 170 170 .postclose = vc4_close, 171 - .irq_handler = vc4_irq, 172 - .irq_preinstall = vc4_irq_preinstall, 173 - .irq_postinstall = vc4_irq_postinstall, 174 - .irq_uninstall = vc4_irq_uninstall, 175 171 176 172 #if defined(CONFIG_DEBUG_FS) 177 173 .debugfs_init = vc4_debugfs_init,
+5 -3
drivers/gpu/drm/vc4/vc4_drv.h
··· 74 74 struct vc4_dev { 75 75 struct drm_device base; 76 76 77 + unsigned int irq; 78 + 77 79 struct vc4_hvs *hvs; 78 80 struct vc4_v3d *v3d; 79 81 struct vc4_dpi *dpi; ··· 897 895 extern struct platform_driver vc4_txp_driver; 898 896 899 897 /* vc4_irq.c */ 900 - irqreturn_t vc4_irq(int irq, void *arg); 901 - void vc4_irq_preinstall(struct drm_device *dev); 902 - int vc4_irq_postinstall(struct drm_device *dev); 898 + void vc4_irq_enable(struct drm_device *dev); 899 + void vc4_irq_disable(struct drm_device *dev); 900 + int vc4_irq_install(struct drm_device *dev, int irq); 903 901 void vc4_irq_uninstall(struct drm_device *dev); 904 902 void vc4_irq_reset(struct drm_device *dev); 905 903
+38 -10
drivers/gpu/drm/vc4/vc4_irq.c
··· 45 45 * current job can make progress. 46 46 */ 47 47 48 + #include <linux/platform_device.h> 49 + 50 + #include <drm/drm_drv.h> 51 + 48 52 #include "vc4_drv.h" 49 53 #include "vc4_regs.h" 50 54 ··· 196 192 schedule_work(&vc4->job_done_work); 197 193 } 198 194 199 - irqreturn_t 195 + static irqreturn_t 200 196 vc4_irq(int irq, void *arg) 201 197 { 202 198 struct drm_device *dev = arg; ··· 238 234 return status; 239 235 } 240 236 241 - void 242 - vc4_irq_preinstall(struct drm_device *dev) 237 + static void 238 + vc4_irq_prepare(struct drm_device *dev) 243 239 { 244 240 struct vc4_dev *vc4 = to_vc4_dev(dev); 245 241 ··· 255 251 V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS); 256 252 } 257 253 258 - int 259 - vc4_irq_postinstall(struct drm_device *dev) 254 + void 255 + vc4_irq_enable(struct drm_device *dev) 260 256 { 261 257 struct vc4_dev *vc4 = to_vc4_dev(dev); 262 258 263 259 if (!vc4->v3d) 264 - return 0; 260 + return; 265 261 266 262 /* Enable the render done interrupts. The out-of-memory interrupt is 267 263 * enabled as soon as we have a binner BO allocated. 268 264 */ 269 265 V3D_WRITE(V3D_INTENA, V3D_INT_FLDONE | V3D_INT_FRDONE); 270 - 271 - return 0; 272 266 } 273 267 274 268 void 275 - vc4_irq_uninstall(struct drm_device *dev) 269 + vc4_irq_disable(struct drm_device *dev) 276 270 { 277 271 struct vc4_dev *vc4 = to_vc4_dev(dev); 278 272 ··· 284 282 V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS); 285 283 286 284 /* Finish any interrupt handler still in flight. 
*/ 287 - disable_irq(dev->irq); 285 + disable_irq(vc4->irq); 288 286 289 287 cancel_work_sync(&vc4->overflow_mem_work); 288 + } 289 + 290 + int vc4_irq_install(struct drm_device *dev, int irq) 291 + { 292 + int ret; 293 + 294 + if (irq == IRQ_NOTCONNECTED) 295 + return -ENOTCONN; 296 + 297 + vc4_irq_prepare(dev); 298 + 299 + ret = request_irq(irq, vc4_irq, 0, dev->driver->name, dev); 300 + if (ret) 301 + return ret; 302 + 303 + vc4_irq_enable(dev); 304 + 305 + return 0; 306 + } 307 + 308 + void vc4_irq_uninstall(struct drm_device *dev) 309 + { 310 + struct vc4_dev *vc4 = to_vc4_dev(dev); 311 + 312 + vc4_irq_disable(dev); 313 + free_irq(vc4->irq, dev); 290 314 } 291 315 292 316 /** Reinitializes interrupt registers when a GPU reset is performed. */
+10 -7
drivers/gpu/drm/vc4/vc4_v3d.c
··· 10 10 #include <linux/platform_device.h> 11 11 #include <linux/pm_runtime.h> 12 12 13 - #include <drm/drm_irq.h> 14 - 15 13 #include "vc4_drv.h" 16 14 #include "vc4_regs.h" 17 15 ··· 359 361 struct vc4_v3d *v3d = dev_get_drvdata(dev); 360 362 struct vc4_dev *vc4 = v3d->vc4; 361 363 362 - vc4_irq_uninstall(&vc4->base); 364 + vc4_irq_disable(&vc4->base); 363 365 364 366 clk_disable_unprepare(v3d->clk); 365 367 ··· 379 381 vc4_v3d_init_hw(&vc4->base); 380 382 381 383 /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */ 382 - enable_irq(vc4->base.irq); 383 - vc4_irq_postinstall(&vc4->base); 384 + enable_irq(vc4->irq); 385 + vc4_irq_enable(&vc4->base); 384 386 385 387 return 0; 386 388 } ··· 446 448 447 449 vc4_v3d_init_hw(drm); 448 450 449 - ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); 451 + ret = platform_get_irq(pdev, 0); 452 + if (ret < 0) 453 + return ret; 454 + vc4->irq = ret; 455 + 456 + ret = vc4_irq_install(drm, vc4->irq); 450 457 if (ret) { 451 458 DRM_ERROR("Failed to install IRQ handler\n"); 452 459 return ret; ··· 476 473 477 474 pm_runtime_disable(dev); 478 475 479 - drm_irq_uninstall(drm); 476 + vc4_irq_uninstall(drm); 480 477 481 478 /* Disable the binner's overflow memory address, so the next 482 479 * driver probe (if any) doesn't try to reuse our old
+1 -2
drivers/gpu/drm/via/via_mm.c
··· 29 29 30 30 #include <drm/drm_device.h> 31 31 #include <drm/drm_file.h> 32 - #include <drm/drm_irq.h> 33 32 #include <drm/via_drm.h> 34 33 35 34 #include "via_drv.h" ··· 85 86 /* Last context, perform cleanup */ 86 87 if (list_is_singular(&dev->ctxlist)) { 87 88 DRM_DEBUG("Last Context\n"); 88 - drm_irq_uninstall(dev); 89 + drm_legacy_irq_uninstall(dev); 89 90 via_cleanup_futex(dev_priv); 90 91 via_do_cleanup_map(dev); 91 92 }
+2
drivers/gpu/drm/virtio/virtgpu_prime.c
··· 98 98 } else { 99 99 bo->uuid_state = STATE_ERR; 100 100 } 101 + } else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) { 102 + bo->uuid_state = STATE_ERR; 101 103 } 102 104 103 105 exp_info.ops = &virtgpu_dmabuf_ops.ops;
+1 -1
drivers/gpu/drm/vkms/vkms_composer.c
··· 257 257 return; 258 258 259 259 if (wb_pending) 260 - vaddr_out = crtc_state->active_writeback->map[0].vaddr; 260 + vaddr_out = crtc_state->active_writeback->data[0].vaddr; 261 261 262 262 ret = compose_active_planes(&vaddr_out, primary_composer, 263 263 crtc_state);
+1
drivers/gpu/drm/vkms/vkms_drv.h
··· 22 22 23 23 struct vkms_writeback_job { 24 24 struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; 25 + struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; 25 26 }; 26 27 27 28 struct vkms_composer {
+1 -1
drivers/gpu/drm/vkms/vkms_plane.c
··· 111 111 memcpy(&composer->src, &new_state->src, sizeof(struct drm_rect)); 112 112 memcpy(&composer->dst, &new_state->dst, sizeof(struct drm_rect)); 113 113 memcpy(&composer->fb, fb, sizeof(struct drm_framebuffer)); 114 - memcpy(&composer->map, &shadow_plane_state->map, sizeof(composer->map)); 114 + memcpy(&composer->map, &shadow_plane_state->data, sizeof(composer->map)); 115 115 drm_framebuffer_get(&composer->fb); 116 116 composer->offset = fb->offsets[0]; 117 117 composer->pitch = fb->pitches[0];
+1 -1
drivers/gpu/drm/vkms/vkms_writeback.c
··· 75 75 if (!vkmsjob) 76 76 return -ENOMEM; 77 77 78 - ret = drm_gem_fb_vmap(job->fb, vkmsjob->map); 78 + ret = drm_gem_fb_vmap(job->fb, vkmsjob->map, vkmsjob->data); 79 79 if (ret) { 80 80 DRM_ERROR("vmap failed: %d\n", ret); 81 81 goto err_kfree;
+6 -7
drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
··· 97 97 } MKSGuestStatInfoEntry; 98 98 99 99 #define INVALID_PPN64 ((PPN64)0x000fffffffffffffULL) 100 - #define vmw_num_pages(size) (PAGE_ALIGN(size) >> PAGE_SHIFT) 101 100 102 101 #define MKS_GUEST_STAT_INSTANCE_DESC_LENGTH 1024 103 102 #define MKS_GUEST_STAT_INSTANCE_MAX_STATS 4096 104 - #define MKS_GUEST_STAT_INSTANCE_MAX_STAT_PPNS \ 105 - (vmw_num_pages(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \ 103 + #define MKS_GUEST_STAT_INSTANCE_MAX_STAT_PPNS \ 104 + (PFN_UP(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \ 106 105 sizeof(MKSGuestStatCounterTime))) 107 - #define MKS_GUEST_STAT_INSTANCE_MAX_INFO_PPNS \ 108 - (vmw_num_pages(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \ 106 + #define MKS_GUEST_STAT_INSTANCE_MAX_INFO_PPNS \ 107 + (PFN_UP(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \ 109 108 sizeof(MKSGuestStatInfoEntry))) 110 109 #define MKS_GUEST_STAT_AVERAGE_NAME_LENGTH 40 111 - #define MKS_GUEST_STAT_INSTANCE_MAX_STRS_PPNS \ 112 - (vmw_num_pages(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \ 110 + #define MKS_GUEST_STAT_INSTANCE_MAX_STRS_PPNS \ 111 + (PFN_UP(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \ 113 112 MKS_GUEST_STAT_AVERAGE_NAME_LENGTH)) 114 113 115 114 /*
+2 -3
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 405 405 bool user) 406 406 { 407 407 static size_t struct_size, user_struct_size; 408 - size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 408 + size_t num_pages = PFN_UP(size); 409 409 size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *)); 410 410 411 411 if (unlikely(struct_size == 0)) { ··· 474 474 struct ttm_placement *placement, 475 475 struct ttm_buffer_object **p_bo) 476 476 { 477 - unsigned npages = PAGE_ALIGN(size) >> PAGE_SHIFT; 478 477 struct ttm_operation_ctx ctx = { false, false }; 479 478 struct ttm_buffer_object *bo; 480 479 size_t acc_size; ··· 484 485 return -ENOMEM; 485 486 486 487 acc_size = ttm_round_pot(sizeof(*bo)); 487 - acc_size += ttm_round_pot(npages * sizeof(void *)); 488 + acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *)); 488 489 acc_size += ttm_round_pot(sizeof(struct ttm_tt)); 489 490 490 491 ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
+2 -3
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
··· 358 358 break; 359 359 } 360 360 361 - list_del(&entry->list); 362 - list_add_tail(&entry->list, &ctx->hw_submitted); 361 + list_move_tail(&entry->list, &ctx->hw_submitted); 363 362 ctx->num_hw_submitted++; 364 363 } 365 364 ··· 801 802 { 802 803 struct vmw_cmdbuf_alloc_info info; 803 804 804 - info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT; 805 + info.page_size = PFN_UP(size); 805 806 info.node = node; 806 807 info.done = false; 807 808
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
··· 169 169 case VMW_CMDBUF_RES_DEL: 170 170 ret = drm_ht_insert_item(&entry->man->resources, &entry->hash); 171 171 BUG_ON(ret); 172 - list_del(&entry->head); 173 - list_add_tail(&entry->head, &entry->man->list); 172 + list_move_tail(&entry->head, &entry->man->list); 174 173 entry->state = VMW_CMDBUF_RES_COMMITTED; 175 174 break; 176 175 default:
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 607 607 if (num_entries < co_info[type].min_initial_entries) { 608 608 vcotbl->res.backup_size = co_info[type].min_initial_entries * 609 609 co_info[type].size; 610 - vcotbl->res.backup_size = 611 - (vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK; 610 + vcotbl->res.backup_size = PFN_ALIGN(vcotbl->res.backup_size); 612 611 } 613 612 614 613 vcotbl->scrubbed = true;
-1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 1295 1295 vmw_context_res_man(struct vmw_resource *ctx); 1296 1296 extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx, 1297 1297 SVGACOTableType cotable_type); 1298 - extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); 1299 1298 struct vmw_ctx_binding_state; 1300 1299 extern struct vmw_ctx_binding_state * 1301 1300 vmw_context_binding_state(struct vmw_resource *ctx);
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 100 100 int ret; 101 101 102 102 kmap_offset = 0; 103 - kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT; 103 + kmap_num = PFN_UP(width*height*4); 104 104 105 105 ret = ttm_bo_reserve(&bo->base, true, false, NULL); 106 106 if (unlikely(ret != 0)) {
+2 -3
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
··· 256 256 if (!otables[i].enabled) 257 257 continue; 258 258 259 - otables[i].size = 260 - (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; 259 + otables[i].size = PFN_ALIGN(otables[i].size); 261 260 bo_size += otables[i].size; 262 261 } 263 262 ··· 384 385 while (likely(data_size > PAGE_SIZE)) { 385 386 data_size = DIV_ROUND_UP(data_size, PAGE_SIZE); 386 387 data_size *= VMW_PPN_SIZE; 387 - tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK; 388 + tot_size += PFN_ALIGN(data_size); 388 389 } 389 390 390 391 return tot_size >> PAGE_SHIFT;
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
··· 1016 1016 1017 1017 struct page *page; 1018 1018 MKSGuestStatInstanceDescriptor *pdesc; 1019 - const size_t num_pages_stat = vmw_num_pages(arg->stat_len); 1020 - const size_t num_pages_info = vmw_num_pages(arg->info_len); 1021 - const size_t num_pages_strs = vmw_num_pages(arg->strs_len); 1019 + const size_t num_pages_stat = PFN_UP(arg->stat_len); 1020 + const size_t num_pages_info = PFN_UP(arg->info_len); 1021 + const size_t num_pages_strs = PFN_UP(arg->strs_len); 1022 1022 long desc_len; 1023 1023 long nr_pinned_stat; 1024 1024 long nr_pinned_info;
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 353 353 static int vmw_resource_buf_alloc(struct vmw_resource *res, 354 354 bool interruptible) 355 355 { 356 - unsigned long size = 357 - (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK; 356 + unsigned long size = PFN_ALIGN(res->backup_size); 358 357 struct vmw_buffer_object *backup; 359 358 int ret; 360 359
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 981 981 goto no_reserve; 982 982 983 983 /* Map and copy shader bytecode. */ 984 - ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT, 985 - &map); 984 + ret = ttm_bo_kmap(&buf->base, 0, PFN_UP(size), &map); 986 985 if (unlikely(ret != 0)) { 987 986 ttm_bo_unreserve(&buf->base); 988 987 goto no_reserve;
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 865 865 user_srf->prime.base.shareable = false; 866 866 user_srf->prime.base.tfile = NULL; 867 867 if (drm_is_primary_client(file_priv)) 868 - user_srf->master = drm_master_get(file_priv->master); 868 + user_srf->master = drm_file_get_master(file_priv); 869 869 870 870 /** 871 871 * From this point, the generic resource management functions ··· 1534 1534 1535 1535 user_srf = container_of(srf, struct vmw_user_surface, srf); 1536 1536 if (drm_is_primary_client(file_priv)) 1537 - user_srf->master = drm_master_get(file_priv->master); 1537 + user_srf->master = drm_file_get_master(file_priv); 1538 1538 1539 1539 res = &user_srf->srf.res; 1540 1540
+4 -14
include/drm/drm_device.h
··· 192 192 struct list_head clientlist; 193 193 194 194 /** 195 - * @irq_enabled: 196 - * 197 - * Indicates that interrupt handling is enabled, specifically vblank 198 - * handling. Drivers which don't use drm_irq_install() need to set this 199 - * to true manually. 200 - */ 201 - bool irq_enabled; 202 - 203 - /** 204 - * @irq: Used by the drm_irq_install() and drm_irq_unistall() helpers. 205 - */ 206 - int irq; 207 - 208 - /** 209 195 * @vblank_disable_immediate: 210 196 * 211 197 * If true, vblank interrupt will be disabled immediately when the ··· 358 372 359 373 /* Scatter gather memory */ 360 374 struct drm_sg_mem *sg; 375 + 376 + /* IRQs */ 377 + bool irq_enabled; 378 + int irq; 361 379 #endif 362 380 }; 363 381
+4 -40
include/drm/drm_drv.h
··· 137 137 * @DRIVER_HAVE_IRQ: 138 138 * 139 139 * Legacy irq support. Only for legacy drivers. Do not use. 140 - * 141 - * New drivers can either use the drm_irq_install() and 142 - * drm_irq_uninstall() helper functions, or roll their own irq support 143 - * code by calling request_irq() directly. 144 140 */ 145 141 DRIVER_HAVE_IRQ = BIT(30), 146 142 /** ··· 266 270 * managed resources functions. 267 271 */ 268 272 void (*release) (struct drm_device *); 269 - 270 - /** 271 - * @irq_handler: 272 - * 273 - * Interrupt handler called when using drm_irq_install(). Not used by 274 - * drivers which implement their own interrupt handling. 275 - */ 276 - irqreturn_t(*irq_handler) (int irq, void *arg); 277 - 278 - /** 279 - * @irq_preinstall: 280 - * 281 - * Optional callback used by drm_irq_install() which is called before 282 - * the interrupt handler is registered. This should be used to clear out 283 - * any pending interrupts (from e.g. firmware based drives) and reset 284 - * the interrupt handling registers. 285 - */ 286 - void (*irq_preinstall) (struct drm_device *dev); 287 - 288 - /** 289 - * @irq_postinstall: 290 - * 291 - * Optional callback used by drm_irq_install() which is called after 292 - * the interrupt handler is registered. This should be used to enable 293 - * interrupt generation in the hardware. 294 - */ 295 - int (*irq_postinstall) (struct drm_device *dev); 296 - 297 - /** 298 - * @irq_uninstall: 299 - * 300 - * Optional callback used by drm_irq_uninstall() which is called before 301 - * the interrupt handler is unregistered. This should be used to disable 302 - * interrupt generation in the hardware. 
303 - */ 304 - void (*irq_uninstall) (struct drm_device *dev); 305 273 306 274 /** 307 275 * @master_set: ··· 464 504 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); 465 505 int (*dma_quiescent) (struct drm_device *); 466 506 int (*context_dtor) (struct drm_device *dev, int context); 507 + irqreturn_t (*irq_handler)(int irq, void *arg); 508 + void (*irq_preinstall)(struct drm_device *dev); 509 + int (*irq_postinstall)(struct drm_device *dev); 510 + void (*irq_uninstall)(struct drm_device *dev); 467 511 u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe); 468 512 int (*enable_vblank)(struct drm_device *dev, unsigned int pipe); 469 513 void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
+1 -1
include/drm/drm_edid.h
··· 336 336 u8 features; 337 337 /* Color characteristics */ 338 338 u8 red_green_lo; 339 - u8 black_white_lo; 339 + u8 blue_white_lo; 340 340 u8 red_x; 341 341 u8 red_y; 342 342 u8 green_x;
+4
include/drm/drm_file.h
··· 233 233 * this only matches &drm_device.master if the master is the currently 234 234 * active one. 235 235 * 236 + * To update @master, both &drm_device.master_mutex and 237 + * @master_lookup_lock need to be held, therefore holding either of 238 + * them is safe and enough for the read side. 239 + * 236 240 * When dereferencing this pointer, either hold struct 237 241 * &drm_device.master_mutex for the duration of the pointer's use, or 238 242 * use drm_file_get_master() if struct &drm_device.master_mutex is not
+8
include/drm/drm_gem_atomic_helper.h
··· 42 42 * prepare_fb callback and removed in the cleanup_fb callback. 43 43 */ 44 44 struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; 45 + 46 + /** 47 + * @data: Address of each framebuffer BO's data 48 + * 49 + * The address of the data stored in each mapping. This is different 50 + * for framebuffers with non-zero offset fields. 51 + */ 52 + struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; 45 53 }; 46 54 47 55 /**
+2 -1
include/drm/drm_gem_framebuffer_helper.h
··· 40 40 const struct drm_mode_fb_cmd2 *mode_cmd); 41 41 42 42 int drm_gem_fb_vmap(struct drm_framebuffer *fb, 43 - struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES]); 43 + struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES], 44 + struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]); 44 45 void drm_gem_fb_vunmap(struct drm_framebuffer *fb, 45 46 struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES]); 46 47 int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir);
-32
include/drm/drm_irq.h
··· 1 - /* 2 - * Copyright 2016 Intel Corp. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice (including the next 12 - * paragraph) shall be included in all copies or substantial portions of the 13 - * Software. 14 - * 15 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 - * OTHER DEALINGS IN THE SOFTWARE. 22 - */ 23 - 24 - #ifndef _DRM_IRQ_H_ 25 - #define _DRM_IRQ_H_ 26 - 27 - struct drm_device; 28 - 29 - int drm_irq_install(struct drm_device *dev, int irq); 30 - int drm_irq_uninstall(struct drm_device *dev); 31 - int devm_drm_irq_install(struct drm_device *dev, int irq); 32 - #endif
+3
include/drm/drm_legacy.h
··· 192 192 void drm_legacy_idlelock_take(struct drm_lock_data *lock); 193 193 void drm_legacy_idlelock_release(struct drm_lock_data *lock); 194 194 195 + /* drm_irq.c */ 196 + int drm_legacy_irq_uninstall(struct drm_device *dev); 197 + 195 198 /* drm_pci.c */ 196 199 197 200 #ifdef CONFIG_PCI
+27
include/drm/drm_simple_kms_helper.h
··· 154 154 void (*disable_vblank)(struct drm_simple_display_pipe *pipe); 155 155 156 156 /** 157 + * @reset_crtc: 158 + * 159 + * Optional, called by &drm_crtc_funcs.reset. Please read the 160 + * documentation for the &drm_crtc_funcs.reset hook for more details. 161 + */ 162 + void (*reset_crtc)(struct drm_simple_display_pipe *pipe); 163 + 164 + /** 165 + * @duplicate_crtc_state: 166 + * 167 + * Optional, called by &drm_crtc_funcs.atomic_duplicate_state. Please 168 + * read the documentation for the &drm_crtc_funcs.atomic_duplicate_state 169 + * hook for more details. 170 + */ 171 + struct drm_crtc_state * (*duplicate_crtc_state)(struct drm_simple_display_pipe *pipe); 172 + 173 + /** 174 + * @destroy_crtc_state: 175 + * 176 + * Optional, called by &drm_crtc_funcs.atomic_destroy_state. Please 177 + * read the documentation for the &drm_crtc_funcs.atomic_destroy_state 178 + * hook for more details. 179 + */ 180 + void (*destroy_crtc_state)(struct drm_simple_display_pipe *pipe, 181 + struct drm_crtc_state *crtc_state); 182 + 183 + /** 157 184 * @reset_plane: 158 185 * 159 186 * Optional, called by &drm_plane_funcs.reset. Please read the
+5 -5
include/linux/dma-buf.h
··· 54 54 * device), and otherwise need to fail the attach operation. 55 55 * 56 56 * The exporter should also in general check whether the current 57 - * allocation fullfills the DMA constraints of the new device. If this 57 + * allocation fulfills the DMA constraints of the new device. If this 58 58 * is not the case, and the allocation cannot be moved, it should also 59 59 * fail the attach operation. 60 60 * ··· 161 161 * 162 162 * Returns: 163 163 * 164 - * A &sg_table scatter list of or the backing storage of the DMA buffer, 164 + * A &sg_table scatter list of the backing storage of the DMA buffer, 165 165 * already mapped into the device address space of the &device attached 166 166 * with the provided &dma_buf_attachment. The addresses and lengths in 167 167 * the scatter list are PAGE_SIZE aligned. ··· 183 183 * 184 184 * This is called by dma_buf_unmap_attachment() and should unmap and 185 185 * release the &sg_table allocated in @map_dma_buf, and it is mandatory. 186 - * For static dma_buf handling this might also unpins the backing 186 + * For static dma_buf handling this might also unpin the backing 187 187 * storage if this is the last mapping of the DMA buffer. 188 188 */ 189 189 void (*unmap_dma_buf)(struct dma_buf_attachment *, ··· 252 252 * This callback is used by the dma_buf_mmap() function 253 253 * 254 254 * Note that the mapping needs to be incoherent, userspace is expected 255 - * to braket CPU access using the DMA_BUF_IOCTL_SYNC interface. 255 + * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface. 
256 256 * 257 257 * Because dma-buf buffers have invariant size over their lifetime, the 258 258 * dma-buf core checks whether a vma is too large and rejects such ··· 580 580 581 581 /** 582 582 * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic 583 - * mappinsg 583 + * mappings 584 584 * @attach: the DMA-buf attachment to check 585 585 * 586 586 * Returns true if a DMA-buf importer wants to call the map/unmap functions with
+21 -20
include/linux/lockdep.h
··· 306 306 307 307 #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) 308 308 309 - #define lockdep_assert_held(l) do { \ 310 - WARN_ON(debug_locks && \ 311 - lockdep_is_held(l) == LOCK_STATE_NOT_HELD); \ 312 - } while (0) 309 + #define lockdep_assert(cond) \ 310 + do { WARN_ON(debug_locks && !(cond)); } while (0) 313 311 314 - #define lockdep_assert_not_held(l) do { \ 315 - WARN_ON(debug_locks && \ 316 - lockdep_is_held(l) == LOCK_STATE_HELD); \ 317 - } while (0) 312 + #define lockdep_assert_once(cond) \ 313 + do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0) 318 314 319 - #define lockdep_assert_held_write(l) do { \ 320 - WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \ 321 - } while (0) 315 + #define lockdep_assert_held(l) \ 316 + lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD) 322 317 323 - #define lockdep_assert_held_read(l) do { \ 324 - WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \ 325 - } while (0) 318 + #define lockdep_assert_not_held(l) \ 319 + lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD) 326 320 327 - #define lockdep_assert_held_once(l) do { \ 328 - WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ 329 - } while (0) 321 + #define lockdep_assert_held_write(l) \ 322 + lockdep_assert(lockdep_is_held_type(l, 0)) 330 323 331 - #define lockdep_assert_none_held_once() do { \ 332 - WARN_ON_ONCE(debug_locks && current->lockdep_depth); \ 333 - } while (0) 324 + #define lockdep_assert_held_read(l) \ 325 + lockdep_assert(lockdep_is_held_type(l, 1)) 326 + 327 + #define lockdep_assert_held_once(l) \ 328 + lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD) 329 + 330 + #define lockdep_assert_none_held_once() \ 331 + lockdep_assert_once(!current->lockdep_depth) 334 332 335 333 #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) 336 334 ··· 404 406 extern int lock_is_held(const void *); 405 407 extern int lockdep_is_held(const void *); 406 408 #define lockdep_is_held_type(l, r) (1) 409 + 410 + 
#define lockdep_assert(c) do { } while (0) 411 + #define lockdep_assert_once(c) do { } while (0) 407 412 408 413 #define lockdep_assert_held(l) do { (void)(l); } while (0) 409 414 #define lockdep_assert_not_held(l) do { (void)(l); } while (0)