Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2024-06-20' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next

drm-misc-next for 6.11:

UAPI Changes:
- New monochrome TV mode variant

Cross-subsystem Changes:
- dma heaps: Slightly change the allocation hook prototype

Core Changes:

Driver Changes:
- ivpu: various improvements to firmware handling, clocks, power
management, scheduling and logging.
- mgag200: Add BMC output, enable polling
- panfrost: Enable MT8188 support
- tidss: drm_panic support
- zynqmp_dp: IRQ cleanups, debugfs DP compliance testing API
- bridge:
- sii902x: state validation improvements
- panel:
- edp: Drop legacy panel compatibles
- simple-bridge: Switch to devm_drm_bridge_add

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240620-heretic-honored-macaque-b40f8a@houat

+1227 -827
-10
Documentation/devicetree/bindings/display/panel/panel-edp-legacy.yaml
··· 31 31 # AUO B116XAK01 eDP TFT LCD panel 32 32 - auo,b116xa01 33 33 # AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel 34 - - auo,b133han05 35 - # AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel 36 34 - auo,b133htn01 37 35 # AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel 38 36 - auo,b133xtn01 39 - # AU Optronics Corporation 14.0" FHD (1920x1080) color TFT-LCD panel 40 - - auo,b140han06 41 37 # BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel 42 38 - boe,nv101wxmn51 43 39 # BOE NV133FHM-N61 13.3" FHD (1920x1080) TFT LCD Panel ··· 52 56 - innolux,n125hce-gn1 53 57 # Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel 54 58 - innolux,p120zdg-bf1 55 - # InfoVision Optoelectronics M133NWF4 R0 13.3" FHD (1920x1080) TFT LCD panel 56 - - ivo,m133nwf4-r0 57 59 # King & Display KD116N21-30NV-A010 eDP TFT LCD panel 58 60 - kingdisplay,kd116n21-30nv-a010 59 61 # LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel ··· 72 78 - sharp,ld-d5116z01b 73 79 # Sharp 12.3" (2400x1600 pixels) TFT LCD panel 74 80 - sharp,lq123p1jx31 75 - # Sharp 14" (1920x1080 pixels) TFT LCD panel 76 - - sharp,lq140m1jw46 77 - # Starry 12.2" (1920x1200 pixels) TFT LCD panel 78 - - starry,kr122ea0sra 79 81 80 82 backlight: true 81 83 ddc-i2c-bus: true
+4 -1
Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
··· 34 34 - const: arm,mali-valhall-jm # Mali Valhall GPU model/revision is fully discoverable 35 35 - items: 36 36 - enum: 37 + - mediatek,mt8188-mali 37 38 - mediatek,mt8192-mali 38 39 - const: arm,mali-valhall-jm # Mali Valhall GPU model/revision is fully discoverable 39 40 ··· 196 195 properties: 197 196 compatible: 198 197 contains: 199 - const: mediatek,mt8183b-mali 198 + enum: 199 + - mediatek,mt8183b-mali 200 + - mediatek,mt8188-mali 200 201 then: 201 202 properties: 202 203 power-domains:
+1
MAINTAINERS
··· 7196 7196 S: Maintained 7197 7197 T: git https://gitlab.freedesktop.org/drm/misc/kernel.git 7198 7198 F: Documentation/gpu/vkms.rst 7199 + F: drivers/gpu/drm/ci/xfails/vkms* 7199 7200 F: drivers/gpu/drm/vkms/ 7200 7201 7201 7202 DRM DRIVER FOR VIRTUALBOX VIRTUAL GPU
+37 -2
drivers/accel/ivpu/ivpu_debugfs.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (C) 2020-2023 Intel Corporation 3 + * Copyright (C) 2020-2024 Intel Corporation 4 4 */ 5 5 6 6 #include <linux/debugfs.h> ··· 381 381 .write = ivpu_resume_engine_fn, 382 382 }; 383 383 384 + static int dct_active_get(void *data, u64 *active_percent) 385 + { 386 + struct ivpu_device *vdev = data; 387 + 388 + *active_percent = vdev->pm->dct_active_percent; 389 + 390 + return 0; 391 + } 392 + 393 + static int dct_active_set(void *data, u64 active_percent) 394 + { 395 + struct ivpu_device *vdev = data; 396 + int ret; 397 + 398 + if (active_percent > 100) 399 + return -EINVAL; 400 + 401 + ret = ivpu_rpm_get(vdev); 402 + if (ret) 403 + return ret; 404 + 405 + if (active_percent) 406 + ret = ivpu_pm_dct_enable(vdev, active_percent); 407 + else 408 + ret = ivpu_pm_dct_disable(vdev); 409 + 410 + ivpu_rpm_put(vdev); 411 + 412 + return ret; 413 + } 414 + 415 + DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n"); 416 + 384 417 void ivpu_debugfs_init(struct ivpu_device *vdev) 385 418 { 386 419 struct dentry *debugfs_root = vdev->drm.debugfs_root; ··· 442 409 debugfs_create_file("resume_engine", 0200, debugfs_root, vdev, 443 410 &ivpu_resume_engine_fops); 444 411 445 - if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX) 412 + if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX) { 446 413 debugfs_create_file("fw_profiling_freq_drive", 0200, 447 414 debugfs_root, vdev, &fw_profiling_freq_fops); 415 + debugfs_create_file("dct", 0644, debugfs_root, vdev, &ivpu_dct_fops); 416 + } 448 417 }
+36 -5
drivers/accel/ivpu/ivpu_drv.c
··· 58 58 MODULE_PARM_DESC(sched_mode, "Scheduler mode: 0 - Default scheduler, 1 - Force HW scheduler"); 59 59 60 60 bool ivpu_disable_mmu_cont_pages; 61 - module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644); 61 + module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444); 62 62 MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization"); 63 63 64 64 bool ivpu_force_snoop; 65 - module_param_named(force_snoop, ivpu_force_snoop, bool, 0644); 65 + module_param_named(force_snoop, ivpu_force_snoop, bool, 0444); 66 66 MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access"); 67 67 68 68 struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv) ··· 391 391 ivpu_hw_irq_enable(vdev); 392 392 ivpu_ipc_enable(vdev); 393 393 394 - if (ivpu_fw_is_cold_boot(vdev)) 394 + if (ivpu_fw_is_cold_boot(vdev)) { 395 + ret = ivpu_pm_dct_init(vdev); 396 + if (ret) 397 + return ret; 398 + 395 399 return ivpu_hw_sched_init(vdev); 400 + } 396 401 397 402 return 0; 398 403 } ··· 451 446 .minor = DRM_IVPU_DRIVER_MINOR, 452 447 }; 453 448 449 + static void ivpu_context_abort_invalid(struct ivpu_device *vdev) 450 + { 451 + struct ivpu_file_priv *file_priv; 452 + unsigned long ctx_id; 453 + 454 + mutex_lock(&vdev->context_list_lock); 455 + 456 + xa_for_each(&vdev->context_xa, ctx_id, file_priv) { 457 + if (!file_priv->has_mmu_faults || file_priv->aborted) 458 + continue; 459 + 460 + mutex_lock(&file_priv->lock); 461 + ivpu_context_abort_locked(file_priv); 462 + file_priv->aborted = true; 463 + mutex_unlock(&file_priv->lock); 464 + } 465 + 466 + mutex_unlock(&vdev->context_list_lock); 467 + } 468 + 454 469 static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg) 455 470 { 456 471 struct ivpu_device *vdev = arg; ··· 483 458 switch (irq_src) { 484 459 case IVPU_HW_IRQ_SRC_IPC: 485 460 ivpu_ipc_irq_thread_handler(vdev); 461 + break; 462 + case IVPU_HW_IRQ_SRC_MMU_EVTQ: 463 + 
ivpu_context_abort_invalid(vdev); 464 + break; 465 + case IVPU_HW_IRQ_SRC_DCT: 466 + ivpu_pm_dct_irq_thread_handler(vdev); 486 467 break; 487 468 default: 488 469 ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src); ··· 695 664 696 665 static void ivpu_dev_fini(struct ivpu_device *vdev) 697 666 { 667 + ivpu_jobs_abort_all(vdev); 668 + ivpu_pm_cancel_recovery(vdev); 698 669 ivpu_pm_disable(vdev); 699 670 ivpu_prepare_for_reset(vdev); 700 671 ivpu_shutdown(vdev); 701 672 702 673 ivpu_ms_cleanup_all(vdev); 703 - ivpu_jobs_abort_all(vdev); 704 674 ivpu_job_done_consumer_fini(vdev); 705 - ivpu_pm_cancel_recovery(vdev); 706 675 ivpu_bo_unbind_all_user_contexts(vdev); 707 676 708 677 ivpu_ipc_fini(vdev);
+5 -1
drivers/accel/ivpu/ivpu_drv.h
··· 32 32 #define IVPU_HW_IP_50XX 50 33 33 #define IVPU_HW_IP_60XX 60 34 34 35 + #define IVPU_HW_IP_REV_LNL_B0 4 36 + 35 37 #define IVPU_HW_BTRS_MTL 1 36 38 #define IVPU_HW_BTRS_LNL 2 37 39 ··· 104 102 bool interrupt_clear_with_0; 105 103 bool disable_clock_relinquish; 106 104 bool disable_d0i3_msg; 105 + bool wp0_during_power_up; 107 106 }; 108 107 109 108 struct ivpu_hw_info; ··· 150 147 int boot; 151 148 int jsm; 152 149 int tdr; 153 - int reschedule_suspend; 154 150 int autosuspend; 155 151 int d0i3_entry_msg; 156 152 } timeout; ··· 170 168 struct ivpu_bo *ms_info_bo; 171 169 bool has_mmu_faults; 172 170 bool bound; 171 + bool aborted; 173 172 }; 174 173 175 174 extern int ivpu_dbg_mask; ··· 187 184 #define IVPU_TEST_MODE_D0I3_MSG_ENABLE BIT(5) 188 185 #define IVPU_TEST_MODE_PREEMPTION_DISABLE BIT(6) 189 186 #define IVPU_TEST_MODE_HWS_EXTRA_EVENTS BIT(7) 187 + #define IVPU_TEST_MODE_DISABLE_TIMEOUTS BIT(8) 190 188 extern int ivpu_test_mode; 191 189 192 190 struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
+30 -1
drivers/accel/ivpu/ivpu_fw.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (C) 2020-2023 Intel Corporation 3 + * Copyright (C) 2020-2024 Intel Corporation 4 4 */ 5 5 6 6 #include <linux/firmware.h> ··· 123 123 return false; 124 124 } 125 125 126 + static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size) 127 + { 128 + if (addr < range_start || addr + size > range_start + range_size) 129 + return false; 130 + 131 + return true; 132 + } 133 + 126 134 static int ivpu_fw_parse(struct ivpu_device *vdev) 127 135 { 128 136 struct ivpu_fw_info *fw = vdev->fw; ··· 213 205 fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size; 214 206 fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size; 215 207 208 + if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address, 209 + fw_hdr->ro_section_size, 210 + fw_hdr->image_load_address, 211 + fw_hdr->image_size)) { 212 + ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n", 213 + fw_hdr->ro_section_start_address, fw_hdr->ro_section_size); 214 + return -EINVAL; 215 + } 216 + 217 + fw->read_only_addr = fw_hdr->ro_section_start_address; 218 + fw->read_only_size = fw_hdr->ro_section_size; 219 + 216 220 ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n", 217 221 fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size); 218 222 ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n", 219 223 fw->runtime_addr, image_load_addr, fw->entry_point); 224 + ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n", 225 + fw->read_only_addr, fw->read_only_size); 220 226 221 227 return 0; 222 228 } ··· 290 268 if (!fw->mem) { 291 269 ivpu_err(vdev, "Failed to create firmware runtime memory buffer\n"); 292 270 return -ENOMEM; 271 + } 272 + 273 + ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr, 274 + fw->read_only_size); 275 + if (ret) { 276 + 
ivpu_err(vdev, "Failed to set firmware image read-only\n"); 277 + goto err_free_fw_mem; 293 278 } 294 279 295 280 fw->mem_log_crit = ivpu_bo_create_global(vdev, IVPU_FW_CRITICAL_BUFFER_SIZE,
+2
drivers/accel/ivpu/ivpu_fw.h
··· 30 30 u32 dvfs_mode; 31 31 u32 primary_preempt_buf_size; 32 32 u32 secondary_preempt_buf_size; 33 + u64 read_only_addr; 34 + u32 read_only_size; 33 35 }; 34 36 35 37 int ivpu_fw_init(struct ivpu_device *vdev);
+24 -6
drivers/accel/ivpu/ivpu_hw.c
··· 61 61 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 62 62 vdev->wa.interrupt_clear_with_0 = ivpu_hw_btrs_irqs_clear_with_0_mtl(vdev); 63 63 64 - if (ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL) 64 + if (ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL && 65 + ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0) 65 66 vdev->wa.disable_clock_relinquish = true; 67 + 68 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 69 + vdev->wa.wp0_during_power_up = true; 66 70 67 71 IVPU_PRINT_WA(punit_disabled); 68 72 IVPU_PRINT_WA(clear_runtime_mem); 69 73 IVPU_PRINT_WA(interrupt_clear_with_0); 70 74 IVPU_PRINT_WA(disable_clock_relinquish); 75 + IVPU_PRINT_WA(wp0_during_power_up); 71 76 } 72 77 73 78 static void timeouts_init(struct ivpu_device *vdev) 74 79 { 75 - if (ivpu_is_fpga(vdev)) { 80 + if (ivpu_test_mode & IVPU_TEST_MODE_DISABLE_TIMEOUTS) { 81 + vdev->timeout.boot = -1; 82 + vdev->timeout.jsm = -1; 83 + vdev->timeout.tdr = -1; 84 + vdev->timeout.autosuspend = -1; 85 + vdev->timeout.d0i3_entry_msg = -1; 86 + } else if (ivpu_is_fpga(vdev)) { 76 87 vdev->timeout.boot = 100000; 77 88 vdev->timeout.jsm = 50000; 78 89 vdev->timeout.tdr = 2000000; 79 - vdev->timeout.reschedule_suspend = 1000; 80 90 vdev->timeout.autosuspend = -1; 81 91 vdev->timeout.d0i3_entry_msg = 500; 82 92 } else if (ivpu_is_simics(vdev)) { 83 93 vdev->timeout.boot = 50; 84 94 vdev->timeout.jsm = 500; 85 95 vdev->timeout.tdr = 10000; 86 - vdev->timeout.reschedule_suspend = 10; 87 96 vdev->timeout.autosuspend = -1; 88 97 vdev->timeout.d0i3_entry_msg = 100; 89 98 } else { 90 99 vdev->timeout.boot = 1000; 91 100 vdev->timeout.jsm = 500; 92 101 vdev->timeout.tdr = 2000; 93 - vdev->timeout.reschedule_suspend = 10; 94 - vdev->timeout.autosuspend = 10; 102 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 103 + vdev->timeout.autosuspend = 10; 104 + else 105 + vdev->timeout.autosuspend = 100; 95 106 vdev->timeout.d0i3_entry_msg = 5; 96 107 } 97 108 } ··· 135 124 int ivpu_hw_power_up(struct ivpu_device *vdev) 136 125 { 137 126 int 
ret; 127 + 128 + if (IVPU_WA(wp0_during_power_up)) { 129 + /* WP requests may fail when powering down, so issue WP 0 here */ 130 + ret = wp_disable(vdev); 131 + if (ret) 132 + ivpu_warn(vdev, "Failed to disable workpoint: %d\n", ret); 133 + } 138 134 139 135 ret = ivpu_hw_btrs_d0i3_disable(vdev); 140 136 if (ret)
+3 -1
drivers/accel/ivpu/ivpu_hw.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 - * Copyright (C) 2020 - 2024 Intel Corporation 3 + * Copyright (C) 2020-2024 Intel Corporation 4 4 */ 5 5 6 6 #ifndef __IVPU_HW_H__ ··· 15 15 #define IVPU_HW_IRQ_FIFO_LENGTH 1024 16 16 17 17 #define IVPU_HW_IRQ_SRC_IPC 1 18 + #define IVPU_HW_IRQ_SRC_MMU_EVTQ 2 19 + #define IVPU_HW_IRQ_SRC_DCT 3 18 20 19 21 struct ivpu_addr_range { 20 22 resource_size_t start;
+34 -10
drivers/accel/ivpu/ivpu_hw_btrs.c
··· 504 504 int ret; 505 505 u32 val; 506 506 507 + ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev); 508 + 507 509 ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US); 508 510 if (ret) { 509 511 ivpu_err(vdev, "Wait for *_TRIGGER timed out\n"); ··· 643 641 if (!status) 644 642 return false; 645 643 646 - if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) 644 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) { 647 645 ivpu_dbg(vdev, IRQ, "Survivability IRQ\n"); 646 + if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_DCT)) 647 + ivpu_err_ratelimited(vdev, "IRQ FIFO full\n"); 648 + } 648 649 649 650 if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) 650 651 ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ)); ··· 697 692 return true; 698 693 } 699 694 700 - static void dct_drive_40xx(struct ivpu_device *vdev, u32 dct_val) 695 + int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable) 701 696 { 702 - u32 val = REGB_RD32(VPU_HW_BTRS_LNL_PCODE_MAILBOX); 697 + u32 val = REGB_RD32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW); 698 + u32 cmd = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, CMD, val); 699 + u32 param1 = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, PARAM1, val); 703 700 704 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX, CMD, DCT_REQ, val); 705 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX, PARAM1, 706 - dct_val ? 
DCT_ENABLE : DCT_DISABLE, val); 707 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX, PARAM2, dct_val, val); 701 + if (cmd != DCT_REQ) { 702 + ivpu_err_ratelimited(vdev, "Unsupported PCODE command: 0x%x\n", cmd); 703 + return -EBADR; 704 + } 708 705 709 - REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX, val); 706 + switch (param1) { 707 + case DCT_ENABLE: 708 + *enable = true; 709 + return 0; 710 + case DCT_DISABLE: 711 + *enable = false; 712 + return 0; 713 + default: 714 + ivpu_err_ratelimited(vdev, "Invalid PARAM1 value: %u\n", param1); 715 + return -EINVAL; 716 + } 710 717 } 711 718 712 - void ivpu_hw_btrs_dct_drive(struct ivpu_device *vdev, u32 dct_val) 719 + void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent) 713 720 { 714 - return dct_drive_40xx(vdev, dct_val); 721 + u32 val = 0; 722 + u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE; 723 + 724 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, CMD, DCT_REQ, val); 725 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM1, cmd, val); 726 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM2, active_percent, val); 727 + 728 + REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val); 715 729 } 716 730 717 731 static u32 pll_ratio_to_freq_mtl(u32 ratio, u32 config)
+5 -1
drivers/accel/ivpu/ivpu_hw_btrs.h
··· 15 15 #define PLL_PROFILING_FREQ_HIGH 400000000 16 16 #define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ) 17 17 18 + #define DCT_DEFAULT_ACTIVE_PERCENT 15u 19 + #define DCT_PERIOD_US 35300u 20 + 18 21 int ivpu_hw_btrs_info_init(struct ivpu_device *vdev); 19 22 void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev); 20 23 int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev); ··· 34 31 void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev); 35 32 bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq); 36 33 bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq); 37 - void ivpu_hw_btrs_dct_drive(struct ivpu_device *vdev, u32 dct_val); 34 + int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable); 35 + void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent); 38 36 u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev); 39 37 u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio); 40 38 u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
+5 -5
drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
··· 44 44 #define VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH 0x0000005cu 45 45 #define VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR 0x00000060u 46 46 47 - #define VPU_HW_BTRS_LNL_PCODE_MAILBOX 0x00000070u 48 - #define VPU_HW_BTRS_LNL_PCODE_MAILBOX_CMD_MASK GENMASK(7, 0) 49 - #define VPU_HW_BTRS_LNL_PCODE_MAILBOX_PARAM1_MASK GENMASK(15, 8) 50 - #define VPU_HW_BTRS_LNL_PCODE_MAILBOX_PARAM2_MASK GENMASK(23, 16) 51 - #define VPU_HW_BTRS_LNL_PCODE_MAILBOX_PARAM3_MASK GENMASK(31, 24) 47 + #define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS 0x00000070u 48 + #define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_CMD_MASK GENMASK(7, 0) 49 + #define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM1_MASK GENMASK(15, 8) 50 + #define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM2_MASK GENMASK(23, 16) 51 + #define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM3_MASK GENMASK(31, 24) 52 52 53 53 #define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW 0x00000074u 54 54 #define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_CMD_MASK GENMASK(7, 0)
+1 -2
drivers/accel/ivpu/ivpu_ipc.c
··· 210 210 ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr); 211 211 } 212 212 213 - static int 214 - ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req) 213 + int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req) 215 214 { 216 215 struct ivpu_ipc_info *ipc = vdev->ipc; 217 216 int ret;
+3 -1
drivers/accel/ivpu/ivpu_ipc.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 - * Copyright (C) 2020-2023 Intel Corporation 3 + * Copyright (C) 2020-2024 Intel Corporation 4 4 */ 5 5 6 6 #ifndef __IVPU_IPC_H__ ··· 96 96 u32 channel, ivpu_ipc_rx_callback_t callback); 97 97 void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons); 98 98 99 + int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, 100 + struct vpu_jsm_msg *req); 99 101 int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, 100 102 struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg, 101 103 unsigned long timeout_ms);
+28 -1
drivers/accel/ivpu/ivpu_job.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (C) 2020-2023 Intel Corporation 3 + * Copyright (C) 2020-2024 Intel Corporation 4 4 */ 5 5 6 6 #include <drm/drm_file.h> ··· 310 310 ivpu_cmdq_reset(file_priv); 311 311 312 312 mutex_unlock(&vdev->context_list_lock); 313 + } 314 + 315 + static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv) 316 + { 317 + u16 engine; 318 + u8 priority; 319 + 320 + for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) { 321 + for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) { 322 + int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority); 323 + 324 + if (file_priv->cmdq[cmdq_idx]) 325 + ivpu_cmdq_fini(file_priv, file_priv->cmdq[cmdq_idx]); 326 + } 327 + } 328 + } 329 + 330 + void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv) 331 + { 332 + struct ivpu_device *vdev = file_priv->vdev; 333 + 334 + lockdep_assert_held(&file_priv->lock); 335 + 336 + ivpu_cmdq_fini_all(file_priv); 337 + 338 + if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_OS) 339 + ivpu_jsm_context_release(vdev, file_priv->ctx.id); 313 340 } 314 341 315 342 static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+3 -1
drivers/accel/ivpu/ivpu_job.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 - * Copyright (C) 2020-2023 Intel Corporation 3 + * Copyright (C) 2020-2024 Intel Corporation 4 4 */ 5 5 6 6 #ifndef __IVPU_JOB_H__ ··· 56 56 }; 57 57 58 58 int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file); 59 + 60 + void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv); 59 61 60 62 void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv); 61 63 void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
+36 -16
drivers/accel/ivpu/ivpu_jsm_msg.c
··· 103 103 104 104 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp, 105 105 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); 106 - if (ret) { 107 - ivpu_err_ratelimited(vdev, "Failed to register doorbell %d: %d\n", db_id, ret); 108 - return ret; 109 - } 106 + if (ret) 107 + ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret); 110 108 111 - ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id); 112 - 113 - return 0; 109 + return ret; 114 110 } 115 111 116 112 int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id) ··· 119 123 120 124 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp, 121 125 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); 122 - if (ret) { 123 - ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret); 124 - return ret; 125 - } 126 + if (ret) 127 + ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret); 126 128 127 - ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id); 128 - 129 - return 0; 129 + return ret; 130 130 } 131 131 132 132 int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat) ··· 247 255 { 248 256 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE }; 249 257 struct vpu_jsm_msg resp; 258 + int ret; 250 259 251 260 req.payload.ssid_release.host_ssid = host_ssid; 252 261 253 - return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp, 254 - VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); 262 + ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp, 263 + VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); 264 + if (ret) 265 + ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret); 266 + 267 + return ret; 255 268 } 256 269 257 270 int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev) ··· 534 537 *info_size = resp.payload.metric_streamer_done.bytes_written; 535 538 536 539 return ret; 540 + } 541 + 542 + 
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us) 543 + { 544 + struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE }; 545 + struct vpu_jsm_msg resp; 546 + 547 + req.payload.pwr_dct_control.dct_active_us = active_us; 548 + req.payload.pwr_dct_control.dct_inactive_us = inactive_us; 549 + 550 + return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, 551 + &resp, VPU_IPC_CHAN_ASYNC_CMD, 552 + vdev->timeout.jsm); 553 + } 554 + 555 + int ivpu_jsm_dct_disable(struct ivpu_device *vdev) 556 + { 557 + struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE }; 558 + struct vpu_jsm_msg resp; 559 + 560 + return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, 561 + &resp, VPU_IPC_CHAN_ASYNC_CMD, 562 + vdev->timeout.jsm); 537 563 }
+2
drivers/accel/ivpu/ivpu_jsm_msg.h
··· 41 41 u64 buffer_addr, u64 buffer_size, u64 *bytes_written); 42 42 int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr, 43 43 u64 buffer_size, u32 *sample_size, u64 *info_size); 44 + int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us); 45 + int ivpu_jsm_dct_disable(struct ivpu_device *vdev); 44 46 #endif
+7 -3
drivers/accel/ivpu/ivpu_mmu.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (C) 2020-2023 Intel Corporation 3 + * Copyright (C) 2020-2024 Intel Corporation 4 4 */ 5 5 6 6 #include <linux/circ_buf.h> ··· 878 878 u64 in_addr = ((u64)event[5]) << 32 | event[4]; 879 879 u32 sid = event[1]; 880 880 881 - ivpu_err(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n", 882 - op, ivpu_mmu_event_to_str(op), ssid, sid, event[2], event[3], in_addr, fetch_addr); 881 + ivpu_err_ratelimited(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n", 882 + op, ivpu_mmu_event_to_str(op), ssid, sid, 883 + event[2], event[3], in_addr, fetch_addr); 883 884 } 884 885 885 886 static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev) ··· 916 915 ivpu_mmu_user_context_mark_invalid(vdev, ssid); 917 916 REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons); 918 917 } 918 + 919 + if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ)) 920 + ivpu_err_ratelimited(vdev, "IRQ FIFO full\n"); 919 921 } 920 922 921 923 void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
+86
drivers/accel/ivpu/ivpu_mmu_context.c
··· 24 24 #define IVPU_MMU_ENTRY_FLAG_CONT BIT(52) 25 25 #define IVPU_MMU_ENTRY_FLAG_NG BIT(11) 26 26 #define IVPU_MMU_ENTRY_FLAG_AF BIT(10) 27 + #define IVPU_MMU_ENTRY_FLAG_RO BIT(7) 27 28 #define IVPU_MMU_ENTRY_FLAG_USER BIT(6) 28 29 #define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2) 29 30 #define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE BIT(1) ··· 316 315 dma_addr += map_size; 317 316 size -= map_size; 318 317 } 318 + 319 + return 0; 320 + } 321 + 322 + static void ivpu_mmu_context_set_page_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, 323 + u64 vpu_addr) 324 + { 325 + int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); 326 + int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); 327 + int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); 328 + int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); 329 + 330 + ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] |= IVPU_MMU_ENTRY_FLAG_RO; 331 + } 332 + 333 + static void ivpu_mmu_context_split_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, 334 + u64 vpu_addr) 335 + { 336 + int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); 337 + int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); 338 + int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); 339 + int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); 340 + 341 + ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] &= ~IVPU_MMU_ENTRY_FLAG_CONT; 342 + } 343 + 344 + static void ivpu_mmu_context_split_64k_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, 345 + u64 vpu_addr) 346 + { 347 + u64 start = ALIGN_DOWN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE); 348 + u64 end = ALIGN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE); 349 + u64 offset = 0; 350 + 351 + ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr); 352 + 353 + while (start + offset < end) { 354 + ivpu_mmu_context_split_page(vdev, ctx, start + offset); 355 + offset += IVPU_MMU_PAGE_SIZE; 356 + } 357 + } 358 + 
359 + int 360 + ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, 361 + size_t size) 362 + { 363 + u64 end = vpu_addr + size; 364 + size_t size_left = size; 365 + int ret; 366 + 367 + if (size == 0) 368 + return 0; 369 + 370 + if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE))) 371 + return -EINVAL; 372 + 373 + mutex_lock(&ctx->lock); 374 + 375 + ivpu_dbg(vdev, MMU_MAP, "Set read-only pages ctx: %u vpu_addr: 0x%llx size: %lu\n", 376 + ctx->id, vpu_addr, size); 377 + 378 + if (!ivpu_disable_mmu_cont_pages) { 379 + /* Split 64K contiguous page at the beginning if needed */ 380 + if (!IS_ALIGNED(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE)) 381 + ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr); 382 + 383 + /* Split 64K contiguous page at the end if needed */ 384 + if (!IS_ALIGNED(vpu_addr + size, IVPU_MMU_CONT_PAGES_SIZE)) 385 + ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size); 386 + } 387 + 388 + while (size_left) { 389 + if (vpu_addr < end) 390 + ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr); 391 + 392 + vpu_addr += IVPU_MMU_PAGE_SIZE; 393 + size_left -= IVPU_MMU_PAGE_SIZE; 394 + } 395 + 396 + /* Ensure page table modifications are flushed from wc buffers to memory */ 397 + wmb(); 398 + 399 + mutex_unlock(&ctx->lock); 400 + ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id); 401 + if (ret) 402 + ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret); 319 403 320 404 return 0; 321 405 }
+2
drivers/accel/ivpu/ivpu_mmu_context.h
··· 46 46 u64 vpu_addr, struct sg_table *sgt, bool llc_coherent); 47 47 void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, 48 48 u64 vpu_addr, struct sg_table *sgt); 49 + int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, 50 + u64 vpu_addr, size_t size); 49 51 50 52 #endif /* __IVPU_MMU_CONTEXT_H__ */
+77 -29
drivers/accel/ivpu/ivpu_pm.c
··· 237 237 { 238 238 struct drm_device *drm = dev_get_drvdata(dev); 239 239 struct ivpu_device *vdev = to_ivpu_device(drm); 240 - bool hw_is_idle = true; 241 - int ret; 240 + int ret, ret_d0i3; 241 + bool is_idle; 242 242 243 243 drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa)); 244 244 drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work)); 245 245 246 246 ivpu_dbg(vdev, PM, "Runtime suspend..\n"); 247 247 248 - if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) { 249 - ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n", 250 - vdev->pm->suspend_reschedule_counter); 251 - pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend); 252 - vdev->pm->suspend_reschedule_counter--; 253 - return -EAGAIN; 254 - } 248 + ivpu_mmu_disable(vdev); 255 249 256 - if (!vdev->pm->suspend_reschedule_counter) 257 - hw_is_idle = false; 258 - else if (ivpu_jsm_pwr_d0i3_enter(vdev)) 259 - hw_is_idle = false; 250 + is_idle = ivpu_hw_is_idle(vdev) || vdev->pm->dct_active_percent; 251 + if (!is_idle) 252 + ivpu_err(vdev, "NPU is not idle before autosuspend\n"); 253 + 254 + ret_d0i3 = ivpu_jsm_pwr_d0i3_enter(vdev); 255 + if (ret_d0i3) 256 + ivpu_err(vdev, "Failed to prepare for d0i3: %d\n", ret_d0i3); 260 257 261 258 ret = ivpu_suspend(vdev); 262 259 if (ret) 263 260 ivpu_err(vdev, "Failed to suspend NPU: %d\n", ret); 264 261 265 - if (!hw_is_idle) { 266 - ivpu_err(vdev, "NPU failed to enter idle, force suspended.\n"); 262 + if (!is_idle || ret_d0i3) { 263 + ivpu_err(vdev, "Forcing cold boot due to previous errors\n"); 267 264 atomic_inc(&vdev->pm->reset_counter); 268 265 ivpu_fw_log_dump(vdev); 269 266 ivpu_pm_prepare_cold_boot(vdev); 270 267 } else { 271 268 ivpu_pm_prepare_warm_boot(vdev); 272 269 } 273 - 274 - vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT; 275 270 276 271 ivpu_dbg(vdev, PM, "Runtime suspend done.\n"); 277 272 ··· 295 300 int ret; 296 301 297 302 ret = 
pm_runtime_resume_and_get(vdev->drm.dev); 298 - if (!drm_WARN_ON(&vdev->drm, ret < 0)) 299 - vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT; 300 - 301 - return ret; 302 - } 303 - 304 - int ivpu_rpm_get_if_active(struct ivpu_device *vdev) 305 - { 306 - int ret; 307 - 308 - ret = pm_runtime_get_if_in_use(vdev->drm.dev); 309 303 drm_WARN_ON(&vdev->drm, ret < 0); 310 304 311 305 return ret; ··· 349 365 int delay; 350 366 351 367 pm->vdev = vdev; 352 - pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT; 353 368 354 369 init_rwsem(&pm->reset_lock); 355 370 atomic_set(&pm->reset_pending, 0); ··· 388 405 { 389 406 pm_runtime_get_noresume(vdev->drm.dev); 390 407 pm_runtime_forbid(vdev->drm.dev); 408 + } 409 + 410 + int ivpu_pm_dct_init(struct ivpu_device *vdev) 411 + { 412 + if (vdev->pm->dct_active_percent) 413 + return ivpu_pm_dct_enable(vdev, vdev->pm->dct_active_percent); 414 + 415 + return 0; 416 + } 417 + 418 + int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent) 419 + { 420 + u32 active_us, inactive_us; 421 + int ret; 422 + 423 + if (active_percent == 0 || active_percent > 100) 424 + return -EINVAL; 425 + 426 + active_us = (DCT_PERIOD_US * active_percent) / 100; 427 + inactive_us = DCT_PERIOD_US - active_us; 428 + 429 + ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us); 430 + if (ret) { 431 + ivpu_err_ratelimited(vdev, "Filed to enable DCT: %d\n", ret); 432 + return ret; 433 + } 434 + 435 + vdev->pm->dct_active_percent = active_percent; 436 + 437 + ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n", 438 + active_percent, active_us, inactive_us); 439 + return 0; 440 + } 441 + 442 + int ivpu_pm_dct_disable(struct ivpu_device *vdev) 443 + { 444 + int ret; 445 + 446 + ret = ivpu_jsm_dct_disable(vdev); 447 + if (ret) { 448 + ivpu_err_ratelimited(vdev, "Filed to disable DCT: %d\n", ret); 449 + return ret; 450 + } 451 + 452 + vdev->pm->dct_active_percent = 0; 453 + 454 + ivpu_dbg(vdev, PM, "DCT disabled\n"); 455 + return 0; 456 
+ } 457 + 458 + void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev) 459 + { 460 + bool enable; 461 + int ret; 462 + 463 + if (ivpu_hw_btrs_dct_get_request(vdev, &enable)) 464 + return; 465 + 466 + if (vdev->pm->dct_active_percent) 467 + ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT); 468 + else 469 + ret = ivpu_pm_dct_disable(vdev); 470 + 471 + if (!ret) 472 + ivpu_hw_btrs_dct_set_status(vdev, enable, vdev->pm->dct_active_percent); 391 473 }
+7 -3
drivers/accel/ivpu/ivpu_pm.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 - * Copyright (C) 2020-2023 Intel Corporation 3 + * Copyright (C) 2020-2024 Intel Corporation 4 4 */ 5 5 6 6 #ifndef __IVPU_PM_H__ ··· 19 19 atomic_t reset_counter; 20 20 atomic_t reset_pending; 21 21 bool is_warmboot; 22 - u32 suspend_reschedule_counter; 22 + u8 dct_active_percent; 23 23 }; 24 24 25 25 void ivpu_pm_init(struct ivpu_device *vdev); ··· 36 36 void ivpu_pm_reset_done_cb(struct pci_dev *pdev); 37 37 38 38 int __must_check ivpu_rpm_get(struct ivpu_device *vdev); 39 - int __must_check ivpu_rpm_get_if_active(struct ivpu_device *vdev); 40 39 void ivpu_rpm_put(struct ivpu_device *vdev); 41 40 42 41 void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason); 43 42 void ivpu_start_job_timeout_detection(struct ivpu_device *vdev); 44 43 void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev); 44 + 45 + int ivpu_pm_dct_init(struct ivpu_device *vdev); 46 + int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent); 47 + int ivpu_pm_dct_disable(struct ivpu_device *vdev); 48 + void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev); 45 49 46 50 #endif /* __IVPU_PM_H__ */
+14 -2
drivers/accel/ivpu/vpu_boot_api.h
··· 27 27 * Minor version changes when API backward compatibility is preserved. 28 28 * Resets to 0 if Major version is incremented. 29 29 */ 30 - #define VPU_BOOT_API_VER_MINOR 22 30 + #define VPU_BOOT_API_VER_MINOR 24 31 31 32 32 /* 33 33 * API header changed (field names, documentation, formatting) but API itself has not been changed ··· 80 80 u32 preemption_buffer_2_size; 81 81 /* Space reserved for future preemption-related fields. */ 82 82 u32 preemption_reserved[6]; 83 + /* FW image read only section start address, 4KB aligned */ 84 + u64 ro_section_start_address; 85 + /* FW image read only section size, 4KB aligned */ 86 + u32 ro_section_size; 87 + u32 reserved; 83 88 }; 84 89 85 90 /* ··· 338 333 * The KMD is required to update this value on every VPU reset. 339 334 */ 340 335 u64 system_time_us; 341 - u32 pad4[18]; 336 + u32 pad4[2]; 337 + /* 338 + * The delta between device monotonic time and the current value of the 339 + * HW timestamp register, in ticks. Written by the firmware during boot. 340 + * Can be used by the KMD to calculate device time. 341 + */ 342 + u64 device_time_delta_ticks; 343 + u32 pad7[14]; 342 344 /* Warm boot information: 0x400 - 0x43F */ 343 345 u32 warm_boot_sections_count; 344 346 u32 warm_boot_start_address_reference;
+2 -2
drivers/dma-buf/heaps/cma_heap.c
··· 274 274 275 275 static struct dma_buf *cma_heap_allocate(struct dma_heap *heap, 276 276 unsigned long len, 277 - unsigned long fd_flags, 278 - unsigned long heap_flags) 277 + u32 fd_flags, 278 + u64 heap_flags) 279 279 { 280 280 struct cma_heap *cma_heap = dma_heap_get_drvdata(heap); 281 281 struct cma_heap_buffer *buffer;
+2 -2
drivers/dma-buf/heaps/system_heap.c
··· 333 333 334 334 static struct dma_buf *system_heap_allocate(struct dma_heap *heap, 335 335 unsigned long len, 336 - unsigned long fd_flags, 337 - unsigned long heap_flags) 336 + u32 fd_flags, 337 + u64 heap_flags) 338 338 { 339 339 struct system_heap_buffer *buffer; 340 340 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+1 -1
drivers/gpu/drm/Kconfig
··· 107 107 108 108 config DRM_PANIC 109 109 bool "Display a user-friendly message when a kernel panic occurs" 110 - depends on DRM && !FRAMEBUFFER_CONSOLE 110 + depends on DRM && !(FRAMEBUFFER_CONSOLE && VT_CONSOLE) 111 111 select DRM_KMS_HELPER 112 112 select FONT_SUPPORT 113 113 help
+33 -13
drivers/gpu/drm/bridge/sii902x.c
··· 163 163 164 164 #define SII902X_AUDIO_PORT_INDEX 3 165 165 166 + /* 167 + * The maximum resolution supported by the HDMI bridge is 1080p@60Hz 168 + * and 1920x1200 requiring a pixel clock of 165MHz and the minimum 169 + * resolution supported is 480p@60Hz requiring a pixel clock of 25MHz 170 + */ 171 + #define SII902X_MIN_PIXEL_CLOCK_KHZ 25000 172 + #define SII902X_MAX_PIXEL_CLOCK_KHZ 165000 173 + 166 174 struct sii902x { 167 175 struct i2c_client *i2c; 168 176 struct regmap *regmap; ··· 318 310 return num; 319 311 } 320 312 321 - static enum drm_mode_status sii902x_mode_valid(struct drm_connector *connector, 322 - struct drm_display_mode *mode) 323 - { 324 - /* TODO: check mode */ 325 - 326 - return MODE_OK; 327 - } 328 - 329 313 static const struct drm_connector_helper_funcs sii902x_connector_helper_funcs = { 330 314 .get_modes = sii902x_get_modes, 331 - .mode_valid = sii902x_mode_valid, 332 315 }; 333 316 334 - static void sii902x_bridge_disable(struct drm_bridge *bridge) 317 + static void sii902x_bridge_atomic_disable(struct drm_bridge *bridge, 318 + struct drm_bridge_state *old_bridge_state) 335 319 { 336 320 struct sii902x *sii902x = bridge_to_sii902x(bridge); 337 321 ··· 336 336 mutex_unlock(&sii902x->mutex); 337 337 } 338 338 339 - static void sii902x_bridge_enable(struct drm_bridge *bridge) 339 + static void sii902x_bridge_atomic_enable(struct drm_bridge *bridge, 340 + struct drm_bridge_state *old_bridge_state) 340 341 { 341 342 struct sii902x *sii902x = bridge_to_sii902x(bridge); 342 343 ··· 496 495 struct drm_crtc_state *crtc_state, 497 496 struct drm_connector_state *conn_state) 498 497 { 498 + if (crtc_state->mode.clock < SII902X_MIN_PIXEL_CLOCK_KHZ || 499 + crtc_state->mode.clock > SII902X_MAX_PIXEL_CLOCK_KHZ) 500 + return -EINVAL; 501 + 499 502 /* 500 503 * There might be flags negotiation supported in future but 501 504 * set the bus flags in atomic_check statically for now. 
··· 509 504 return 0; 510 505 } 511 506 507 + static enum drm_mode_status 508 + sii902x_bridge_mode_valid(struct drm_bridge *bridge, 509 + const struct drm_display_info *info, 510 + const struct drm_display_mode *mode) 511 + { 512 + if (mode->clock < SII902X_MIN_PIXEL_CLOCK_KHZ) 513 + return MODE_CLOCK_LOW; 514 + 515 + if (mode->clock > SII902X_MAX_PIXEL_CLOCK_KHZ) 516 + return MODE_CLOCK_HIGH; 517 + 518 + return MODE_OK; 519 + } 520 + 512 521 static const struct drm_bridge_funcs sii902x_bridge_funcs = { 513 522 .attach = sii902x_bridge_attach, 514 523 .mode_set = sii902x_bridge_mode_set, 515 - .disable = sii902x_bridge_disable, 516 - .enable = sii902x_bridge_enable, 524 + .atomic_disable = sii902x_bridge_atomic_disable, 525 + .atomic_enable = sii902x_bridge_atomic_enable, 517 526 .detect = sii902x_bridge_detect, 518 527 .edid_read = sii902x_bridge_edid_read, 519 528 .atomic_reset = drm_atomic_helper_bridge_reset, ··· 535 516 .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, 536 517 .atomic_get_input_bus_fmts = sii902x_bridge_atomic_get_input_bus_fmts, 537 518 .atomic_check = sii902x_bridge_atomic_check, 519 + .mode_valid = sii902x_bridge_mode_valid, 538 520 }; 539 521 540 522 static int sii902x_mute(struct sii902x *sii902x, bool mute)
+1 -12
drivers/gpu/drm/bridge/simple-bridge.c
··· 170 170 sbridge = devm_kzalloc(&pdev->dev, sizeof(*sbridge), GFP_KERNEL); 171 171 if (!sbridge) 172 172 return -ENOMEM; 173 - platform_set_drvdata(pdev, sbridge); 174 173 175 174 sbridge->info = of_device_get_match_data(&pdev->dev); 176 175 ··· 207 208 sbridge->bridge.of_node = pdev->dev.of_node; 208 209 sbridge->bridge.timings = sbridge->info->timings; 209 210 210 - drm_bridge_add(&sbridge->bridge); 211 - 212 - return 0; 213 - } 214 - 215 - static void simple_bridge_remove(struct platform_device *pdev) 216 - { 217 - struct simple_bridge *sbridge = platform_get_drvdata(pdev); 218 - 219 - drm_bridge_remove(&sbridge->bridge); 211 + return devm_drm_bridge_add(&pdev->dev, &sbridge->bridge); 220 212 } 221 213 222 214 /* ··· 284 294 285 295 static struct platform_driver simple_bridge_driver = { 286 296 .probe = simple_bridge_probe, 287 - .remove_new = simple_bridge_remove, 288 297 .driver = { 289 298 .name = "simple-bridge", 290 299 .of_match_table = simple_bridge_match,
-1
drivers/gpu/drm/ci/build.sh
··· 160 160 161 161 mkdir -p artifacts/install/lib 162 162 mv install/* artifacts/install/. 163 - rm -rf artifacts/install/modules 164 163 ln -s common artifacts/install/ci-common 165 164 cp .config artifacts/${CI_JOB_NAME}_config 166 165
+1
drivers/gpu/drm/ci/gitlab-ci.yml
··· 123 123 - msm 124 124 - rockchip 125 125 - virtio-gpu 126 + - software-driver 126 127 127 128 # YAML anchors for rule conditions 128 129 # --------------------------------
+3 -3
drivers/gpu/drm/ci/igt_runner.sh
··· 30 30 export IGT_FORCE_DRIVER="panfrost" 31 31 fi 32 32 ;; 33 - amdgpu) 33 + amdgpu|vkms) 34 34 # Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib 35 - mv /install/modules/lib/modules/* /lib/modules/. 36 - modprobe amdgpu 35 + mv /install/modules/lib/modules/* /lib/modules/. || true 36 + modprobe --first-time $DRIVER_NAME 37 37 ;; 38 38 esac 39 39
+1 -1
drivers/gpu/drm/ci/image-tags.yml
··· 4 4 DEBIAN_BASE_TAG: "${CONTAINER_TAG}" 5 5 6 6 DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build" 7 - DEBIAN_BUILD_TAG: "2023-10-08-config" 7 + DEBIAN_BUILD_TAG: "2024-06-10-vkms" 8 8 9 9 KERNEL_ROOTFS_TAG: "2023-10-06-amd" 10 10
+23 -1
drivers/gpu/drm/ci/test.yml
··· 338 338 RUNNER_TAG: mesa-ci-x86-64-lava-meson-g12b-a311d-khadas-vim3 339 339 340 340 virtio_gpu:none: 341 - stage: virtio-gpu 341 + stage: software-driver 342 342 variables: 343 343 CROSVM_GALLIUM_DRIVER: llvmpipe 344 344 DRIVER_NAME: virtio_gpu ··· 354 354 - mkdir -p $CI_PROJECT_DIR/results 355 355 - ln -sf $CI_PROJECT_DIR/results /results 356 356 - install/crosvm-runner.sh install/igt_runner.sh 357 + needs: 358 + - debian/x86_64_test-gl 359 + - testing:x86_64 360 + - igt:x86_64 361 + 362 + vkms:none: 363 + stage: software-driver 364 + variables: 365 + DRIVER_NAME: vkms 366 + GPU_VERSION: none 367 + extends: 368 + - .test-gl 369 + - .test-rules 370 + tags: 371 + - kvm 372 + script: 373 + - ln -sf $CI_PROJECT_DIR/install /install 374 + - mv install/bzImage /lava-files/bzImage 375 + - mkdir -p /lib/modules 376 + - mkdir -p $CI_PROJECT_DIR/results 377 + - ln -sf $CI_PROJECT_DIR/results /results 378 + - ./install/crosvm-runner.sh ./install/igt_runner.sh 357 379 needs: 358 380 - debian/x86_64_test-gl 359 381 - testing:x86_64
+1
drivers/gpu/drm/ci/x86_64.config
··· 24 24 CONFIG_DRM_PANEL_SIMPLE=y 25 25 CONFIG_PWM_CROS_EC=y 26 26 CONFIG_BACKLIGHT_PWM=y 27 + CONFIG_DRM_VKMS=m 27 28 28 29 # Strip out some stuff we don't need for graphics testing, to reduce 29 30 # the build.
-1
drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
··· 4 4 device_reset@unbind-reset-rebind,Fail 5 5 dumb_buffer@invalid-bpp,Fail 6 6 kms_3d,Fail 7 - kms_addfb_basic@addfb25-bad-modifier,Fail 8 7 kms_cursor_legacy@forked-move,Fail 9 8 kms_cursor_legacy@single-bo,Fail 10 9 kms_cursor_legacy@torture-bo,Fail
-1
drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
··· 4 4 device_reset@unbind-reset-rebind,Fail 5 5 dumb_buffer@invalid-bpp,Fail 6 6 kms_3d,Fail 7 - kms_addfb_basic@addfb25-bad-modifier,Fail 8 7 kms_lease@lease-uevent,Fail 9 8 tools_test@tools_test,Fail
+57
drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
··· 1 + core_hotunplug@hotrebind,Fail 2 + core_hotunplug@hotrebind-lateclose,Fail 3 + core_hotunplug@hotreplug,Fail 4 + core_hotunplug@hotreplug-lateclose,Fail 5 + core_hotunplug@hotunbind-rebind,Fail 6 + core_hotunplug@hotunplug-rescan,Fail 7 + core_hotunplug@unbind-rebind,Fail 8 + core_hotunplug@unplug-rescan,Fail 9 + device_reset@cold-reset-bound,Fail 10 + device_reset@reset-bound,Fail 11 + device_reset@unbind-cold-reset-rebind,Fail 12 + device_reset@unbind-reset-rebind,Fail 13 + dumb_buffer@invalid-bpp,Fail 14 + kms_content_protection@atomic,Crash 15 + kms_content_protection@atomic-dpms,Crash 16 + kms_content_protection@content-type-change,Crash 17 + kms_content_protection@lic-type-0,Crash 18 + kms_content_protection@lic-type-1,Crash 19 + kms_content_protection@srm,Crash 20 + kms_content_protection@type1,Crash 21 + kms_content_protection@uevent,Crash 22 + kms_cursor_crc@cursor-rapid-movement-128x128,Fail 23 + kms_cursor_crc@cursor-rapid-movement-128x42,Fail 24 + kms_cursor_crc@cursor-rapid-movement-256x256,Fail 25 + kms_cursor_crc@cursor-rapid-movement-256x85,Fail 26 + kms_cursor_crc@cursor-rapid-movement-32x10,Fail 27 + kms_cursor_crc@cursor-rapid-movement-32x32,Fail 28 + kms_cursor_crc@cursor-rapid-movement-512x170,Fail 29 + kms_cursor_crc@cursor-rapid-movement-512x512,Fail 30 + kms_cursor_crc@cursor-rapid-movement-64x21,Fail 31 + kms_cursor_crc@cursor-rapid-movement-64x64,Fail 32 + kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail 33 + kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail 34 + kms_cursor_legacy@cursor-vs-flip-atomic,Fail 35 + kms_cursor_legacy@cursor-vs-flip-legacy,Fail 36 + kms_cursor_legacy@cursor-vs-flip-toggle,Fail 37 + kms_cursor_legacy@cursor-vs-flip-varying-size,Fail 38 + kms_cursor_legacy@flip-vs-cursor-atomic,Fail 39 + kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail 40 + kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail 41 + kms_cursor_legacy@flip-vs-cursor-legacy,Fail 42 + kms_flip@flip-vs-modeset-vs-hang,Fail 43 + 
kms_flip@flip-vs-panning-vs-hang,Fail 44 + kms_flip@flip-vs-suspend,Timeout 45 + kms_flip@flip-vs-suspend-interruptible,Timeout 46 + kms_flip@plain-flip-fb-recreate,Fail 47 + kms_lease@lease-uevent,Fail 48 + kms_pipe_crc_basic@nonblocking-crc,Fail 49 + kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail 50 + kms_writeback@writeback-check-output,Fail 51 + kms_writeback@writeback-check-output-XRGB2101010,Fail 52 + kms_writeback@writeback-fb-id,Fail 53 + kms_writeback@writeback-fb-id-XRGB2101010,Fail 54 + kms_writeback@writeback-invalid-parameters,Fail 55 + kms_writeback@writeback-pixel-formats,Fail 56 + perf@i915-ref-count,Fail 57 + tools_test@tools_test,Fail
+69
drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
··· 1 + # Board Name: vkms 2 + # Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u 3 + # Failure Rate: 50 4 + # IGT Version: 1.28-g0df7b9b97 5 + # Linux Version: 6.9.0-rc7 6 + kms_cursor_legacy@long-nonblocking-modeset-vs-cursor-atomic 7 + 8 + # Board Name: vkms 9 + # Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u 10 + # Failure Rate: 50 11 + # IGT Version: 1.28-g0df7b9b97 12 + # Linux Version: 6.9.0-rc7 13 + kms_flip@basic-flip-vs-wf_vblank 14 + 15 + # Board Name: vkms 16 + # Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u 17 + # Failure Rate: 50 18 + # IGT Version: 1.28-g0df7b9b97 19 + # Linux Version: 6.9.0-rc7 20 + kms_flip@flip-vs-expired-vblank-interruptible 21 + 22 + # Board Name: vkms 23 + # Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u 24 + # Failure Rate: 50 25 + # IGT Version: 1.28-g0df7b9b97 26 + # Linux Version: 6.9.0-rc7 27 + kms_flip@flip-vs-wf_vblank-interruptible 28 + 29 + # Board Name: vkms 30 + # Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u 31 + # Failure Rate: 50 32 + # IGT Version: 1.28-g0df7b9b97 33 + # Linux Version: 6.9.0-rc7 34 + kms_flip@plain-flip-fb-recreate-interruptible 35 + 36 + # Board Name: vkms 37 + # Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u 38 + # Failure Rate: 50 39 + # IGT Version: 1.28-g0df7b9b97 40 + # Linux Version: 6.9.0-rc7 41 + kms_flip@plain-flip-ts-check 42 + 43 + # Board Name: vkms 44 + # Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u 45 + # Failure Rate: 50 46 + # IGT Version: 1.28-g0df7b9b97 47 + # Linux Version: 6.9.0-rc7 48 + kms_flip@plain-flip-ts-check-interruptible 49 + 50 + # Board Name: vkms 51 + # Bug Report: 
https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u 52 + # Failure Rate: 50 53 + # IGT Version: 1.28-g0df7b9b97 54 + # Linux Version: 6.9.0-rc7 55 + kms_flip@flip-vs-absolute-wf_vblank 56 + 57 + # Board Name: vkms 58 + # Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u 59 + # Failure Rate: 50 60 + # IGT Version: 1.28-g0df7b9b97 61 + # Linux Version: 6.9.0-rc7 62 + kms_flip@flip-vs-absolute-wf_vblank-interruptible 63 + 64 + # Board Name: vkms 65 + # Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u 66 + # Failure Rate: 50 67 + # IGT Version: 1.28-g0df7b9b97 68 + # Linux Version: 6.9.0-rc7 69 + kms_flip@flip-vs-blocking-wf-vblank
+119
drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
··· 1 + # keeps printing vkms_vblank_simulate: vblank timer overrun and never ends 2 + kms_invalid_mode@int-max-clock 3 + 4 + # Kernel panic 5 + kms_cursor_crc@cursor-rapid-movement-32x10 6 + # Oops: 0000 [#1] PREEMPT SMP NOPTI 7 + # CPU: 0 PID: 2635 Comm: kworker/u8:13 Not tainted 6.9.0-rc7-g40935263a1fd #1 8 + # Hardware name: ChromiumOS crosvm, BIOS 0 9 + # Workqueue: vkms_composer vkms_composer_worker [vkms] 10 + # RIP: 0010:compose_active_planes+0x1c7/0x4e0 [vkms] 11 + # Code: c9 0f 84 6a 01 00 00 8b 42 30 2b 42 28 41 39 c5 0f 8c 6f 01 00 00 49 83 c7 01 49 39 df 74 3b 4b 8b 34 fc 48 8b 96 48 01 00 00 <8b> 42 78 89 c1 83 e1 0a a8 20 74 b1 45 89 f5 41 f7 d5 44 03 6a 34 12 + # RSP: 0018:ffffbb4700c17d58 EFLAGS: 00010246 13 + # RAX: 0000000000000400 RBX: 0000000000000002 RCX: 0000000000000002 14 + # RDX: 0000000000000000 RSI: ffffa2ad0788c000 RDI: 00000000fff479a8 15 + # RBP: 0000000000000004 R08: 0000000000000000 R09: 0000000000000000 16 + # R10: ffffa2ad0bb14000 R11: 0000000000000000 R12: ffffa2ad03e21700 17 + # R13: 0000000000000003 R14: 0000000000000004 R15: 0000000000000000 18 + # FS: 0000000000000000(0000) GS:ffffa2ad2bc00000(0000) knlGS:0000000000000000 19 + # CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 20 + # CR2: 0000000000000078 CR3: 000000010bd30000 CR4: 0000000000350ef0 21 + # Call Trace: 22 + # <TASK> 23 + # ? __die+0x1e/0x60 24 + # ? page_fault_oops+0x17b/0x490 25 + # ? exc_page_fault+0x6d/0x230 26 + # ? asm_exc_page_fault+0x26/0x30 27 + # ? compose_active_planes+0x1c7/0x4e0 [vkms] 28 + # ? compose_active_planes+0x2a3/0x4e0 [vkms] 29 + # ? srso_return_thunk+0x5/0x5f 30 + # vkms_composer_worker+0x205/0x240 [vkms] 31 + # process_one_work+0x1f4/0x6b0 32 + # ? lock_is_held_type+0x9e/0x110 33 + # worker_thread+0x17e/0x350 34 + # ? __pfx_worker_thread+0x10/0x10 35 + # kthread+0xce/0x100 36 + # ? __pfx_kthread+0x10/0x10 37 + # ret_from_fork+0x2f/0x50 38 + # ? 
__pfx_kthread+0x10/0x10 39 + # ret_from_fork_asm+0x1a/0x30 40 + # </TASK> 41 + # Modules linked in: vkms 42 + # CR2: 0000000000000078 43 + # ---[ end trace 0000000000000000 ]--- 44 + # RIP: 0010:compose_active_planes+0x1c7/0x4e0 [vkms] 45 + # Code: c9 0f 84 6a 01 00 00 8b 42 30 2b 42 28 41 39 c5 0f 8c 6f 01 00 00 49 83 c7 01 49 39 df 74 3b 4b 8b 34 fc 48 8b 96 48 01 00 00 <8b> 42 78 89 c1 83 e1 0a a8 20 74 b1 45 89 f5 41 f7 d5 44 03 6a 34 46 + # RSP: 0018:ffffbb4700c17d58 EFLAGS: 00010246 47 + # RAX: 0000000000000400 RBX: 0000000000000002 RCX: 0000000000000002 48 + # RDX: 0000000000000000 RSI: ffffa2ad0788c000 RDI: 00000000fff479a8 49 + # RBP: 0000000000000004 R08: 0000000000000000 R09: 0000000000000000 50 + # R10: ffffa2ad0bb14000 R11: 0000000000000000 R12: ffffa2ad03e21700 51 + # R13: 0000000000000003 R14: 0000000000000004 R15: 0000000000000000 52 + # FS: 0000000000000000(0000) GS:ffffa2ad2bc00000(0000) knlGS:0000000000000000 53 + # CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 54 + 55 + kms_cursor_crc@cursor-rapid-movement-256x85 56 + # [drm:drm_crtc_add_crc_entry] *ERROR* Overflow of CRC buffer, userspace reads too slow. 
57 + # Oops: 0000 [#1] PREEMPT SMP NOPTI 58 + # CPU: 1 PID: 10 Comm: kworker/u8:0 Not tainted 6.9.0-rc7-g646381cde463 #1 59 + # Hardware name: ChromiumOS crosvm, BIOS 0 60 + # Workqueue: vkms_composer vkms_composer_worker [vkms] 61 + # RIP: 0010:compose_active_planes+0x1c7/0x4e0 [vkms] 62 + # Code: c9 0f 84 6a 01 00 00 8b 42 30 2b 42 28 41 39 c5 0f 8c 6f 01 00 00 49 83 c7 01 49 39 df 74 3b 4b 8b 34 fc 48 8b 96 48 01 00 00 <8b> 42 78 89 c1 83 e1 0a a8 20 74 b1 45 89 f5 41 f7 d5 44 03 6a 34 63 + # RSP: 0018:ffffa7e980057d58 EFLAGS: 00010246 64 + # RAX: 0000000000000400 RBX: 0000000000000002 RCX: 0000000000000002 65 + # RDX: 0000000000000000 RSI: ffff977987aa5c00 RDI: 000000001b43a85f 66 + # RBP: 0000000000000001 R08: 0000000000000000 R09: 0000000000000000 67 + # R10: ffff977981bf0000 R11: 0000000000000000 R12: ffff977989622590 68 + # R13: 0000000000000000 R14: 0000000000000001 R15: 0000000000000000 69 + # FS: 0000000000000000(0000) GS:ffff9779abd00000(0000) knlGS:0000000000000000 70 + # CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 71 + # CR2: 0000000000000078 CR3: 0000000109b38000 CR4: 0000000000350ef0 72 + # Call Trace: 73 + # <TASK> 74 + # ? __die+0x1e/0x60 75 + # ? page_fault_oops+0x17b/0x490 76 + # ? exc_page_fault+0x6d/0x230 77 + # ? asm_exc_page_fault+0x26/0x30 78 + # ? compose_active_planes+0x1c7/0x4e0 [vkms] 79 + # ? compose_active_planes+0x2a3/0x4e0 [vkms] 80 + # ? srso_return_thunk+0x5/0x5f 81 + # vkms_composer_worker+0x205/0x240 [vkms] 82 + # process_one_work+0x1f4/0x6b0 83 + # ? lock_is_held_type+0x9e/0x110 84 + # worker_thread+0x17e/0x350 85 + # ? __pfx_worker_thread+0x10/0x10 86 + # kthread+0xce/0x100 87 + # ? __pfx_kthread+0x10/0x10 88 + # ret_from_fork+0x2f/0x50 89 + # ? 
__pfx_kthread+0x10/0x10 90 + # ret_from_fork_asm+0x1a/0x30 91 + # </TASK> 92 + # Modules linked in: vkms 93 + # CR2: 0000000000000078 94 + # ---[ end trace 0000000000000000 ]--- 95 + # RIP: 0010:compose_active_planes+0x1c7/0x4e0 [vkms] 96 + # Code: c9 0f 84 6a 01 00 00 8b 42 30 2b 42 28 41 39 c5 0f 8c 6f 01 00 00 49 83 c7 01 49 39 df 74 3b 4b 8b 34 fc 48 8b 96 48 01 00 00 <8b> 42 78 89 c1 83 e1 0a a8 20 74 b1 45 89 f5 41 f7 d5 44 03 6a 34 97 + # RSP: 0018:ffffa7e980057d58 EFLAGS: 00010246 98 + # RAX: 0000000000000400 RBX: 0000000000000002 RCX: 0000000000000002 99 + # RDX: 0000000000000000 RSI: ffff977987aa5c00 RDI: 000000001b43a85f 100 + # RBP: 0000000000000001 R08: 0000000000000000 R09: 0000000000000000 101 + # R10: ffff977981bf0000 R11: 0000000000000000 R12: ffff977989622590 102 + # R13: 0000000000000000 R14: 0000000000000001 R15: 0000000000000000 103 + # FS: 0000000000000000(0000) GS:ffff9779abd00000(0000) knlGS:0000000000000000 104 + # CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 105 + # CR2: 0000000000000078 CR3: 0000000109b38000 CR4: 0000000000350ef0 106 + 107 + # Skip driver specific tests 108 + ^amdgpu.* 109 + msm_.* 110 + nouveau_.* 111 + panfrost_.* 112 + ^v3d.* 113 + ^vc4.* 114 + ^vmwgfx* 115 + 116 + # Skip intel specific tests 117 + gem_.* 118 + i915_.* 119 + xe_.*
+7
drivers/gpu/drm/drm_connector.c
··· 1087 1087 { DRM_MODE_TV_MODE_PAL_M, "PAL-M" }, 1088 1088 { DRM_MODE_TV_MODE_PAL_N, "PAL-N" }, 1089 1089 { DRM_MODE_TV_MODE_SECAM, "SECAM" }, 1090 + { DRM_MODE_TV_MODE_MONOCHROME, "Mono" }, 1090 1091 }; 1091 1092 DRM_ENUM_NAME_FN(drm_get_tv_mode_name, drm_tv_mode_enum_list) 1092 1093 ··· 1858 1857 * 1859 1858 * TV Mode is CCIR System B (aka 625-lines) together with 1860 1859 * the SECAM Color Encoding. 1860 + * 1861 + * Mono: 1862 + * 1863 + * Use timings appropriate to the DRM mode, including 1864 + * equalizing pulses for a 525-line or 625-line mode, 1865 + * with no pedestal or color encoding. 1861 1866 * 1862 1867 * Drivers can set up this property by calling 1863 1868 * drm_mode_create_tv_properties().
+4 -1
drivers/gpu/drm/drm_modes.c
··· 531 531 * @interlace: whether to compute an interlaced mode 532 532 * 533 533 * This function creates a struct drm_display_mode instance suited for 534 - * an analog TV output, for one of the usual analog TV mode. 534 + * an analog TV output, for one of the usual analog TV modes. Where 535 + * this is DRM_MODE_TV_MODE_MONOCHROME, a 625-line mode will be created. 535 536 * 536 537 * Note that @hdisplay is larger than the usual constraints for the PAL 537 538 * and NTSC timings, and we'll choose to ignore most timings constraints ··· 570 569 case DRM_MODE_TV_MODE_PAL_N: 571 570 fallthrough; 572 571 case DRM_MODE_TV_MODE_SECAM: 572 + fallthrough; 573 + case DRM_MODE_TV_MODE_MONOCHROME: 573 574 analog = DRM_MODE_ANALOG_PAL; 574 575 break; 575 576
+3 -2
drivers/gpu/drm/drm_probe_helper.c
··· 1259 1259 for (i = 0; i < tv_mode_property->num_values; i++) 1260 1260 supported_tv_modes |= BIT(tv_mode_property->values[i]); 1261 1261 1262 - if ((supported_tv_modes & ntsc_modes) && 1263 - (supported_tv_modes & pal_modes)) { 1262 + if (((supported_tv_modes & ntsc_modes) && 1263 + (supported_tv_modes & pal_modes)) || 1264 + (supported_tv_modes & BIT(DRM_MODE_TV_MODE_MONOCHROME))) { 1264 1265 uint64_t default_mode; 1265 1266 1266 1267 if (drm_object_property_get_default_value(&connector->base,
+2 -1
drivers/gpu/drm/mgag200/Makefile
··· 11 11 mgag200_g200ew3.o \ 12 12 mgag200_g200se.o \ 13 13 mgag200_g200wb.o \ 14 - mgag200_mode.o 14 + mgag200_mode.o \ 15 + mgag200_vga.o 15 16 16 17 obj-$(CONFIG_DRM_MGAG200) += mgag200.o
+107
drivers/gpu/drm/mgag200/mgag200_bmc.c
··· 2 2 3 3 #include <linux/delay.h> 4 4 5 + #include <drm/drm_atomic_helper.h> 6 + #include <drm/drm_edid.h> 7 + #include <drm/drm_managed.h> 8 + #include <drm/drm_probe_helper.h> 9 + 5 10 #include "mgag200_drv.h" 11 + 12 + static struct mgag200_bmc_connector *to_mgag200_bmc_connector(struct drm_connector *connector) 13 + { 14 + return container_of(connector, struct mgag200_bmc_connector, base); 15 + } 6 16 7 17 void mgag200_bmc_disable_vidrst(struct mga_device *mdev) 8 18 { ··· 106 96 tmp = RREG8(DAC_DATA); 107 97 tmp &= ~0x10; 108 98 WREG_DAC(MGA1064_GEN_IO_DATA, tmp); 99 + } 100 + 101 + static const struct drm_encoder_funcs mgag200_bmc_encoder_funcs = { 102 + .destroy = drm_encoder_cleanup, 103 + }; 104 + 105 + static int mgag200_bmc_connector_helper_detect_ctx(struct drm_connector *connector, 106 + struct drm_modeset_acquire_ctx *ctx, 107 + bool force) 108 + { 109 + struct mgag200_bmc_connector *bmc_connector = to_mgag200_bmc_connector(connector); 110 + struct drm_connector *physical_connector = bmc_connector->physical_connector; 111 + 112 + /* 113 + * Most user-space compositors cannot handle more than one connected 114 + * connector per CRTC. Hence, we only mark the BMC as connected if the 115 + * physical connector is disconnected. If the physical connector's status 116 + * is connected or unknown, the BMC remains disconnected. This has no 117 + * effect on the output of the BMC. 118 + * 119 + * FIXME: Remove this logic once user-space compositors can handle more 120 + * than one connector per CRTC. The BMC should always be connected. 
121 + */ 122 + 123 + if (physical_connector && physical_connector->status == connector_status_disconnected) 124 + return connector_status_connected; 125 + 126 + return connector_status_disconnected; 127 + } 128 + 129 + static int mgag200_bmc_connector_helper_get_modes(struct drm_connector *connector) 130 + { 131 + struct drm_device *dev = connector->dev; 132 + struct mga_device *mdev = to_mga_device(dev); 133 + const struct mgag200_device_info *minfo = mdev->info; 134 + 135 + return drm_add_modes_noedid(connector, minfo->max_hdisplay, minfo->max_vdisplay); 136 + } 137 + 138 + static const struct drm_connector_helper_funcs mgag200_bmc_connector_helper_funcs = { 139 + .get_modes = mgag200_bmc_connector_helper_get_modes, 140 + .detect_ctx = mgag200_bmc_connector_helper_detect_ctx, 141 + }; 142 + 143 + static const struct drm_connector_funcs mgag200_bmc_connector_funcs = { 144 + .reset = drm_atomic_helper_connector_reset, 145 + .fill_modes = drm_helper_probe_single_connector_modes, 146 + .destroy = drm_connector_cleanup, 147 + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 148 + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 149 + }; 150 + 151 + static int mgag200_bmc_connector_init(struct drm_device *dev, 152 + struct mgag200_bmc_connector *bmc_connector, 153 + struct drm_connector *physical_connector) 154 + { 155 + struct drm_connector *connector = &bmc_connector->base; 156 + int ret; 157 + 158 + ret = drm_connector_init(dev, connector, &mgag200_bmc_connector_funcs, 159 + DRM_MODE_CONNECTOR_VIRTUAL); 160 + if (ret) 161 + return ret; 162 + drm_connector_helper_add(connector, &mgag200_bmc_connector_helper_funcs); 163 + 164 + bmc_connector->physical_connector = physical_connector; 165 + 166 + return 0; 167 + } 168 + 169 + int mgag200_bmc_output_init(struct mga_device *mdev, struct drm_connector *physical_connector) 170 + { 171 + struct drm_device *dev = &mdev->base; 172 + struct drm_crtc *crtc = &mdev->crtc; 173 + struct 
drm_encoder *encoder; 174 + struct mgag200_bmc_connector *bmc_connector; 175 + struct drm_connector *connector; 176 + int ret; 177 + 178 + encoder = &mdev->output.bmc.encoder; 179 + ret = drm_encoder_init(dev, encoder, &mgag200_bmc_encoder_funcs, 180 + DRM_MODE_ENCODER_VIRTUAL, NULL); 181 + if (ret) 182 + return ret; 183 + encoder->possible_crtcs = drm_crtc_mask(crtc); 184 + 185 + bmc_connector = &mdev->output.bmc.bmc_connector; 186 + ret = mgag200_bmc_connector_init(dev, bmc_connector, physical_connector); 187 + if (ret) 188 + return ret; 189 + connector = &bmc_connector->base; 190 + 191 + ret = drm_connector_attach_encoder(connector, encoder); 192 + if (ret) 193 + return ret; 194 + 195 + return 0; 109 196 }
+19 -15
drivers/gpu/drm/mgag200/mgag200_drv.h
··· 186 186 return container_of(base, struct mgag200_crtc_state, base); 187 187 } 188 188 189 + struct mgag200_bmc_connector { 190 + struct drm_connector base; 191 + struct drm_connector *physical_connector; 192 + }; 193 + 189 194 enum mga_type { 190 195 G200_PCI, 191 196 G200_AGP, ··· 288 283 289 284 struct drm_plane primary_plane; 290 285 struct drm_crtc crtc; 291 - struct drm_encoder encoder; 292 - struct drm_connector connector; 286 + struct { 287 + struct { 288 + struct drm_encoder encoder; 289 + struct drm_connector connector; 290 + } vga; 291 + struct { 292 + struct drm_encoder encoder; 293 + struct mgag200_bmc_connector bmc_connector; 294 + } bmc; 295 + } output; 293 296 }; 294 297 295 298 static inline struct mga_device *to_mga_device(struct drm_device *dev) ··· 430 417 .atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state, \ 431 418 .atomic_destroy_state = mgag200_crtc_atomic_destroy_state 432 419 433 - #define MGAG200_DAC_ENCODER_FUNCS \ 434 - .destroy = drm_encoder_cleanup 435 - 436 - #define MGAG200_VGA_CONNECTOR_HELPER_FUNCS \ 437 - .get_modes = drm_connector_helper_get_modes 438 - 439 - #define MGAG200_VGA_CONNECTOR_FUNCS \ 440 - .reset = drm_atomic_helper_connector_reset, \ 441 - .fill_modes = drm_helper_probe_single_connector_modes, \ 442 - .destroy = drm_connector_cleanup, \ 443 - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, \ 444 - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state 445 - 446 420 void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode); 447 421 void mgag200_set_format_regs(struct mga_device *mdev, const struct drm_format_info *format); 448 422 void mgag200_enable_display(struct mga_device *mdev); 449 423 void mgag200_init_registers(struct mga_device *mdev); 450 424 int mgag200_mode_config_init(struct mga_device *mdev, resource_size_t vram_available); 451 425 426 + /* mgag200_vga.c */ 427 + int mgag200_vga_output_init(struct mga_device *mdev); 428 + 
452 429 /* mgag200_bmc.c */ 453 430 void mgag200_bmc_disable_vidrst(struct mga_device *mdev); 454 431 void mgag200_bmc_enable_vidrst(struct mga_device *mdev); 432 + int mgag200_bmc_output_init(struct mga_device *mdev, struct drm_connector *physical_connector); 455 433 456 434 #endif /* __MGAG200_DRV_H__ */
+3 -44
drivers/gpu/drm/mgag200/mgag200_g200.c
··· 9 9 #include <drm/drm_gem_atomic_helper.h> 10 10 #include <drm/drm_probe_helper.h> 11 11 12 - #include "mgag200_ddc.h" 13 12 #include "mgag200_drv.h" 14 13 15 14 static int mgag200_g200_init_pci_options(struct pci_dev *pdev) ··· 183 184 MGAG200_CRTC_FUNCS, 184 185 }; 185 186 186 - static const struct drm_encoder_funcs mgag200_g200_dac_encoder_funcs = { 187 - MGAG200_DAC_ENCODER_FUNCS, 188 - }; 189 - 190 - static const struct drm_connector_helper_funcs mgag200_g200_vga_connector_helper_funcs = { 191 - MGAG200_VGA_CONNECTOR_HELPER_FUNCS, 192 - }; 193 - 194 - static const struct drm_connector_funcs mgag200_g200_vga_connector_funcs = { 195 - MGAG200_VGA_CONNECTOR_FUNCS, 196 - }; 197 - 198 187 static int mgag200_g200_pipeline_init(struct mga_device *mdev) 199 188 { 200 189 struct drm_device *dev = &mdev->base; 201 190 struct drm_plane *primary_plane = &mdev->primary_plane; 202 191 struct drm_crtc *crtc = &mdev->crtc; 203 - struct drm_encoder *encoder = &mdev->encoder; 204 - struct drm_connector *connector = &mdev->connector; 205 - struct i2c_adapter *ddc; 206 192 int ret; 207 193 208 194 ret = drm_universal_plane_init(dev, primary_plane, 0, ··· 215 231 drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); 216 232 drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); 217 233 218 - encoder->possible_crtcs = drm_crtc_mask(crtc); 219 - ret = drm_encoder_init(dev, encoder, &mgag200_g200_dac_encoder_funcs, 220 - DRM_MODE_ENCODER_DAC, NULL); 221 - if (ret) { 222 - drm_err(dev, "drm_encoder_init() failed: %d\n", ret); 234 + ret = mgag200_vga_output_init(mdev); 235 + if (ret) 223 236 return ret; 224 - } 225 - 226 - ddc = mgag200_ddc_create(mdev); 227 - if (IS_ERR(ddc)) { 228 - ret = PTR_ERR(ddc); 229 - drm_err(dev, "failed to add DDC bus: %d\n", ret); 230 - return ret; 231 - } 232 - 233 - ret = drm_connector_init_with_ddc(dev, connector, 234 - &mgag200_g200_vga_connector_funcs, 235 - DRM_MODE_CONNECTOR_VGA, ddc); 236 - if (ret) { 237 - drm_err(dev, 
"drm_connector_init_with_ddc() failed: %d\n", ret); 238 - return ret; 239 - } 240 - drm_connector_helper_add(connector, &mgag200_g200_vga_connector_helper_funcs); 241 - 242 - ret = drm_connector_attach_encoder(connector, encoder); 243 - if (ret) { 244 - drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret); 245 - return ret; 246 - } 247 237 248 238 return 0; 249 239 } ··· 401 443 return ERR_PTR(ret); 402 444 403 445 drm_mode_config_reset(dev); 446 + drm_kms_helper_poll_init(dev); 404 447 405 448 return mdev; 406 449 }
+5 -42
drivers/gpu/drm/mgag200/mgag200_g200eh.c
··· 9 9 #include <drm/drm_gem_atomic_helper.h> 10 10 #include <drm/drm_probe_helper.h> 11 11 12 - #include "mgag200_ddc.h" 13 12 #include "mgag200_drv.h" 14 13 15 14 void mgag200_g200eh_init_registers(struct mga_device *mdev) ··· 182 183 MGAG200_CRTC_FUNCS, 183 184 }; 184 185 185 - static const struct drm_encoder_funcs mgag200_g200eh_dac_encoder_funcs = { 186 - MGAG200_DAC_ENCODER_FUNCS, 187 - }; 188 - 189 - static const struct drm_connector_helper_funcs mgag200_g200eh_vga_connector_helper_funcs = { 190 - MGAG200_VGA_CONNECTOR_HELPER_FUNCS, 191 - }; 192 - 193 - static const struct drm_connector_funcs mgag200_g200eh_vga_connector_funcs = { 194 - MGAG200_VGA_CONNECTOR_FUNCS, 195 - }; 196 - 197 186 static int mgag200_g200eh_pipeline_init(struct mga_device *mdev) 198 187 { 199 188 struct drm_device *dev = &mdev->base; 200 189 struct drm_plane *primary_plane = &mdev->primary_plane; 201 190 struct drm_crtc *crtc = &mdev->crtc; 202 - struct drm_encoder *encoder = &mdev->encoder; 203 - struct drm_connector *connector = &mdev->connector; 204 - struct i2c_adapter *ddc; 205 191 int ret; 206 192 207 193 ret = drm_universal_plane_init(dev, primary_plane, 0, ··· 214 230 drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); 215 231 drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); 216 232 217 - encoder->possible_crtcs = drm_crtc_mask(crtc); 218 - ret = drm_encoder_init(dev, encoder, &mgag200_g200eh_dac_encoder_funcs, 219 - DRM_MODE_ENCODER_DAC, NULL); 220 - if (ret) { 221 - drm_err(dev, "drm_encoder_init() failed: %d\n", ret); 233 + ret = mgag200_vga_output_init(mdev); 234 + if (ret) 222 235 return ret; 223 - } 224 236 225 - ddc = mgag200_ddc_create(mdev); 226 - if (IS_ERR(ddc)) { 227 - ret = PTR_ERR(ddc); 228 - drm_err(dev, "failed to add DDC bus: %d\n", ret); 237 + ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); 238 + if (ret) 229 239 return ret; 230 - } 231 - 232 - ret = drm_connector_init_with_ddc(dev, connector, 233 - 
&mgag200_g200eh_vga_connector_funcs, 234 - DRM_MODE_CONNECTOR_VGA, ddc); 235 - if (ret) { 236 - drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret); 237 - return ret; 238 - } 239 - drm_connector_helper_add(connector, &mgag200_g200eh_vga_connector_helper_funcs); 240 - 241 - ret = drm_connector_attach_encoder(connector, encoder); 242 - if (ret) { 243 - drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret); 244 - return ret; 245 - } 246 240 247 241 return 0; 248 242 } ··· 277 315 return ERR_PTR(ret); 278 316 279 317 drm_mode_config_reset(dev); 318 + drm_kms_helper_poll_init(dev); 280 319 281 320 return mdev; 282 321 }
+5 -42
drivers/gpu/drm/mgag200/mgag200_g200eh3.c
··· 8 8 #include <drm/drm_gem_atomic_helper.h> 9 9 #include <drm/drm_probe_helper.h> 10 10 11 - #include "mgag200_ddc.h" 12 11 #include "mgag200_drv.h" 13 12 14 13 /* ··· 86 87 MGAG200_CRTC_FUNCS, 87 88 }; 88 89 89 - static const struct drm_encoder_funcs mgag200_g200eh3_dac_encoder_funcs = { 90 - MGAG200_DAC_ENCODER_FUNCS, 91 - }; 92 - 93 - static const struct drm_connector_helper_funcs mgag200_g200eh3_vga_connector_helper_funcs = { 94 - MGAG200_VGA_CONNECTOR_HELPER_FUNCS, 95 - }; 96 - 97 - static const struct drm_connector_funcs mgag200_g200eh3_vga_connector_funcs = { 98 - MGAG200_VGA_CONNECTOR_FUNCS, 99 - }; 100 - 101 90 static int mgag200_g200eh3_pipeline_init(struct mga_device *mdev) 102 91 { 103 92 struct drm_device *dev = &mdev->base; 104 93 struct drm_plane *primary_plane = &mdev->primary_plane; 105 94 struct drm_crtc *crtc = &mdev->crtc; 106 - struct drm_encoder *encoder = &mdev->encoder; 107 - struct drm_connector *connector = &mdev->connector; 108 - struct i2c_adapter *ddc; 109 95 int ret; 110 96 111 97 ret = drm_universal_plane_init(dev, primary_plane, 0, ··· 118 134 drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); 119 135 drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); 120 136 121 - encoder->possible_crtcs = drm_crtc_mask(crtc); 122 - ret = drm_encoder_init(dev, encoder, &mgag200_g200eh3_dac_encoder_funcs, 123 - DRM_MODE_ENCODER_DAC, NULL); 124 - if (ret) { 125 - drm_err(dev, "drm_encoder_init() failed: %d\n", ret); 137 + ret = mgag200_vga_output_init(mdev); 138 + if (ret) 126 139 return ret; 127 - } 128 140 129 - ddc = mgag200_ddc_create(mdev); 130 - if (IS_ERR(ddc)) { 131 - ret = PTR_ERR(ddc); 132 - drm_err(dev, "failed to add DDC bus: %d\n", ret); 141 + ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); 142 + if (ret) 133 143 return ret; 134 - } 135 - 136 - ret = drm_connector_init_with_ddc(dev, connector, 137 - &mgag200_g200eh3_vga_connector_funcs, 138 - DRM_MODE_CONNECTOR_VGA, ddc); 139 - if (ret) { 140 - 
drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret); 141 - return ret; 142 - } 143 - drm_connector_helper_add(connector, &mgag200_g200eh3_vga_connector_helper_funcs); 144 - 145 - ret = drm_connector_attach_encoder(connector, encoder); 146 - if (ret) { 147 - drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret); 148 - return ret; 149 - } 150 144 151 145 return 0; 152 146 } ··· 182 220 return ERR_PTR(ret); 183 221 184 222 drm_mode_config_reset(dev); 223 + drm_kms_helper_poll_init(dev); 185 224 186 225 return mdev; 187 226 }
+5 -42
drivers/gpu/drm/mgag200/mgag200_g200er.c
··· 9 9 #include <drm/drm_gem_atomic_helper.h> 10 10 #include <drm/drm_probe_helper.h> 11 11 12 - #include "mgag200_ddc.h" 13 12 #include "mgag200_drv.h" 14 13 15 14 static void mgag200_g200er_init_registers(struct mga_device *mdev) ··· 225 226 MGAG200_CRTC_FUNCS, 226 227 }; 227 228 228 - static const struct drm_encoder_funcs mgag200_g200er_dac_encoder_funcs = { 229 - MGAG200_DAC_ENCODER_FUNCS, 230 - }; 231 - 232 - static const struct drm_connector_helper_funcs mgag200_g200er_vga_connector_helper_funcs = { 233 - MGAG200_VGA_CONNECTOR_HELPER_FUNCS, 234 - }; 235 - 236 - static const struct drm_connector_funcs mgag200_g200er_vga_connector_funcs = { 237 - MGAG200_VGA_CONNECTOR_FUNCS, 238 - }; 239 - 240 229 static int mgag200_g200er_pipeline_init(struct mga_device *mdev) 241 230 { 242 231 struct drm_device *dev = &mdev->base; 243 232 struct drm_plane *primary_plane = &mdev->primary_plane; 244 233 struct drm_crtc *crtc = &mdev->crtc; 245 - struct drm_encoder *encoder = &mdev->encoder; 246 - struct drm_connector *connector = &mdev->connector; 247 - struct i2c_adapter *ddc; 248 234 int ret; 249 235 250 236 ret = drm_universal_plane_init(dev, primary_plane, 0, ··· 257 273 drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); 258 274 drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); 259 275 260 - encoder->possible_crtcs = drm_crtc_mask(crtc); 261 - ret = drm_encoder_init(dev, encoder, &mgag200_g200er_dac_encoder_funcs, 262 - DRM_MODE_ENCODER_DAC, NULL); 263 - if (ret) { 264 - drm_err(dev, "drm_encoder_init() failed: %d\n", ret); 276 + ret = mgag200_vga_output_init(mdev); 277 + if (ret) 265 278 return ret; 266 - } 267 279 268 - ddc = mgag200_ddc_create(mdev); 269 - if (IS_ERR(ddc)) { 270 - ret = PTR_ERR(ddc); 271 - drm_err(dev, "failed to add DDC bus: %d\n", ret); 280 + ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); 281 + if (ret) 272 282 return ret; 273 - } 274 - 275 - ret = drm_connector_init_with_ddc(dev, connector, 276 - 
&mgag200_g200er_vga_connector_funcs, 277 - DRM_MODE_CONNECTOR_VGA, ddc); 278 - if (ret) { 279 - drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret); 280 - return ret; 281 - } 282 - drm_connector_helper_add(connector, &mgag200_g200er_vga_connector_helper_funcs); 283 - 284 - ret = drm_connector_attach_encoder(connector, encoder); 285 - if (ret) { 286 - drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret); 287 - return ret; 288 - } 289 283 290 284 return 0; 291 285 } ··· 316 354 return ERR_PTR(ret); 317 355 318 356 drm_mode_config_reset(dev); 357 + drm_kms_helper_poll_init(dev); 319 358 320 359 return mdev; 321 360 }
+5 -42
drivers/gpu/drm/mgag200/mgag200_g200ev.c
··· 9 9 #include <drm/drm_gem_atomic_helper.h> 10 10 #include <drm/drm_probe_helper.h> 11 11 12 - #include "mgag200_ddc.h" 13 12 #include "mgag200_drv.h" 14 13 15 14 static void mgag200_g200ev_init_registers(struct mga_device *mdev) ··· 226 227 MGAG200_CRTC_FUNCS, 227 228 }; 228 229 229 - static const struct drm_encoder_funcs mgag200_g200ev_dac_encoder_funcs = { 230 - MGAG200_DAC_ENCODER_FUNCS, 231 - }; 232 - 233 - static const struct drm_connector_helper_funcs mgag200_g200ev_vga_connector_helper_funcs = { 234 - MGAG200_VGA_CONNECTOR_HELPER_FUNCS, 235 - }; 236 - 237 - static const struct drm_connector_funcs mgag200_g200ev_vga_connector_funcs = { 238 - MGAG200_VGA_CONNECTOR_FUNCS, 239 - }; 240 - 241 230 static int mgag200_g200ev_pipeline_init(struct mga_device *mdev) 242 231 { 243 232 struct drm_device *dev = &mdev->base; 244 233 struct drm_plane *primary_plane = &mdev->primary_plane; 245 234 struct drm_crtc *crtc = &mdev->crtc; 246 - struct drm_encoder *encoder = &mdev->encoder; 247 - struct drm_connector *connector = &mdev->connector; 248 - struct i2c_adapter *ddc; 249 235 int ret; 250 236 251 237 ret = drm_universal_plane_init(dev, primary_plane, 0, ··· 258 274 drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); 259 275 drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); 260 276 261 - encoder->possible_crtcs = drm_crtc_mask(crtc); 262 - ret = drm_encoder_init(dev, encoder, &mgag200_g200ev_dac_encoder_funcs, 263 - DRM_MODE_ENCODER_DAC, NULL); 264 - if (ret) { 265 - drm_err(dev, "drm_encoder_init() failed: %d\n", ret); 277 + ret = mgag200_vga_output_init(mdev); 278 + if (ret) 266 279 return ret; 267 - } 268 280 269 - ddc = mgag200_ddc_create(mdev); 270 - if (IS_ERR(ddc)) { 271 - ret = PTR_ERR(ddc); 272 - drm_err(dev, "failed to add DDC bus: %d\n", ret); 281 + ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); 282 + if (ret) 273 283 return ret; 274 - } 275 - 276 - ret = drm_connector_init_with_ddc(dev, connector, 277 - 
&mgag200_g200ev_vga_connector_funcs, 278 - DRM_MODE_CONNECTOR_VGA, ddc); 279 - if (ret) { 280 - drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret); 281 - return ret; 282 - } 283 - drm_connector_helper_add(connector, &mgag200_g200ev_vga_connector_helper_funcs); 284 - 285 - ret = drm_connector_attach_encoder(connector, encoder); 286 - if (ret) { 287 - drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret); 288 - return ret; 289 - } 290 284 291 285 return 0; 292 286 } ··· 321 359 return ERR_PTR(ret); 322 360 323 361 drm_mode_config_reset(dev); 362 + drm_kms_helper_poll_init(dev); 324 363 325 364 return mdev; 326 365 }
+5 -42
drivers/gpu/drm/mgag200/mgag200_g200ew3.c
··· 8 8 #include <drm/drm_gem_atomic_helper.h> 9 9 #include <drm/drm_probe_helper.h> 10 10 11 - #include "mgag200_ddc.h" 12 11 #include "mgag200_drv.h" 13 12 14 13 static void mgag200_g200ew3_init_registers(struct mga_device *mdev) ··· 95 96 MGAG200_CRTC_FUNCS, 96 97 }; 97 98 98 - static const struct drm_encoder_funcs mgag200_g200ew3_dac_encoder_funcs = { 99 - MGAG200_DAC_ENCODER_FUNCS, 100 - }; 101 - 102 - static const struct drm_connector_helper_funcs mgag200_g200ew3_vga_connector_helper_funcs = { 103 - MGAG200_VGA_CONNECTOR_HELPER_FUNCS, 104 - }; 105 - 106 - static const struct drm_connector_funcs mgag200_g200ew3_vga_connector_funcs = { 107 - MGAG200_VGA_CONNECTOR_FUNCS, 108 - }; 109 - 110 99 static int mgag200_g200ew3_pipeline_init(struct mga_device *mdev) 111 100 { 112 101 struct drm_device *dev = &mdev->base; 113 102 struct drm_plane *primary_plane = &mdev->primary_plane; 114 103 struct drm_crtc *crtc = &mdev->crtc; 115 - struct drm_encoder *encoder = &mdev->encoder; 116 - struct drm_connector *connector = &mdev->connector; 117 - struct i2c_adapter *ddc; 118 104 int ret; 119 105 120 106 ret = drm_universal_plane_init(dev, primary_plane, 0, ··· 127 143 drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); 128 144 drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); 129 145 130 - encoder->possible_crtcs = drm_crtc_mask(crtc); 131 - ret = drm_encoder_init(dev, encoder, &mgag200_g200ew3_dac_encoder_funcs, 132 - DRM_MODE_ENCODER_DAC, NULL); 133 - if (ret) { 134 - drm_err(dev, "drm_encoder_init() failed: %d\n", ret); 146 + ret = mgag200_vga_output_init(mdev); 147 + if (ret) 135 148 return ret; 136 - } 137 149 138 - ddc = mgag200_ddc_create(mdev); 139 - if (IS_ERR(ddc)) { 140 - ret = PTR_ERR(ddc); 141 - drm_err(dev, "failed to add DDC bus: %d\n", ret); 150 + ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); 151 + if (ret) 142 152 return ret; 143 - } 144 - 145 - ret = drm_connector_init_with_ddc(dev, connector, 146 - 
&mgag200_g200ew3_vga_connector_funcs, 147 - DRM_MODE_CONNECTOR_VGA, ddc); 148 - if (ret) { 149 - drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret); 150 - return ret; 151 - } 152 - drm_connector_helper_add(connector, &mgag200_g200ew3_vga_connector_helper_funcs); 153 - 154 - ret = drm_connector_attach_encoder(connector, encoder); 155 - if (ret) { 156 - drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret); 157 - return ret; 158 - } 159 153 160 154 return 0; 161 155 } ··· 202 240 return ERR_PTR(ret); 203 241 204 242 drm_mode_config_reset(dev); 243 + drm_kms_helper_poll_init(dev); 205 244 206 245 return mdev; 207 246 }
+5 -42
drivers/gpu/drm/mgag200/mgag200_g200se.c
··· 9 9 #include <drm/drm_gem_atomic_helper.h> 10 10 #include <drm/drm_probe_helper.h> 11 11 12 - #include "mgag200_ddc.h" 13 12 #include "mgag200_drv.h" 14 13 15 14 static int mgag200_g200se_init_pci_options(struct pci_dev *pdev) ··· 357 358 MGAG200_CRTC_FUNCS, 358 359 }; 359 360 360 - static const struct drm_encoder_funcs mgag200_g200se_dac_encoder_funcs = { 361 - MGAG200_DAC_ENCODER_FUNCS, 362 - }; 363 - 364 - static const struct drm_connector_helper_funcs mgag200_g200se_vga_connector_helper_funcs = { 365 - MGAG200_VGA_CONNECTOR_HELPER_FUNCS, 366 - }; 367 - 368 - static const struct drm_connector_funcs mgag200_g200se_vga_connector_funcs = { 369 - MGAG200_VGA_CONNECTOR_FUNCS, 370 - }; 371 - 372 361 static int mgag200_g200se_pipeline_init(struct mga_device *mdev) 373 362 { 374 363 struct drm_device *dev = &mdev->base; 375 364 struct drm_plane *primary_plane = &mdev->primary_plane; 376 365 struct drm_crtc *crtc = &mdev->crtc; 377 - struct drm_encoder *encoder = &mdev->encoder; 378 - struct drm_connector *connector = &mdev->connector; 379 - struct i2c_adapter *ddc; 380 366 int ret; 381 367 382 368 ret = drm_universal_plane_init(dev, primary_plane, 0, ··· 389 405 drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); 390 406 drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); 391 407 392 - encoder->possible_crtcs = drm_crtc_mask(crtc); 393 - ret = drm_encoder_init(dev, encoder, &mgag200_g200se_dac_encoder_funcs, 394 - DRM_MODE_ENCODER_DAC, NULL); 395 - if (ret) { 396 - drm_err(dev, "drm_encoder_init() failed: %d\n", ret); 408 + ret = mgag200_vga_output_init(mdev); 409 + if (ret) 397 410 return ret; 398 - } 399 411 400 - ddc = mgag200_ddc_create(mdev); 401 - if (IS_ERR(ddc)) { 402 - ret = PTR_ERR(ddc); 403 - drm_err(dev, "failed to add DDC bus: %d\n", ret); 412 + ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); 413 + if (ret) 404 414 return ret; 405 - } 406 - 407 - ret = drm_connector_init_with_ddc(dev, connector, 408 - 
&mgag200_g200se_vga_connector_funcs, 409 - DRM_MODE_CONNECTOR_VGA, ddc); 410 - if (ret) { 411 - drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret); 412 - return ret; 413 - } 414 - drm_connector_helper_add(connector, &mgag200_g200se_vga_connector_helper_funcs); 415 - 416 - ret = drm_connector_attach_encoder(connector, encoder); 417 - if (ret) { 418 - drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret); 419 - return ret; 420 - } 421 415 422 416 return 0; 423 417 } ··· 521 559 return ERR_PTR(ret); 522 560 523 561 drm_mode_config_reset(dev); 562 + drm_kms_helper_poll_init(dev); 524 563 525 564 return mdev; 526 565 }
+5 -42
drivers/gpu/drm/mgag200/mgag200_g200wb.c
··· 9 9 #include <drm/drm_gem_atomic_helper.h> 10 10 #include <drm/drm_probe_helper.h> 11 11 12 - #include "mgag200_ddc.h" 13 12 #include "mgag200_drv.h" 14 13 15 14 void mgag200_g200wb_init_registers(struct mga_device *mdev) ··· 229 230 MGAG200_CRTC_FUNCS, 230 231 }; 231 232 232 - static const struct drm_encoder_funcs mgag200_g200wb_dac_encoder_funcs = { 233 - MGAG200_DAC_ENCODER_FUNCS, 234 - }; 235 - 236 - static const struct drm_connector_helper_funcs mgag200_g200wb_vga_connector_helper_funcs = { 237 - MGAG200_VGA_CONNECTOR_HELPER_FUNCS, 238 - }; 239 - 240 - static const struct drm_connector_funcs mgag200_g200wb_vga_connector_funcs = { 241 - MGAG200_VGA_CONNECTOR_FUNCS, 242 - }; 243 - 244 233 static int mgag200_g200wb_pipeline_init(struct mga_device *mdev) 245 234 { 246 235 struct drm_device *dev = &mdev->base; 247 236 struct drm_plane *primary_plane = &mdev->primary_plane; 248 237 struct drm_crtc *crtc = &mdev->crtc; 249 - struct drm_encoder *encoder = &mdev->encoder; 250 - struct drm_connector *connector = &mdev->connector; 251 - struct i2c_adapter *ddc; 252 238 int ret; 253 239 254 240 ret = drm_universal_plane_init(dev, primary_plane, 0, ··· 261 277 drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); 262 278 drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); 263 279 264 - encoder->possible_crtcs = drm_crtc_mask(crtc); 265 - ret = drm_encoder_init(dev, encoder, &mgag200_g200wb_dac_encoder_funcs, 266 - DRM_MODE_ENCODER_DAC, NULL); 267 - if (ret) { 268 - drm_err(dev, "drm_encoder_init() failed: %d\n", ret); 280 + ret = mgag200_vga_output_init(mdev); 281 + if (ret) 269 282 return ret; 270 - } 271 283 272 - ddc = mgag200_ddc_create(mdev); 273 - if (IS_ERR(ddc)) { 274 - ret = PTR_ERR(ddc); 275 - drm_err(dev, "failed to add DDC bus: %d\n", ret); 284 + ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); 285 + if (ret) 276 286 return ret; 277 - } 278 - 279 - ret = drm_connector_init_with_ddc(dev, connector, 280 - 
&mgag200_g200wb_vga_connector_funcs, 281 - DRM_MODE_CONNECTOR_VGA, ddc); 282 - if (ret) { 283 - drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret); 284 - return ret; 285 - } 286 - drm_connector_helper_add(connector, &mgag200_g200wb_vga_connector_helper_funcs); 287 - 288 - ret = drm_connector_attach_encoder(connector, encoder); 289 - if (ret) { 290 - drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret); 291 - return ret; 292 - } 293 287 294 288 return 0; 295 289 } ··· 326 364 return ERR_PTR(ret); 327 365 328 366 drm_mode_config_reset(dev); 367 + drm_kms_helper_poll_init(dev); 329 368 330 369 return mdev; 331 370 }
+72
drivers/gpu/drm/mgag200/mgag200_vga.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <drm/drm_atomic_helper.h> 4 + #include <drm/drm_modeset_helper_vtables.h> 5 + #include <drm/drm_probe_helper.h> 6 + 7 + #include "mgag200_ddc.h" 8 + #include "mgag200_drv.h" 9 + 10 + static const struct drm_encoder_funcs mgag200_dac_encoder_funcs = { 11 + .destroy = drm_encoder_cleanup 12 + }; 13 + 14 + static const struct drm_connector_helper_funcs mgag200_vga_connector_helper_funcs = { 15 + .get_modes = drm_connector_helper_get_modes, 16 + .detect_ctx = drm_connector_helper_detect_from_ddc 17 + }; 18 + 19 + static const struct drm_connector_funcs mgag200_vga_connector_funcs = { 20 + .reset = drm_atomic_helper_connector_reset, 21 + .fill_modes = drm_helper_probe_single_connector_modes, 22 + .destroy = drm_connector_cleanup, 23 + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 24 + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state 25 + }; 26 + 27 + int mgag200_vga_output_init(struct mga_device *mdev) 28 + { 29 + struct drm_device *dev = &mdev->base; 30 + struct drm_crtc *crtc = &mdev->crtc; 31 + struct drm_encoder *encoder; 32 + struct drm_connector *connector; 33 + struct i2c_adapter *ddc; 34 + int ret; 35 + 36 + encoder = &mdev->output.vga.encoder; 37 + ret = drm_encoder_init(dev, encoder, &mgag200_dac_encoder_funcs, 38 + DRM_MODE_ENCODER_DAC, NULL); 39 + if (ret) { 40 + drm_err(dev, "drm_encoder_init() failed: %d\n", ret); 41 + return ret; 42 + } 43 + encoder->possible_crtcs = drm_crtc_mask(crtc); 44 + 45 + ddc = mgag200_ddc_create(mdev); 46 + if (IS_ERR(ddc)) { 47 + ret = PTR_ERR(ddc); 48 + drm_err(dev, "failed to add DDC bus: %d\n", ret); 49 + return ret; 50 + } 51 + 52 + connector = &mdev->output.vga.connector; 53 + ret = drm_connector_init_with_ddc(dev, connector, 54 + &mgag200_vga_connector_funcs, 55 + DRM_MODE_CONNECTOR_VGA, ddc); 56 + if (ret) { 57 + drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret); 58 + return ret; 59 + } 60 + 
drm_connector_helper_add(connector, &mgag200_vga_connector_helper_funcs); 61 + 62 + connector->polled = DRM_CONNECTOR_POLL_CONNECT | 63 + DRM_CONNECTOR_POLL_DISCONNECT; 64 + 65 + ret = drm_connector_attach_encoder(connector, encoder); 66 + if (ret) { 67 + drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret); 68 + return ret; 69 + } 70 + 71 + return 0; 72 + }
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 898 898 * Without this the operation can timeout and we'll fallback to a 899 899 * software copy, which might take several minutes to finish. 900 900 */ 901 - nouveau_fence_wait(fence, false, false); 901 + nouveau_fence_wait(fence, false); 902 902 ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, 903 903 new_reg); 904 904 nouveau_fence_unref(&fence);
+1 -1
drivers/gpu/drm/nouveau/nouveau_chan.c
··· 72 72 73 73 ret = nouveau_fence_new(&fence, chan); 74 74 if (!ret) { 75 - ret = nouveau_fence_wait(fence, false, false); 75 + ret = nouveau_fence_wait(fence, false); 76 76 nouveau_fence_unref(&fence); 77 77 } 78 78
+1 -1
drivers/gpu/drm/nouveau/nouveau_dmem.c
··· 128 128 static void nouveau_dmem_fence_done(struct nouveau_fence **fence) 129 129 { 130 130 if (fence) { 131 - nouveau_fence_wait(*fence, true, false); 131 + nouveau_fence_wait(*fence, false); 132 132 nouveau_fence_unref(fence); 133 133 } else { 134 134 /*
+1 -1
drivers/gpu/drm/nouveau/nouveau_exec.c
··· 188 188 return DRM_GPU_SCHED_STAT_NOMINAL; 189 189 } 190 190 191 - static struct nouveau_job_ops nouveau_exec_job_ops = { 191 + static const struct nouveau_job_ops nouveau_exec_job_ops = { 192 192 .submit = nouveau_exec_job_submit, 193 193 .armed_submit = nouveau_exec_job_armed_submit, 194 194 .run = nouveau_exec_job_run,
+1 -29
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 311 311 return timeout - t; 312 312 } 313 313 314 - static int 315 - nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr) 316 - { 317 - int ret = 0; 318 - 319 - while (!nouveau_fence_done(fence)) { 320 - if (time_after_eq(jiffies, fence->timeout)) { 321 - ret = -EBUSY; 322 - break; 323 - } 324 - 325 - __set_current_state(intr ? 326 - TASK_INTERRUPTIBLE : 327 - TASK_UNINTERRUPTIBLE); 328 - 329 - if (intr && signal_pending(current)) { 330 - ret = -ERESTARTSYS; 331 - break; 332 - } 333 - } 334 - 335 - __set_current_state(TASK_RUNNING); 336 - return ret; 337 - } 338 - 339 314 int 340 - nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr) 315 + nouveau_fence_wait(struct nouveau_fence *fence, bool intr) 341 316 { 342 317 long ret; 343 - 344 - if (!lazy) 345 - return nouveau_fence_wait_busy(fence, intr); 346 318 347 319 ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ); 348 320 if (ret < 0)
+1 -1
drivers/gpu/drm/nouveau/nouveau_fence.h
··· 23 23 24 24 int nouveau_fence_emit(struct nouveau_fence *); 25 25 bool nouveau_fence_done(struct nouveau_fence *); 26 - int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); 26 + int nouveau_fence_wait(struct nouveau_fence *, bool intr); 27 27 int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr); 28 28 29 29 struct nouveau_fence_chan {
+1 -1
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 928 928 } 929 929 930 930 if (sync) { 931 - if (!(ret = nouveau_fence_wait(fence, false, false))) { 931 + if (!(ret = nouveau_fence_wait(fence, false))) { 932 932 if ((ret = dma_fence_get_status(&fence->base)) == 1) 933 933 ret = 0; 934 934 }
+2 -2
drivers/gpu/drm/nouveau/nouveau_sched.h
··· 42 42 u32 count; 43 43 } out_sync; 44 44 45 - struct nouveau_job_ops *ops; 45 + const struct nouveau_job_ops *ops; 46 46 }; 47 47 48 48 struct nouveau_job { ··· 73 73 u32 count; 74 74 } out_sync; 75 75 76 - struct nouveau_job_ops { 76 + const struct nouveau_job_ops { 77 77 /* If .submit() returns without any error, it is guaranteed that 78 78 * armed_submit() is called. 79 79 */
+1 -1
drivers/gpu/drm/nouveau/nouveau_uvmm.c
··· 1534 1534 nouveau_uvmm_bind_job_put(bind_job); 1535 1535 } 1536 1536 1537 - static struct nouveau_job_ops nouveau_bind_job_ops = { 1537 + static const struct nouveau_job_ops nouveau_bind_job_ops = { 1538 1538 .submit = nouveau_uvmm_bind_job_submit, 1539 1539 .armed_submit = nouveau_uvmm_bind_job_armed_submit, 1540 1540 .run = nouveau_uvmm_bind_job_run,
+25 -167
drivers/gpu/drm/panel/panel-edp.c
··· 1045 1045 }, 1046 1046 }; 1047 1047 1048 - static const struct drm_display_mode auo_b133han05_mode = { 1049 - .clock = 142600, 1050 - .hdisplay = 1920, 1051 - .hsync_start = 1920 + 58, 1052 - .hsync_end = 1920 + 58 + 42, 1053 - .htotal = 1920 + 58 + 42 + 60, 1054 - .vdisplay = 1080, 1055 - .vsync_start = 1080 + 3, 1056 - .vsync_end = 1080 + 3 + 5, 1057 - .vtotal = 1080 + 3 + 5 + 54, 1058 - }; 1059 - 1060 - static const struct panel_desc auo_b133han05 = { 1061 - .modes = &auo_b133han05_mode, 1062 - .num_modes = 1, 1063 - .bpc = 8, 1064 - .size = { 1065 - .width = 293, 1066 - .height = 165, 1067 - }, 1068 - .delay = { 1069 - .hpd_reliable = 100, 1070 - .enable = 20, 1071 - .unprepare = 50, 1072 - }, 1073 - }; 1074 - 1075 1048 static const struct drm_display_mode auo_b133htn01_mode = { 1076 1049 .clock = 150660, 1077 1050 .hdisplay = 1920, ··· 1091 1118 .size = { 1092 1119 .width = 293, 1093 1120 .height = 165, 1094 - }, 1095 - }; 1096 - 1097 - static const struct drm_display_mode auo_b140han06_mode = { 1098 - .clock = 141000, 1099 - .hdisplay = 1920, 1100 - .hsync_start = 1920 + 16, 1101 - .hsync_end = 1920 + 16 + 16, 1102 - .htotal = 1920 + 16 + 16 + 152, 1103 - .vdisplay = 1080, 1104 - .vsync_start = 1080 + 3, 1105 - .vsync_end = 1080 + 3 + 14, 1106 - .vtotal = 1080 + 3 + 14 + 19, 1107 - }; 1108 - 1109 - static const struct panel_desc auo_b140han06 = { 1110 - .modes = &auo_b140han06_mode, 1111 - .num_modes = 1, 1112 - .bpc = 8, 1113 - .size = { 1114 - .width = 309, 1115 - .height = 174, 1116 - }, 1117 - .delay = { 1118 - .hpd_reliable = 100, 1119 - .enable = 20, 1120 - .unprepare = 50, 1121 1121 }, 1122 1122 }; 1123 1123 ··· 1360 1414 }, 1361 1415 }; 1362 1416 1363 - static const struct drm_display_mode ivo_m133nwf4_r0_mode = { 1364 - .clock = 138778, 1365 - .hdisplay = 1920, 1366 - .hsync_start = 1920 + 24, 1367 - .hsync_end = 1920 + 24 + 48, 1368 - .htotal = 1920 + 24 + 48 + 88, 1369 - .vdisplay = 1080, 1370 - .vsync_start = 1080 + 3, 1371 - .vsync_end = 1080 
+ 3 + 12, 1372 - .vtotal = 1080 + 3 + 12 + 17, 1373 - .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 1374 - }; 1375 - 1376 - static const struct panel_desc ivo_m133nwf4_r0 = { 1377 - .modes = &ivo_m133nwf4_r0_mode, 1378 - .num_modes = 1, 1379 - .bpc = 8, 1380 - .size = { 1381 - .width = 294, 1382 - .height = 165, 1383 - }, 1384 - .delay = { 1385 - .hpd_absent = 200, 1386 - .unprepare = 500, 1387 - }, 1388 - }; 1389 - 1390 1417 static const struct drm_display_mode kingdisplay_kd116n21_30nv_a010_mode = { 1391 1418 .clock = 81000, 1392 1419 .hdisplay = 1366, ··· 1608 1689 }, 1609 1690 }; 1610 1691 1611 - static const struct drm_display_mode sharp_lq140m1jw46_mode[] = { 1612 - { 1613 - .clock = 346500, 1614 - .hdisplay = 1920, 1615 - .hsync_start = 1920 + 48, 1616 - .hsync_end = 1920 + 48 + 32, 1617 - .htotal = 1920 + 48 + 32 + 80, 1618 - .vdisplay = 1080, 1619 - .vsync_start = 1080 + 3, 1620 - .vsync_end = 1080 + 3 + 5, 1621 - .vtotal = 1080 + 3 + 5 + 69, 1622 - .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, 1623 - }, { 1624 - .clock = 144370, 1625 - .hdisplay = 1920, 1626 - .hsync_start = 1920 + 48, 1627 - .hsync_end = 1920 + 48 + 32, 1628 - .htotal = 1920 + 48 + 32 + 80, 1629 - .vdisplay = 1080, 1630 - .vsync_start = 1080 + 3, 1631 - .vsync_end = 1080 + 3 + 5, 1632 - .vtotal = 1080 + 3 + 5 + 69, 1633 - .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, 1634 - }, 1635 - }; 1636 - 1637 - static const struct panel_desc sharp_lq140m1jw46 = { 1638 - .modes = sharp_lq140m1jw46_mode, 1639 - .num_modes = ARRAY_SIZE(sharp_lq140m1jw46_mode), 1640 - .bpc = 8, 1641 - .size = { 1642 - .width = 309, 1643 - .height = 174, 1644 - }, 1645 - .delay = { 1646 - .hpd_absent = 80, 1647 - .enable = 50, 1648 - .unprepare = 500, 1649 - }, 1650 - }; 1651 - 1652 - static const struct drm_display_mode starry_kr122ea0sra_mode = { 1653 - .clock = 147000, 1654 - .hdisplay = 1920, 1655 - .hsync_start = 1920 + 16, 1656 - .hsync_end = 1920 + 16 + 16, 1657 - .htotal = 1920 + 16 + 16 
+ 32, 1658 - .vdisplay = 1200, 1659 - .vsync_start = 1200 + 15, 1660 - .vsync_end = 1200 + 15 + 2, 1661 - .vtotal = 1200 + 15 + 2 + 18, 1662 - .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, 1663 - }; 1664 - 1665 - static const struct panel_desc starry_kr122ea0sra = { 1666 - .modes = &starry_kr122ea0sra_mode, 1667 - .num_modes = 1, 1668 - .size = { 1669 - .width = 263, 1670 - .height = 164, 1671 - }, 1672 - .delay = { 1673 - /* TODO: should be hpd-absent and no-hpd should be set? */ 1674 - .hpd_reliable = 10 + 200, 1675 - .enable = 50, 1676 - .unprepare = 10 + 500, 1677 - }, 1678 - }; 1679 - 1680 1692 static const struct of_device_id platform_of_match[] = { 1681 1693 { 1682 1694 /* Must be first */ 1683 1695 .compatible = "edp-panel", 1684 - }, { 1696 + }, 1697 + /* 1698 + * Do not add panels to the list below unless they cannot be handled by 1699 + * the generic edp-panel compatible. 1700 + * 1701 + * The only two valid reasons are: 1702 + * - Because of the panel issues (e.g. broken EDID or broken 1703 + * identification). 1704 + * - Because the eDP drivers didn't wire up the AUX bus properly. 1705 + * NOTE that, though this is a marginally valid reason, 1706 + * some justification needs to be made for why the platform can't 1707 + * wire up the AUX bus properly. 1708 + * 1709 + * In all other cases the platform should use the aux-bus and declare 1710 + * the panel using the 'edp-panel' compatible as a device on the AUX 1711 + * bus. 
1712 + */ 1713 + { 1685 1714 .compatible = "auo,b101ean01", 1686 1715 .data = &auo_b101ean01, 1687 1716 }, { 1688 1717 .compatible = "auo,b116xa01", 1689 1718 .data = &auo_b116xak01, 1690 1719 }, { 1691 - .compatible = "auo,b133han05", 1692 - .data = &auo_b133han05, 1693 - }, { 1694 1720 .compatible = "auo,b133htn01", 1695 1721 .data = &auo_b133htn01, 1696 1722 }, { 1697 1723 .compatible = "auo,b133xtn01", 1698 1724 .data = &auo_b133xtn01, 1699 - }, { 1700 - .compatible = "auo,b140han06", 1701 - .data = &auo_b140han06, 1702 1725 }, { 1703 1726 .compatible = "boe,nv101wxmn51", 1704 1727 .data = &boe_nv101wxmn51, ··· 1668 1807 }, { 1669 1808 .compatible = "innolux,p120zdg-bf1", 1670 1809 .data = &innolux_p120zdg_bf1, 1671 - }, { 1672 - .compatible = "ivo,m133nwf4-r0", 1673 - .data = &ivo_m133nwf4_r0, 1674 1810 }, { 1675 1811 .compatible = "kingdisplay,kd116n21-30nv-a010", 1676 1812 .data = &kingdisplay_kd116n21_30nv_a010, ··· 1698 1840 }, { 1699 1841 .compatible = "sharp,lq123p1jx31", 1700 1842 .data = &sharp_lq123p1jx31, 1701 - }, { 1702 - .compatible = "sharp,lq140m1jw46", 1703 - .data = &sharp_lq140m1jw46, 1704 - }, { 1705 - .compatible = "starry,kr122ea0sra", 1706 - .data = &starry_kr122ea0sra, 1707 1843 }, { 1708 1844 /* sentinel */ 1709 1845 } ··· 1747 1895 .unprepare = 500, 1748 1896 .enable = 80, 1749 1897 .disable = 50, 1898 + }; 1899 + 1900 + static const struct panel_delay delay_80_500_e50 = { 1901 + .hpd_absent = 80, 1902 + .unprepare = 500, 1903 + .enable = 50, 1750 1904 }; 1751 1905 1752 1906 static const struct panel_delay delay_100_500_e200 = { ··· 1963 2105 EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01"), 1964 2106 1965 2107 EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"), 1966 - EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &sharp_lq140m1jw46.delay, "LQ140M1JW46"), 2108 + EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &delay_80_500_e50, "LQ140M1JW46"), 1967 2109 EDP_PANEL_ENTRY('S', 'H', 'P', 0x153a, 
&delay_200_500_e50, "LQ140T1JH01"), 1968 2110 EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"), 1969 2111
+10
drivers/gpu/drm/panfrost/panfrost_drv.c
··· 777 777 .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF), 778 778 }; 779 779 780 + /* MT8188 uses the same power domains and power supplies as MT8183 */ 781 + static const struct panfrost_compatible mediatek_mt8188_data = { 782 + .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1, 783 + .supply_names = mediatek_mt8183_b_supplies, 784 + .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains), 785 + .pm_domain_names = mediatek_mt8183_pm_domains, 786 + .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF), 787 + }; 788 + 780 789 static const char * const mediatek_mt8192_supplies[] = { "mali", NULL }; 781 790 static const char * const mediatek_mt8192_pm_domains[] = { "core0", "core1", "core2", 782 791 "core3", "core4" }; ··· 817 808 { .compatible = "mediatek,mt8183-mali", .data = &mediatek_mt8183_data }, 818 809 { .compatible = "mediatek,mt8183b-mali", .data = &mediatek_mt8183_b_data }, 819 810 { .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data }, 811 + { .compatible = "mediatek,mt8188-mali", .data = &mediatek_mt8188_data }, 820 812 { .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data }, 821 813 {} 822 814 };
+13 -1
drivers/gpu/drm/tidss/tidss_plane.c
··· 8 8 #include <drm/drm_atomic_helper.h> 9 9 #include <drm/drm_blend.h> 10 10 #include <drm/drm_crtc.h> 11 + #include <drm/drm_fb_dma_helper.h> 11 12 #include <drm/drm_fourcc.h> 12 13 #include <drm/drm_framebuffer.h> 13 14 #include <drm/drm_gem_atomic_helper.h> ··· 167 166 .atomic_disable = tidss_plane_atomic_disable, 168 167 }; 169 168 169 + static const struct drm_plane_helper_funcs tidss_primary_plane_helper_funcs = { 170 + .atomic_check = tidss_plane_atomic_check, 171 + .atomic_update = tidss_plane_atomic_update, 172 + .atomic_enable = tidss_plane_atomic_enable, 173 + .atomic_disable = tidss_plane_atomic_disable, 174 + .get_scanout_buffer = drm_fb_dma_get_scanout_buffer, 175 + }; 176 + 170 177 static const struct drm_plane_funcs tidss_plane_funcs = { 171 178 .update_plane = drm_atomic_helper_update_plane, 172 179 .disable_plane = drm_atomic_helper_disable_plane, ··· 220 211 if (ret < 0) 221 212 goto err; 222 213 223 - drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs); 214 + if (type == DRM_PLANE_TYPE_PRIMARY) 215 + drm_plane_helper_add(&tplane->plane, &tidss_primary_plane_helper_funcs); 216 + else 217 + drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs); 224 218 225 219 drm_plane_create_zpos_property(&tplane->plane, tidss->num_planes, 0, 226 220 num_planes - 1);
+18 -19
drivers/gpu/drm/xlnx/zynqmp_dp.c
··· 256 256 * @fmt: format identifier string 257 257 */ 258 258 struct zynqmp_dp_mode { 259 + const char *fmt; 260 + int pclock; 259 261 u8 bw_code; 260 262 u8 lane_cnt; 261 - int pclock; 262 - const char *fmt; 263 263 }; 264 264 265 265 /** ··· 296 296 * @train_set: set of training data 297 297 */ 298 298 struct zynqmp_dp { 299 + struct drm_dp_aux aux; 300 + struct drm_bridge bridge; 301 + struct work_struct hpd_work; 302 + 303 + struct drm_bridge *next_bridge; 299 304 struct device *dev; 300 305 struct zynqmp_dpsub *dpsub; 301 306 void __iomem *iomem; 302 307 struct reset_control *reset; 303 - int irq; 304 - 305 - struct drm_bridge bridge; 306 - struct drm_bridge *next_bridge; 307 - 308 - struct zynqmp_dp_config config; 309 - struct drm_dp_aux aux; 310 308 struct phy *phy[ZYNQMP_DP_MAX_LANES]; 311 - u8 num_lanes; 312 - struct delayed_work hpd_work; 309 + 313 310 enum drm_connector_status status; 311 + int irq; 314 312 bool enabled; 315 313 316 - u8 dpcd[DP_RECEIVER_CAP_SIZE]; 317 - struct zynqmp_dp_link_config link_config; 318 314 struct zynqmp_dp_mode mode; 315 + struct zynqmp_dp_link_config link_config; 316 + struct zynqmp_dp_config config; 317 + u8 dpcd[DP_RECEIVER_CAP_SIZE]; 319 318 u8 train_set[ZYNQMP_DP_MAX_LANES]; 319 + u8 num_lanes; 320 320 }; 321 321 322 322 static inline struct zynqmp_dp *bridge_to_dp(struct drm_bridge *bridge) ··· 1482 1482 struct zynqmp_dp *dp = bridge_to_dp(bridge); 1483 1483 1484 1484 dp->enabled = false; 1485 - cancel_delayed_work(&dp->hpd_work); 1485 + cancel_work(&dp->hpd_work); 1486 1486 zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 0); 1487 1487 drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D3); 1488 1488 zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN, ··· 1648 1648 1649 1649 static void zynqmp_dp_hpd_work_func(struct work_struct *work) 1650 1650 { 1651 - struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, 1652 - hpd_work.work); 1651 + struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, hpd_work); 
1653 1652 enum drm_connector_status status; 1654 1653 1655 1654 status = zynqmp_dp_bridge_detect(&dp->bridge); ··· 1684 1685 zynqmp_dpsub_drm_handle_vblank(dp->dpsub); 1685 1686 1686 1687 if (status & ZYNQMP_DP_INT_HPD_EVENT) 1687 - schedule_delayed_work(&dp->hpd_work, 0); 1688 + schedule_work(&dp->hpd_work); 1688 1689 1689 1690 if (status & ZYNQMP_DP_INT_HPD_IRQ) { 1690 1691 int ret; ··· 1726 1727 dp->dpsub = dpsub; 1727 1728 dp->status = connector_status_disconnected; 1728 1729 1729 - INIT_DELAYED_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func); 1730 + INIT_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func); 1730 1731 1731 1732 /* Acquire all resources (IOMEM, IRQ and PHYs). */ 1732 1733 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp"); ··· 1831 1832 zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_ALL); 1832 1833 disable_irq(dp->irq); 1833 1834 1834 - cancel_delayed_work_sync(&dp->hpd_work); 1835 + cancel_work_sync(&dp->hpd_work); 1835 1836 1836 1837 zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 0); 1837 1838 zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, 0xffffffff);
+1
drivers/gpu/drm/xlnx/zynqmp_dpsub.c
··· 269 269 return 0; 270 270 271 271 err_disp: 272 + drm_bridge_remove(dpsub->bridge); 272 273 zynqmp_disp_remove(dpsub); 273 274 err_dp: 274 275 zynqmp_dp_remove(dpsub);
+16 -6
drivers/gpu/drm/xlnx/zynqmp_kms.c
··· 120 120 zynqmp_disp_blend_set_global_alpha(dpsub->disp, true, 121 121 plane->state->alpha >> 8); 122 122 123 - /* Enable or re-enable the plane if the format has changed. */ 124 - if (format_changed) 125 - zynqmp_disp_layer_enable(layer); 123 + /* 124 + * Unconditionally enable the layer, as it may have been disabled 125 + * previously either explicitly to reconfigure layer format, or 126 + * implicitly after DPSUB reset during display mode change. DRM 127 + * framework calls this callback for enabled planes only. 128 + */ 129 + zynqmp_disp_layer_enable(layer); 126 130 } 127 131 128 132 static const struct drm_plane_helper_funcs zynqmp_dpsub_plane_helper_funcs = { ··· 437 433 DRM_BRIDGE_ATTACH_NO_CONNECTOR); 438 434 if (ret) { 439 435 dev_err(dpsub->dev, "failed to attach bridge to encoder\n"); 440 - return ret; 436 + goto err_encoder; 441 437 } 442 438 443 439 /* Create the connector for the chain of bridges. */ 444 440 connector = drm_bridge_connector_init(&dpsub->drm->dev, encoder); 445 441 if (IS_ERR(connector)) { 446 442 dev_err(dpsub->dev, "failed to created connector\n"); 447 - return PTR_ERR(connector); 443 + ret = PTR_ERR(connector); 444 + goto err_encoder; 448 445 } 449 446 450 447 ret = drm_connector_attach_encoder(connector, encoder); 451 448 if (ret < 0) { 452 449 dev_err(dpsub->dev, "failed to attach connector to encoder\n"); 453 - return ret; 450 + goto err_encoder; 454 451 } 455 452 456 453 return 0; 454 + 455 + err_encoder: 456 + drm_encoder_cleanup(encoder); 457 + return ret; 457 458 } 458 459 459 460 static void zynqmp_dpsub_drm_release(struct drm_device *drm, void *res) ··· 538 529 539 530 drm_dev_unregister(drm); 540 531 drm_atomic_helper_shutdown(drm); 532 + drm_encoder_cleanup(&dpsub->drm->encoder); 541 533 drm_kms_helper_poll_fini(drm); 542 534 }
+115 -98
include/drm/drm_connector.h
··· 202 202 DRM_MODE_TV_MODE_SECAM, 203 203 204 204 /** 205 + * @DRM_MODE_TV_MODE_MONOCHROME: Use timings appropriate to 206 + * the DRM mode, including equalizing pulses for a 525-line 207 + * or 625-line mode, with no pedestal or color encoding. 208 + */ 209 + DRM_MODE_TV_MODE_MONOCHROME, 210 + 211 + /** 205 212 * @DRM_MODE_TV_MODE_MAX: Number of analog TV output modes. 206 213 * 207 214 * Internal implementation detail; this is not uABI. ··· 936 929 bool set; 937 930 }; 938 931 932 + /* 933 + * struct drm_connector_hdmi_state - HDMI state container 934 + */ 935 + struct drm_connector_hdmi_state { 936 + /** 937 + * @broadcast_rgb: Connector property to pass the 938 + * Broadcast RGB selection value. 939 + */ 940 + enum drm_hdmi_broadcast_rgb broadcast_rgb; 941 + 942 + /** 943 + * @infoframes: HDMI Infoframes matching that state 944 + */ 945 + struct { 946 + /** 947 + * @avi: AVI Infoframes structure matching our 948 + * state. 949 + */ 950 + struct drm_connector_hdmi_infoframe avi; 951 + 952 + /** 953 + * @hdr_drm: DRM (Dynamic Range and Mastering) 954 + * Infoframes structure matching our state. 955 + */ 956 + struct drm_connector_hdmi_infoframe hdr_drm; 957 + 958 + /** 959 + * @spd: SPD Infoframes structure matching our 960 + * state. 961 + */ 962 + struct drm_connector_hdmi_infoframe spd; 963 + 964 + /** 965 + * @vendor: HDMI Vendor Infoframes structure 966 + * matching our state. 967 + */ 968 + struct drm_connector_hdmi_infoframe hdmi; 969 + } infoframes; 970 + 971 + /** 972 + * @is_limited_range: Is the output supposed to use a limited 973 + * RGB Quantization Range or not? 974 + */ 975 + bool is_limited_range; 976 + 977 + /** 978 + * @output_bpc: Bits per color channel to output. 979 + */ 980 + unsigned int output_bpc; 981 + 982 + /** 983 + * @output_format: Pixel format to output in. 984 + */ 985 + enum hdmi_colorspace output_format; 986 + 987 + /** 988 + * @tmds_char_rate: TMDS Character Rate, in Hz. 
989 + */ 990 + unsigned long long tmds_char_rate; 991 + }; 992 + 939 993 /** 940 994 * struct drm_connector_state - mutable connector state 941 995 */ ··· 1146 1078 * @hdmi: HDMI-related variable and properties. Filled by 1147 1079 * @drm_atomic_helper_connector_hdmi_check(). 1148 1080 */ 1149 - struct { 1150 - /** 1151 - * @broadcast_rgb: Connector property to pass the 1152 - * Broadcast RGB selection value. 1153 - */ 1154 - enum drm_hdmi_broadcast_rgb broadcast_rgb; 1155 - 1156 - /** 1157 - * @infoframes: HDMI Infoframes matching that state 1158 - */ 1159 - struct { 1160 - /** 1161 - * @avi: AVI Infoframes structure matching our 1162 - * state. 1163 - */ 1164 - struct drm_connector_hdmi_infoframe avi; 1165 - 1166 - /** 1167 - * @hdr_drm: DRM (Dynamic Range and Mastering) 1168 - * Infoframes structure matching our state. 1169 - */ 1170 - struct drm_connector_hdmi_infoframe hdr_drm; 1171 - 1172 - /** 1173 - * @spd: SPD Infoframes structure matching our 1174 - * state. 1175 - */ 1176 - struct drm_connector_hdmi_infoframe spd; 1177 - 1178 - /** 1179 - * @vendor: HDMI Vendor Infoframes structure 1180 - * matching our state. 1181 - */ 1182 - struct drm_connector_hdmi_infoframe hdmi; 1183 - } infoframes; 1184 - 1185 - /** 1186 - * @is_limited_range: Is the output supposed to use a limited 1187 - * RGB Quantization Range or not? 1188 - */ 1189 - bool is_limited_range; 1190 - 1191 - /** 1192 - * @output_bpc: Bits per color channel to output. 1193 - */ 1194 - unsigned int output_bpc; 1195 - 1196 - /** 1197 - * @output_format: Pixel format to output in. 1198 - */ 1199 - enum hdmi_colorspace output_format; 1200 - 1201 - /** 1202 - * @tmds_char_rate: TMDS Character Rate, in Hz. 
1203 - */ 1204 - unsigned long long tmds_char_rate; 1205 - } hdmi; 1081 + struct drm_connector_hdmi_state hdmi; 1206 1082 }; 1207 1083 1208 1084 /** ··· 1668 1656 bool tv_mode_specified; 1669 1657 }; 1670 1658 1659 + /* 1660 + * struct drm_connector_hdmi - DRM Connector HDMI-related structure 1661 + */ 1662 + struct drm_connector_hdmi { 1663 + #define DRM_CONNECTOR_HDMI_VENDOR_LEN 8 1664 + /** 1665 + * @vendor: HDMI Controller Vendor Name 1666 + */ 1667 + unsigned char vendor[DRM_CONNECTOR_HDMI_VENDOR_LEN] __nonstring; 1668 + 1669 + #define DRM_CONNECTOR_HDMI_PRODUCT_LEN 16 1670 + /** 1671 + * @product: HDMI Controller Product Name 1672 + */ 1673 + unsigned char product[DRM_CONNECTOR_HDMI_PRODUCT_LEN] __nonstring; 1674 + 1675 + /** 1676 + * @supported_formats: Bitmask of @hdmi_colorspace 1677 + * supported by the controller. 1678 + */ 1679 + unsigned long supported_formats; 1680 + 1681 + /** 1682 + * @funcs: HDMI connector Control Functions 1683 + */ 1684 + const struct drm_connector_hdmi_funcs *funcs; 1685 + 1686 + /** 1687 + * @infoframes: Current Infoframes output by the connector 1688 + */ 1689 + struct { 1690 + /** 1691 + * @lock: Mutex protecting against concurrent access to 1692 + * the infoframes, most notably between KMS and ALSA. 1693 + */ 1694 + struct mutex lock; 1695 + 1696 + /** 1697 + * @audio: Current Audio Infoframes structure. Protected 1698 + * by @lock. 1699 + */ 1700 + struct drm_connector_hdmi_infoframe audio; 1701 + } infoframes; 1702 + }; 1703 + 1671 1704 /** 1672 1705 * struct drm_connector - central DRM connector control structure 1673 1706 * ··· 2125 2068 /** 2126 2069 * @hdmi: HDMI-related variable and properties. 
2127 2070 */ 2128 - struct { 2129 - #define DRM_CONNECTOR_HDMI_VENDOR_LEN 8 2130 - /** 2131 - * @vendor: HDMI Controller Vendor Name 2132 - */ 2133 - unsigned char vendor[DRM_CONNECTOR_HDMI_VENDOR_LEN] __nonstring; 2134 - 2135 - #define DRM_CONNECTOR_HDMI_PRODUCT_LEN 16 2136 - /** 2137 - * @product: HDMI Controller Product Name 2138 - */ 2139 - unsigned char product[DRM_CONNECTOR_HDMI_PRODUCT_LEN] __nonstring; 2140 - 2141 - /** 2142 - * @supported_formats: Bitmask of @hdmi_colorspace 2143 - * supported by the controller. 2144 - */ 2145 - unsigned long supported_formats; 2146 - 2147 - /** 2148 - * @funcs: HDMI connector Control Functions 2149 - */ 2150 - const struct drm_connector_hdmi_funcs *funcs; 2151 - 2152 - /** 2153 - * @infoframes: Current Infoframes output by the connector 2154 - */ 2155 - struct { 2156 - /** 2157 - * @lock: Mutex protecting against concurrent access to 2158 - * the infoframes, most notably between KMS and ALSA. 2159 - */ 2160 - struct mutex lock; 2161 - 2162 - /** 2163 - * @audio: Current Audio Infoframes structure. Protected 2164 - * by @lock. 2165 - */ 2166 - struct drm_connector_hdmi_infoframe audio; 2167 - } infoframes; 2168 - } hdmi; 2071 + struct drm_connector_hdmi hdmi; 2169 2072 }; 2170 2073 2171 2074 #define obj_to_connector(x) container_of(x, struct drm_connector, base)
+2 -2
include/linux/dma-heap.h
··· 23 23 struct dma_heap_ops { 24 24 struct dma_buf *(*allocate)(struct dma_heap *heap, 25 25 unsigned long len, 26 - unsigned long fd_flags, 27 - unsigned long heap_flags); 26 + u32 fd_flags, 27 + u64 heap_flags); 28 28 }; 29 29 30 30 /**