Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2025-07-17' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next

drm-misc-next for 6.17:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:

- mode_config: Change fb_create prototype to pass the drm_format_info
and avoid redundant lookups in drivers
- sched: kunit improvements, memory leak fixes, reset handling
improvements
- tests: kunit EDID update

Driver Changes:

- amdgpu: Hibernation fixes, structure lifetime fixes
- nouveau: sched improvements
- sitronix: Add Sitronix ST7567 Support

- bridge:
- Make connector available to bridge detect hook

- panel:
- More refcounting changes
- New panels: BOE NE14QDM

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <mripard@redhat.com>
Link: https://lore.kernel.org/r/20250717-efficient-kudu-of-fantasy-ff95e0@houat

+1106 -646
+2
Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
··· 19 19 - const: samsung,atna33xc20 20 20 - items: 21 21 - enum: 22 + # Samsung 13" 3K (2880×1920 pixels) eDP AMOLED panel 23 + - samsung,atna30dw01 22 24 # Samsung 14" WQXGA+ (2880×1800 pixels) eDP AMOLED panel 23 25 - samsung,atna40yk20 24 26 # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
-6
Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml
··· 58 58 power-domains: 59 59 maxItems: 1 60 60 61 - "#address-cells": 62 - const: 1 63 - 64 - "#size-cells": 65 - const: 0 66 - 67 61 required: 68 62 - compatible 69 63 - clocks
+63
Documentation/devicetree/bindings/display/sitronix,st7567.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/sitronix,st7567.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Sitronix ST7567 Display Controller 8 + 9 + maintainers: 10 + - Javier Martinez Canillas <javierm@redhat.com> 11 + 12 + description: 13 + Sitronix ST7567 is a driver and controller for monochrome 14 + dot matrix LCD panels. 15 + 16 + allOf: 17 + - $ref: panel/panel-common.yaml# 18 + 19 + properties: 20 + compatible: 21 + const: sitronix,st7567 22 + 23 + reg: 24 + maxItems: 1 25 + 26 + width-mm: true 27 + height-mm: true 28 + panel-timing: true 29 + 30 + required: 31 + - compatible 32 + - reg 33 + - width-mm 34 + - height-mm 35 + - panel-timing 36 + 37 + additionalProperties: false 38 + 39 + examples: 40 + - | 41 + i2c { 42 + #address-cells = <1>; 43 + #size-cells = <0>; 44 + 45 + display@3f { 46 + compatible = "sitronix,st7567"; 47 + reg = <0x3f>; 48 + width-mm = <37>; 49 + height-mm = <27>; 50 + 51 + panel-timing { 52 + hactive = <128>; 53 + vactive = <64>; 54 + hback-porch = <0>; 55 + vback-porch = <0>; 56 + clock-frequency = <0>; 57 + hfront-porch = <0>; 58 + hsync-len = <0>; 59 + vfront-porch = <0>; 60 + vsync-len = <0>; 61 + }; 62 + }; 63 + };
+3 -3
Documentation/gpu/drm-uapi.rst
··· 447 447 complete wedging. 448 448 449 449 Task information 450 - --------------- 450 + ---------------- 451 451 452 452 The information about which application (if any) was involved in the device 453 453 wedging is useful for userspace if they want to notify the user about what ··· 460 460 461 461 The reliability of this information is driver and hardware specific, and should 462 462 be taken with a caution regarding it's precision. To have a big picture of what 463 - really happened, the devcoredump file provides should have much more detailed 464 - information about the device state and about the event. 463 + really happened, the devcoredump file provides much more detailed information 464 + about the device state and about the event. 465 465 466 466 Consumer prerequisites 467 467 ----------------------
+1
MAINTAINERS
··· 7835 7835 DRM DRIVER FOR SITRONIX ST7571 PANELS 7836 7836 M: Marcus Folkesson <marcus.folkesson@gmail.com> 7837 7837 S: Maintained 7838 + F: Documentation/devicetree/bindings/display/sitronix,st7567.yaml 7838 7839 F: Documentation/devicetree/bindings/display/sitronix,st7571.yaml 7839 7840 F: drivers/gpu/drm/sitronix/st7571-i2c.c 7840 7841
+1 -1
drivers/accel/amdxdna/aie2_ctx.c
··· 361 361 aie2_hwctx_restart(xdna, hwctx); 362 362 mutex_unlock(&xdna->dev_lock); 363 363 364 - return DRM_GPU_SCHED_STAT_NOMINAL; 364 + return DRM_GPU_SCHED_STAT_RESET; 365 365 } 366 366 367 367 static const struct drm_sched_backend_ops sched_ops = {
+14
drivers/base/power/main.c
··· 66 66 static DEFINE_MUTEX(async_wip_mtx); 67 67 static int async_error; 68 68 69 + /** 70 + * pm_hibernate_is_recovering - if recovering from hibernate due to error. 71 + * 72 + * Used to query if dev_pm_ops.thaw() is called for normal hibernation case or 73 + * recovering from some error. 74 + * 75 + * Return: true for error case, false for normal case. 76 + */ 77 + bool pm_hibernate_is_recovering(void) 78 + { 79 + return pm_transition.event == PM_EVENT_RECOVER; 80 + } 81 + EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering); 82 + 69 83 static const char *pm_verb(int event) 70 84 { 71 85 switch (event) {
+9 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 5021 5021 return 0; 5022 5022 5023 5023 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); 5024 - if (ret) 5024 + if (ret) { 5025 5025 dev_warn(adev->dev, "evicting device resources failed\n"); 5026 + return ret; 5027 + } 5028 + 5029 + if (adev->in_s4) { 5030 + ret = ttm_device_prepare_hibernation(&adev->mman.bdev); 5031 + if (ret) 5032 + dev_err(adev->dev, "prepare hibernation failed, %d\n", ret); 5033 + } 5026 5034 return ret; 5027 5035 } 5028 5036
+4 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 1196 1196 static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev, 1197 1197 struct amdgpu_framebuffer *rfb, 1198 1198 struct drm_file *file_priv, 1199 + const struct drm_format_info *info, 1199 1200 const struct drm_mode_fb_cmd2 *mode_cmd, 1200 1201 struct drm_gem_object *obj) 1201 1202 { 1202 1203 int ret; 1203 1204 1204 1205 rfb->base.obj[0] = obj; 1205 - drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd); 1206 + drm_helper_mode_fill_fb_struct(dev, &rfb->base, info, mode_cmd); 1206 1207 /* Verify that the modifier is supported. */ 1207 1208 if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format, 1208 1209 mode_cmd->modifier[0])) { ··· 1298 1297 struct drm_framebuffer * 1299 1298 amdgpu_display_user_framebuffer_create(struct drm_device *dev, 1300 1299 struct drm_file *file_priv, 1300 + const struct drm_format_info *info, 1301 1301 const struct drm_mode_fb_cmd2 *mode_cmd) 1302 1302 { 1303 1303 struct amdgpu_framebuffer *amdgpu_fb; ··· 1332 1330 } 1333 1331 1334 1332 ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv, 1335 - mode_cmd, obj); 1333 + info, mode_cmd, obj); 1336 1334 if (ret) { 1337 1335 kfree(amdgpu_fb); 1338 1336 drm_gem_object_put(obj);
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
··· 44 44 struct drm_framebuffer * 45 45 amdgpu_display_user_framebuffer_create(struct drm_device *dev, 46 46 struct drm_file *file_priv, 47 + const struct drm_format_info *info, 47 48 const struct drm_mode_fb_cmd2 *mode_cmd); 48 49 const struct drm_format_info * 49 50 amdgpu_lookup_format_info(u32 format, uint64_t modifier);
+17
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 2541 2541 if (amdgpu_ras_intr_triggered()) 2542 2542 return; 2543 2543 2544 + /* device maybe not resumed here, return immediately in this case */ 2545 + if (adev->in_s4 && adev->in_suspend) 2546 + return; 2547 + 2544 2548 /* if we are running in a VM, make sure the device 2545 2549 * torn down properly on reboot/shutdown. 2546 2550 * unfortunately we can't detect certain ··· 2560 2556 { 2561 2557 struct drm_device *drm_dev = dev_get_drvdata(dev); 2562 2558 struct amdgpu_device *adev = drm_to_adev(drm_dev); 2559 + 2560 + /* device maybe not resumed here, return immediately in this case */ 2561 + if (adev->in_s4 && adev->in_suspend) 2562 + return 0; 2563 2563 2564 2564 /* Return a positive number here so 2565 2565 * DPM_FLAG_SMART_SUSPEND works properly ··· 2663 2655 { 2664 2656 struct drm_device *drm_dev = dev_get_drvdata(dev); 2665 2657 2658 + /* do not resume device if it's normal hibernation */ 2659 + if (!pm_hibernate_is_recovering()) 2660 + return 0; 2661 + 2666 2662 return amdgpu_device_resume(drm_dev, true); 2667 2663 } 2668 2664 2669 2665 static int amdgpu_pmops_poweroff(struct device *dev) 2670 2666 { 2671 2667 struct drm_device *drm_dev = dev_get_drvdata(dev); 2668 + struct amdgpu_device *adev = drm_to_adev(drm_dev); 2669 + 2670 + /* device maybe not resumed here, return immediately in this case */ 2671 + if (adev->in_s4 && adev->in_suspend) 2672 + return 0; 2672 2673 2673 2674 return amdgpu_device_suspend(drm_dev, true); 2674 2675 }
+3 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 90 90 struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); 91 91 struct amdgpu_job *job = to_amdgpu_job(s_job); 92 92 struct drm_wedge_task_info *info = NULL; 93 - struct amdgpu_task_info *ti; 93 + struct amdgpu_task_info *ti = NULL; 94 94 struct amdgpu_device *adev = ring->adev; 95 95 int idx, r; 96 96 ··· 148 148 149 149 dma_fence_set_error(&s_job->s_fence->finished, -ETIME); 150 150 151 - amdgpu_vm_put_task_info(ti); 152 - 153 151 if (amdgpu_device_should_recover_gpu(ring->adev)) { 154 152 struct amdgpu_reset_context reset_context; 155 153 memset(&reset_context, 0, sizeof(reset_context)); ··· 173 175 } 174 176 175 177 exit: 178 + amdgpu_vm_put_task_info(ti); 176 179 drm_dev_exit(idx); 177 - return DRM_GPU_SCHED_STAT_NOMINAL; 180 + return DRM_GPU_SCHED_STAT_RESET; 178 181 } 179 182 180 183 int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+4 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
··· 521 521 } 522 522 523 523 queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid); 524 - if (!queue_name) 525 - return -ENOMEM; 524 + if (!queue_name) { 525 + r = -ENOMEM; 526 + goto unlock; 527 + } 526 528 527 529 #if defined(CONFIG_DEBUG_FS) 528 530 /* Queue dentry per client to hold MQD information */
+2 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
··· 92 92 MICRO_SWIZZLE_R = 3 93 93 }; 94 94 95 - const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 95 + const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier) 96 96 { 97 - return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); 97 + return amdgpu_lookup_format_info(pixel_format, modifier); 98 98 } 99 99 100 100 void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
··· 58 58 unsigned long possible_crtcs, 59 59 const struct dc_plane_cap *plane_cap); 60 60 61 - const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd); 61 + const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier); 62 62 63 63 void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, 64 64 bool *per_pixel_alpha, bool *pre_multiplied_alpha,
+2 -1
drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
··· 157 157 158 158 struct drm_framebuffer * 159 159 komeda_fb_create(struct drm_device *dev, struct drm_file *file, 160 + const struct drm_format_info *info, 160 161 const struct drm_mode_fb_cmd2 *mode_cmd) 161 162 { 162 163 struct komeda_dev *mdev = dev->dev_private; ··· 178 177 return ERR_PTR(-EINVAL); 179 178 } 180 179 181 - drm_helper_mode_fill_fb_struct(dev, &kfb->base, mode_cmd); 180 + drm_helper_mode_fill_fb_struct(dev, &kfb->base, info, mode_cmd); 182 181 183 182 if (kfb->base.modifier) 184 183 ret = komeda_fb_afbc_size_check(kfb, file, mode_cmd);
+1
drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
··· 37 37 38 38 struct drm_framebuffer * 39 39 komeda_fb_create(struct drm_device *dev, struct drm_file *file, 40 + const struct drm_format_info *info, 40 41 const struct drm_mode_fb_cmd2 *mode_cmd); 41 42 int komeda_fb_check_src_coords(const struct komeda_fb *kfb, 42 43 u32 src_x, u32 src_y, u32 src_w, u32 src_h);
+6 -6
drivers/gpu/drm/arm/malidp_drv.c
··· 306 306 static bool 307 307 malidp_verify_afbc_framebuffer_size(struct drm_device *dev, 308 308 struct drm_file *file, 309 + const struct drm_format_info *info, 309 310 const struct drm_mode_fb_cmd2 *mode_cmd) 310 311 { 311 312 int n_superblocks = 0; 312 - const struct drm_format_info *info; 313 313 struct drm_gem_object *objs = NULL; 314 314 u32 afbc_superblock_size = 0, afbc_superblock_height = 0; 315 315 u32 afbc_superblock_width = 0, afbc_size = 0; ··· 324 324 DRM_DEBUG_KMS("AFBC superblock size is not supported\n"); 325 325 return false; 326 326 } 327 - 328 - info = drm_get_format_info(dev, mode_cmd); 329 327 330 328 n_superblocks = (mode_cmd->width / afbc_superblock_width) * 331 329 (mode_cmd->height / afbc_superblock_height); ··· 364 366 365 367 static bool 366 368 malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file, 369 + const struct drm_format_info *info, 367 370 const struct drm_mode_fb_cmd2 *mode_cmd) 368 371 { 369 372 if (malidp_verify_afbc_framebuffer_caps(dev, mode_cmd)) 370 - return malidp_verify_afbc_framebuffer_size(dev, file, mode_cmd); 373 + return malidp_verify_afbc_framebuffer_size(dev, file, info, mode_cmd); 371 374 372 375 return false; 373 376 } 374 377 375 378 static struct drm_framebuffer * 376 379 malidp_fb_create(struct drm_device *dev, struct drm_file *file, 380 + const struct drm_format_info *info, 377 381 const struct drm_mode_fb_cmd2 *mode_cmd) 378 382 { 379 383 if (mode_cmd->modifier[0]) { 380 - if (!malidp_verify_afbc_framebuffer(dev, file, mode_cmd)) 384 + if (!malidp_verify_afbc_framebuffer(dev, file, info, mode_cmd)) 381 385 return ERR_PTR(-EINVAL); 382 386 } 383 387 384 - return drm_gem_fb_create(dev, file, mode_cmd); 388 + return drm_gem_fb_create(dev, file, info, mode_cmd); 385 389 } 386 390 387 391 static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
+7 -5
drivers/gpu/drm/armada/armada_fb.c
··· 18 18 }; 19 19 20 20 struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev, 21 - const struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj) 21 + const struct drm_format_info *info, 22 + const struct drm_mode_fb_cmd2 *mode, 23 + struct armada_gem_object *obj) 22 24 { 23 25 struct armada_framebuffer *dfb; 24 26 uint8_t format, config; ··· 66 64 dfb->mod = config; 67 65 dfb->fb.obj[0] = &obj->obj; 68 66 69 - drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode); 67 + drm_helper_mode_fill_fb_struct(dev, &dfb->fb, info, mode); 70 68 71 69 ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs); 72 70 if (ret) { ··· 86 84 } 87 85 88 86 struct drm_framebuffer *armada_fb_create(struct drm_device *dev, 89 - struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode) 87 + struct drm_file *dfile, const struct drm_format_info *info, 88 + const struct drm_mode_fb_cmd2 *mode) 90 89 { 91 - const struct drm_format_info *info = drm_get_format_info(dev, mode); 92 90 struct armada_gem_object *obj; 93 91 struct armada_framebuffer *dfb; 94 92 int ret; ··· 124 122 goto err_unref; 125 123 } 126 124 127 - dfb = armada_framebuffer_create(dev, mode, obj); 125 + dfb = armada_framebuffer_create(dev, info, mode, obj); 128 126 if (IS_ERR(dfb)) { 129 127 ret = PTR_ERR(dfb); 130 128 goto err;
+3 -1
drivers/gpu/drm/armada/armada_fb.h
··· 17 17 #define drm_fb_obj(fb) drm_to_armada_gem((fb)->obj[0]) 18 18 19 19 struct armada_framebuffer *armada_framebuffer_create(struct drm_device *, 20 + const struct drm_format_info *info, 20 21 const struct drm_mode_fb_cmd2 *, struct armada_gem_object *); 21 22 struct drm_framebuffer *armada_fb_create(struct drm_device *dev, 22 - struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode); 23 + struct drm_file *dfile, const struct drm_format_info *info, 24 + const struct drm_mode_fb_cmd2 *mode); 23 25 #endif
+4 -1
drivers/gpu/drm/armada/armada_fbdev.c
··· 78 78 return -ENOMEM; 79 79 } 80 80 81 - dfb = armada_framebuffer_create(dev, &mode, obj); 81 + dfb = armada_framebuffer_create(dev, 82 + drm_get_format_info(dev, mode.pixel_format, 83 + mode.modifier[0]), 84 + &mode, obj); 82 85 83 86 /* 84 87 * A reference is now held by the framebuffer object if
+8 -8
drivers/gpu/drm/bridge/adv7511/adv7511.h
··· 399 399 } 400 400 401 401 #ifdef CONFIG_DRM_I2C_ADV7511_CEC 402 - int adv7511_cec_init(struct drm_connector *connector, 403 - struct drm_bridge *bridge); 402 + int adv7511_cec_init(struct drm_bridge *bridge, 403 + struct drm_connector *connector); 404 404 int adv7511_cec_enable(struct drm_bridge *bridge, bool enable); 405 405 int adv7511_cec_log_addr(struct drm_bridge *bridge, u8 addr); 406 406 int adv7511_cec_transmit(struct drm_bridge *bridge, u8 attempts, ··· 424 424 int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv); 425 425 426 426 #ifdef CONFIG_DRM_I2C_ADV7511_AUDIO 427 - int adv7511_hdmi_audio_startup(struct drm_connector *connector, 428 - struct drm_bridge *bridge); 429 - void adv7511_hdmi_audio_shutdown(struct drm_connector *connector, 430 - struct drm_bridge *bridge); 431 - int adv7511_hdmi_audio_prepare(struct drm_connector *connector, 432 - struct drm_bridge *bridge, 427 + int adv7511_hdmi_audio_startup(struct drm_bridge *bridge, 428 + struct drm_connector *connector); 429 + void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge, 430 + struct drm_connector *connector); 431 + int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge, 432 + struct drm_connector *connector, 433 433 struct hdmi_codec_daifmt *fmt, 434 434 struct hdmi_codec_params *hparms); 435 435 #else /*CONFIG_DRM_I2C_ADV7511_AUDIO */
+6 -6
drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
··· 55 55 return 0; 56 56 } 57 57 58 - int adv7511_hdmi_audio_prepare(struct drm_connector *connector, 59 - struct drm_bridge *bridge, 58 + int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge, 59 + struct drm_connector *connector, 60 60 struct hdmi_codec_daifmt *fmt, 61 61 struct hdmi_codec_params *hparms) 62 62 { ··· 168 168 return 0; 169 169 } 170 170 171 - int adv7511_hdmi_audio_startup(struct drm_connector *connector, 172 - struct drm_bridge *bridge) 171 + int adv7511_hdmi_audio_startup(struct drm_bridge *bridge, 172 + struct drm_connector *connector) 173 173 { 174 174 struct adv7511 *adv7511 = bridge_to_adv7511(bridge); 175 175 ··· 206 206 return 0; 207 207 } 208 208 209 - void adv7511_hdmi_audio_shutdown(struct drm_connector *connector, 210 - struct drm_bridge *bridge) 209 + void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge, 210 + struct drm_connector *connector) 211 211 { 212 212 struct adv7511 *adv7511 = bridge_to_adv7511(bridge); 213 213
+2 -2
drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
··· 346 346 return 0; 347 347 } 348 348 349 - int adv7511_cec_init(struct drm_connector *connector, 350 - struct drm_bridge *bridge) 349 + int adv7511_cec_init(struct drm_bridge *bridge, 350 + struct drm_connector *connector) 351 351 { 352 352 struct adv7511 *adv7511 = bridge_to_adv7511(bridge); 353 353 struct device *dev = &adv7511->i2c_main->dev;
+2 -1
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
··· 864 864 return ret; 865 865 } 866 866 867 - static enum drm_connector_status adv7511_bridge_detect(struct drm_bridge *bridge) 867 + static enum drm_connector_status 868 + adv7511_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 868 869 { 869 870 struct adv7511 *adv = bridge_to_adv7511(bridge); 870 871
+1 -1
drivers/gpu/drm/bridge/analogix/anx7625.c
··· 2448 2448 enum drm_connector_status status); 2449 2449 2450 2450 static enum drm_connector_status 2451 - anx7625_bridge_detect(struct drm_bridge *bridge) 2451 + anx7625_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 2452 2452 { 2453 2453 struct anx7625_data *ctx = bridge_to_anx7625(bridge); 2454 2454 struct device *dev = ctx->dev;
+2 -1
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
··· 2143 2143 return 0; 2144 2144 } 2145 2145 2146 - static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge) 2146 + static enum drm_connector_status 2147 + cdns_mhdp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 2147 2148 { 2148 2149 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); 2149 2150
+1 -1
drivers/gpu/drm/bridge/chrontel-ch7033.c
··· 215 215 { 216 216 struct ch7033_priv *priv = conn_to_ch7033_priv(connector); 217 217 218 - return drm_bridge_detect(priv->next_bridge); 218 + return drm_bridge_detect(priv->next_bridge, connector); 219 219 } 220 220 221 221 static const struct drm_connector_funcs ch7033_connector_funcs = {
+8 -3
drivers/gpu/drm/bridge/display-connector.c
··· 40 40 return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL; 41 41 } 42 42 43 - static enum drm_connector_status 44 - display_connector_detect(struct drm_bridge *bridge) 43 + static enum drm_connector_status display_connector_detect(struct drm_bridge *bridge) 45 44 { 46 45 struct display_connector *conn = to_display_connector(bridge); 47 46 ··· 79 80 */ 80 81 return connector_status_unknown; 81 82 } 83 + } 84 + 85 + static enum drm_connector_status 86 + display_connector_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 87 + { 88 + return display_connector_detect(bridge); 82 89 } 83 90 84 91 static const struct drm_edid *display_connector_edid_read(struct drm_bridge *bridge, ··· 177 172 178 173 static const struct drm_bridge_funcs display_connector_bridge_funcs = { 179 174 .attach = display_connector_attach, 180 - .detect = display_connector_detect, 175 + .detect = display_connector_bridge_detect, 181 176 .edid_read = display_connector_edid_read, 182 177 .atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts, 183 178 .atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts,
+2 -1
drivers/gpu/drm/bridge/ite-it6263.c
··· 693 693 return 0; 694 694 } 695 695 696 - static enum drm_connector_status it6263_bridge_detect(struct drm_bridge *bridge) 696 + static enum drm_connector_status 697 + it6263_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 697 698 { 698 699 struct it6263 *it = bridge_to_it6263(bridge); 699 700
+1 -1
drivers/gpu/drm/bridge/ite-it6505.c
··· 3238 3238 } 3239 3239 3240 3240 static enum drm_connector_status 3241 - it6505_bridge_detect(struct drm_bridge *bridge) 3241 + it6505_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 3242 3242 { 3243 3243 struct it6505 *it6505 = bridge_to_it6505(bridge); 3244 3244
+2 -1
drivers/gpu/drm/bridge/ite-it66121.c
··· 843 843 return MODE_OK; 844 844 } 845 845 846 - static enum drm_connector_status it66121_bridge_detect(struct drm_bridge *bridge) 846 + static enum drm_connector_status 847 + it66121_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 847 848 { 848 849 struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); 849 850
+3 -3
drivers/gpu/drm/bridge/lontium-lt8912b.c
··· 408 408 struct lt8912 *lt = connector_to_lt8912(connector); 409 409 410 410 if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT) 411 - return drm_bridge_detect(lt->hdmi_port); 411 + return drm_bridge_detect(lt->hdmi_port, connector); 412 412 413 413 return lt8912_check_cable_status(lt); 414 414 } ··· 607 607 } 608 608 609 609 static enum drm_connector_status 610 - lt8912_bridge_detect(struct drm_bridge *bridge) 610 + lt8912_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 611 611 { 612 612 struct lt8912 *lt = bridge_to_lt8912(bridge); 613 613 614 614 if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT) 615 - return drm_bridge_detect(lt->hdmi_port); 615 + return drm_bridge_detect(lt->hdmi_port, connector); 616 616 617 617 return lt8912_check_cable_status(lt); 618 618 }
+8 -7
drivers/gpu/drm/bridge/lontium-lt9611.c
··· 543 543 return 0; 544 544 } 545 545 546 - static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge) 546 + static enum drm_connector_status 547 + lt9611_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 547 548 { 548 549 struct lt9611 *lt9611 = bridge_to_lt9611(bridge); 549 550 unsigned int reg_val = 0; ··· 937 936 return MODE_OK; 938 937 } 939 938 940 - static int lt9611_hdmi_audio_startup(struct drm_connector *connector, 941 - struct drm_bridge *bridge) 939 + static int lt9611_hdmi_audio_startup(struct drm_bridge *bridge, 940 + struct drm_connector *connector) 942 941 { 943 942 struct lt9611 *lt9611 = bridge_to_lt9611(bridge); 944 943 ··· 953 952 return 0; 954 953 } 955 954 956 - static int lt9611_hdmi_audio_prepare(struct drm_connector *connector, 957 - struct drm_bridge *bridge, 955 + static int lt9611_hdmi_audio_prepare(struct drm_bridge *bridge, 956 + struct drm_connector *connector, 958 957 struct hdmi_codec_daifmt *fmt, 959 958 struct hdmi_codec_params *hparms) 960 959 { ··· 975 974 &hparms->cea); 976 975 } 977 976 978 - static void lt9611_hdmi_audio_shutdown(struct drm_connector *connector, 979 - struct drm_bridge *bridge) 977 + static void lt9611_hdmi_audio_shutdown(struct drm_bridge *bridge, 978 + struct drm_connector *connector) 980 979 { 981 980 struct lt9611 *lt9611 = bridge_to_lt9611(bridge); 982 981
+2 -1
drivers/gpu/drm/bridge/lontium-lt9611uxc.c
··· 353 353 lt9611uxc_unlock(lt9611uxc); 354 354 } 355 355 356 - static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *bridge) 356 + static enum drm_connector_status 357 + lt9611uxc_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 357 358 { 358 359 struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); 359 360 unsigned int reg_val = 0;
+3 -2
drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
··· 120 120 .get_modes = ge_b850v3_lvds_get_modes, 121 121 }; 122 122 123 - static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge) 123 + static enum drm_connector_status 124 + ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 124 125 { 125 126 struct i2c_client *stdp4028_i2c = 126 127 ge_b850v3_lvds_ptr->stdp4028_i2c; ··· 142 141 static enum drm_connector_status ge_b850v3_lvds_detect(struct drm_connector *connector, 143 142 bool force) 144 143 { 145 - return ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge); 144 + return ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge, connector); 146 145 } 147 146 148 147 static const struct drm_connector_funcs ge_b850v3_lvds_connector_funcs = {
+2 -1
drivers/gpu/drm/bridge/sii902x.c
··· 458 458 return 0; 459 459 } 460 460 461 - static enum drm_connector_status sii902x_bridge_detect(struct drm_bridge *bridge) 461 + static enum drm_connector_status 462 + sii902x_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 462 463 { 463 464 struct sii902x *sii902x = bridge_to_sii902x(bridge); 464 465
+1 -1
drivers/gpu/drm/bridge/simple-bridge.c
··· 90 90 { 91 91 struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector); 92 92 93 - return drm_bridge_detect(sbridge->next_bridge); 93 + return drm_bridge_detect(sbridge->next_bridge, connector); 94 94 } 95 95 96 96 static const struct drm_connector_funcs simple_bridge_con_funcs = {
+7 -7
drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
··· 440 440 dw_hdmi_qp_set_cts_n(hdmi, cts, n); 441 441 } 442 442 443 - static int dw_hdmi_qp_audio_enable(struct drm_connector *connector, 444 - struct drm_bridge *bridge) 443 + static int dw_hdmi_qp_audio_enable(struct drm_bridge *bridge, 444 + struct drm_connector *connector) 445 445 { 446 446 struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); 447 447 ··· 451 451 return 0; 452 452 } 453 453 454 - static int dw_hdmi_qp_audio_prepare(struct drm_connector *connector, 455 - struct drm_bridge *bridge, 454 + static int dw_hdmi_qp_audio_prepare(struct drm_bridge *bridge, 455 + struct drm_connector *connector, 456 456 struct hdmi_codec_daifmt *fmt, 457 457 struct hdmi_codec_params *hparms) 458 458 { ··· 497 497 AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE); 498 498 } 499 499 500 - static void dw_hdmi_qp_audio_disable(struct drm_connector *connector, 501 - struct drm_bridge *bridge) 500 + static void dw_hdmi_qp_audio_disable(struct drm_bridge *bridge, 501 + struct drm_connector *connector) 502 502 { 503 503 struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); 504 504 ··· 876 876 } 877 877 878 878 static enum drm_connector_status 879 - dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge) 879 + dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 880 880 { 881 881 struct dw_hdmi_qp *hdmi = bridge->driver_private; 882 882
+2 -1
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
··· 2978 2978 mutex_unlock(&hdmi->mutex); 2979 2979 } 2980 2980 2981 - static enum drm_connector_status dw_hdmi_bridge_detect(struct drm_bridge *bridge) 2981 + static enum drm_connector_status 2982 + dw_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 2982 2983 { 2983 2984 struct dw_hdmi *hdmi = bridge->driver_private; 2984 2985
+3 -2
drivers/gpu/drm/bridge/tc358767.c
··· 1760 1760 .get_modes = tc_connector_get_modes, 1761 1761 }; 1762 1762 1763 - static enum drm_connector_status tc_bridge_detect(struct drm_bridge *bridge) 1763 + static enum drm_connector_status 1764 + tc_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 1764 1765 { 1765 1766 struct tc_data *tc = bridge_to_tc(bridge); 1766 1767 bool conn; ··· 1786 1785 struct tc_data *tc = connector_to_tc(connector); 1787 1786 1788 1787 if (tc->hpd_pin >= 0) 1789 - return tc_bridge_detect(&tc->bridge); 1788 + return tc_bridge_detect(&tc->bridge, connector); 1790 1789 1791 1790 if (tc->panel_bridge) 1792 1791 return connector_status_connected;
+2 -1
drivers/gpu/drm/bridge/ti-sn65dsi86.c
··· 1155 1155 pm_runtime_put_sync(pdata->dev); 1156 1156 } 1157 1157 1158 - static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge) 1158 + static enum drm_connector_status 1159 + ti_sn_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 1159 1160 { 1160 1161 struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); 1161 1162 int val = 0;
+1 -1
drivers/gpu/drm/bridge/ti-tfp410.c
··· 89 89 { 90 90 struct tfp410 *dvi = drm_connector_to_tfp410(connector); 91 91 92 - return drm_bridge_detect(dvi->next_bridge); 92 + return drm_bridge_detect(dvi->next_bridge, connector); 93 93 } 94 94 95 95 static const struct drm_connector_funcs tfp410_con_funcs = {
+7 -1
drivers/gpu/drm/bridge/ti-tpd12s015.c
··· 77 77 return connector_status_disconnected; 78 78 } 79 79 80 + static enum drm_connector_status 81 + tpd12s015_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 82 + { 83 + return tpd12s015_detect(bridge); 84 + } 85 + 80 86 static void tpd12s015_hpd_enable(struct drm_bridge *bridge) 81 87 { 82 88 struct tpd12s015_device *tpd = to_tpd12s015(bridge); ··· 100 94 static const struct drm_bridge_funcs tpd12s015_bridge_funcs = { 101 95 .attach = tpd12s015_attach, 102 96 .detach = tpd12s015_detach, 103 - .detect = tpd12s015_detect, 97 + .detect = tpd12s015_bridge_detect, 104 98 .hpd_enable = tpd12s015_hpd_enable, 105 99 .hpd_disable = tpd12s015_hpd_disable, 106 100 };
+10 -10
drivers/gpu/drm/display/drm_bridge_connector.c
··· 210 210 enum drm_connector_status status; 211 211 212 212 if (detect) { 213 - status = detect->funcs->detect(detect); 213 + status = detect->funcs->detect(detect, connector); 214 214 215 215 if (hdmi) 216 216 drm_atomic_helper_connector_hdmi_hotplug(connector, status); ··· 463 463 if (!bridge->funcs->hdmi_audio_startup) 464 464 return 0; 465 465 466 - return bridge->funcs->hdmi_audio_startup(connector, bridge); 466 + return bridge->funcs->hdmi_audio_startup(bridge, connector); 467 467 } 468 468 469 469 if (bridge_connector->bridge_dp_audio) { ··· 472 472 if (!bridge->funcs->dp_audio_startup) 473 473 return 0; 474 474 475 - return bridge->funcs->dp_audio_startup(connector, bridge); 475 + return bridge->funcs->dp_audio_startup(bridge, connector); 476 476 } 477 477 478 478 return -EINVAL; ··· 489 489 if (bridge_connector->bridge_hdmi_audio) { 490 490 bridge = bridge_connector->bridge_hdmi_audio; 491 491 492 - return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms); 492 + return bridge->funcs->hdmi_audio_prepare(bridge, connector, fmt, hparms); 493 493 } 494 494 495 495 if (bridge_connector->bridge_dp_audio) { 496 496 bridge = bridge_connector->bridge_dp_audio; 497 497 498 - return bridge->funcs->dp_audio_prepare(connector, bridge, fmt, hparms); 498 + return bridge->funcs->dp_audio_prepare(bridge, connector, fmt, hparms); 499 499 } 500 500 501 501 return -EINVAL; ··· 509 509 510 510 if (bridge_connector->bridge_hdmi_audio) { 511 511 bridge = bridge_connector->bridge_hdmi_audio; 512 - bridge->funcs->hdmi_audio_shutdown(connector, bridge); 512 + bridge->funcs->hdmi_audio_shutdown(bridge, connector); 513 513 } 514 514 515 515 if (bridge_connector->bridge_dp_audio) { 516 516 bridge = bridge_connector->bridge_dp_audio; 517 - bridge->funcs->dp_audio_shutdown(connector, bridge); 517 + bridge->funcs->dp_audio_shutdown(bridge, connector); 518 518 } 519 519 } 520 520 ··· 531 531 if (!bridge->funcs->hdmi_audio_mute_stream) 532 532 return -ENOTSUPP; 533 533 
534 - return bridge->funcs->hdmi_audio_mute_stream(connector, bridge, 534 + return bridge->funcs->hdmi_audio_mute_stream(bridge, connector, 535 535 enable, direction); 536 536 } 537 537 ··· 541 541 if (!bridge->funcs->dp_audio_mute_stream) 542 542 return -ENOTSUPP; 543 543 544 - return bridge->funcs->dp_audio_mute_stream(connector, bridge, 544 + return bridge->funcs->dp_audio_mute_stream(bridge, connector, 545 545 enable, direction); 546 546 } 547 547 ··· 604 604 if (!bridge->funcs->hdmi_cec_init) 605 605 return 0; 606 606 607 - return bridge->funcs->hdmi_cec_init(connector, bridge); 607 + return bridge->funcs->hdmi_cec_init(bridge, connector); 608 608 } 609 609 610 610 static const struct drm_connector_hdmi_cec_funcs drm_bridge_connector_hdmi_cec_funcs = {
+3 -2
drivers/gpu/drm/drm_bridge.c
··· 1237 1237 * The detection status on success, or connector_status_unknown if the bridge 1238 1238 * doesn't support output detection. 1239 1239 */ 1240 - enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge) 1240 + enum drm_connector_status 1241 + drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 1241 1242 { 1242 1243 if (!(bridge->ops & DRM_BRIDGE_OP_DETECT)) 1243 1244 return connector_status_unknown; 1244 1245 1245 - return bridge->funcs->detect(bridge); 1246 + return bridge->funcs->detect(bridge, connector); 1246 1247 } 1247 1248 EXPORT_SYMBOL_GPL(drm_bridge_detect); 1248 1249
+6 -4
drivers/gpu/drm/drm_fourcc.c
··· 417 417 /** 418 418 * drm_get_format_info - query information for a given framebuffer configuration 419 419 * @dev: DRM device 420 - * @mode_cmd: metadata from the userspace fb creation request 420 + * @pixel_format: pixel format (DRM_FORMAT_*) 421 + * @modifier: modifier 421 422 * 422 423 * Returns: 423 424 * The instance of struct drm_format_info that describes the pixel format, or ··· 426 425 */ 427 426 const struct drm_format_info * 428 427 drm_get_format_info(struct drm_device *dev, 429 - const struct drm_mode_fb_cmd2 *mode_cmd) 428 + u32 pixel_format, u64 modifier) 430 429 { 431 430 const struct drm_format_info *info = NULL; 432 431 433 432 if (dev->mode_config.funcs->get_format_info) 434 - info = dev->mode_config.funcs->get_format_info(mode_cmd); 433 + info = dev->mode_config.funcs->get_format_info(pixel_format, 434 + modifier); 435 435 436 436 if (!info) 437 - info = drm_format_info(mode_cmd->pixel_format); 437 + info = drm_format_info(pixel_format); 438 438 439 439 return info; 440 440 }
+14 -13
drivers/gpu/drm/drm_framebuffer.c
··· 153 153 } 154 154 155 155 static int framebuffer_check(struct drm_device *dev, 156 + const struct drm_format_info *info, 156 157 const struct drm_mode_fb_cmd2 *r) 157 158 { 158 - const struct drm_format_info *info; 159 159 int i; 160 - 161 - /* check if the format is supported at all */ 162 - if (!__drm_format_info(r->pixel_format)) { 163 - drm_dbg_kms(dev, "bad framebuffer format %p4cc\n", 164 - &r->pixel_format); 165 - return -EINVAL; 166 - } 167 160 168 161 if (r->width == 0) { 169 162 drm_dbg_kms(dev, "bad framebuffer width %u\n", r->width); ··· 167 174 drm_dbg_kms(dev, "bad framebuffer height %u\n", r->height); 168 175 return -EINVAL; 169 176 } 170 - 171 - /* now let the driver pick its own format info */ 172 - info = drm_get_format_info(dev, r); 173 177 174 178 for (i = 0; i < info->num_planes; i++) { 175 179 unsigned int width = drm_format_info_plane_width(info, r->width, i); ··· 262 272 struct drm_file *file_priv) 263 273 { 264 274 struct drm_mode_config *config = &dev->mode_config; 275 + const struct drm_format_info *info; 265 276 struct drm_framebuffer *fb; 266 277 int ret; 267 278 ··· 288 297 return ERR_PTR(-EINVAL); 289 298 } 290 299 291 - ret = framebuffer_check(dev, r); 300 + /* check if the format is supported at all */ 301 + if (!__drm_format_info(r->pixel_format)) { 302 + drm_dbg_kms(dev, "bad framebuffer format %p4cc\n", 303 + &r->pixel_format); 304 + return ERR_PTR(-EINVAL); 305 + } 306 + 307 + /* now let the driver pick its own format info */ 308 + info = drm_get_format_info(dev, r->pixel_format, r->modifier[0]); 309 + 310 + ret = framebuffer_check(dev, info, r); 292 311 if (ret) 293 312 return ERR_PTR(ret); 294 313 295 - fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); 314 + fb = dev->mode_config.funcs->fb_create(dev, file_priv, info, r); 296 315 if (IS_ERR(fb)) { 297 316 drm_dbg_kms(dev, "could not create framebuffer\n"); 298 317 return fb;
+20 -22
drivers/gpu/drm/drm_gem_framebuffer_helper.c
··· 68 68 static int 69 69 drm_gem_fb_init(struct drm_device *dev, 70 70 struct drm_framebuffer *fb, 71 + const struct drm_format_info *info, 71 72 const struct drm_mode_fb_cmd2 *mode_cmd, 72 73 struct drm_gem_object **obj, unsigned int num_planes, 73 74 const struct drm_framebuffer_funcs *funcs) ··· 76 75 unsigned int i; 77 76 int ret; 78 77 79 - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); 78 + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); 80 79 81 80 for (i = 0; i < num_planes; i++) 82 81 fb->obj[i] = obj[i]; ··· 137 136 * @dev: DRM device 138 137 * @fb: framebuffer object 139 138 * @file: DRM file that holds the GEM handle(s) backing the framebuffer 139 + * @info: pixel format information 140 140 * @mode_cmd: Metadata from the userspace framebuffer creation request 141 141 * @funcs: vtable to be used for the new framebuffer object 142 142 * ··· 154 152 int drm_gem_fb_init_with_funcs(struct drm_device *dev, 155 153 struct drm_framebuffer *fb, 156 154 struct drm_file *file, 155 + const struct drm_format_info *info, 157 156 const struct drm_mode_fb_cmd2 *mode_cmd, 158 157 const struct drm_framebuffer_funcs *funcs) 159 158 { 160 - const struct drm_format_info *info; 161 159 struct drm_gem_object *objs[DRM_FORMAT_MAX_PLANES]; 162 160 unsigned int i; 163 161 int ret; 164 - 165 - info = drm_get_format_info(dev, mode_cmd); 166 - if (!info) { 167 - drm_dbg_kms(dev, "Failed to get FB format info\n"); 168 - return -EINVAL; 169 - } 170 162 171 163 if (drm_drv_uses_atomic_modeset(dev) && 172 164 !drm_any_plane_has_format(dev, mode_cmd->pixel_format, ··· 196 200 } 197 201 } 198 202 199 - ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs); 203 + ret = drm_gem_fb_init(dev, fb, info, mode_cmd, objs, i, funcs); 200 204 if (ret) 201 205 goto err_gem_object_put; 202 206 ··· 217 221 * callback 218 222 * @dev: DRM device 219 223 * @file: DRM file that holds the GEM handle(s) backing the framebuffer 224 + * @info: pixel format information 220 225 * @mode_cmd: 
Metadata from the userspace framebuffer creation request 221 226 * @funcs: vtable to be used for the new framebuffer object 222 227 * ··· 230 233 */ 231 234 struct drm_framebuffer * 232 235 drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, 236 + const struct drm_format_info *info, 233 237 const struct drm_mode_fb_cmd2 *mode_cmd, 234 238 const struct drm_framebuffer_funcs *funcs) 235 239 { ··· 241 243 if (!fb) 242 244 return ERR_PTR(-ENOMEM); 243 245 244 - ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs); 246 + ret = drm_gem_fb_init_with_funcs(dev, fb, file, info, mode_cmd, funcs); 245 247 if (ret) { 246 248 kfree(fb); 247 249 return ERR_PTR(ret); ··· 261 263 * &drm_mode_config_funcs.fb_create callback 262 264 * @dev: DRM device 263 265 * @file: DRM file that holds the GEM handle(s) backing the framebuffer 266 + * @info: pixel format information 264 267 * @mode_cmd: Metadata from the userspace framebuffer creation request 265 268 * 266 269 * This function creates a new framebuffer object described by ··· 281 282 */ 282 283 struct drm_framebuffer * 283 284 drm_gem_fb_create(struct drm_device *dev, struct drm_file *file, 285 + const struct drm_format_info *info, 284 286 const struct drm_mode_fb_cmd2 *mode_cmd) 285 287 { 286 - return drm_gem_fb_create_with_funcs(dev, file, mode_cmd, 288 + return drm_gem_fb_create_with_funcs(dev, file, info, mode_cmd, 287 289 &drm_gem_fb_funcs); 288 290 } 289 291 EXPORT_SYMBOL_GPL(drm_gem_fb_create); ··· 300 300 * &drm_mode_config_funcs.fb_create callback 301 301 * @dev: DRM device 302 302 * @file: DRM file that holds the GEM handle(s) backing the framebuffer 303 + * @info: pixel format information 303 304 * @mode_cmd: Metadata from the userspace framebuffer creation request 304 305 * 305 306 * This function creates a new framebuffer object described by ··· 321 320 */ 322 321 struct drm_framebuffer * 323 322 drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, 323 + const 
struct drm_format_info *info, 324 324 const struct drm_mode_fb_cmd2 *mode_cmd) 325 325 { 326 - return drm_gem_fb_create_with_funcs(dev, file, mode_cmd, 326 + return drm_gem_fb_create_with_funcs(dev, file, info, mode_cmd, 327 327 &drm_gem_fb_funcs_dirtyfb); 328 328 } 329 329 EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty); ··· 500 498 // TODO Drop this function and replace by drm_format_info_bpp() once all 501 499 // DRM_FORMAT_* provide proper block info in drivers/gpu/drm/drm_fourcc.c 502 500 static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev, 501 + const struct drm_format_info *info, 503 502 const struct drm_mode_fb_cmd2 *mode_cmd) 504 503 { 505 - const struct drm_format_info *info; 506 - 507 - info = drm_get_format_info(dev, mode_cmd); 508 - 509 504 switch (info->format) { 510 505 case DRM_FORMAT_YUV420_8BIT: 511 506 return 12; ··· 516 517 } 517 518 518 519 static int drm_gem_afbc_min_size(struct drm_device *dev, 520 + const struct drm_format_info *info, 519 521 const struct drm_mode_fb_cmd2 *mode_cmd, 520 522 struct drm_afbc_framebuffer *afbc_fb) 521 523 { ··· 557 557 afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment); 558 558 afbc_fb->offset = mode_cmd->offsets[0]; 559 559 560 - bpp = drm_gem_afbc_get_bpp(dev, mode_cmd); 560 + bpp = drm_gem_afbc_get_bpp(dev, info, mode_cmd); 561 561 if (!bpp) { 562 562 drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp); 563 563 return -EINVAL; ··· 579 579 * 580 580 * @dev: DRM device 581 581 * @afbc_fb: afbc-specific framebuffer 582 + * @info: pixel format information 582 583 * @mode_cmd: Metadata from the userspace framebuffer creation request 583 584 * @afbc_fb: afbc framebuffer 584 585 * ··· 593 592 * Zero on success or a negative error value on failure. 
594 593 */ 595 594 int drm_gem_fb_afbc_init(struct drm_device *dev, 595 + const struct drm_format_info *info, 596 596 const struct drm_mode_fb_cmd2 *mode_cmd, 597 597 struct drm_afbc_framebuffer *afbc_fb) 598 598 { 599 - const struct drm_format_info *info; 600 599 struct drm_gem_object **objs; 601 600 int ret; 602 601 603 602 objs = afbc_fb->base.obj; 604 - info = drm_get_format_info(dev, mode_cmd); 605 - if (!info) 606 - return -EINVAL; 607 603 608 - ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb); 604 + ret = drm_gem_afbc_min_size(dev, info, mode_cmd, afbc_fb); 609 605 if (ret < 0) 610 606 return ret; 611 607
+3 -1
drivers/gpu/drm/drm_modeset_helper.c
··· 74 74 * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata 75 75 * @dev: DRM device 76 76 * @fb: drm_framebuffer object to fill out 77 + * @info: pixel format information 77 78 * @mode_cmd: metadata from the userspace fb creation request 78 79 * 79 80 * This helper can be used in a drivers fb_create callback to pre-fill the fb's ··· 82 81 */ 83 82 void drm_helper_mode_fill_fb_struct(struct drm_device *dev, 84 83 struct drm_framebuffer *fb, 84 + const struct drm_format_info *info, 85 85 const struct drm_mode_fb_cmd2 *mode_cmd) 86 86 { 87 87 int i; 88 88 89 89 fb->dev = dev; 90 - fb->format = drm_get_format_info(dev, mode_cmd); 90 + fb->format = info; 91 91 fb->width = mode_cmd->width; 92 92 fb->height = mode_cmd->height; 93 93 for (i = 0; i < 4; i++) {
+5 -9
drivers/gpu/drm/etnaviv/etnaviv_sched.c
··· 40 40 int change; 41 41 42 42 /* 43 - * If the GPU managed to complete this jobs fence, the timout is 44 - * spurious. Bail out. 43 + * If the GPU managed to complete this jobs fence, the timeout has 44 + * fired before free-job worker. The timeout is spurious, so bail out. 45 45 */ 46 46 if (dma_fence_is_signaled(submit->out_fence)) 47 - goto out_no_timeout; 47 + return DRM_GPU_SCHED_STAT_NO_HANG; 48 48 49 49 /* 50 50 * If the GPU is still making forward progress on the front-end (which ··· 70 70 gpu->hangcheck_dma_addr = dma_addr; 71 71 gpu->hangcheck_primid = primid; 72 72 gpu->hangcheck_fence = gpu->completed_fence; 73 - goto out_no_timeout; 73 + return DRM_GPU_SCHED_STAT_NO_HANG; 74 74 } 75 75 76 76 /* block scheduler */ ··· 86 86 drm_sched_resubmit_jobs(&gpu->sched); 87 87 88 88 drm_sched_start(&gpu->sched, 0); 89 - return DRM_GPU_SCHED_STAT_NOMINAL; 90 - 91 - out_no_timeout: 92 - list_add(&sched_job->list, &sched_job->sched->pending_list); 93 - return DRM_GPU_SCHED_STAT_NOMINAL; 89 + return DRM_GPU_SCHED_STAT_RESET; 94 90 } 95 91 96 92 static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
+4 -3
drivers/gpu/drm/exynos/exynos_drm_fb.c
··· 56 56 57 57 struct drm_framebuffer * 58 58 exynos_drm_framebuffer_init(struct drm_device *dev, 59 + const struct drm_format_info *info, 59 60 const struct drm_mode_fb_cmd2 *mode_cmd, 60 61 struct exynos_drm_gem **exynos_gem, 61 62 int count) ··· 77 76 fb->obj[i] = &exynos_gem[i]->base; 78 77 } 79 78 80 - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); 79 + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); 81 80 82 81 ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs); 83 82 if (ret < 0) { ··· 95 94 96 95 static struct drm_framebuffer * 97 96 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, 97 + const struct drm_format_info *info, 98 98 const struct drm_mode_fb_cmd2 *mode_cmd) 99 99 { 100 - const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd); 101 100 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; 102 101 struct drm_framebuffer *fb; 103 102 int i; ··· 125 124 } 126 125 } 127 126 128 - fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i); 127 + fb = exynos_drm_framebuffer_init(dev, info, mode_cmd, exynos_gem, i); 129 128 if (IS_ERR(fb)) { 130 129 ret = PTR_ERR(fb); 131 130 goto err;
+1
drivers/gpu/drm/exynos/exynos_drm_fb.h
··· 14 14 15 15 struct drm_framebuffer * 16 16 exynos_drm_framebuffer_init(struct drm_device *dev, 17 + const struct drm_format_info *info, 17 18 const struct drm_mode_fb_cmd2 *mode_cmd, 18 19 struct exynos_drm_gem **exynos_gem, 19 20 int count);
+4 -1
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
··· 116 116 return PTR_ERR(exynos_gem); 117 117 118 118 helper->fb = 119 - exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1); 119 + exynos_drm_framebuffer_init(dev, 120 + drm_get_format_info(dev, mode_cmd.pixel_format, 121 + mode_cmd.modifier[0]), 122 + &mode_cmd, &exynos_gem, 1); 120 123 if (IS_ERR(helper->fb)) { 121 124 DRM_DEV_ERROR(dev->dev, "failed to create drm framebuffer.\n"); 122 125 ret = PTR_ERR(helper->fb);
+4 -1
drivers/gpu/drm/gma500/fbdev.c
··· 203 203 return PTR_ERR(backing); 204 204 obj = &backing->base; 205 205 206 - fb = psb_framebuffer_create(dev, &mode_cmd, obj); 206 + fb = psb_framebuffer_create(dev, 207 + drm_get_format_info(dev, mode_cmd.pixel_format, 208 + mode_cmd.modifier[0]), 209 + &mode_cmd, obj); 207 210 if (IS_ERR(fb)) { 208 211 ret = PTR_ERR(fb); 209 212 goto err_drm_gem_object_put;
+8 -6
drivers/gpu/drm/gma500/framebuffer.c
··· 29 29 */ 30 30 static int psb_framebuffer_init(struct drm_device *dev, 31 31 struct drm_framebuffer *fb, 32 + const struct drm_format_info *info, 32 33 const struct drm_mode_fb_cmd2 *mode_cmd, 33 34 struct drm_gem_object *obj) 34 35 { 35 - const struct drm_format_info *info; 36 36 int ret; 37 37 38 38 /* 39 39 * Reject unknown formats, YUV formats, and formats with more than 40 40 * 4 bytes per pixel. 41 41 */ 42 - info = drm_get_format_info(dev, mode_cmd); 43 - if (!info || !info->depth || info->cpp[0] > 4) 42 + if (!info->depth || info->cpp[0] > 4) 44 43 return -EINVAL; 45 44 46 45 if (mode_cmd->pitches[0] & 63) 47 46 return -EINVAL; 48 47 49 - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); 48 + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); 50 49 fb->obj[0] = obj; 51 50 ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs); 52 51 if (ret) { ··· 58 59 /** 59 60 * psb_framebuffer_create - create a framebuffer backed by gt 60 61 * @dev: our DRM device 62 + * @info: pixel format information 61 63 * @mode_cmd: the description of the requested mode 62 64 * @obj: the backing object 63 65 * ··· 68 68 * TODO: review object references 69 69 */ 70 70 struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev, 71 + const struct drm_format_info *info, 71 72 const struct drm_mode_fb_cmd2 *mode_cmd, 72 73 struct drm_gem_object *obj) 73 74 { ··· 79 78 if (!fb) 80 79 return ERR_PTR(-ENOMEM); 81 80 82 - ret = psb_framebuffer_init(dev, fb, mode_cmd, obj); 81 + ret = psb_framebuffer_init(dev, fb, info, mode_cmd, obj); 83 82 if (ret) { 84 83 kfree(fb); 85 84 return ERR_PTR(ret); ··· 97 96 */ 98 97 static struct drm_framebuffer *psb_user_framebuffer_create 99 98 (struct drm_device *dev, struct drm_file *filp, 99 + const struct drm_format_info *info, 100 100 const struct drm_mode_fb_cmd2 *cmd) 101 101 { 102 102 struct drm_gem_object *obj; ··· 112 110 return ERR_PTR(-ENOENT); 113 111 114 112 /* Let the core code do all the work */ 115 - fb = 
psb_framebuffer_create(dev, cmd, obj); 113 + fb = psb_framebuffer_create(dev, info, cmd, obj); 116 114 if (IS_ERR(fb)) 117 115 drm_gem_object_put(obj); 118 116
+1
drivers/gpu/drm/gma500/psb_drv.h
··· 594 594 595 595 /* framebuffer */ 596 596 struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev, 597 + const struct drm_format_info *info, 597 598 const struct drm_mode_fb_cmd2 *mode_cmd, 598 599 struct drm_gem_object *obj); 599 600
+12 -8
drivers/gpu/drm/i915/display/intel_fb.c
··· 422 422 423 423 /** 424 424 * intel_fb_get_format_info: Get a modifier specific format information 425 - * @cmd: FB add command structure 425 + * @pixel_format: pixel format 426 + * @modifier: modifier 426 427 * 427 428 * Returns: 428 - * Returns the format information for @cmd->pixel_format specific to @cmd->modifier[0], 429 + * Returns the format information for @pixel_format specific to @modifier, 429 430 * or %NULL if the modifier doesn't override the format. 430 431 */ 431 432 const struct drm_format_info * 432 - intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 433 + intel_fb_get_format_info(u32 pixel_format, u64 modifier) 433 434 { 434 - const struct intel_modifier_desc *md = lookup_modifier_or_null(cmd->modifier[0]); 435 + const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier); 435 436 436 437 if (!md || !md->formats) 437 438 return NULL; 438 439 439 - return lookup_format_info(md->formats, md->format_count, cmd->pixel_format); 440 + return lookup_format_info(md->formats, md->format_count, pixel_format); 440 441 } 441 442 442 443 static bool plane_caps_contain_any(u8 caps, u8 mask) ··· 2207 2206 2208 2207 int intel_framebuffer_init(struct intel_framebuffer *intel_fb, 2209 2208 struct drm_gem_object *obj, 2209 + const struct drm_format_info *info, 2210 2210 struct drm_mode_fb_cmd2 *mode_cmd) 2211 2211 { 2212 2212 struct intel_display *display = to_intel_display(obj->dev); ··· 2255 2253 goto err_frontbuffer_put; 2256 2254 } 2257 2255 2258 - drm_helper_mode_fill_fb_struct(display->drm, fb, mode_cmd); 2256 + drm_helper_mode_fill_fb_struct(display->drm, fb, info, mode_cmd); 2259 2257 2260 2258 for (i = 0; i < fb->format->num_planes; i++) { 2261 2259 unsigned int stride_alignment; ··· 2325 2323 struct drm_framebuffer * 2326 2324 intel_user_framebuffer_create(struct drm_device *dev, 2327 2325 struct drm_file *filp, 2326 + const struct drm_format_info *info, 2328 2327 const struct drm_mode_fb_cmd2 *user_mode_cmd) 2329 2328 { 2330 
2329 struct drm_framebuffer *fb; ··· 2336 2333 if (IS_ERR(obj)) 2337 2334 return ERR_CAST(obj); 2338 2335 2339 - fb = intel_framebuffer_create(obj, &mode_cmd); 2336 + fb = intel_framebuffer_create(obj, info, &mode_cmd); 2340 2337 drm_gem_object_put(obj); 2341 2338 2342 2339 return fb; ··· 2344 2341 2345 2342 struct drm_framebuffer * 2346 2343 intel_framebuffer_create(struct drm_gem_object *obj, 2344 + const struct drm_format_info *info, 2347 2345 struct drm_mode_fb_cmd2 *mode_cmd) 2348 2346 { 2349 2347 struct intel_framebuffer *intel_fb; ··· 2354 2350 if (!intel_fb) 2355 2351 return ERR_PTR(-ENOMEM); 2356 2352 2357 - ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); 2353 + ret = intel_framebuffer_init(intel_fb, obj, info, mode_cmd); 2358 2354 if (ret) 2359 2355 goto err; 2360 2356
+4 -1
drivers/gpu/drm/i915/display/intel_fb.h
··· 47 47 bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier); 48 48 49 49 const struct drm_format_info * 50 - intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd); 50 + intel_fb_get_format_info(u32 pixel_format, u64 modifier); 51 51 52 52 bool 53 53 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, ··· 102 102 103 103 int intel_framebuffer_init(struct intel_framebuffer *ifb, 104 104 struct drm_gem_object *obj, 105 + const struct drm_format_info *info, 105 106 struct drm_mode_fb_cmd2 *mode_cmd); 106 107 struct drm_framebuffer * 107 108 intel_framebuffer_create(struct drm_gem_object *obj, 109 + const struct drm_format_info *info, 108 110 struct drm_mode_fb_cmd2 *mode_cmd); 109 111 struct drm_framebuffer * 110 112 intel_user_framebuffer_create(struct drm_device *dev, 111 113 struct drm_file *filp, 114 + const struct drm_format_info *info, 112 115 const struct drm_mode_fb_cmd2 *user_mode_cmd); 113 116 114 117 bool intel_fb_modifier_uses_dpt(struct intel_display *display, u64 modifier);
+5 -1
drivers/gpu/drm/i915/display/intel_fbdev_fb.c
··· 62 62 return ERR_PTR(-ENOMEM); 63 63 } 64 64 65 - fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj), &mode_cmd); 65 + fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj), 66 + drm_get_format_info(display->drm, 67 + mode_cmd.pixel_format, 68 + mode_cmd.modifier[0]), 69 + &mode_cmd); 66 70 i915_gem_object_put(obj); 67 71 68 72 return to_intel_framebuffer(fb);
+2 -1
drivers/gpu/drm/i915/display/intel_plane_initial.c
··· 289 289 mode_cmd.flags = DRM_MODE_FB_MODIFIERS; 290 290 291 291 if (intel_framebuffer_init(to_intel_framebuffer(fb), 292 - intel_bo_to_drm_bo(vma->obj), &mode_cmd)) { 292 + intel_bo_to_drm_bo(vma->obj), 293 + fb->format, &mode_cmd)) { 293 294 drm_dbg_kms(display->drm, "intel fb init failed\n"); 294 295 goto err_vma; 295 296 }
+2 -2
drivers/gpu/drm/imagination/pvr_queue.c
··· 803 803 * the scheduler, and re-assign parent fences in the middle. 804 804 * 805 805 * Return: 806 - * * DRM_GPU_SCHED_STAT_NOMINAL. 806 + * * DRM_GPU_SCHED_STAT_RESET. 807 807 */ 808 808 static enum drm_gpu_sched_stat 809 809 pvr_queue_timedout_job(struct drm_sched_job *s_job) ··· 854 854 855 855 drm_sched_start(sched, 0); 856 856 857 - return DRM_GPU_SCHED_STAT_NOMINAL; 857 + return DRM_GPU_SCHED_STAT_RESET; 858 858 } 859 859 860 860 /**
+3 -2
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
··· 901 901 902 902 static struct drm_framebuffer * 903 903 ingenic_drm_gem_fb_create(struct drm_device *drm, struct drm_file *file, 904 + const struct drm_format_info *info, 904 905 const struct drm_mode_fb_cmd2 *mode_cmd) 905 906 { 906 907 struct ingenic_drm *priv = drm_device_get_priv(drm); 907 908 908 909 if (priv->soc_info->map_noncoherent) 909 - return drm_gem_fb_create_with_dirty(drm, file, mode_cmd); 910 + return drm_gem_fb_create_with_dirty(drm, file, info, mode_cmd); 910 911 911 - return drm_gem_fb_create(drm, file, mode_cmd); 912 + return drm_gem_fb_create(drm, file, info, mode_cmd); 912 913 } 913 914 914 915 static struct drm_gem_object *
+3 -3
drivers/gpu/drm/lima/lima_sched.c
··· 412 412 */ 413 413 if (dma_fence_is_signaled(task->fence)) { 414 414 DRM_WARN("%s spurious timeout\n", lima_ip_name(ip)); 415 - return DRM_GPU_SCHED_STAT_NOMINAL; 415 + return DRM_GPU_SCHED_STAT_RESET; 416 416 } 417 417 418 418 /* ··· 429 429 430 430 if (dma_fence_is_signaled(task->fence)) { 431 431 DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip)); 432 - return DRM_GPU_SCHED_STAT_NOMINAL; 432 + return DRM_GPU_SCHED_STAT_RESET; 433 433 } 434 434 435 435 /* ··· 467 467 drm_sched_resubmit_jobs(&pipe->base); 468 468 drm_sched_start(&pipe->base, 0); 469 469 470 - return DRM_GPU_SCHED_STAT_NOMINAL; 470 + return DRM_GPU_SCHED_STAT_RESET; 471 471 } 472 472 473 473 static void lima_sched_free_job(struct drm_sched_job *job)
+2 -1
drivers/gpu/drm/mediatek/mtk_dp.c
··· 2118 2118 mutex_unlock(&mtk_dp->update_plugged_status_lock); 2119 2119 } 2120 2120 2121 - static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge) 2121 + static enum drm_connector_status 2122 + mtk_dp_bdg_detect(struct drm_bridge *bridge, struct drm_connector *connector) 2122 2123 { 2123 2124 struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); 2124 2125 enum drm_connector_status ret = connector_status_disconnected;
+2 -3
drivers/gpu/drm/mediatek/mtk_drm_drv.c
··· 43 43 static struct drm_framebuffer * 44 44 mtk_drm_mode_fb_create(struct drm_device *dev, 45 45 struct drm_file *file, 46 + const struct drm_format_info *info, 46 47 const struct drm_mode_fb_cmd2 *cmd) 47 48 { 48 - const struct drm_format_info *info = drm_get_format_info(dev, cmd); 49 - 50 49 if (info->num_planes != 1) 51 50 return ERR_PTR(-EINVAL); 52 51 53 - return drm_gem_fb_create(dev, file, cmd); 52 + return drm_gem_fb_create(dev, file, info, cmd); 54 53 } 55 54 56 55 static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
+2 -1
drivers/gpu/drm/mediatek/mtk_hdmi.c
··· 1174 1174 * Bridge callbacks 1175 1175 */ 1176 1176 1177 - static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridge) 1177 + static enum drm_connector_status 1178 + mtk_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 1178 1179 { 1179 1180 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); 1180 1181
+4 -4
drivers/gpu/drm/msm/dp/dp_audio.c
··· 265 265 return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio); 266 266 } 267 267 268 - int msm_dp_audio_prepare(struct drm_connector *connector, 269 - struct drm_bridge *bridge, 268 + int msm_dp_audio_prepare(struct drm_bridge *bridge, 269 + struct drm_connector *connector, 270 270 struct hdmi_codec_daifmt *daifmt, 271 271 struct hdmi_codec_params *params) 272 272 { ··· 308 308 return rc; 309 309 } 310 310 311 - void msm_dp_audio_shutdown(struct drm_connector *connector, 312 - struct drm_bridge *bridge) 311 + void msm_dp_audio_shutdown(struct drm_bridge *bridge, 312 + struct drm_connector *connecter) 313 313 { 314 314 struct msm_dp_audio_private *audio; 315 315 struct msm_dp *msm_dp_display;
+4 -4
drivers/gpu/drm/msm/dp/dp_audio.h
··· 45 45 */ 46 46 void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio); 47 47 48 - int msm_dp_audio_prepare(struct drm_connector *connector, 49 - struct drm_bridge *bridge, 48 + int msm_dp_audio_prepare(struct drm_bridge *bridge, 49 + struct drm_connector *connector, 50 50 struct hdmi_codec_daifmt *daifmt, 51 51 struct hdmi_codec_params *params); 52 - void msm_dp_audio_shutdown(struct drm_connector *connector, 53 - struct drm_bridge *bridge); 52 + void msm_dp_audio_shutdown(struct drm_bridge *bridge, 53 + struct drm_connector *connector); 54 54 55 55 #endif /* _DP_AUDIO_H_ */ 56 56
+2 -1
drivers/gpu/drm/msm/dp/dp_drm.c
··· 20 20 * @bridge: Pointer to drm bridge structure 21 21 * Returns: Bridge's 'is connected' status 22 22 */ 23 - static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge) 23 + static enum drm_connector_status 24 + msm_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 24 25 { 25 26 struct msm_dp *dp; 26 27
+5 -5
drivers/gpu/drm/msm/hdmi/hdmi.h
··· 200 200 struct hdmi_codec_params; 201 201 202 202 int msm_hdmi_audio_update(struct hdmi *hdmi); 203 - int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector, 204 - struct drm_bridge *bridge, 203 + int msm_hdmi_bridge_audio_prepare(struct drm_bridge *bridge, 204 + struct drm_connector *connector, 205 205 struct hdmi_codec_daifmt *daifmt, 206 206 struct hdmi_codec_params *params); 207 - void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector, 208 - struct drm_bridge *bridge); 207 + void msm_hdmi_bridge_audio_shutdown(struct drm_bridge *bridge, 208 + struct drm_connector *connector); 209 209 210 210 /* 211 211 * hdmi bridge: ··· 215 215 216 216 void msm_hdmi_hpd_irq(struct drm_bridge *bridge); 217 217 enum drm_connector_status msm_hdmi_bridge_detect( 218 - struct drm_bridge *bridge); 218 + struct drm_bridge *bridge, struct drm_connector *connector); 219 219 void msm_hdmi_hpd_enable(struct drm_bridge *bridge); 220 220 void msm_hdmi_hpd_disable(struct drm_bridge *bridge); 221 221
+4 -4
drivers/gpu/drm/msm/hdmi/hdmi_audio.c
··· 122 122 return 0; 123 123 } 124 124 125 - int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector, 126 - struct drm_bridge *bridge, 125 + int msm_hdmi_bridge_audio_prepare(struct drm_bridge *bridge, 126 + struct drm_connector *connector, 127 127 struct hdmi_codec_daifmt *daifmt, 128 128 struct hdmi_codec_params *params) 129 129 { ··· 163 163 return msm_hdmi_audio_update(hdmi); 164 164 } 165 165 166 - void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector, 167 - struct drm_bridge *bridge) 166 + void msm_hdmi_bridge_audio_shutdown(struct drm_bridge *bridge, 167 + struct drm_connector *connector) 168 168 { 169 169 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); 170 170 struct hdmi *hdmi = hdmi_bridge->hdmi;
+1 -1
drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
··· 475 475 container_of(work, struct hdmi_bridge, hpd_work); 476 476 struct drm_bridge *bridge = &hdmi_bridge->base; 477 477 478 - drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge)); 478 + drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge, hdmi_bridge->hdmi->connector)); 479 479 } 480 480 481 481 /* initialize bridge */
+2 -2
drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
··· 177 177 connector_status_disconnected; 178 178 } 179 179 180 - enum drm_connector_status msm_hdmi_bridge_detect( 181 - struct drm_bridge *bridge) 180 + enum drm_connector_status 181 + msm_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 182 182 { 183 183 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); 184 184 struct hdmi *hdmi = hdmi_bridge->hdmi;
+2 -1
drivers/gpu/drm/msm/msm_drv.h
··· 260 260 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); 261 261 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); 262 262 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, 263 - struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); 263 + struct drm_file *file, const struct drm_format_info *info, 264 + const struct drm_mode_fb_cmd2 *mode_cmd); 264 265 struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev, 265 266 int w, int h, int p, uint32_t format); 266 267
+10 -8
drivers/gpu/drm/msm/msm_fb.c
··· 30 30 #define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base) 31 31 32 32 static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, 33 + const struct drm_format_info *info, 33 34 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); 34 35 35 36 static int msm_framebuffer_dirtyfb(struct drm_framebuffer *fb, ··· 140 139 } 141 140 142 141 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, 143 - struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd) 142 + struct drm_file *file, const struct drm_format_info *info, 143 + const struct drm_mode_fb_cmd2 *mode_cmd) 144 144 { 145 - const struct drm_format_info *info = drm_get_format_info(dev, 146 - mode_cmd); 147 145 struct drm_gem_object *bos[4] = {0}; 148 146 struct drm_framebuffer *fb; 149 147 int ret, i, n = info->num_planes; ··· 155 155 } 156 156 } 157 157 158 - fb = msm_framebuffer_init(dev, mode_cmd, bos); 158 + fb = msm_framebuffer_init(dev, info, mode_cmd, bos); 159 159 if (IS_ERR(fb)) { 160 160 ret = PTR_ERR(fb); 161 161 goto out_unref; ··· 170 170 } 171 171 172 172 static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, 173 + const struct drm_format_info *info, 173 174 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) 174 175 { 175 - const struct drm_format_info *info = drm_get_format_info(dev, 176 - mode_cmd); 177 176 struct msm_drm_private *priv = dev->dev_private; 178 177 struct msm_kms *kms = priv->kms; 179 178 struct msm_framebuffer *msm_fb = NULL; ··· 226 227 msm_fb->base.obj[i] = bos[i]; 227 228 } 228 229 229 - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); 230 + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); 230 231 231 232 ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs); 232 233 if (ret) { ··· 275 276 276 277 msm_gem_object_set_name(bo, "stolenfb"); 277 278 278 - fb = msm_framebuffer_init(dev, &mode_cmd, &bo); 279 + fb = msm_framebuffer_init(dev, 280 + 
drm_get_format_info(dev, mode_cmd.pixel_format, 281 + mode_cmd.modifier[0]), 282 + &mode_cmd, &bo); 279 283 if (IS_ERR(fb)) { 280 284 DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n"); 281 285 /* note: if fb creation failed, we can't rely on fb destroy
+2 -7
drivers/gpu/drm/mxsfb/mxsfb_drv.c
··· 91 91 92 92 static struct drm_framebuffer * 93 93 mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv, 94 + const struct drm_format_info *info, 94 95 const struct drm_mode_fb_cmd2 *mode_cmd) 95 96 { 96 - const struct drm_format_info *info; 97 - 98 - info = drm_get_format_info(dev, mode_cmd); 99 - if (!info) 100 - return ERR_PTR(-EINVAL); 101 - 102 97 if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) { 103 98 dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n"); 104 99 return ERR_PTR(-EINVAL); 105 100 } 106 101 107 - return drm_gem_fb_create(dev, file_priv, mode_cmd); 102 + return drm_gem_fb_create(dev, file_priv, info, mode_cmd); 108 103 } 109 104 110 105 static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
+4 -2
drivers/gpu/drm/nouveau/nouveau_display.c
··· 295 295 kind = nvbo->kind; 296 296 } 297 297 298 - info = drm_get_format_info(dev, mode_cmd); 298 + info = drm_get_format_info(dev, mode_cmd->pixel_format, 299 + mode_cmd->modifier[0]); 299 300 300 301 for (i = 0; i < info->num_planes; i++) { 301 302 height = drm_format_info_plane_height(info, ··· 321 320 if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL))) 322 321 return -ENOMEM; 323 322 324 - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); 323 + drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd); 325 324 fb->obj[0] = gem; 326 325 327 326 ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs); ··· 333 332 struct drm_framebuffer * 334 333 nouveau_user_framebuffer_create(struct drm_device *dev, 335 334 struct drm_file *file_priv, 335 + const struct drm_format_info *info, 336 336 const struct drm_mode_fb_cmd2 *mode_cmd) 337 337 { 338 338 struct drm_framebuffer *fb;
+1
drivers/gpu/drm/nouveau/nouveau_display.h
··· 67 67 68 68 struct drm_framebuffer * 69 69 nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *, 70 + const struct drm_format_info *, 70 71 const struct drm_mode_fb_cmd2 *); 71 72 #endif
+1 -1
drivers/gpu/drm/nouveau/nouveau_exec.c
··· 189 189 NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n", 190 190 chan->chid); 191 191 192 - return DRM_GPU_SCHED_STAT_NOMINAL; 192 + return DRM_GPU_SCHED_STAT_RESET; 193 193 } 194 194 195 195 static const struct nouveau_job_ops nouveau_exec_job_ops = {
+22 -13
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 38 38 static const struct dma_fence_ops nouveau_fence_ops_uevent; 39 39 static const struct dma_fence_ops nouveau_fence_ops_legacy; 40 40 41 - static inline struct nouveau_fence * 42 - from_fence(struct dma_fence *fence) 43 - { 44 - return container_of(fence, struct nouveau_fence, base); 45 - } 46 - 47 41 static inline struct nouveau_fence_chan * 48 42 nouveau_fctx(struct nouveau_fence *fence) 49 43 { ··· 71 77 fence->ops != &nouveau_fence_ops_uevent) 72 78 return NULL; 73 79 74 - return from_fence(fence); 80 + return to_nouveau_fence(fence); 75 81 } 76 82 77 83 void ··· 240 246 return ret; 241 247 } 242 248 249 + void 250 + nouveau_fence_cancel(struct nouveau_fence *fence) 251 + { 252 + struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 253 + unsigned long flags; 254 + 255 + spin_lock_irqsave(&fctx->lock, flags); 256 + if (!dma_fence_is_signaled_locked(&fence->base)) { 257 + dma_fence_set_error(&fence->base, -ECANCELED); 258 + if (nouveau_fence_signal(fence)) 259 + nvif_event_block(&fctx->event); 260 + } 261 + spin_unlock_irqrestore(&fctx->lock, flags); 262 + } 263 + 243 264 bool 244 265 nouveau_fence_done(struct nouveau_fence *fence) 245 266 { ··· 277 268 static long 278 269 nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait) 279 270 { 280 - struct nouveau_fence *fence = from_fence(f); 271 + struct nouveau_fence *fence = to_nouveau_fence(f); 281 272 unsigned long sleep_time = NSEC_PER_MSEC / 1000; 282 273 unsigned long t = jiffies, timeout = t + wait; 283 274 ··· 457 448 458 449 static const char *nouveau_fence_get_timeline_name(struct dma_fence *f) 459 450 { 460 - struct nouveau_fence *fence = from_fence(f); 451 + struct nouveau_fence *fence = to_nouveau_fence(f); 461 452 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 462 453 463 454 return !fctx->dead ? 
fctx->name : "dead channel"; ··· 471 462 */ 472 463 static bool nouveau_fence_is_signaled(struct dma_fence *f) 473 464 { 474 - struct nouveau_fence *fence = from_fence(f); 465 + struct nouveau_fence *fence = to_nouveau_fence(f); 475 466 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 476 467 struct nouveau_channel *chan; 477 468 bool ret = false; ··· 487 478 488 479 static bool nouveau_fence_no_signaling(struct dma_fence *f) 489 480 { 490 - struct nouveau_fence *fence = from_fence(f); 481 + struct nouveau_fence *fence = to_nouveau_fence(f); 491 482 492 483 /* 493 484 * caller should have a reference on the fence, ··· 512 503 513 504 static void nouveau_fence_release(struct dma_fence *f) 514 505 { 515 - struct nouveau_fence *fence = from_fence(f); 506 + struct nouveau_fence *fence = to_nouveau_fence(f); 516 507 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 517 508 518 509 kref_put(&fctx->fence_ref, nouveau_fence_context_put); ··· 530 521 531 522 static bool nouveau_fence_enable_signaling(struct dma_fence *f) 532 523 { 533 - struct nouveau_fence *fence = from_fence(f); 524 + struct nouveau_fence *fence = to_nouveau_fence(f); 534 525 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 535 526 bool ret; 536 527
+7
drivers/gpu/drm/nouveau/nouveau_fence.h
··· 17 17 unsigned long timeout; 18 18 }; 19 19 20 + static inline struct nouveau_fence * 21 + to_nouveau_fence(struct dma_fence *fence) 22 + { 23 + return container_of(fence, struct nouveau_fence, base); 24 + } 25 + 20 26 int nouveau_fence_create(struct nouveau_fence **, struct nouveau_channel *); 21 27 int nouveau_fence_new(struct nouveau_fence **, struct nouveau_channel *); 22 28 void nouveau_fence_unref(struct nouveau_fence **); 23 29 24 30 int nouveau_fence_emit(struct nouveau_fence *); 25 31 bool nouveau_fence_done(struct nouveau_fence *); 32 + void nouveau_fence_cancel(struct nouveau_fence *fence); 26 33 int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); 27 34 int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr); 28 35
+22 -15
drivers/gpu/drm/nouveau/nouveau_sched.c
··· 11 11 #include "nouveau_exec.h" 12 12 #include "nouveau_abi16.h" 13 13 #include "nouveau_sched.h" 14 + #include "nouveau_chan.h" 14 15 15 16 #define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000 16 17 ··· 122 121 { 123 122 struct nouveau_sched *sched = job->sched; 124 123 125 - spin_lock(&sched->job.list.lock); 124 + spin_lock(&sched->job_list.lock); 126 125 list_del(&job->entry); 127 - spin_unlock(&sched->job.list.lock); 128 - 129 - wake_up(&sched->job.wq); 126 + spin_unlock(&sched->job_list.lock); 130 127 } 131 128 132 129 void ··· 305 306 } 306 307 307 308 /* Submit was successful; add the job to the schedulers job list. */ 308 - spin_lock(&sched->job.list.lock); 309 - list_add(&job->entry, &sched->job.list.head); 310 - spin_unlock(&sched->job.list.lock); 309 + spin_lock(&sched->job_list.lock); 310 + list_add(&job->entry, &sched->job_list.head); 311 + spin_unlock(&sched->job_list.lock); 311 312 312 313 drm_sched_job_arm(&job->base); 313 314 job->done_fence = dma_fence_get(&job->base.s_fence->finished); ··· 370 371 { 371 372 struct drm_gpu_scheduler *sched = sched_job->sched; 372 373 struct nouveau_job *job = to_nouveau_job(sched_job); 373 - enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_NOMINAL; 374 + enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_RESET; 374 375 375 376 drm_sched_stop(sched, sched_job); 376 377 ··· 392 393 nouveau_job_fini(job); 393 394 } 394 395 396 + static void 397 + nouveau_sched_cancel_job(struct drm_sched_job *sched_job) 398 + { 399 + struct nouveau_fence *fence; 400 + struct nouveau_job *job; 401 + 402 + job = to_nouveau_job(sched_job); 403 + fence = to_nouveau_fence(job->done_fence); 404 + 405 + nouveau_fence_cancel(fence); 406 + } 407 + 395 408 static const struct drm_sched_backend_ops nouveau_sched_ops = { 396 409 .run_job = nouveau_sched_run_job, 397 410 .timedout_job = nouveau_sched_timedout_job, 398 411 .free_job = nouveau_sched_free_job, 412 + .cancel_job = nouveau_sched_cancel_job, 399 413 }; 400 414 401 415 static int ··· 458 446 
goto fail_sched; 459 447 460 448 mutex_init(&sched->mutex); 461 - spin_lock_init(&sched->job.list.lock); 462 - INIT_LIST_HEAD(&sched->job.list.head); 463 - init_waitqueue_head(&sched->job.wq); 449 + spin_lock_init(&sched->job_list.lock); 450 + INIT_LIST_HEAD(&sched->job_list.head); 464 451 465 452 return 0; 466 453 ··· 493 482 return 0; 494 483 } 495 484 496 - 497 485 static void 498 486 nouveau_sched_fini(struct nouveau_sched *sched) 499 487 { 500 488 struct drm_gpu_scheduler *drm_sched = &sched->base; 501 489 struct drm_sched_entity *entity = &sched->entity; 502 - 503 - rmb(); /* for list_empty to work without lock */ 504 - wait_event(sched->job.wq, list_empty(&sched->job.list.head)); 505 490 506 491 drm_sched_entity_fini(entity); 507 492 drm_sched_fini(drm_sched);
+3 -6
drivers/gpu/drm/nouveau/nouveau_sched.h
··· 103 103 struct mutex mutex; 104 104 105 105 struct { 106 - struct { 107 - struct list_head head; 108 - spinlock_t lock; 109 - } list; 110 - struct wait_queue_head wq; 111 - } job; 106 + struct list_head head; 107 + spinlock_t lock; 108 + } job_list; 112 109 }; 113 110 114 111 int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
+4 -4
drivers/gpu/drm/nouveau/nouveau_uvmm.c
··· 1019 1019 u64 end = addr + range; 1020 1020 1021 1021 again: 1022 - spin_lock(&sched->job.list.lock); 1023 - list_for_each_entry(__job, &sched->job.list.head, entry) { 1022 + spin_lock(&sched->job_list.lock); 1023 + list_for_each_entry(__job, &sched->job_list.head, entry) { 1024 1024 struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job); 1025 1025 1026 1026 list_for_each_op(op, &bind_job->ops) { ··· 1030 1030 1031 1031 if (!(end <= op_addr || addr >= op_end)) { 1032 1032 nouveau_uvmm_bind_job_get(bind_job); 1033 - spin_unlock(&sched->job.list.lock); 1033 + spin_unlock(&sched->job_list.lock); 1034 1034 wait_for_completion(&bind_job->complete); 1035 1035 nouveau_uvmm_bind_job_put(bind_job); 1036 1036 goto again; ··· 1038 1038 } 1039 1039 } 1040 1040 } 1041 - spin_unlock(&sched->job.list.lock); 1041 + spin_unlock(&sched->job_list.lock); 1042 1042 } 1043 1043 1044 1044 static int
+5 -5
drivers/gpu/drm/omapdrm/omap_fb.c
··· 335 335 #endif 336 336 337 337 struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, 338 - struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd) 338 + struct drm_file *file, const struct drm_format_info *info, 339 + const struct drm_mode_fb_cmd2 *mode_cmd) 339 340 { 340 - const struct drm_format_info *info = drm_get_format_info(dev, 341 - mode_cmd); 342 341 unsigned int num_planes = info->num_planes; 343 342 struct drm_gem_object *bos[4]; 344 343 struct drm_framebuffer *fb; ··· 377 378 dev, mode_cmd, mode_cmd->width, mode_cmd->height, 378 379 (char *)&mode_cmd->pixel_format); 379 380 380 - format = drm_get_format_info(dev, mode_cmd); 381 + format = drm_get_format_info(dev, mode_cmd->pixel_format, 382 + mode_cmd->modifier[0]); 381 383 382 384 for (i = 0; i < ARRAY_SIZE(formats); i++) { 383 385 if (formats[i] == mode_cmd->pixel_format) ··· 440 440 plane->dma_addr = 0; 441 441 } 442 442 443 - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); 443 + drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd); 444 444 445 445 ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs); 446 446 if (ret) {
+2 -1
drivers/gpu/drm/omapdrm/omap_fb.h
··· 20 20 struct seq_file; 21 21 22 22 struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, 23 - struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); 23 + struct drm_file *file, const struct drm_format_info *info, 24 + const struct drm_mode_fb_cmd2 *mode_cmd); 24 25 struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, 25 26 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); 26 27 int omap_framebuffer_pin(struct drm_framebuffer *fb);
+5 -6
drivers/gpu/drm/panel/panel-boe-himax8279d.c
··· 847 847 "failed to get enable gpio\n"); 848 848 } 849 849 850 - drm_panel_init(&pinfo->base, dev, &panel_funcs, 851 - DRM_MODE_CONNECTOR_DSI); 852 - 853 850 ret = drm_panel_of_backlight(&pinfo->base); 854 851 if (ret) 855 852 return ret; ··· 862 865 const struct panel_desc *desc; 863 866 int err; 864 867 865 - pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL); 866 - if (!pinfo) 867 - return -ENOMEM; 868 + pinfo = devm_drm_panel_alloc(&dsi->dev, __typeof(*pinfo), base, 869 + &panel_funcs, DRM_MODE_CONNECTOR_DSI); 870 + 871 + if (IS_ERR(pinfo)) 872 + return PTR_ERR(pinfo); 868 873 869 874 desc = of_device_get_match_data(&dsi->dev); 870 875 dsi->mode_flags = desc->mode_flags;
+5 -5
drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
··· 1720 1720 1721 1721 boe->base.prepare_prev_first = true; 1722 1722 1723 - drm_panel_init(&boe->base, dev, &boe_panel_funcs, 1724 - DRM_MODE_CONNECTOR_DSI); 1725 1723 err = of_drm_get_panel_orientation(dev->of_node, &boe->orientation); 1726 1724 if (err < 0) { 1727 1725 dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err); ··· 1744 1746 int ret; 1745 1747 const struct panel_desc *desc; 1746 1748 1747 - boe = devm_kzalloc(&dsi->dev, sizeof(*boe), GFP_KERNEL); 1748 - if (!boe) 1749 - return -ENOMEM; 1749 + boe = devm_drm_panel_alloc(&dsi->dev, __typeof(*boe), base, 1750 + &boe_panel_funcs, DRM_MODE_CONNECTOR_DSI); 1751 + 1752 + if (IS_ERR(boe)) 1753 + return PTR_ERR(boe); 1750 1754 1751 1755 desc = of_device_get_match_data(&dsi->dev); 1752 1756 dsi->lanes = desc->lanes;
+1
drivers/gpu/drm/panel/panel-edp.c
··· 1939 1939 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"), 1940 1940 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"), 1941 1941 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"), 1942 + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b1e, &delay_200_500_e80, "NE140QDM-N6A"), 1942 1943 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"), 1943 1944 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"), 1944 1945 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
+5 -5
drivers/gpu/drm/panel/panel-himax-hx83102.c
··· 989 989 990 990 ctx->base.prepare_prev_first = true; 991 991 992 - drm_panel_init(&ctx->base, dev, &hx83102_drm_funcs, 993 - DRM_MODE_CONNECTOR_DSI); 994 992 err = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation); 995 993 if (err < 0) 996 994 return dev_err_probe(dev, err, "failed to get orientation\n"); ··· 1011 1013 int ret; 1012 1014 const struct hx83102_panel_desc *desc; 1013 1015 1014 - ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL); 1015 - if (!ctx) 1016 - return -ENOMEM; 1016 + ctx = devm_drm_panel_alloc(&dsi->dev, __typeof(*ctx), base, 1017 + &hx83102_drm_funcs, DRM_MODE_CONNECTOR_DSI); 1018 + 1019 + if (IS_ERR(ctx)) 1020 + return PTR_ERR(ctx); 1017 1021 1018 1022 desc = of_device_get_match_data(&dsi->dev); 1019 1023 dsi->lanes = 4;
+5 -5
drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
··· 614 614 615 615 gpiod_set_value(ili->enable_gpio, 0); 616 616 617 - drm_panel_init(&ili->base, dev, &ili9882t_funcs, 618 - DRM_MODE_CONNECTOR_DSI); 619 617 err = of_drm_get_panel_orientation(dev->of_node, &ili->orientation); 620 618 if (err < 0) { 621 619 dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err); ··· 638 640 int ret; 639 641 const struct panel_desc *desc; 640 642 641 - ili = devm_kzalloc(&dsi->dev, sizeof(*ili), GFP_KERNEL); 642 - if (!ili) 643 - return -ENOMEM; 643 + ili = devm_drm_panel_alloc(&dsi->dev, __typeof(*ili), base, 644 + &ili9882t_funcs, DRM_MODE_CONNECTOR_DSI); 645 + 646 + if (IS_ERR(ili)) 647 + return PTR_ERR(ili); 644 648 645 649 desc = of_device_get_match_data(&dsi->dev); 646 650 dsi->lanes = desc->lanes;
+6 -6
drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
··· 435 435 return dev_err_probe(dev, PTR_ERR(jdi->backlight), 436 436 "failed to create backlight\n"); 437 437 438 - drm_panel_init(&jdi->base, &jdi->link1->dev, &jdi_panel_funcs, 439 - DRM_MODE_CONNECTOR_DSI); 440 - 441 438 drm_panel_add(&jdi->base); 442 439 443 440 return 0; ··· 472 475 473 476 /* register a panel for only the DSI-LINK1 interface */ 474 477 if (secondary) { 475 - jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL); 476 - if (!jdi) { 478 + jdi = devm_drm_panel_alloc(&dsi->dev, __typeof(*jdi), 479 + base, &jdi_panel_funcs, 480 + DRM_MODE_CONNECTOR_DSI); 481 + 482 + if (IS_ERR(jdi)) { 477 483 put_device(&secondary->dev); 478 - return -ENOMEM; 484 + return PTR_ERR(jdi); 479 485 } 480 486 481 487 mipi_dsi_set_drvdata(dsi, jdi);
+5 -6
drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
··· 402 402 return dev_err_probe(dev, PTR_ERR(jdi->backlight), 403 403 "failed to register backlight %d\n", ret); 404 404 405 - drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs, 406 - DRM_MODE_CONNECTOR_DSI); 407 - 408 405 drm_panel_add(&jdi->base); 409 406 410 407 return 0; ··· 423 426 dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO | 424 427 MIPI_DSI_CLOCK_NON_CONTINUOUS; 425 428 426 - jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL); 427 - if (!jdi) 428 - return -ENOMEM; 429 + jdi = devm_drm_panel_alloc(&dsi->dev, __typeof(*jdi), base, 430 + &jdi_panel_funcs, DRM_MODE_CONNECTOR_DSI); 431 + 432 + if (IS_ERR(jdi)) 433 + return PTR_ERR(jdi); 429 434 430 435 mipi_dsi_set_drvdata(dsi, jdi); 431 436
+6 -7
drivers/gpu/drm/panel/panel-khadas-ts050.c
··· 821 821 return dev_err_probe(dev, PTR_ERR(khadas_ts050->enable_gpio), 822 822 "failed to get enable gpio"); 823 823 824 - drm_panel_init(&khadas_ts050->base, &khadas_ts050->link->dev, 825 - &khadas_ts050_panel_funcs, DRM_MODE_CONNECTOR_DSI); 826 - 827 824 err = drm_panel_of_backlight(&khadas_ts050->base); 828 825 if (err) 829 826 return err; ··· 847 850 dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | 848 851 MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; 849 852 850 - khadas_ts050 = devm_kzalloc(&dsi->dev, sizeof(*khadas_ts050), 851 - GFP_KERNEL); 852 - if (!khadas_ts050) 853 - return -ENOMEM; 853 + khadas_ts050 = devm_drm_panel_alloc(&dsi->dev, __typeof(*khadas_ts050), 854 + base, &khadas_ts050_panel_funcs, 855 + DRM_MODE_CONNECTOR_DSI); 856 + 857 + if (IS_ERR(khadas_ts050)) 858 + return PTR_ERR(khadas_ts050); 854 859 855 860 khadas_ts050->panel_data = (struct khadas_ts050_panel_data *)data; 856 861 mipi_dsi_set_drvdata(dsi, khadas_ts050);
+6 -6
drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
··· 337 337 kingdisplay->enable_gpio = NULL; 338 338 } 339 339 340 - drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev, 341 - &kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI); 342 - 343 340 err = drm_panel_of_backlight(&kingdisplay->base); 344 341 if (err) 345 342 return err; ··· 361 364 dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | 362 365 MIPI_DSI_MODE_LPM; 363 366 364 - kingdisplay = devm_kzalloc(&dsi->dev, sizeof(*kingdisplay), GFP_KERNEL); 365 - if (!kingdisplay) 366 - return -ENOMEM; 367 + kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base, 368 + &kingdisplay_panel_funcs, 369 + DRM_MODE_CONNECTOR_DSI); 370 + 371 + if (IS_ERR(kingdisplay)) 372 + return PTR_ERR(kingdisplay); 367 373 368 374 mipi_dsi_set_drvdata(dsi, kingdisplay); 369 375 kingdisplay->link = dsi;
+5 -5
drivers/gpu/drm/panel/panel-lg-sw43408.c
··· 246 246 247 247 ctx->base.prepare_prev_first = true; 248 248 249 - drm_panel_init(&ctx->base, dev, &sw43408_funcs, DRM_MODE_CONNECTOR_DSI); 250 - 251 249 drm_panel_add(&ctx->base); 252 250 return ret; 253 251 } ··· 255 257 struct sw43408_panel *ctx; 256 258 int ret; 257 259 258 - ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL); 259 - if (!ctx) 260 - return -ENOMEM; 260 + ctx = devm_drm_panel_alloc(&dsi->dev, __typeof(*ctx), base, 261 + &sw43408_funcs, DRM_MODE_CONNECTOR_DSI); 262 + 263 + if (IS_ERR(ctx)) 264 + return PTR_ERR(ctx); 261 265 262 266 dsi->mode_flags = MIPI_DSI_MODE_LPM; 263 267 dsi->format = MIPI_DSI_FMT_RGB888;
+5 -5
drivers/gpu/drm/panel/panel-novatek-nt36672a.c
··· 608 608 return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio), 609 609 "failed to get reset gpio from DT\n"); 610 610 611 - drm_panel_init(&pinfo->base, dev, &panel_funcs, DRM_MODE_CONNECTOR_DSI); 612 - 613 611 ret = drm_panel_of_backlight(&pinfo->base); 614 612 if (ret) 615 613 return dev_err_probe(dev, ret, "Failed to get backlight\n"); ··· 623 625 const struct nt36672a_panel_desc *desc; 624 626 int err; 625 627 626 - pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL); 627 - if (!pinfo) 628 - return -ENOMEM; 628 + pinfo = devm_drm_panel_alloc(&dsi->dev, __typeof(*pinfo), base, 629 + &panel_funcs, DRM_MODE_CONNECTOR_DSI); 630 + 631 + if (IS_ERR(pinfo)) 632 + return PTR_ERR(pinfo); 629 633 630 634 desc = of_device_get_match_data(&dsi->dev); 631 635 dsi->mode_flags = desc->mode_flags;
+6 -6
drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
··· 132 132 if (IS_ERR(osd101t2587->supply)) 133 133 return PTR_ERR(osd101t2587->supply); 134 134 135 - drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev, 136 - &osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI); 137 - 138 135 ret = drm_panel_of_backlight(&osd101t2587->base); 139 136 if (ret) 140 137 return ret; ··· 158 161 MIPI_DSI_MODE_VIDEO_SYNC_PULSE | 159 162 MIPI_DSI_MODE_NO_EOT_PACKET; 160 163 161 - osd101t2587 = devm_kzalloc(&dsi->dev, sizeof(*osd101t2587), GFP_KERNEL); 162 - if (!osd101t2587) 163 - return -ENOMEM; 164 + osd101t2587 = devm_drm_panel_alloc(&dsi->dev, __typeof(*osd101t2587), base, 165 + &osd101t2587_panel_funcs, 166 + DRM_MODE_CONNECTOR_DSI); 167 + 168 + if (IS_ERR(osd101t2587)) 169 + return PTR_ERR(osd101t2587); 164 170 165 171 mipi_dsi_set_drvdata(dsi, osd101t2587); 166 172
+6 -6
drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
··· 166 166 if (IS_ERR(wuxga_nt->supply)) 167 167 return PTR_ERR(wuxga_nt->supply); 168 168 169 - drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev, 170 - &wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI); 171 - 172 169 ret = drm_panel_of_backlight(&wuxga_nt->base); 173 170 if (ret) 174 171 return ret; ··· 193 196 MIPI_DSI_CLOCK_NON_CONTINUOUS | 194 197 MIPI_DSI_MODE_LPM; 195 198 196 - wuxga_nt = devm_kzalloc(&dsi->dev, sizeof(*wuxga_nt), GFP_KERNEL); 197 - if (!wuxga_nt) 198 - return -ENOMEM; 199 + wuxga_nt = devm_drm_panel_alloc(&dsi->dev, __typeof(*wuxga_nt), base, 200 + &wuxga_nt_panel_funcs, 201 + DRM_MODE_CONNECTOR_DSI); 202 + 203 + if (IS_ERR(wuxga_nt)) 204 + return PTR_ERR(wuxga_nt); 199 205 200 206 mipi_dsi_set_drvdata(dsi, wuxga_nt); 201 207
+6 -6
drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
··· 373 373 .node = NULL, 374 374 }; 375 375 376 - ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL); 377 - if (!ts) 378 - return -ENOMEM; 376 + ts = devm_drm_panel_alloc(dev, __typeof(*ts), base, 377 + &rpi_touchscreen_funcs, 378 + DRM_MODE_CONNECTOR_DSI); 379 + 380 + if (IS_ERR(ts)) 381 + return PTR_ERR(ts); 379 382 380 383 i2c_set_clientdata(i2c, ts); 381 384 ··· 430 427 PTR_ERR(ts->dsi)); 431 428 return PTR_ERR(ts->dsi); 432 429 } 433 - 434 - drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs, 435 - DRM_MODE_CONNECTOR_DSI); 436 430 437 431 /* This appears last, as it's what will unblock the DSI host 438 432 * driver's component bind function.
+5 -6
drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
··· 279 279 if (IS_ERR(sharp->supply)) 280 280 return PTR_ERR(sharp->supply); 281 281 282 - drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs, 283 - DRM_MODE_CONNECTOR_DSI); 284 - 285 282 ret = drm_panel_of_backlight(&sharp->base); 286 283 if (ret) 287 284 return ret; ··· 320 323 321 324 /* register a panel for only the DSI-LINK1 interface */ 322 325 if (secondary) { 323 - sharp = devm_kzalloc(&dsi->dev, sizeof(*sharp), GFP_KERNEL); 324 - if (!sharp) { 326 + sharp = devm_drm_panel_alloc(&dsi->dev, __typeof(*sharp), base, 327 + &sharp_panel_funcs, 328 + DRM_MODE_CONNECTOR_DSI); 329 + if (IS_ERR(sharp)) { 325 330 put_device(&secondary->dev); 326 - return -ENOMEM; 331 + return PTR_ERR(sharp); 327 332 } 328 333 329 334 mipi_dsi_set_drvdata(dsi, sharp);
+5 -5
drivers/gpu/drm/panfrost/panfrost_job.c
··· 751 751 int js = panfrost_job_get_slot(job); 752 752 753 753 /* 754 - * If the GPU managed to complete this jobs fence, the timeout is 755 - * spurious. Bail out. 754 + * If the GPU managed to complete this jobs fence, the timeout has 755 + * fired before free-job worker. The timeout is spurious, so bail out. 756 756 */ 757 757 if (dma_fence_is_signaled(job->done_fence)) 758 - return DRM_GPU_SCHED_STAT_NOMINAL; 758 + return DRM_GPU_SCHED_STAT_NO_HANG; 759 759 760 760 /* 761 761 * Panfrost IRQ handler may take a long time to process an interrupt ··· 770 770 771 771 if (dma_fence_is_signaled(job->done_fence)) { 772 772 dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n"); 773 - return DRM_GPU_SCHED_STAT_NOMINAL; 773 + return DRM_GPU_SCHED_STAT_NO_HANG; 774 774 } 775 775 776 776 dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p", ··· 786 786 atomic_set(&pfdev->reset.pending, 1); 787 787 panfrost_reset(pfdev, sched_job); 788 788 789 - return DRM_GPU_SCHED_STAT_NOMINAL; 789 + return DRM_GPU_SCHED_STAT_RESET; 790 790 } 791 791 792 792 static void panfrost_reset_work(struct work_struct *work)
+1 -12
drivers/gpu/drm/panthor/panthor_mmu.c
··· 885 885 return ret; 886 886 } 887 887 888 - /** 889 - * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS 890 - * @vm: VM whose cache to flush 891 - * 892 - * Return: 0 on success, a negative error code if flush failed. 893 - */ 894 - int panthor_vm_flush_all(struct panthor_vm *vm) 895 - { 896 - return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range); 897 - } 898 - 899 888 static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size) 900 889 { 901 890 struct panthor_device *ptdev = vm->ptdev; ··· 2259 2270 panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job) 2260 2271 { 2261 2272 WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!"); 2262 - return DRM_GPU_SCHED_STAT_NOMINAL; 2273 + return DRM_GPU_SCHED_STAT_RESET; 2263 2274 } 2264 2275 2265 2276 static const struct drm_sched_backend_ops panthor_vm_bind_ops = {
-1
drivers/gpu/drm/panthor/panthor_mmu.h
··· 33 33 void panthor_vm_idle(struct panthor_vm *vm); 34 34 u32 panthor_vm_page_size(struct panthor_vm *vm); 35 35 int panthor_vm_as(struct panthor_vm *vm); 36 - int panthor_vm_flush_all(struct panthor_vm *vm); 37 36 38 37 struct panthor_heap_pool * 39 38 panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
+1 -1
drivers/gpu/drm/panthor/panthor_sched.c
··· 3241 3241 3242 3242 queue_start(queue); 3243 3243 3244 - return DRM_GPU_SCHED_STAT_NOMINAL; 3244 + return DRM_GPU_SCHED_STAT_RESET; 3245 3245 } 3246 3246 3247 3247 static void queue_free_job(struct drm_sched_job *sched_job)
+2 -1
drivers/gpu/drm/qxl/qxl_display.c
··· 1176 1176 static struct drm_framebuffer * 1177 1177 qxl_user_framebuffer_create(struct drm_device *dev, 1178 1178 struct drm_file *file_priv, 1179 + const struct drm_format_info *info, 1179 1180 const struct drm_mode_fb_cmd2 *mode_cmd) 1180 1181 { 1181 - return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd, 1182 + return drm_gem_fb_create_with_funcs(dev, file_priv, info, mode_cmd, 1182 1183 &qxl_fb_funcs); 1183 1184 } 1184 1185
+2 -1
drivers/gpu/drm/radeon/radeon_display.c
··· 1302 1302 { 1303 1303 int ret; 1304 1304 fb->obj[0] = obj; 1305 - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); 1305 + drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd); 1306 1306 ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs); 1307 1307 if (ret) { 1308 1308 fb->obj[0] = NULL; ··· 1314 1314 static struct drm_framebuffer * 1315 1315 radeon_user_framebuffer_create(struct drm_device *dev, 1316 1316 struct drm_file *file_priv, 1317 + const struct drm_format_info *info, 1317 1318 const struct drm_mode_fb_cmd2 *mode_cmd) 1318 1319 { 1319 1320 struct drm_gem_object *obj;
+2 -1
drivers/gpu/drm/radeon/radeon_fbdev.c
··· 67 67 int height = mode_cmd->height; 68 68 u32 cpp; 69 69 70 - info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd); 70 + info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd->pixel_format, 71 + mode_cmd->modifier[0]); 71 72 cpp = info->cpp[0]; 72 73 73 74 /* need to align pitch with crtc limits */
+2 -1
drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
··· 426 426 427 427 static struct drm_framebuffer * 428 428 rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, 429 + const struct drm_format_info *info, 429 430 const struct drm_mode_fb_cmd2 *mode_cmd) 430 431 { 431 432 struct rcar_du_device *rcdu = to_rcar_du_device(dev); ··· 491 490 } 492 491 } 493 492 494 - return drm_gem_fb_create(dev, file_priv, mode_cmd); 493 + return drm_gem_fb_create(dev, file_priv, info, mode_cmd); 495 494 } 496 495 497 496 /* -----------------------------------------------------------------------------
+2 -1
drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
··· 191 191 192 192 static struct drm_framebuffer * 193 193 rzg2l_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, 194 + const struct drm_format_info *info, 194 195 const struct drm_mode_fb_cmd2 *mode_cmd) 195 196 { 196 197 const struct rzg2l_du_format_info *format; ··· 215 214 return ERR_PTR(-EINVAL); 216 215 } 217 216 218 - return drm_gem_fb_create(dev, file_priv, mode_cmd); 217 + return drm_gem_fb_create(dev, file_priv, info, mode_cmd); 219 218 } 220 219 221 220 /* -----------------------------------------------------------------------------
+2 -1
drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
··· 117 117 118 118 static struct drm_framebuffer * 119 119 shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv, 120 + const struct drm_format_info *info, 120 121 const struct drm_mode_fb_cmd2 *mode_cmd) 121 122 { 122 123 const struct shmob_drm_format_info *format; ··· 145 144 } 146 145 } 147 146 148 - return drm_gem_fb_create(dev, file_priv, mode_cmd); 147 + return drm_gem_fb_create(dev, file_priv, info, mode_cmd); 149 148 } 150 149 151 150 static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
+7 -7
drivers/gpu/drm/rockchip/cdn-dp-core.c
··· 233 233 } 234 234 235 235 static enum drm_connector_status 236 - cdn_dp_bridge_detect(struct drm_bridge *bridge) 236 + cdn_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 237 237 { 238 238 struct cdn_dp_device *dp = bridge_to_dp(bridge); 239 239 enum drm_connector_status status = connector_status_disconnected; ··· 743 743 return 0; 744 744 } 745 745 746 - static int cdn_dp_audio_prepare(struct drm_connector *connector, 747 - struct drm_bridge *bridge, 746 + static int cdn_dp_audio_prepare(struct drm_bridge *bridge, 747 + struct drm_connector *connector, 748 748 struct hdmi_codec_daifmt *daifmt, 749 749 struct hdmi_codec_params *params) 750 750 { ··· 784 784 return ret; 785 785 } 786 786 787 - static void cdn_dp_audio_shutdown(struct drm_connector *connector, 788 - struct drm_bridge *bridge) 787 + static void cdn_dp_audio_shutdown(struct drm_bridge *bridge, 788 + struct drm_connector *connector) 789 789 { 790 790 struct cdn_dp_device *dp = bridge_to_dp(bridge); 791 791 int ret; ··· 801 801 mutex_unlock(&dp->lock); 802 802 } 803 803 804 - static int cdn_dp_audio_mute_stream(struct drm_connector *connector, 805 - struct drm_bridge *bridge, 804 + static int cdn_dp_audio_mute_stream(struct drm_bridge *bridge, 805 + struct drm_connector *connector, 806 806 bool enable, int direction) 807 807 { 808 808 struct cdn_dp_device *dp = bridge_to_dp(bridge);
+1 -1
drivers/gpu/drm/rockchip/rk3066_hdmi.c
··· 450 450 }; 451 451 452 452 static enum drm_connector_status 453 - rk3066_hdmi_bridge_detect(struct drm_bridge *bridge) 453 + rk3066_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 454 454 { 455 455 struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge); 456 456
+4 -7
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
··· 30 30 31 31 static struct drm_framebuffer * 32 32 rockchip_fb_create(struct drm_device *dev, struct drm_file *file, 33 + const struct drm_format_info *info, 33 34 const struct drm_mode_fb_cmd2 *mode_cmd) 34 35 { 35 36 struct drm_afbc_framebuffer *afbc_fb; 36 - const struct drm_format_info *info; 37 37 int ret; 38 - 39 - info = drm_get_format_info(dev, mode_cmd); 40 - if (!info) 41 - return ERR_PTR(-ENOMEM); 42 38 43 39 afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL); 44 40 if (!afbc_fb) 45 41 return ERR_PTR(-ENOMEM); 46 42 47 - ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd, 43 + ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, 44 + file, info, mode_cmd, 48 45 &rockchip_drm_fb_funcs); 49 46 if (ret) { 50 47 kfree(afbc_fb); ··· 49 52 } 50 53 51 54 if (drm_is_afbc(mode_cmd->modifier[0])) { 52 - ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb); 55 + ret = drm_gem_fb_afbc_init(dev, info, mode_cmd, afbc_fb); 53 56 if (ret) { 54 57 drm_framebuffer_put(&afbc_fb->base); 55 58 return ERR_PTR(ret);
+69 -16
drivers/gpu/drm/scheduler/sched_main.c
··· 366 366 { 367 367 struct drm_sched_job *job; 368 368 369 - spin_lock(&sched->job_list_lock); 370 369 job = list_first_entry_or_null(&sched->pending_list, 371 370 struct drm_sched_job, list); 372 371 if (job && dma_fence_is_signaled(&job->s_fence->finished)) 373 372 __drm_sched_run_free_queue(sched); 373 + } 374 + 375 + static void drm_sched_run_free_queue_unlocked(struct drm_gpu_scheduler *sched) 376 + { 377 + spin_lock(&sched->job_list_lock); 378 + drm_sched_run_free_queue(sched); 374 379 spin_unlock(&sched->job_list_lock); 375 380 } 376 381 ··· 528 523 spin_unlock(&sched->job_list_lock); 529 524 } 530 525 526 + /** 527 + * drm_sched_job_reinsert_on_false_timeout - reinsert the job on a false timeout 528 + * @sched: scheduler instance 529 + * @job: job to be reinserted on the pending list 530 + * 531 + * In the case of a "false timeout" - when a timeout occurs but the GPU isn't 532 + * hung and is making progress, the scheduler must reinsert the job back into 533 + * @sched->pending_list. Otherwise, the job and its resources won't be freed 534 + * through the &struct drm_sched_backend_ops.free_job callback. 535 + * 536 + * This function must be used in "false timeout" cases only. 537 + */ 538 + static void drm_sched_job_reinsert_on_false_timeout(struct drm_gpu_scheduler *sched, 539 + struct drm_sched_job *job) 540 + { 541 + spin_lock(&sched->job_list_lock); 542 + list_add(&job->list, &sched->pending_list); 543 + 544 + /* After reinserting the job, the scheduler enqueues the free-job work 545 + * again if ready. Otherwise, a signaled job could be added to the 546 + * pending list, but never freed. 
547 + */ 548 + drm_sched_run_free_queue(sched); 549 + spin_unlock(&sched->job_list_lock); 550 + } 551 + 531 552 static void drm_sched_job_timedout(struct work_struct *work) 532 553 { 533 554 struct drm_gpu_scheduler *sched; 534 555 struct drm_sched_job *job; 535 - enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL; 556 + enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET; 536 557 537 558 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); 538 559 ··· 587 556 job->sched->ops->free_job(job); 588 557 sched->free_guilty = false; 589 558 } 559 + 560 + if (status == DRM_GPU_SCHED_STAT_NO_HANG) 561 + drm_sched_job_reinsert_on_false_timeout(sched, job); 590 562 } else { 591 563 spin_unlock(&sched->job_list_lock); 592 564 } ··· 612 578 * This function is typically used for reset recovery (see the docu of 613 579 * drm_sched_backend_ops.timedout_job() for details). Do not call it for 614 580 * scheduler teardown, i.e., before calling drm_sched_fini(). 581 + * 582 + * As it's only used for reset recovery, drivers must not call this function 583 + * in their &struct drm_sched_backend_ops.timedout_job callback when they 584 + * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG. 615 585 */ 616 586 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) 617 587 { ··· 701 663 * drm_sched_backend_ops.timedout_job() for details). Do not call it for 702 664 * scheduler startup. The scheduler itself is fully operational after 703 665 * drm_sched_init() succeeded. 666 + * 667 + * As it's only used for reset recovery, drivers must not call this function 668 + * in their &struct drm_sched_backend_ops.timedout_job callback when they 669 + * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG. 
704 670 */ 705 671 void drm_sched_start(struct drm_gpu_scheduler *sched, int errno) 706 672 { ··· 1226 1184 if (job) 1227 1185 sched->ops->free_job(job); 1228 1186 1229 - drm_sched_run_free_queue(sched); 1187 + drm_sched_run_free_queue_unlocked(sched); 1230 1188 drm_sched_run_job_queue(sched); 1231 1189 } 1232 1190 ··· 1394 1352 } 1395 1353 EXPORT_SYMBOL(drm_sched_init); 1396 1354 1355 + static void drm_sched_cancel_remaining_jobs(struct drm_gpu_scheduler *sched) 1356 + { 1357 + struct drm_sched_job *job, *tmp; 1358 + 1359 + /* All other accessors are stopped. No locking necessary. */ 1360 + list_for_each_entry_safe_reverse(job, tmp, &sched->pending_list, list) { 1361 + sched->ops->cancel_job(job); 1362 + list_del(&job->list); 1363 + sched->ops->free_job(job); 1364 + } 1365 + } 1366 + 1397 1367 /** 1398 1368 * drm_sched_fini - Destroy a gpu scheduler 1399 1369 * ··· 1413 1359 * 1414 1360 * Tears down and cleans up the scheduler. 1415 1361 * 1416 - * This stops submission of new jobs to the hardware through 1417 - * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job() 1418 - * will not be called for all jobs still in drm_gpu_scheduler.pending_list. 1419 - * There is no solution for this currently. Thus, it is up to the driver to make 1420 - * sure that: 1421 - * 1422 - * a) drm_sched_fini() is only called after for all submitted jobs 1423 - * drm_sched_backend_ops.free_job() has been called or that 1424 - * b) the jobs for which drm_sched_backend_ops.free_job() has not been called 1425 - * after drm_sched_fini() ran are freed manually. 1426 - * 1427 - * FIXME: Take care of the above problem and prevent this function from leaking 1428 - * the jobs in drm_gpu_scheduler.pending_list under any circumstances. 1362 + * This stops submission of new jobs to the hardware through &struct 1363 + * drm_sched_backend_ops.run_job. 
If &struct drm_sched_backend_ops.cancel_job 1364 + * is implemented, all jobs will be canceled through it and afterwards cleaned 1365 + * up through &struct drm_sched_backend_ops.free_job. If cancel_job is not 1366 + * implemented, memory could leak. 1429 1367 */ 1430 1368 void drm_sched_fini(struct drm_gpu_scheduler *sched) 1431 1369 { ··· 1447 1401 /* Confirm no work left behind accessing device structures */ 1448 1402 cancel_delayed_work_sync(&sched->work_tdr); 1449 1403 1404 + /* Avoid memory leaks if supported by the driver. */ 1405 + if (sched->ops->cancel_job) 1406 + drm_sched_cancel_remaining_jobs(sched); 1407 + 1450 1408 if (sched->own_submit_wq) 1451 1409 destroy_workqueue(sched->submit_wq); 1452 1410 sched->ready = false; 1453 1411 kfree(sched->sched_rq); 1454 1412 sched->sched_rq = NULL; 1413 + 1414 + if (!list_empty(&sched->pending_list)) 1415 + dev_warn(sched->dev, "Tearing down scheduler while jobs are pending!\n"); 1455 1416 } 1456 1417 EXPORT_SYMBOL(drm_sched_fini); 1457 1418
+31 -44
drivers/gpu/drm/scheduler/tests/mock_scheduler.c
··· 63 63 lockdep_assert_held(&sched->lock); 64 64 65 65 job->flags |= DRM_MOCK_SCHED_JOB_DONE; 66 - list_move_tail(&job->link, &sched->done_list); 66 + list_del(&job->link); 67 67 dma_fence_signal_locked(&job->hw_fence); 68 68 complete(&job->done); 69 69 } ··· 218 218 struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job); 219 219 unsigned long flags; 220 220 221 + if (job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET) { 222 + job->flags &= ~DRM_MOCK_SCHED_JOB_DONT_RESET; 223 + return DRM_GPU_SCHED_STAT_NO_HANG; 224 + } 225 + 221 226 spin_lock_irqsave(&sched->lock, flags); 222 227 if (!dma_fence_is_signaled_locked(&job->hw_fence)) { 223 228 list_del(&job->link); ··· 236 231 drm_sched_job_cleanup(sched_job); 237 232 /* Mock job itself is freed by the kunit framework. */ 238 233 239 - return DRM_GPU_SCHED_STAT_NOMINAL; 234 + return DRM_GPU_SCHED_STAT_RESET; 240 235 } 241 236 242 237 static void mock_sched_free_job(struct drm_sched_job *sched_job) 243 238 { 244 - struct drm_mock_scheduler *sched = 245 - drm_sched_to_mock_sched(sched_job->sched); 246 239 struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job); 247 - unsigned long flags; 248 240 249 - /* Remove from the scheduler done list. */ 250 - spin_lock_irqsave(&sched->lock, flags); 251 - list_del(&job->link); 252 - spin_unlock_irqrestore(&sched->lock, flags); 253 241 dma_fence_put(&job->hw_fence); 254 - 255 242 drm_sched_job_cleanup(sched_job); 256 243 257 244 /* Mock job itself is freed by the kunit framework. 
*/ 258 245 } 259 246 247 + static void mock_sched_cancel_job(struct drm_sched_job *sched_job) 248 + { 249 + struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched); 250 + struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job); 251 + unsigned long flags; 252 + 253 + hrtimer_cancel(&job->timer); 254 + 255 + spin_lock_irqsave(&sched->lock, flags); 256 + if (!dma_fence_is_signaled_locked(&job->hw_fence)) { 257 + list_del(&job->link); 258 + dma_fence_set_error(&job->hw_fence, -ECANCELED); 259 + dma_fence_signal_locked(&job->hw_fence); 260 + } 261 + spin_unlock_irqrestore(&sched->lock, flags); 262 + 263 + /* 264 + * The GPU Scheduler will call drm_sched_backend_ops.free_job(), still. 265 + * Mock job itself is freed by the kunit framework. 266 + */ 267 + } 268 + 260 269 static const struct drm_sched_backend_ops drm_mock_scheduler_ops = { 261 270 .run_job = mock_sched_run_job, 262 271 .timedout_job = mock_sched_timedout_job, 263 - .free_job = mock_sched_free_job 272 + .free_job = mock_sched_free_job, 273 + .cancel_job = mock_sched_cancel_job, 264 274 }; 265 275 266 276 /** ··· 309 289 sched->hw_timeline.context = dma_fence_context_alloc(1); 310 290 atomic_set(&sched->hw_timeline.next_seqno, 0); 311 291 INIT_LIST_HEAD(&sched->job_list); 312 - INIT_LIST_HEAD(&sched->done_list); 313 292 spin_lock_init(&sched->lock); 314 293 315 294 return sched; ··· 323 304 */ 324 305 void drm_mock_sched_fini(struct drm_mock_scheduler *sched) 325 306 { 326 - struct drm_mock_sched_job *job, *next; 327 - unsigned long flags; 328 - LIST_HEAD(list); 329 - 330 - drm_sched_wqueue_stop(&sched->base); 331 - 332 - /* Force complete all unfinished jobs. 
*/ 333 - spin_lock_irqsave(&sched->lock, flags); 334 - list_for_each_entry_safe(job, next, &sched->job_list, link) 335 - list_move_tail(&job->link, &list); 336 - spin_unlock_irqrestore(&sched->lock, flags); 337 - 338 - list_for_each_entry(job, &list, link) 339 - hrtimer_cancel(&job->timer); 340 - 341 - spin_lock_irqsave(&sched->lock, flags); 342 - list_for_each_entry_safe(job, next, &list, link) 343 - drm_mock_sched_job_complete(job); 344 - spin_unlock_irqrestore(&sched->lock, flags); 345 - 346 - /* 347 - * Free completed jobs and jobs not yet processed by the DRM scheduler 348 - * free worker. 349 - */ 350 - spin_lock_irqsave(&sched->lock, flags); 351 - list_for_each_entry_safe(job, next, &sched->done_list, link) 352 - list_move_tail(&job->link, &list); 353 - spin_unlock_irqrestore(&sched->lock, flags); 354 - 355 - list_for_each_entry_safe(job, next, &list, link) 356 - mock_sched_free_job(&job->base); 357 - 358 307 drm_sched_fini(&sched->base); 359 308 } 360 309
+1 -1
drivers/gpu/drm/scheduler/tests/sched_tests.h
··· 49 49 50 50 spinlock_t lock; 51 51 struct list_head job_list; 52 - struct list_head done_list; 53 52 54 53 struct { 55 54 u64 context; ··· 97 98 98 99 #define DRM_MOCK_SCHED_JOB_DONE 0x1 99 100 #define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2 101 + #define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4 100 102 unsigned long flags; 101 103 102 104 struct list_head link;
+90 -3
drivers/gpu/drm/scheduler/tests/tests_basic.c
··· 5 5 6 6 #include "sched_tests.h" 7 7 8 + #define MOCK_TIMEOUT (HZ / 5) 9 + 8 10 /* 9 11 * DRM scheduler basic tests should check the basic functional correctness of 10 12 * the scheduler, including some very light smoke testing. More targeted tests, ··· 30 28 31 29 static int drm_sched_timeout_init(struct kunit *test) 32 30 { 33 - test->priv = drm_mock_sched_new(test, HZ); 31 + test->priv = drm_mock_sched_new(test, MOCK_TIMEOUT); 34 32 35 33 return 0; 36 34 } ··· 206 204 .test_cases = drm_sched_basic_tests, 207 205 }; 208 206 207 + static void drm_sched_basic_cancel(struct kunit *test) 208 + { 209 + struct drm_mock_sched_entity *entity; 210 + struct drm_mock_scheduler *sched; 211 + struct drm_mock_sched_job *job; 212 + bool done; 213 + 214 + /* 215 + * Check that drm_sched_fini() uses the cancel_job() callback to cancel 216 + * jobs that are still pending. 217 + */ 218 + 219 + sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT); 220 + entity = drm_mock_sched_entity_new(test, DRM_SCHED_PRIORITY_NORMAL, 221 + sched); 222 + 223 + job = drm_mock_sched_job_new(test, entity); 224 + 225 + drm_mock_sched_job_submit(job); 226 + 227 + done = drm_mock_sched_job_wait_scheduled(job, HZ); 228 + KUNIT_ASSERT_TRUE(test, done); 229 + 230 + drm_mock_sched_entity_free(entity); 231 + drm_mock_sched_fini(sched); 232 + 233 + KUNIT_ASSERT_EQ(test, job->hw_fence.error, -ECANCELED); 234 + } 235 + 236 + static struct kunit_case drm_sched_cancel_tests[] = { 237 + KUNIT_CASE(drm_sched_basic_cancel), 238 + {} 239 + }; 240 + 241 + static struct kunit_suite drm_sched_cancel = { 242 + .name = "drm_sched_basic_cancel_tests", 243 + .init = drm_sched_basic_init, 244 + .exit = drm_sched_basic_exit, 245 + .test_cases = drm_sched_cancel_tests, 246 + }; 247 + 209 248 static void drm_sched_basic_timeout(struct kunit *test) 210 249 { 211 250 struct drm_mock_scheduler *sched = test->priv; ··· 270 227 done = drm_mock_sched_job_wait_scheduled(job, HZ); 271 228 KUNIT_ASSERT_TRUE(test, done); 272 229 
273 - done = drm_mock_sched_job_wait_finished(job, HZ / 2); 230 + done = drm_mock_sched_job_wait_finished(job, MOCK_TIMEOUT / 2); 274 231 KUNIT_ASSERT_FALSE(test, done); 275 232 276 233 KUNIT_ASSERT_EQ(test, 277 234 job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT, 278 235 0); 279 236 280 - done = drm_mock_sched_job_wait_finished(job, HZ); 237 + done = drm_mock_sched_job_wait_finished(job, MOCK_TIMEOUT); 281 238 KUNIT_ASSERT_FALSE(test, done); 282 239 283 240 KUNIT_ASSERT_EQ(test, ··· 287 244 drm_mock_sched_entity_free(entity); 288 245 } 289 246 247 + static void drm_sched_skip_reset(struct kunit *test) 248 + { 249 + struct drm_mock_scheduler *sched = test->priv; 250 + struct drm_mock_sched_entity *entity; 251 + struct drm_mock_sched_job *job; 252 + unsigned int i; 253 + bool done; 254 + 255 + /* 256 + * Submit a single job against a scheduler with the timeout configured 257 + * and verify that if the job is still running, the timeout handler 258 + * will skip the reset and allow the job to complete. 
259 + */ 260 + 261 + entity = drm_mock_sched_entity_new(test, 262 + DRM_SCHED_PRIORITY_NORMAL, 263 + sched); 264 + job = drm_mock_sched_job_new(test, entity); 265 + 266 + job->flags = DRM_MOCK_SCHED_JOB_DONT_RESET; 267 + 268 + drm_mock_sched_job_submit(job); 269 + 270 + done = drm_mock_sched_job_wait_scheduled(job, HZ); 271 + KUNIT_ASSERT_TRUE(test, done); 272 + 273 + done = drm_mock_sched_job_wait_finished(job, 2 * MOCK_TIMEOUT); 274 + KUNIT_ASSERT_FALSE(test, done); 275 + 276 + KUNIT_ASSERT_EQ(test, 277 + job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET, 278 + 0); 279 + 280 + i = drm_mock_sched_advance(sched, 1); 281 + KUNIT_ASSERT_EQ(test, i, 1); 282 + 283 + done = drm_mock_sched_job_wait_finished(job, HZ); 284 + KUNIT_ASSERT_TRUE(test, done); 285 + 286 + drm_mock_sched_entity_free(entity); 287 + } 288 + 290 289 static struct kunit_case drm_sched_timeout_tests[] = { 291 290 KUNIT_CASE(drm_sched_basic_timeout), 291 + KUNIT_CASE(drm_sched_skip_reset), 292 292 {} 293 293 }; 294 294 ··· 557 471 558 472 kunit_test_suites(&drm_sched_basic, 559 473 &drm_sched_timeout, 474 + &drm_sched_cancel, 560 475 &drm_sched_priority, 561 476 &drm_sched_modify_sched, 562 477 &drm_sched_credits);
+87 -8
drivers/gpu/drm/sitronix/st7571-i2c.c
··· 68 68 #define ST7571_SET_COLOR_MODE(c) (0x10 | FIELD_PREP(GENMASK(0, 0), (c))) 69 69 #define ST7571_COMMAND_SET_NORMAL (0x00) 70 70 71 + /* ST7567 commands */ 72 + #define ST7567_SET_LCD_BIAS(m) (0xa2 | FIELD_PREP(GENMASK(0, 0), (m))) 73 + 71 74 #define ST7571_PAGE_HEIGHT 8 72 75 73 76 #define DRIVER_NAME "st7571" ··· 95 92 96 93 struct st7571_panel_data { 97 94 int (*init)(struct st7571_device *st7571); 95 + int (*parse_dt)(struct st7571_device *st7571); 98 96 struct st7571_panel_constraints constraints; 99 97 }; 100 98 ··· 554 550 * Encoder 555 551 */ 556 552 557 - static void ssd130x_encoder_atomic_enable(struct drm_encoder *encoder, 558 - struct drm_atomic_state *state) 553 + static void st7571_encoder_atomic_enable(struct drm_encoder *encoder, 554 + struct drm_atomic_state *state) 559 555 { 560 556 struct drm_device *drm = encoder->dev; 561 557 struct st7571_device *st7571 = drm_to_st7571(drm); ··· 569 565 st7571_send_command_list(st7571, &command, 1); 570 566 } 571 567 572 - static void ssd130x_encoder_atomic_disable(struct drm_encoder *encoder, 573 - struct drm_atomic_state *state) 568 + static void st7571_encoder_atomic_disable(struct drm_encoder *encoder, 569 + struct drm_atomic_state *state) 574 570 { 575 571 struct drm_device *drm = encoder->dev; 576 572 struct st7571_device *st7571 = drm_to_st7571(drm); ··· 585 581 }; 586 582 587 583 static const struct drm_encoder_helper_funcs st7571_encoder_helper_funcs = { 588 - .atomic_enable = ssd130x_encoder_atomic_enable, 589 - .atomic_disable = ssd130x_encoder_atomic_disable, 584 + .atomic_enable = st7571_encoder_atomic_enable, 585 + .atomic_disable = st7571_encoder_atomic_disable, 590 586 }; 591 587 592 588 /* ··· 777 773 return 0; 778 774 } 779 775 776 + static int st7567_parse_dt(struct st7571_device *st7567) 777 + { 778 + struct device *dev = &st7567->client->dev; 779 + struct device_node *np = dev->of_node; 780 + struct display_timing dt; 781 + int ret; 782 + 783 + ret = of_get_display_timing(np, 
"panel-timing", &dt); 784 + if (ret) { 785 + dev_err(dev, "Failed to get display timing from DT\n"); 786 + return ret; 787 + } 788 + 789 + of_property_read_u32(np, "width-mm", &st7567->width_mm); 790 + of_property_read_u32(np, "height-mm", &st7567->height_mm); 791 + 792 + st7567->pformat = &st7571_monochrome; 793 + st7567->bpp = 1; 794 + 795 + st7567->startline = dt.vfront_porch.typ; 796 + st7567->nlines = dt.vactive.typ; 797 + st7567->ncols = dt.hactive.typ; 798 + 799 + return 0; 800 + } 801 + 780 802 static int st7571_parse_dt(struct st7571_device *st7571) 781 803 { 782 804 struct device *dev = &st7571->client->dev; ··· 834 804 835 805 st7571->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); 836 806 if (IS_ERR(st7571->reset)) 837 - return PTR_ERR(st7571->reset); 807 + return dev_err_probe(dev, PTR_ERR(st7571->reset), 808 + "Failed to get reset gpio\n"); 809 + 838 810 839 811 return 0; 840 812 } ··· 846 814 gpiod_set_value_cansleep(st7571->reset, 1); 847 815 fsleep(20); 848 816 gpiod_set_value_cansleep(st7571->reset, 0); 817 + } 818 + 819 + static int st7567_lcd_init(struct st7571_device *st7567) 820 + { 821 + /* 822 + * Most of the initialization sequence is taken directly from the 823 + * referential initial code in the ST7567 datasheet. 
824 + */ 825 + u8 commands[] = { 826 + ST7571_DISPLAY_OFF, 827 + 828 + ST7567_SET_LCD_BIAS(1), 829 + 830 + ST7571_SET_SEG_SCAN_DIR(0), 831 + ST7571_SET_COM_SCAN_DIR(1), 832 + 833 + ST7571_SET_REGULATOR_REG(4), 834 + ST7571_SET_CONTRAST_MSB, 835 + ST7571_SET_CONTRAST_LSB(0x20), 836 + 837 + ST7571_SET_START_LINE_MSB, 838 + ST7571_SET_START_LINE_LSB(st7567->startline), 839 + 840 + ST7571_SET_POWER(0x4), /* Power Control, VC: ON, VR: OFF, VF: OFF */ 841 + ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */ 842 + ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */ 843 + 844 + ST7571_SET_REVERSE(0), 845 + ST7571_SET_ENTIRE_DISPLAY_ON(0), 846 + }; 847 + 848 + return st7571_send_command_list(st7567, commands, ARRAY_SIZE(commands)); 849 849 } 850 850 851 851 static int st7571_lcd_init(struct st7571_device *st7571) ··· 943 879 i2c_set_clientdata(client, st7571); 944 880 st7571->pdata = device_get_match_data(&client->dev); 945 881 946 - ret = st7571_parse_dt(st7571); 882 + ret = st7571->pdata->parse_dt(st7571); 947 883 if (ret) 948 884 return ret; 949 885 ··· 1024 960 drm_dev_unplug(&st7571->dev); 1025 961 } 1026 962 963 + struct st7571_panel_data st7567_config = { 964 + .init = st7567_lcd_init, 965 + .parse_dt = st7567_parse_dt, 966 + .constraints = { 967 + .min_nlines = 1, 968 + .max_nlines = 64, 969 + .min_ncols = 128, 970 + .max_ncols = 128, 971 + .support_grayscale = false, 972 + }, 973 + }; 974 + 1027 975 struct st7571_panel_data st7571_config = { 1028 976 .init = st7571_lcd_init, 977 + .parse_dt = st7571_parse_dt, 1029 978 .constraints = { 1030 979 .min_nlines = 1, 1031 980 .max_nlines = 128, ··· 1049 972 }; 1050 973 1051 974 static const struct of_device_id st7571_of_match[] = { 975 + { .compatible = "sitronix,st7567", .data = &st7567_config }, 1052 976 { .compatible = "sitronix,st7571", .data = &st7571_config }, 1053 977 {}, 1054 978 }; 1055 979 MODULE_DEVICE_TABLE(of, st7571_of_match); 1056 980 1057 981 static const struct 
i2c_device_id st7571_id[] = { 982 + { "st7567", 0 }, 1058 983 { "st7571", 0 }, 1059 984 { } 1060 985 };
+2
drivers/gpu/drm/tegra/drm.h
··· 185 185 int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer, 186 186 struct tegra_bo_tiling *tiling); 187 187 struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm, 188 + const struct drm_format_info *info, 188 189 const struct drm_mode_fb_cmd2 *mode_cmd, 189 190 struct tegra_bo **planes, 190 191 unsigned int num_planes); 191 192 struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, 192 193 struct drm_file *file, 194 + const struct drm_format_info *info, 193 195 const struct drm_mode_fb_cmd2 *cmd); 194 196 195 197 #ifdef CONFIG_DRM_FBDEV_EMULATION
+4 -3
drivers/gpu/drm/tegra/fb.c
··· 102 102 }; 103 103 104 104 struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm, 105 + const struct drm_format_info *info, 105 106 const struct drm_mode_fb_cmd2 *mode_cmd, 106 107 struct tegra_bo **planes, 107 108 unsigned int num_planes) ··· 115 114 if (!fb) 116 115 return ERR_PTR(-ENOMEM); 117 116 118 - drm_helper_mode_fill_fb_struct(drm, fb, mode_cmd); 117 + drm_helper_mode_fill_fb_struct(drm, fb, info, mode_cmd); 119 118 120 119 for (i = 0; i < fb->format->num_planes; i++) 121 120 fb->obj[i] = &planes[i]->gem; ··· 133 132 134 133 struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, 135 134 struct drm_file *file, 135 + const struct drm_format_info *info, 136 136 const struct drm_mode_fb_cmd2 *cmd) 137 137 { 138 - const struct drm_format_info *info = drm_get_format_info(drm, cmd); 139 138 struct tegra_bo *planes[4]; 140 139 struct drm_gem_object *gem; 141 140 struct drm_framebuffer *fb; ··· 167 166 planes[i] = to_tegra_bo(gem); 168 167 } 169 168 170 - fb = tegra_fb_alloc(drm, cmd, planes, i); 169 + fb = tegra_fb_alloc(drm, info, cmd, planes, i); 171 170 if (IS_ERR(fb)) { 172 171 err = PTR_ERR(fb); 173 172 goto unreference;
+3 -1
drivers/gpu/drm/tegra/fbdev.c
··· 106 106 return PTR_ERR(info); 107 107 } 108 108 109 - fb = tegra_fb_alloc(drm, &cmd, &bo, 1); 109 + fb = tegra_fb_alloc(drm, 110 + drm_get_format_info(drm, cmd.pixel_format, cmd.modifier[0]), 111 + &cmd, &bo, 1); 110 112 if (IS_ERR(fb)) { 111 113 err = PTR_ERR(fb); 112 114 dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
+1
drivers/gpu/drm/tests/drm_framebuffer_test.c
··· 363 363 364 364 static struct drm_framebuffer *fb_create_mock(struct drm_device *dev, 365 365 struct drm_file *file_priv, 366 + const struct drm_format_info *info, 366 367 const struct drm_mode_fb_cmd2 *mode_cmd) 367 368 { 368 369 struct drm_framebuffer_test_priv *priv = container_of(dev, typeof(*priv), dev);
+94 -48
drivers/gpu/drm/tests/drm_kunit_edid.h
··· 46 46 * Monitor ranges (GTF): 50-70 Hz V, 30-70 kHz H, max dotclock 150 MHz 47 47 * Dummy Descriptor: 48 48 * Checksum: 0xab 49 + * 50 + * ---------------- 51 + * 52 + * edid-decode 1.30.0-5367 53 + * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22 54 + * 55 + * EDID conformity: PASS 49 56 */ 50 57 static const unsigned char test_edid_dvi_1080p[] = { 51 58 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00, ··· 69 62 }; 70 63 71 64 /* 65 + * 66 + * This edid is intentionally broken with the 100MHz limit. It's meant 67 + * to be used only with tests in unusual situations. 68 + * 72 69 * edid-decode (hex): 73 70 * 74 71 * 00 ff ff ff ff ff ff 00 31 d8 2a 00 00 00 00 00 ··· 84 73 * 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10 85 74 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92 86 75 * 87 - * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c 88 - * 00 12 34 00 14 20 00 00 00 00 00 00 00 00 00 00 76 + * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c 77 + * 00 12 34 00 14 00 00 00 00 00 00 00 00 00 00 00 89 78 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 90 79 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 91 80 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 92 81 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 93 82 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 94 - * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 e4 83 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 10 95 84 * 96 85 * ---------------- 97 86 * ··· 146 135 * Vendor-Specific Data Block (HDMI), OUI 00-0C-03: 147 136 * Source physical address: 1.2.3.4 148 137 * Maximum TMDS clock: 100 MHz 149 - * Extended HDMI video details: 150 - * Checksum: 0xe4 Unused space in Extension Block: 100 bytes 138 + * Checksum: 0x10 Unused space in Extension Block: 106 bytes 139 + * 140 + * ---------------- 141 + * 142 + * edid-decode 1.30.0-5367 143 + * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22 144 + * 145 + * Failures: 146 + * 147 + * EDID: 148 + * CTA-861: The maximum 
HDMI TMDS clock is 100000 kHz, but one or more video timings go up to 148500 kHz. 149 + * 150 + * EDID conformity: FAIL 151 151 */ 152 152 static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = { 153 153 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00, ··· 169 147 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e, 170 148 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44, 171 149 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32, 172 - 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 150 + 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 173 151 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 174 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81, 175 - 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c, 176 - 0x00, 0x12, 0x34, 0x00, 0x14, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 152 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81, 153 + 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c, 154 + 0x00, 0x12, 0x34, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 177 155 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 178 156 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 179 157 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ··· 182 160 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 183 161 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 184 162 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 185 - 0x00, 0x00, 0x00, 0xe4 163 + 0x00, 0x00, 0x00, 0x10 186 164 }; 187 165 188 166 /* ··· 197 175 * 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10 198 176 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92 199 177 * 200 - * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c 201 - 
* 00 12 34 00 28 20 00 00 00 00 00 00 00 00 00 00 178 + * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c 179 + * 00 12 34 00 28 00 00 00 00 00 00 00 00 00 00 00 202 180 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 203 181 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 204 182 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 205 183 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 206 184 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 207 - * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d0 185 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fc 208 186 * 209 187 * ---------------- 210 188 * ··· 259 237 * Vendor-Specific Data Block (HDMI), OUI 00-0C-03: 260 238 * Source physical address: 1.2.3.4 261 239 * Maximum TMDS clock: 200 MHz 262 - * Extended HDMI video details: 263 - * Checksum: 0xd0 Unused space in Extension Block: 100 bytes 240 + * Checksum: 0xfc Unused space in Extension Block: 106 bytes 241 + * 242 + * ---------------- 243 + * 244 + * edid-decode 1.30.0-5367 245 + * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22 246 + * 247 + * EDID conformity: PASS 264 248 */ 265 249 static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = { 266 250 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00, ··· 277 249 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e, 278 250 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44, 279 251 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32, 280 - 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 252 + 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 281 253 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 282 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81, 283 - 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c, 284 - 0x00, 0x12, 0x34, 0x00, 0x28, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 254 + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81, 255 + 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c, 256 + 0x00, 0x12, 0x34, 0x00, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 285 257 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 286 258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 287 259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ··· 290 262 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 291 263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 292 264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 293 - 0x00, 0x00, 0x00, 0xd0 265 + 0x00, 0x00, 0x00, 0xfc 294 266 }; 295 267 296 268 /* ··· 305 277 * 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10 306 278 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92 307 279 * 308 - * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c 309 - * 00 12 34 00 28 20 00 00 00 00 00 00 00 00 00 00 280 + * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c 281 + * 00 12 34 00 44 00 00 00 00 00 00 00 00 00 00 00 310 282 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 311 283 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 312 284 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 313 285 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 314 286 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 315 - * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d0 287 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 e0 316 288 * 317 289 * ---------------- 318 290 * ··· 367 339 * Vendor-Specific Data Block (HDMI), OUI 00-0C-03: 368 340 * Source physical address: 1.2.3.4 369 341 * Maximum TMDS clock: 340 MHz 370 - * Extended HDMI video details: 371 - * Checksum: 0xd0 Unused space in Extension Block: 100 bytes 342 + * Checksum: 0xe0 Unused space in Extension Block: 106 bytes 343 + * 344 + * ---------------- 345 + * 346 + * edid-decode 1.30.0-5367 347 + * edid-decode SHA: 
41ebf7135691 2025-05-01 10:19:22 348 + * 349 + * EDID conformity: PASS 372 350 */ 373 351 static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = { 374 352 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00, ··· 385 351 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e, 386 352 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44, 387 353 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32, 388 - 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 354 + 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 389 355 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 390 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81, 391 - 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c, 392 - 0x00, 0x12, 0x34, 0x00, 0x44, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 356 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81, 357 + 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c, 358 + 0x00, 0x12, 0x34, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 393 359 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 394 360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 395 361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ··· 398 364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 399 365 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 400 366 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 401 - 0x00, 0x00, 0x00, 0xd0 367 + 0x00, 0x00, 0x00, 0xe0 402 368 }; 403 369 404 370 /* ··· 413 379 * 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10 414 380 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 7a 415 381 * 416 - * 02 03 1b b1 e3 05 00 20 41 10 e2 00 ca 6d 03 0c 417 - * 00 12 34 78 28 20 00 00 00 00 00 00 00 00 00 
00 382 + * 02 03 15 b1 e3 05 00 20 41 10 e2 00 ca 67 03 0c 383 + * 00 12 34 78 28 00 00 00 00 00 00 00 00 00 00 00 418 384 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 419 385 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 420 386 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 421 387 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 422 388 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 423 - * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 a8 389 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d4 424 390 * 425 391 * ---------------- 426 392 * ··· 481 447 * DC_30bit 482 448 * DC_Y444 483 449 * Maximum TMDS clock: 200 MHz 484 - * Extended HDMI video details: 485 - * Checksum: 0xa8 Unused space in Extension Block: 100 bytes 450 + * Checksum: 0xd4 Unused space in Extension Block: 106 bytes 451 + * 452 + * ---------------- 453 + * 454 + * edid-decode 1.30.0-5367 455 + * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22 456 + * 457 + * EDID conformity: PASS 486 458 */ 487 459 static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = { 488 460 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00, ··· 501 461 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32, 502 462 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 503 463 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 504 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7a, 0x02, 0x03, 0x1b, 0xb1, 505 - 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x6d, 0x03, 0x0c, 506 - 0x00, 0x12, 0x34, 0x78, 0x28, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 464 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7a, 0x02, 0x03, 0x15, 0xb1, 465 + 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x67, 0x03, 0x0c, 466 + 0x00, 0x12, 0x34, 0x78, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 507 467 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 508 468 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 509 469 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ··· 512 472 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 513 473 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 514 474 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 515 - 0x00, 0x00, 0x00, 0xa8 475 + 0x00, 0x00, 0x00, 0xd4 516 476 }; 517 477 518 478 /* ··· 527 487 * 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10 528 488 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 8a 529 489 * 530 - * 02 03 1b b1 e3 05 00 20 41 10 e2 00 ca 6d 03 0c 531 - * 00 12 34 78 44 20 00 00 00 00 00 00 00 00 00 00 490 + * 02 03 15 b1 e3 05 00 20 41 10 e2 00 ca 67 03 0c 491 + * 00 12 34 78 44 00 00 00 00 00 00 00 00 00 00 00 532 492 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 533 493 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 534 494 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 535 495 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 536 496 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 537 - * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 8c 497 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 b8 538 498 * 539 499 * ---------------- 540 500 * ··· 595 555 * DC_30bit 596 556 * DC_Y444 597 557 * Maximum TMDS clock: 340 MHz 598 - * Extended HDMI video details: 599 - * Checksum: 0x8c Unused space in Extension Block: 100 bytes 558 + * Checksum: 0xb8 Unused space in Extension Block: 106 bytes 559 + * 560 + * ---------------- 561 + * 562 + * edid-decode 1.30.0-5367 563 + * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22 564 + * 565 + * EDID conformity: PASS 600 566 */ 601 567 static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz[] = { 602 568 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00, ··· 615 569 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32, 616 570 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 
0x20, 617 571 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 618 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8a, 0x02, 0x03, 0x1b, 0xb1, 619 - 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x6d, 0x03, 0x0c, 620 - 0x00, 0x12, 0x34, 0x78, 0x44, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 572 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8a, 0x02, 0x03, 0x15, 0xb1, 573 + 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x67, 0x03, 0x0c, 574 + 0x00, 0x12, 0x34, 0x78, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 621 575 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 622 576 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 623 577 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ··· 626 580 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 627 581 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 628 582 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 629 - 0x00, 0x00, 0x00, 0x8c 583 + 0x00, 0x00, 0x00, 0xb8 630 584 }; 631 585 632 586 /*
+5 -55
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
··· 542 542 bo->ttm = old_tt; 543 543 } 544 544 545 - err = ttm_resource_alloc(bo, place, &bo->resource, NULL); 546 - KUNIT_EXPECT_EQ(test, err, 0); 547 - KUNIT_ASSERT_EQ(test, man->usage, size); 548 - 549 545 placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL); 550 546 KUNIT_ASSERT_NOT_NULL(test, placement); 551 547 552 548 ttm_bo_reserve(bo, false, false, NULL); 549 + 550 + err = ttm_resource_alloc(bo, place, &bo->resource, NULL); 551 + KUNIT_EXPECT_EQ(test, err, 0); 552 + KUNIT_ASSERT_EQ(test, man->usage, size); 553 + 553 554 err = ttm_bo_validate(bo, placement, &ctx); 554 555 ttm_bo_unreserve(bo); 555 556 ··· 756 755 ttm_bo_put(bo); 757 756 ttm_mock_manager_fini(priv->ttm_dev, fst_mem); 758 757 ttm_mock_manager_fini(priv->ttm_dev, snd_mem); 759 - } 760 - 761 - static void ttm_bo_validate_swapout(struct kunit *test) 762 - { 763 - unsigned long size_big, size = ALIGN(BO_SIZE, PAGE_SIZE); 764 - enum ttm_bo_type bo_type = ttm_bo_type_device; 765 - struct ttm_buffer_object *bo_small, *bo_big; 766 - struct ttm_test_devices *priv = test->priv; 767 - struct ttm_operation_ctx ctx = { }; 768 - struct ttm_placement *placement; 769 - u32 mem_type = TTM_PL_TT; 770 - struct ttm_place *place; 771 - struct sysinfo si; 772 - int err; 773 - 774 - si_meminfo(&si); 775 - size_big = ALIGN(((u64)si.totalram * si.mem_unit / 2), PAGE_SIZE); 776 - 777 - ttm_mock_manager_init(priv->ttm_dev, mem_type, size_big + size); 778 - 779 - place = ttm_place_kunit_init(test, mem_type, 0); 780 - placement = ttm_placement_kunit_init(test, place, 1); 781 - 782 - bo_small = kunit_kzalloc(test, sizeof(*bo_small), GFP_KERNEL); 783 - KUNIT_ASSERT_NOT_NULL(test, bo_small); 784 - 785 - drm_gem_private_object_init(priv->drm, &bo_small->base, size); 786 - 787 - err = ttm_bo_init_reserved(priv->ttm_dev, bo_small, bo_type, placement, 788 - PAGE_SIZE, &ctx, NULL, NULL, 789 - &dummy_ttm_bo_destroy); 790 - KUNIT_EXPECT_EQ(test, err, 0); 791 - dma_resv_unlock(bo_small->base.resv); 792 - 793 - bo_big = 
ttm_bo_kunit_init(test, priv, size_big, NULL); 794 - 795 - dma_resv_lock(bo_big->base.resv, NULL); 796 - err = ttm_bo_validate(bo_big, placement, &ctx); 797 - dma_resv_unlock(bo_big->base.resv); 798 - 799 - KUNIT_EXPECT_EQ(test, err, 0); 800 - KUNIT_EXPECT_NOT_NULL(test, bo_big->resource); 801 - KUNIT_EXPECT_EQ(test, bo_big->resource->mem_type, mem_type); 802 - KUNIT_EXPECT_EQ(test, bo_small->resource->mem_type, TTM_PL_SYSTEM); 803 - KUNIT_EXPECT_TRUE(test, bo_small->ttm->page_flags & TTM_TT_FLAG_SWAPPED); 804 - 805 - ttm_bo_put(bo_big); 806 - ttm_bo_put(bo_small); 807 - 808 - ttm_mock_manager_fini(priv->ttm_dev, mem_type); 809 758 } 810 759 811 760 static void ttm_bo_validate_happy_evict(struct kunit *test) ··· 1152 1201 KUNIT_CASE(ttm_bo_validate_move_fence_signaled), 1153 1202 KUNIT_CASE_PARAM(ttm_bo_validate_move_fence_not_signaled, 1154 1203 ttm_bo_validate_wait_gen_params), 1155 - KUNIT_CASE(ttm_bo_validate_swapout), 1156 1204 KUNIT_CASE(ttm_bo_validate_happy_evict), 1157 1205 KUNIT_CASE(ttm_bo_validate_all_pinned_evict), 1158 1206 KUNIT_CASE(ttm_bo_validate_allowed_only_evict),
+22
drivers/gpu/drm/ttm/ttm_device.c
··· 125 125 return ret; 126 126 } 127 127 128 + /** 129 + * ttm_device_prepare_hibernation - move GTT BOs to shmem for hibernation. 130 + * 131 + * @bdev: A pointer to a struct ttm_device to prepare hibernation for. 132 + * 133 + * Return: 0 on success, negative number on failure. 134 + */ 135 + int ttm_device_prepare_hibernation(struct ttm_device *bdev) 136 + { 137 + struct ttm_operation_ctx ctx = { 138 + .interruptible = false, 139 + .no_wait_gpu = false, 140 + }; 141 + int ret; 142 + 143 + do { 144 + ret = ttm_device_swapout(bdev, &ctx, GFP_KERNEL); 145 + } while (ret > 0); 146 + return ret; 147 + } 148 + EXPORT_SYMBOL(ttm_device_prepare_hibernation); 149 + 128 150 /* 129 151 * A buffer object shrink method that tries to swap out the first 130 152 * buffer object on the global::swap_lru list.
+3 -15
drivers/gpu/drm/v3d/v3d_sched.c
··· 741 741 742 742 mutex_unlock(&v3d->reset_lock); 743 743 744 - return DRM_GPU_SCHED_STAT_NOMINAL; 745 - } 746 - 747 - static void 748 - v3d_sched_skip_reset(struct drm_sched_job *sched_job) 749 - { 750 - struct drm_gpu_scheduler *sched = sched_job->sched; 751 - 752 - spin_lock(&sched->job_list_lock); 753 - list_add(&sched_job->list, &sched->pending_list); 754 - spin_unlock(&sched->job_list_lock); 744 + return DRM_GPU_SCHED_STAT_RESET; 755 745 } 756 746 757 747 static enum drm_gpu_sched_stat ··· 762 772 *timedout_ctca = ctca; 763 773 *timedout_ctra = ctra; 764 774 765 - v3d_sched_skip_reset(sched_job); 766 - return DRM_GPU_SCHED_STAT_NOMINAL; 775 + return DRM_GPU_SCHED_STAT_NO_HANG; 767 776 } 768 777 769 778 return v3d_gpu_reset_for_timeout(v3d, sched_job); ··· 807 818 if (job->timedout_batches != batches) { 808 819 job->timedout_batches = batches; 809 820 810 - v3d_sched_skip_reset(sched_job); 811 - return DRM_GPU_SCHED_STAT_NOMINAL; 821 + return DRM_GPU_SCHED_STAT_NO_HANG; 812 822 } 813 823 814 824 return v3d_gpu_reset_for_timeout(v3d, sched_job);
+2 -1
drivers/gpu/drm/vc4/vc4_kms.c
··· 530 530 531 531 static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, 532 532 struct drm_file *file_priv, 533 + const struct drm_format_info *info, 533 534 const struct drm_mode_fb_cmd2 *mode_cmd) 534 535 { 535 536 struct vc4_dev *vc4 = to_vc4_dev(dev); ··· 569 568 mode_cmd = &mode_cmd_local; 570 569 } 571 570 572 - return drm_gem_fb_create(dev, file_priv, mode_cmd); 571 + return drm_gem_fb_create(dev, file_priv, info, mode_cmd); 573 572 } 574 573 575 574 /* Our CTM has some peculiar limitations: we can only enable it for one CRTC
+4 -2
drivers/gpu/drm/virtio/virtgpu_display.c
··· 66 66 static int 67 67 virtio_gpu_framebuffer_init(struct drm_device *dev, 68 68 struct virtio_gpu_framebuffer *vgfb, 69 + const struct drm_format_info *info, 69 70 const struct drm_mode_fb_cmd2 *mode_cmd, 70 71 struct drm_gem_object *obj) 71 72 { ··· 74 73 75 74 vgfb->base.obj[0] = obj; 76 75 77 - drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd); 76 + drm_helper_mode_fill_fb_struct(dev, &vgfb->base, info, mode_cmd); 78 77 79 78 ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs); 80 79 if (ret) { ··· 294 293 static struct drm_framebuffer * 295 294 virtio_gpu_user_framebuffer_create(struct drm_device *dev, 296 295 struct drm_file *file_priv, 296 + const struct drm_format_info *info, 297 297 const struct drm_mode_fb_cmd2 *mode_cmd) 298 298 { 299 299 struct drm_gem_object *obj = NULL; ··· 316 314 return ERR_PTR(-ENOMEM); 317 315 } 318 316 319 - ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj); 317 + ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, info, mode_cmd, obj); 320 318 if (ret) { 321 319 kfree(virtio_gpu_fb); 322 320 drm_gem_object_put(obj);
+10 -5
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 500 500 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, 501 501 struct vmw_user_object *uo, 502 502 struct vmw_framebuffer **out, 503 + const struct drm_format_info *info, 503 504 const struct drm_mode_fb_cmd2 504 505 *mode_cmd) 505 506 ··· 549 548 goto out_err1; 550 549 } 551 550 552 - drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd); 551 + drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, info, mode_cmd); 553 552 memcpy(&vfbs->uo, uo, sizeof(vfbs->uo)); 554 553 vmw_user_object_ref(&vfbs->uo); 555 554 ··· 603 602 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, 604 603 struct vmw_bo *bo, 605 604 struct vmw_framebuffer **out, 605 + const struct drm_format_info *info, 606 606 const struct drm_mode_fb_cmd2 607 607 *mode_cmd) 608 608 ··· 636 634 } 637 635 638 636 vfbd->base.base.obj[0] = &bo->tbo.base; 639 - drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); 637 + drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, info, mode_cmd); 640 638 vfbd->base.bo = true; 641 639 vfbd->buffer = vmw_bo_reference(bo); 642 640 *out = &vfbd->base; ··· 681 679 * @dev_priv: Pointer to device private struct. 682 680 * @uo: Pointer to user object to wrap the kms framebuffer around. 683 681 * Either the buffer or surface inside the user object must be NULL. 682 + * @info: pixel format information. 684 683 * @mode_cmd: Frame-buffer metadata. 
685 684 */ 686 685 struct vmw_framebuffer * 687 686 vmw_kms_new_framebuffer(struct vmw_private *dev_priv, 688 687 struct vmw_user_object *uo, 688 + const struct drm_format_info *info, 689 689 const struct drm_mode_fb_cmd2 *mode_cmd) 690 690 { 691 691 struct vmw_framebuffer *vfb = NULL; ··· 696 692 /* Create the new framebuffer depending one what we have */ 697 693 if (vmw_user_object_surface(uo)) { 698 694 ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb, 699 - mode_cmd); 695 + info, mode_cmd); 700 696 } else if (uo->buffer) { 701 697 ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb, 702 - mode_cmd); 698 + info, mode_cmd); 703 699 } else { 704 700 BUG(); 705 701 } ··· 716 712 717 713 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, 718 714 struct drm_file *file_priv, 715 + const struct drm_format_info *info, 719 716 const struct drm_mode_fb_cmd2 *mode_cmd) 720 717 { 721 718 struct vmw_private *dev_priv = vmw_priv(dev); ··· 746 741 } 747 742 748 743 749 - vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd); 744 + vfb = vmw_kms_new_framebuffer(dev_priv, &uo, info, mode_cmd); 750 745 if (IS_ERR(vfb)) { 751 746 ret = PTR_ERR(vfb); 752 747 goto err_out;
+1
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
··· 399 399 struct vmw_framebuffer * 400 400 vmw_kms_new_framebuffer(struct vmw_private *dev_priv, 401 401 struct vmw_user_object *uo, 402 + const struct drm_format_info *info, 402 403 const struct drm_mode_fb_cmd2 *mode_cmd); 403 404 void vmw_guess_mode_timing(struct drm_display_mode *mode); 404 405 void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
+5 -1
drivers/gpu/drm/xe/display/intel_fbdev_fb.c
··· 66 66 goto err; 67 67 } 68 68 69 - fb = intel_framebuffer_create(&obj->ttm.base, &mode_cmd); 69 + fb = intel_framebuffer_create(&obj->ttm.base, 70 + drm_get_format_info(dev, 71 + mode_cmd.pixel_format, 72 + mode_cmd.modifier[0]), 73 + &mode_cmd); 70 74 if (IS_ERR(fb)) { 71 75 xe_bo_unpin_map_no_vm(obj); 72 76 goto err;
+1 -1
drivers/gpu/drm/xe/display/xe_plane_initial.c
··· 184 184 return false; 185 185 186 186 if (intel_framebuffer_init(to_intel_framebuffer(fb), 187 - &bo->ttm.base, &mode_cmd)) { 187 + &bo->ttm.base, fb->format, &mode_cmd)) { 188 188 drm_dbg_kms(&xe->drm, "intel fb init failed\n"); 189 189 goto err_bo; 190 190 }
+4 -10
drivers/gpu/drm/xe/xe_guc_submit.c
··· 1093 1093 * list so job can be freed and kick scheduler ensuring free job is not 1094 1094 * lost. 1095 1095 */ 1096 - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) { 1097 - xe_sched_add_pending_job(sched, job); 1098 - xe_sched_submission_start(sched); 1099 - 1100 - return DRM_GPU_SCHED_STAT_NOMINAL; 1101 - } 1096 + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) 1097 + return DRM_GPU_SCHED_STAT_NO_HANG; 1102 1098 1103 1099 /* Kill the run_job entry point */ 1104 1100 xe_sched_submission_stop(sched); ··· 1263 1267 /* Start fence signaling */ 1264 1268 xe_hw_fence_irq_start(q->fence_irq); 1265 1269 1266 - return DRM_GPU_SCHED_STAT_NOMINAL; 1270 + return DRM_GPU_SCHED_STAT_RESET; 1267 1271 1268 1272 sched_enable: 1269 1273 enable_scheduling(q); ··· 1273 1277 * but there is not currently an easy way to do in DRM scheduler. With 1274 1278 * some thought, do this in a follow up. 1275 1279 */ 1276 - xe_sched_add_pending_job(sched, job); 1277 1280 xe_sched_submission_start(sched); 1278 - 1279 - return DRM_GPU_SCHED_STAT_NOMINAL; 1281 + return DRM_GPU_SCHED_STAT_NO_HANG; 1280 1282 } 1281 1283 1282 1284 static void __guc_exec_queue_fini_async(struct work_struct *w)
+2 -1
drivers/gpu/drm/xen/xen_drm_front_kms.c
··· 54 54 55 55 static struct drm_framebuffer * 56 56 fb_create(struct drm_device *dev, struct drm_file *filp, 57 + const struct drm_format_info *info, 57 58 const struct drm_mode_fb_cmd2 *mode_cmd) 58 59 { 59 60 struct xen_drm_front_drm_info *drm_info = dev->dev_private; ··· 62 61 struct drm_gem_object *gem_obj; 63 62 int ret; 64 63 65 - fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs); 64 + fb = drm_gem_fb_create_with_funcs(dev, filp, info, mode_cmd, &fb_funcs); 66 65 if (IS_ERR(fb)) 67 66 return fb; 68 67
+2 -1
drivers/gpu/drm/xlnx/zynqmp_dp.c
··· 1720 1720 return connector_status_disconnected; 1721 1721 } 1722 1722 1723 - static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge) 1723 + static enum drm_connector_status 1724 + zynqmp_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) 1724 1725 { 1725 1726 struct zynqmp_dp *dp = bridge_to_dp(bridge); 1726 1727
+2 -1
drivers/gpu/drm/xlnx/zynqmp_kms.c
··· 373 373 374 374 static struct drm_framebuffer * 375 375 zynqmp_dpsub_fb_create(struct drm_device *drm, struct drm_file *file_priv, 376 + const struct drm_format_info *info, 376 377 const struct drm_mode_fb_cmd2 *mode_cmd) 377 378 { 378 379 struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm); ··· 384 383 for (i = 0; i < ARRAY_SIZE(cmd.pitches); ++i) 385 384 cmd.pitches[i] = ALIGN(cmd.pitches[i], dpsub->dma_align); 386 385 387 - return drm_gem_fb_create(drm, file_priv, &cmd); 386 + return drm_gem_fb_create(drm, file_priv, info, &cmd); 388 387 } 389 388 390 389 static const struct drm_mode_config_funcs zynqmp_dpsub_mode_config_funcs = {
+22 -20
include/drm/drm_bridge.h
··· 660 660 * 661 661 * drm_connector_status indicating the bridge output status. 662 662 */ 663 - enum drm_connector_status (*detect)(struct drm_bridge *bridge); 663 + enum drm_connector_status (*detect)(struct drm_bridge *bridge, 664 + struct drm_connector *connector); 664 665 665 666 /** 666 667 * @get_modes: ··· 818 817 * Returns: 819 818 * 0 on success, a negative error code otherwise 820 819 */ 821 - int (*hdmi_audio_startup)(struct drm_connector *connector, 822 - struct drm_bridge *bridge); 820 + int (*hdmi_audio_startup)(struct drm_bridge *bridge, 821 + struct drm_connector *connector); 823 822 824 823 /** 825 824 * @hdmi_audio_prepare: ··· 832 831 * Returns: 833 832 * 0 on success, a negative error code otherwise 834 833 */ 835 - int (*hdmi_audio_prepare)(struct drm_connector *connector, 836 - struct drm_bridge *bridge, 834 + int (*hdmi_audio_prepare)(struct drm_bridge *bridge, 835 + struct drm_connector *connector, 837 836 struct hdmi_codec_daifmt *fmt, 838 837 struct hdmi_codec_params *hparms); 839 838 ··· 848 847 * Returns: 849 848 * 0 on success, a negative error code otherwise 850 849 */ 851 - void (*hdmi_audio_shutdown)(struct drm_connector *connector, 852 - struct drm_bridge *bridge); 850 + void (*hdmi_audio_shutdown)(struct drm_bridge *bridge, 851 + struct drm_connector *connector); 853 852 854 853 /** 855 854 * @hdmi_audio_mute_stream: ··· 862 861 * Returns: 863 862 * 0 on success, a negative error code otherwise 864 863 */ 865 - int (*hdmi_audio_mute_stream)(struct drm_connector *connector, 866 - struct drm_bridge *bridge, 864 + int (*hdmi_audio_mute_stream)(struct drm_bridge *bridge, 865 + struct drm_connector *connector, 867 866 bool enable, int direction); 868 867 869 - int (*hdmi_cec_init)(struct drm_connector *connector, 870 - struct drm_bridge *bridge); 868 + int (*hdmi_cec_init)(struct drm_bridge *bridge, 869 + struct drm_connector *connector); 871 870 872 871 int (*hdmi_cec_enable)(struct drm_bridge *bridge, bool enable); 873 872 ··· 887 
886 * Returns: 888 887 * 0 on success, a negative error code otherwise 889 888 */ 890 - int (*dp_audio_startup)(struct drm_connector *connector, 891 - struct drm_bridge *bridge); 889 + int (*dp_audio_startup)(struct drm_bridge *bridge, 890 + struct drm_connector *connector); 892 891 893 892 /** 894 893 * @dp_audio_prepare: ··· 901 900 * Returns: 902 901 * 0 on success, a negative error code otherwise 903 902 */ 904 - int (*dp_audio_prepare)(struct drm_connector *connector, 905 - struct drm_bridge *bridge, 903 + int (*dp_audio_prepare)(struct drm_bridge *bridge, 904 + struct drm_connector *connector, 906 905 struct hdmi_codec_daifmt *fmt, 907 906 struct hdmi_codec_params *hparms); 908 907 ··· 917 916 * Returns: 918 917 * 0 on success, a negative error code otherwise 919 918 */ 920 - void (*dp_audio_shutdown)(struct drm_connector *connector, 921 - struct drm_bridge *bridge); 919 + void (*dp_audio_shutdown)(struct drm_bridge *bridge, 920 + struct drm_connector *connector); 922 921 923 922 /** 924 923 * @dp_audio_mute_stream: ··· 931 930 * Returns: 932 931 * 0 on success, a negative error code otherwise 933 932 */ 934 - int (*dp_audio_mute_stream)(struct drm_connector *connector, 935 - struct drm_bridge *bridge, 933 + int (*dp_audio_mute_stream)(struct drm_bridge *bridge, 934 + struct drm_connector *connector, 936 935 bool enable, int direction); 937 936 938 937 /** ··· 1383 1382 u32 output_fmt, 1384 1383 unsigned int *num_input_fmts); 1385 1384 1386 - enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge); 1385 + enum drm_connector_status 1386 + drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector); 1387 1387 int drm_bridge_get_modes(struct drm_bridge *bridge, 1388 1388 struct drm_connector *connector); 1389 1389 const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
+2
include/drm/drm_device.h
··· 35 35 * struct drm_wedge_task_info - information about the guilty task of a wedge dev 36 36 */ 37 37 struct drm_wedge_task_info { 38 + /** @pid: pid of the task */ 38 39 pid_t pid; 40 + /** @comm: command name of the task */ 39 41 char comm[TASK_COMM_LEN]; 40 42 }; 41 43
+1 -2
include/drm/drm_fourcc.h
··· 54 54 #endif 55 55 56 56 struct drm_device; 57 - struct drm_mode_fb_cmd2; 58 57 59 58 /** 60 59 * struct drm_format_info - information about a DRM format ··· 308 309 const struct drm_format_info *drm_format_info(u32 format); 309 310 const struct drm_format_info * 310 311 drm_get_format_info(struct drm_device *dev, 311 - const struct drm_mode_fb_cmd2 *mode_cmd); 312 + u32 pixel_format, u64 modifier); 312 313 uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); 313 314 uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, 314 315 uint32_t bpp, uint32_t depth);
+6
include/drm/drm_gem_framebuffer_helper.h
··· 8 8 struct drm_device; 9 9 struct drm_fb_helper_surface_size; 10 10 struct drm_file; 11 + struct drm_format_info; 11 12 struct drm_framebuffer; 12 13 struct drm_framebuffer_funcs; 13 14 struct drm_gem_object; ··· 25 24 int drm_gem_fb_init_with_funcs(struct drm_device *dev, 26 25 struct drm_framebuffer *fb, 27 26 struct drm_file *file, 27 + const struct drm_format_info *info, 28 28 const struct drm_mode_fb_cmd2 *mode_cmd, 29 29 const struct drm_framebuffer_funcs *funcs); 30 30 struct drm_framebuffer * 31 31 drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, 32 + const struct drm_format_info *info, 32 33 const struct drm_mode_fb_cmd2 *mode_cmd, 33 34 const struct drm_framebuffer_funcs *funcs); 34 35 struct drm_framebuffer * 35 36 drm_gem_fb_create(struct drm_device *dev, struct drm_file *file, 37 + const struct drm_format_info *info, 36 38 const struct drm_mode_fb_cmd2 *mode_cmd); 37 39 struct drm_framebuffer * 38 40 drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, 41 + const struct drm_format_info *info, 39 42 const struct drm_mode_fb_cmd2 *mode_cmd); 40 43 41 44 int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map, ··· 52 47 (((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0)) 53 48 54 49 int drm_gem_fb_afbc_init(struct drm_device *dev, 50 + const struct drm_format_info *info, 55 51 const struct drm_mode_fb_cmd2 *mode_cmd, 56 52 struct drm_afbc_framebuffer *afbc_fb); 57 53
+2 -1
include/drm/drm_mode_config.h
··· 82 82 */ 83 83 struct drm_framebuffer *(*fb_create)(struct drm_device *dev, 84 84 struct drm_file *file_priv, 85 + const struct drm_format_info *info, 85 86 const struct drm_mode_fb_cmd2 *mode_cmd); 86 87 87 88 /** ··· 96 95 * The format information specific to the given fb metadata, or 97 96 * NULL if none is found. 98 97 */ 99 - const struct drm_format_info *(*get_format_info)(const struct drm_mode_fb_cmd2 *mode_cmd); 98 + const struct drm_format_info *(*get_format_info)(u32 pixel_format, u64 modifier); 100 99 101 100 /** 102 101 * @mode_valid:
+2
include/drm/drm_modeset_helper.h
··· 26 26 struct drm_crtc; 27 27 struct drm_crtc_funcs; 28 28 struct drm_device; 29 + struct drm_format_info; 29 30 struct drm_framebuffer; 30 31 struct drm_mode_fb_cmd2; 31 32 ··· 34 33 35 34 void drm_helper_mode_fill_fb_struct(struct drm_device *dev, 36 35 struct drm_framebuffer *fb, 36 + const struct drm_format_info *info, 37 37 const struct drm_mode_fb_cmd2 *mode_cmd); 38 38 39 39 int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+23 -2
include/drm/gpu_scheduler.h
··· 391 391 * enum drm_gpu_sched_stat - the scheduler's status 392 392 * 393 393 * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use. 394 - * @DRM_GPU_SCHED_STAT_NOMINAL: Operation succeeded. 394 + * @DRM_GPU_SCHED_STAT_RESET: The GPU hung and successfully reset. 395 395 * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore. 396 + * @DRM_GPU_SCHED_STAT_NO_HANG: Contrary to scheduler's assumption, the GPU 397 + * did not hang and is still running. 396 398 */ 397 399 enum drm_gpu_sched_stat { 398 400 DRM_GPU_SCHED_STAT_NONE, 399 - DRM_GPU_SCHED_STAT_NOMINAL, 401 + DRM_GPU_SCHED_STAT_RESET, 400 402 DRM_GPU_SCHED_STAT_ENODEV, 403 + DRM_GPU_SCHED_STAT_NO_HANG, 401 404 }; 402 405 403 406 /** ··· 515 512 * and it's time to clean it up. 516 513 */ 517 514 void (*free_job)(struct drm_sched_job *sched_job); 515 + 516 + /** 517 + * @cancel_job: Used by the scheduler to guarantee remaining jobs' fences 518 + * get signaled in drm_sched_fini(). 519 + * 520 + * Used by the scheduler to cancel all jobs that have not been executed 521 + * with &struct drm_sched_backend_ops.run_job by the time 522 + * drm_sched_fini() gets invoked. 523 + * 524 + * Drivers need to signal the passed job's hardware fence with an 525 + * appropriate error code (e.g., -ECANCELED) in this callback. They 526 + * must not free the job. 527 + * 528 + * The scheduler will only call this callback once it stopped calling 529 + * all other callbacks forever, with the exception of &struct 530 + * drm_sched_backend_ops.free_job. 531 + */ 532 + void (*cancel_job)(struct drm_sched_job *sched_job); 518 533 }; 519 534 520 535 /**
+1
include/drm/ttm/ttm_device.h
··· 272 272 int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags); 273 273 int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, 274 274 gfp_t gfp_flags); 275 + int ttm_device_prepare_hibernation(struct ttm_device *bdev); 275 276 276 277 static inline struct ttm_resource_manager * 277 278 ttm_manager_type(struct ttm_device *bdev, int mem_type)
+2
include/linux/suspend.h
··· 476 476 extern void unlock_system_sleep(unsigned int); 477 477 478 478 extern bool pm_sleep_transition_in_progress(void); 479 + bool pm_hibernate_is_recovering(void); 479 480 480 481 #else /* !CONFIG_PM_SLEEP */ 481 482 ··· 507 506 static inline void unlock_system_sleep(unsigned int flags) {} 508 507 509 508 static inline bool pm_sleep_transition_in_progress(void) { return false; } 509 + static inline bool pm_hibernate_is_recovering(void) { return false; } 510 510 511 511 #endif /* !CONFIG_PM_SLEEP */ 512 512
+26
kernel/power/hibernate.c
··· 381 381 return error; 382 382 } 383 383 384 + static void shrink_shmem_memory(void) 385 + { 386 + struct sysinfo info; 387 + unsigned long nr_shmem_pages, nr_freed_pages; 388 + 389 + si_meminfo(&info); 390 + nr_shmem_pages = info.sharedram; /* current page count used for shmem */ 391 + /* 392 + * The intent is to reclaim all shmem pages. Though shrink_all_memory() can 393 + * only reclaim about half of them, it's enough for creating the hibernation 394 + * image. 395 + */ 396 + nr_freed_pages = shrink_all_memory(nr_shmem_pages); 397 + pr_debug("requested to reclaim %lu shmem pages, actually freed %lu pages\n", 398 + nr_shmem_pages, nr_freed_pages); 399 + } 400 + 384 401 /** 385 402 * hibernation_snapshot - Quiesce devices and create a hibernation image. 386 403 * @platform_mode: If set, use platform driver to prepare for the transition. ··· 438 421 dpm_complete(PMSG_RECOVER); 439 422 goto Thaw; 440 423 } 424 + 425 + /* 426 + * Device drivers may move lots of data to shmem in dpm_prepare(). The shmem 427 + * pages will use lots of system memory, causing hibernation image creation 428 + * fail due to insufficient free memory. 429 + * This call is to force flush the shmem pages to swap disk and reclaim 430 + * the system memory so that image creation can succeed. 431 + */ 432 + shrink_shmem_memory(); 441 433 442 434 console_suspend_all(); 443 435 pm_restrict_gfp_mask();
+1 -1
rust/kernel/drm/device.rs
··· 154 154 /// Additionally, callers must ensure that the `struct device`, `ptr` is pointing to, is 155 155 /// embedded in `Self`. 156 156 #[doc(hidden)] 157 - pub unsafe fn as_ref<'a>(ptr: *const bindings::drm_device) -> &'a Self { 157 + pub unsafe fn from_raw<'a>(ptr: *const bindings::drm_device) -> &'a Self { 158 158 // SAFETY: By the safety requirements of this function `ptr` is a valid pointer to a 159 159 // `struct drm_device` embedded in `Self`. 160 160 let ptr = unsafe { Self::from_drm_device(ptr) };
+4 -4
rust/kernel/drm/file.rs
··· 32 32 /// # Safety 33 33 /// 34 34 /// `raw_file` must be a valid pointer to an open `struct drm_file`, opened through `T::open`. 35 - pub unsafe fn as_ref<'a>(ptr: *mut bindings::drm_file) -> &'a File<T> { 35 + pub unsafe fn from_raw<'a>(ptr: *mut bindings::drm_file) -> &'a File<T> { 36 36 // SAFETY: `raw_file` is valid by the safety requirements of this function. 37 37 unsafe { &*ptr.cast() } 38 38 } ··· 61 61 // SAFETY: A callback from `struct drm_driver::open` guarantees that 62 62 // - `raw_dev` is valid pointer to a `struct drm_device`, 63 63 // - the corresponding `struct drm_device` has been registered. 64 - let drm = unsafe { drm::Device::as_ref(raw_dev) }; 64 + let drm = unsafe { drm::Device::from_raw(raw_dev) }; 65 65 66 66 // SAFETY: `raw_file` is a valid pointer to a `struct drm_file`. 67 - let file = unsafe { File::<T>::as_ref(raw_file) }; 67 + let file = unsafe { File::<T>::from_raw(raw_file) }; 68 68 69 69 let inner = match T::open(drm) { 70 70 Err(e) => { ··· 89 89 raw_file: *mut bindings::drm_file, 90 90 ) { 91 91 // SAFETY: This reference won't escape this function 92 - let file = unsafe { File::<T>::as_ref(raw_file) }; 92 + let file = unsafe { File::<T>::from_raw(raw_file) }; 93 93 94 94 // SAFETY: `file.driver_priv` has been created in `open_callback` through `KBox::into_raw`. 95 95 let _ = unsafe { KBox::from_raw(file.driver_priv()) };
+8 -8
rust/kernel/drm/gem/mod.rs
··· 51 51 /// - `self_ptr` must be a valid pointer to `Self`. 52 52 /// - The caller promises that holding the immutable reference returned by this function does 53 53 /// not violate rust's data aliasing rules and remains valid throughout the lifetime of `'a`. 54 - unsafe fn as_ref<'a>(self_ptr: *mut bindings::drm_gem_object) -> &'a Self; 54 + unsafe fn from_raw<'a>(self_ptr: *mut bindings::drm_gem_object) -> &'a Self; 55 55 } 56 56 57 57 // SAFETY: All gem objects are refcounted. ··· 86 86 ) -> core::ffi::c_int { 87 87 // SAFETY: `open_callback` is only ever called with a valid pointer to a `struct drm_file`. 88 88 let file = unsafe { 89 - drm::File::<<<U as IntoGEMObject>::Driver as drm::Driver>::File>::as_ref(raw_file) 89 + drm::File::<<<U as IntoGEMObject>::Driver as drm::Driver>::File>::from_raw(raw_file) 90 90 }; 91 91 // SAFETY: `open_callback` is specified in the AllocOps structure for `Object<T>`, ensuring that 92 92 // `raw_obj` is indeed contained within a `Object<T>`. 93 93 let obj = unsafe { 94 - <<<U as IntoGEMObject>::Driver as drm::Driver>::Object as IntoGEMObject>::as_ref(raw_obj) 94 + <<<U as IntoGEMObject>::Driver as drm::Driver>::Object as IntoGEMObject>::from_raw(raw_obj) 95 95 }; 96 96 97 97 match T::open(obj, file) { ··· 106 106 ) { 107 107 // SAFETY: `open_callback` is only ever called with a valid pointer to a `struct drm_file`. 108 108 let file = unsafe { 109 - drm::File::<<<U as IntoGEMObject>::Driver as drm::Driver>::File>::as_ref(raw_file) 109 + drm::File::<<<U as IntoGEMObject>::Driver as drm::Driver>::File>::from_raw(raw_file) 110 110 }; 111 111 // SAFETY: `close_callback` is specified in the AllocOps structure for `Object<T>`, ensuring 112 112 // that `raw_obj` is indeed contained within a `Object<T>`. 
113 113 let obj = unsafe { 114 - <<<U as IntoGEMObject>::Driver as drm::Driver>::Object as IntoGEMObject>::as_ref(raw_obj) 114 + <<<U as IntoGEMObject>::Driver as drm::Driver>::Object as IntoGEMObject>::from_raw(raw_obj) 115 115 }; 116 116 117 117 T::close(obj, file); ··· 124 124 self.obj.get() 125 125 } 126 126 127 - unsafe fn as_ref<'a>(self_ptr: *mut bindings::drm_gem_object) -> &'a Self { 127 + unsafe fn from_raw<'a>(self_ptr: *mut bindings::drm_gem_object) -> &'a Self { 128 128 let self_ptr: *mut Opaque<bindings::drm_gem_object> = self_ptr.cast(); 129 129 130 130 // SAFETY: `obj` is guaranteed to be in an `Object<T>` via the safety contract of this ··· 170 170 // - A `drm::Driver` can only have a single `File` implementation. 171 171 // - `file` uses the same `drm::Driver` as `Self`. 172 172 // - Therefore, we're guaranteed that `ptr` must be a gem object embedded within `Self`. 173 - // - And we check if the pointer is null before calling as_ref(), ensuring that `ptr` is a 173 + // - And we check if the pointer is null before calling from_raw(), ensuring that `ptr` is a 174 174 // valid pointer to an initialized `Self`. 175 - let obj = unsafe { Self::as_ref(ptr) }; 175 + let obj = unsafe { Self::from_raw(ptr) }; 176 176 177 177 // SAFETY: 178 178 // - We take ownership of the reference of `drm_gem_object_lookup()`.
+2 -2
rust/kernel/drm/ioctl.rs
··· 134 134 // FIXME: Currently there is nothing enforcing that the types of the 135 135 // dev/file match the current driver these ioctls are being declared 136 136 // for, and it's not clear how to enforce this within the type system. 137 - let dev = $crate::drm::device::Device::as_ref(raw_dev); 137 + let dev = $crate::drm::device::Device::from_raw(raw_dev); 138 138 // SAFETY: The ioctl argument has size `_IOC_SIZE(cmd)`, which we 139 139 // asserted above matches the size of this type, and all bit patterns of 140 140 // UAPI structs must be valid. ··· 142 142 &*(raw_data as *const $crate::types::Opaque<$crate::uapi::$struct>) 143 143 }; 144 144 // SAFETY: This is just the DRM file structure 145 - let file = unsafe { $crate::drm::File::as_ref(raw_file) }; 145 + let file = unsafe { $crate::drm::File::from_raw(raw_file) }; 146 146 147 147 match $func(dev, data, file) { 148 148 Err(e) => e.to_errno(),