Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: create amdgpu_vkms (v4)

Convert the VKMS driver into an API that dce_virtual can use to create
virtual displays that obey DRM's atomic modesetting API.

v2: Made local functions static.

v3: Switched vkms_output kzalloc for kcalloc.
Cleaned up the series by moving the display mode fixes into this patch.

v4: Update atomic_check and atomic_update to comply with new kms api.

Signed-off-by: Ryan Taylor <Ryan.Taylor@amd.com>
Reported-by: kernel test robot <lkp@intel.com>
Suggested-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Ryan Taylor and committed by
Alex Deucher
84ec374b d7b5dae0

+493 -11
+1
drivers/gpu/drm/amd/amdgpu/Makefile
··· 120 120 amdgpu-y += \ 121 121 dce_v10_0.o \ 122 122 dce_v11_0.o \ 123 + amdgpu_vkms.o \ 123 124 dce_virtual.o 124 125 125 126 # add GFX block
+1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 916 916 917 917 /* display */ 918 918 bool enable_virtual_display; 919 + struct amdgpu_vkms_output *amdgpu_vkms_output; 919 920 struct amdgpu_mode_info mode_info; 920 921 /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */ 921 922 struct work_struct hotplug_work;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 1231 1231 int ret, retry = 0; 1232 1232 bool supports_atomic = false; 1233 1233 1234 - if (!amdgpu_virtual_display && 1234 + if (amdgpu_virtual_display || 1235 1235 amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) 1236 1236 supports_atomic = true; 1237 1237
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
··· 344 344 } 345 345 346 346 /* disable all the possible outputs/crtcs before entering KMS mode */ 347 - if (!amdgpu_device_has_dc_support(adev)) 347 + if (!amdgpu_device_has_dc_support(adev) && !amdgpu_virtual_display) 348 348 drm_helper_disable_unused_functions(adev_to_drm(adev)); 349 349 350 350 drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
+446
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + 3 + #include <drm/drm_atomic_helper.h> 4 + #include <drm/drm_simple_kms_helper.h> 5 + #include <drm/drm_vblank.h> 6 + 7 + #include "amdgpu.h" 8 + #include "amdgpu_vkms.h" 9 + #include "amdgpu_display.h" 10 + 11 + /** 12 + * DOC: amdgpu_vkms 13 + * 14 + * The amdgpu vkms interface provides a virtual KMS interface for several use 15 + * cases: devices without display hardware, platforms where the actual display 16 + * hardware is not useful (e.g., servers), SR-IOV virtual functions, device 17 + * emulation/simulation, and device bring up prior to display hardware being 18 + * usable. We previously emulated a legacy KMS interface, but there was a desire 19 + * to move to the atomic KMS interface. The vkms driver did everything we 20 + * needed, but we wanted KMS support natively in the driver without buffer 21 + * sharing and the ability to support an instance of VKMS per device. We first 22 + * looked at splitting vkms into a stub driver and a helper module that other 23 + * drivers could use to implement a virtual display, but this strategy ended up 24 + * being messy due to driver specific callbacks needed for buffer management. 25 + * Ultimately, it proved easier to import the vkms code as it mostly used core 26 + * drm helpers anyway. 
27 + */ 28 + 29 + static const u32 amdgpu_vkms_formats[] = { 30 + DRM_FORMAT_XRGB8888, 31 + }; 32 + 33 + static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer) 34 + { 35 + struct amdgpu_vkms_output *output = container_of(timer, 36 + struct amdgpu_vkms_output, 37 + vblank_hrtimer); 38 + struct drm_crtc *crtc = &output->crtc; 39 + u64 ret_overrun; 40 + bool ret; 41 + 42 + ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, 43 + output->period_ns); 44 + WARN_ON(ret_overrun != 1); 45 + 46 + ret = drm_crtc_handle_vblank(crtc); 47 + if (!ret) 48 + DRM_ERROR("amdgpu_vkms failure on handling vblank"); 49 + 50 + return HRTIMER_RESTART; 51 + } 52 + 53 + static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc) 54 + { 55 + struct drm_device *dev = crtc->dev; 56 + unsigned int pipe = drm_crtc_index(crtc); 57 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 58 + struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc); 59 + 60 + drm_calc_timestamping_constants(crtc, &crtc->mode); 61 + 62 + hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 63 + out->vblank_hrtimer.function = &amdgpu_vkms_vblank_simulate; 64 + out->period_ns = ktime_set(0, vblank->framedur_ns); 65 + hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL); 66 + 67 + return 0; 68 + } 69 + 70 + static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc) 71 + { 72 + struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc); 73 + 74 + hrtimer_cancel(&out->vblank_hrtimer); 75 + } 76 + 77 + static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc, 78 + int *max_error, 79 + ktime_t *vblank_time, 80 + bool in_vblank_irq) 81 + { 82 + struct drm_device *dev = crtc->dev; 83 + unsigned int pipe = crtc->index; 84 + struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc); 85 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 86 + 87 + if (!READ_ONCE(vblank->enabled)) { 88 + *vblank_time = 
ktime_get(); 89 + return true; 90 + } 91 + 92 + *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires); 93 + 94 + if (WARN_ON(*vblank_time == vblank->time)) 95 + return true; 96 + 97 + /* 98 + * To prevent races we roll the hrtimer forward before we do any 99 + * interrupt processing - this is how real hw works (the interrupt is 100 + * only generated after all the vblank registers are updated) and what 101 + * the vblank core expects. Therefore we need to always correct the 102 + * timestampe by one frame. 103 + */ 104 + *vblank_time -= output->period_ns; 105 + 106 + return true; 107 + } 108 + 109 + static const struct drm_crtc_funcs amdgpu_vkms_crtc_funcs = { 110 + .set_config = drm_atomic_helper_set_config, 111 + .destroy = drm_crtc_cleanup, 112 + .page_flip = drm_atomic_helper_page_flip, 113 + .reset = drm_atomic_helper_crtc_reset, 114 + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 115 + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 116 + .enable_vblank = amdgpu_vkms_enable_vblank, 117 + .disable_vblank = amdgpu_vkms_disable_vblank, 118 + .get_vblank_timestamp = amdgpu_vkms_get_vblank_timestamp, 119 + }; 120 + 121 + static void amdgpu_vkms_crtc_atomic_enable(struct drm_crtc *crtc, 122 + struct drm_atomic_state *state) 123 + { 124 + drm_crtc_vblank_on(crtc); 125 + } 126 + 127 + static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc, 128 + struct drm_atomic_state *state) 129 + { 130 + drm_crtc_vblank_off(crtc); 131 + } 132 + 133 + static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc, 134 + struct drm_atomic_state *state) 135 + { 136 + if (crtc->state->event) { 137 + spin_lock(&crtc->dev->event_lock); 138 + 139 + if (drm_crtc_vblank_get(crtc) != 0) 140 + drm_crtc_send_vblank_event(crtc, crtc->state->event); 141 + else 142 + drm_crtc_arm_vblank_event(crtc, crtc->state->event); 143 + 144 + spin_unlock(&crtc->dev->event_lock); 145 + 146 + crtc->state->event = NULL; 147 + } 148 + } 149 + 150 + static 
const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = { 151 + .atomic_flush = amdgpu_vkms_crtc_atomic_flush, 152 + .atomic_enable = amdgpu_vkms_crtc_atomic_enable, 153 + .atomic_disable = amdgpu_vkms_crtc_atomic_disable, 154 + }; 155 + 156 + static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, 157 + struct drm_plane *primary, struct drm_plane *cursor) 158 + { 159 + int ret; 160 + 161 + ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor, 162 + &amdgpu_vkms_crtc_funcs, NULL); 163 + if (ret) { 164 + DRM_ERROR("Failed to init CRTC\n"); 165 + return ret; 166 + } 167 + 168 + drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs); 169 + 170 + return ret; 171 + } 172 + 173 + static const struct drm_connector_funcs amdgpu_vkms_connector_funcs = { 174 + .fill_modes = drm_helper_probe_single_connector_modes, 175 + .destroy = drm_connector_cleanup, 176 + .reset = drm_atomic_helper_connector_reset, 177 + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 178 + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 179 + }; 180 + 181 + static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector) 182 + { 183 + struct drm_device *dev = connector->dev; 184 + struct drm_display_mode *mode = NULL; 185 + unsigned i; 186 + static const struct mode_size { 187 + int w; 188 + int h; 189 + } common_modes[] = { 190 + { 640, 480}, 191 + { 720, 480}, 192 + { 800, 600}, 193 + { 848, 480}, 194 + {1024, 768}, 195 + {1152, 768}, 196 + {1280, 720}, 197 + {1280, 800}, 198 + {1280, 854}, 199 + {1280, 960}, 200 + {1280, 1024}, 201 + {1440, 900}, 202 + {1400, 1050}, 203 + {1680, 1050}, 204 + {1600, 1200}, 205 + {1920, 1080}, 206 + {1920, 1200}, 207 + {2560, 1440}, 208 + {4096, 3112}, 209 + {3656, 2664}, 210 + {3840, 2160}, 211 + {4096, 2160}, 212 + }; 213 + 214 + for (i = 0; i < ARRAY_SIZE(common_modes); i++) { 215 + mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); 216 + 
drm_mode_probed_add(connector, mode); 217 + } 218 + 219 + drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); 220 + 221 + return ARRAY_SIZE(common_modes); 222 + } 223 + 224 + static const struct drm_connector_helper_funcs amdgpu_vkms_conn_helper_funcs = { 225 + .get_modes = amdgpu_vkms_conn_get_modes, 226 + }; 227 + 228 + static const struct drm_plane_funcs amdgpu_vkms_plane_funcs = { 229 + .update_plane = drm_atomic_helper_update_plane, 230 + .disable_plane = drm_atomic_helper_disable_plane, 231 + .destroy = drm_plane_cleanup, 232 + .reset = drm_atomic_helper_plane_reset, 233 + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 234 + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 235 + }; 236 + 237 + static void amdgpu_vkms_plane_atomic_update(struct drm_plane *plane, 238 + struct drm_atomic_state *old_state) 239 + { 240 + return; 241 + } 242 + 243 + static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane, 244 + struct drm_atomic_state *state) 245 + { 246 + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, 247 + plane); 248 + struct drm_crtc_state *crtc_state; 249 + int ret; 250 + 251 + if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc)) 252 + return 0; 253 + 254 + crtc_state = drm_atomic_get_crtc_state(state, 255 + new_plane_state->crtc); 256 + if (IS_ERR(crtc_state)) 257 + return PTR_ERR(crtc_state); 258 + 259 + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, 260 + DRM_PLANE_HELPER_NO_SCALING, 261 + DRM_PLANE_HELPER_NO_SCALING, 262 + false, true); 263 + if (ret != 0) 264 + return ret; 265 + 266 + /* for now primary plane must be visible and full screen */ 267 + if (!new_plane_state->visible) 268 + return -EINVAL; 269 + 270 + return 0; 271 + } 272 + 273 + static int amdgpu_vkms_prepare_fb(struct drm_plane *plane, 274 + struct drm_plane_state *new_state) 275 + { 276 + struct amdgpu_framebuffer *afb; 277 + struct drm_gem_object *obj; 278 + struct amdgpu_device 
*adev; 279 + struct amdgpu_bo *rbo; 280 + struct list_head list; 281 + struct ttm_validate_buffer tv; 282 + struct ww_acquire_ctx ticket; 283 + uint32_t domain; 284 + int r; 285 + 286 + if (!new_state->fb) { 287 + DRM_DEBUG_KMS("No FB bound\n"); 288 + return 0; 289 + } 290 + afb = to_amdgpu_framebuffer(new_state->fb); 291 + obj = new_state->fb->obj[0]; 292 + rbo = gem_to_amdgpu_bo(obj); 293 + adev = amdgpu_ttm_adev(rbo->tbo.bdev); 294 + INIT_LIST_HEAD(&list); 295 + 296 + tv.bo = &rbo->tbo; 297 + tv.num_shared = 1; 298 + list_add(&tv.head, &list); 299 + 300 + r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); 301 + if (r) { 302 + dev_err(adev->dev, "fail to reserve bo (%d)\n", r); 303 + return r; 304 + } 305 + 306 + if (plane->type != DRM_PLANE_TYPE_CURSOR) 307 + domain = amdgpu_display_supported_domains(adev, rbo->flags); 308 + else 309 + domain = AMDGPU_GEM_DOMAIN_VRAM; 310 + 311 + r = amdgpu_bo_pin(rbo, domain); 312 + if (unlikely(r != 0)) { 313 + if (r != -ERESTARTSYS) 314 + DRM_ERROR("Failed to pin framebuffer with error %d\n", r); 315 + ttm_eu_backoff_reservation(&ticket, &list); 316 + return r; 317 + } 318 + 319 + r = amdgpu_ttm_alloc_gart(&rbo->tbo); 320 + if (unlikely(r != 0)) { 321 + amdgpu_bo_unpin(rbo); 322 + ttm_eu_backoff_reservation(&ticket, &list); 323 + DRM_ERROR("%p bind failed\n", rbo); 324 + return r; 325 + } 326 + 327 + ttm_eu_backoff_reservation(&ticket, &list); 328 + 329 + afb->address = amdgpu_bo_gpu_offset(rbo); 330 + 331 + amdgpu_bo_ref(rbo); 332 + 333 + return 0; 334 + } 335 + 336 + static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane, 337 + struct drm_plane_state *old_state) 338 + { 339 + struct amdgpu_bo *rbo; 340 + int r; 341 + 342 + if (!old_state->fb) 343 + return; 344 + 345 + rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); 346 + r = amdgpu_bo_reserve(rbo, false); 347 + if (unlikely(r)) { 348 + DRM_ERROR("failed to reserve rbo before unpin\n"); 349 + return; 350 + } 351 + 352 + amdgpu_bo_unpin(rbo); 353 + 
amdgpu_bo_unreserve(rbo); 354 + amdgpu_bo_unref(&rbo); 355 + } 356 + 357 + static const struct drm_plane_helper_funcs amdgpu_vkms_primary_helper_funcs = { 358 + .atomic_update = amdgpu_vkms_plane_atomic_update, 359 + .atomic_check = amdgpu_vkms_plane_atomic_check, 360 + .prepare_fb = amdgpu_vkms_prepare_fb, 361 + .cleanup_fb = amdgpu_vkms_cleanup_fb, 362 + }; 363 + 364 + static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev, 365 + enum drm_plane_type type, 366 + int index) 367 + { 368 + struct drm_plane *plane; 369 + int ret; 370 + 371 + plane = kzalloc(sizeof(*plane), GFP_KERNEL); 372 + if (!plane) 373 + return ERR_PTR(-ENOMEM); 374 + 375 + ret = drm_universal_plane_init(dev, plane, 1 << index, 376 + &amdgpu_vkms_plane_funcs, 377 + amdgpu_vkms_formats, 378 + ARRAY_SIZE(amdgpu_vkms_formats), 379 + NULL, type, NULL); 380 + if (ret) { 381 + kfree(plane); 382 + return ERR_PTR(ret); 383 + } 384 + 385 + drm_plane_helper_add(plane, &amdgpu_vkms_primary_helper_funcs); 386 + 387 + return plane; 388 + } 389 + 390 + int amdgpu_vkms_output_init(struct drm_device *dev, 391 + struct amdgpu_vkms_output *output, int index) 392 + { 393 + struct drm_connector *connector = &output->connector; 394 + struct drm_encoder *encoder = &output->encoder; 395 + struct drm_crtc *crtc = &output->crtc; 396 + struct drm_plane *primary, *cursor = NULL; 397 + int ret; 398 + 399 + primary = amdgpu_vkms_plane_init(dev, DRM_PLANE_TYPE_PRIMARY, index); 400 + if (IS_ERR(primary)) 401 + return PTR_ERR(primary); 402 + 403 + ret = amdgpu_vkms_crtc_init(dev, crtc, primary, cursor); 404 + if (ret) 405 + goto err_crtc; 406 + 407 + ret = drm_connector_init(dev, connector, &amdgpu_vkms_connector_funcs, 408 + DRM_MODE_CONNECTOR_VIRTUAL); 409 + if (ret) { 410 + DRM_ERROR("Failed to init connector\n"); 411 + goto err_connector; 412 + } 413 + 414 + drm_connector_helper_add(connector, &amdgpu_vkms_conn_helper_funcs); 415 + 416 + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL); 
417 + if (ret) { 418 + DRM_ERROR("Failed to init encoder\n"); 419 + goto err_encoder; 420 + } 421 + encoder->possible_crtcs = 1 << index; 422 + 423 + ret = drm_connector_attach_encoder(connector, encoder); 424 + if (ret) { 425 + DRM_ERROR("Failed to attach connector to encoder\n"); 426 + goto err_attach; 427 + } 428 + 429 + drm_mode_config_reset(dev); 430 + 431 + return 0; 432 + 433 + err_attach: 434 + drm_encoder_cleanup(encoder); 435 + 436 + err_encoder: 437 + drm_connector_cleanup(connector); 438 + 439 + err_connector: 440 + drm_crtc_cleanup(crtc); 441 + 442 + err_crtc: 443 + drm_plane_cleanup(primary); 444 + 445 + return ret; 446 + }
+29
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + 3 + #ifndef _AMDGPU_VKMS_H_ 4 + #define _AMDGPU_VKMS_H_ 5 + 6 + #define XRES_DEF 1024 7 + #define YRES_DEF 768 8 + 9 + #define XRES_MAX 16384 10 + #define YRES_MAX 16384 11 + 12 + #define drm_crtc_to_amdgpu_vkms_output(target) \ 13 + container_of(target, struct amdgpu_vkms_output, crtc) 14 + 15 + extern const struct amdgpu_ip_block_version amdgpu_vkms_ip_block; 16 + 17 + struct amdgpu_vkms_output { 18 + struct drm_crtc crtc; 19 + struct drm_encoder encoder; 20 + struct drm_connector connector; 21 + struct hrtimer vblank_hrtimer; 22 + ktime_t period_ns; 23 + struct drm_pending_vblank_event *event; 24 + }; 25 + 26 + int amdgpu_vkms_output_init(struct drm_device *dev, 27 + struct amdgpu_vkms_output *output, int index); 28 + 29 + #endif /* _AMDGPU_VKMS_H_ */
+14 -9
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
··· 22 22 */ 23 23 24 24 #include <drm/drm_vblank.h> 25 + #include <drm/drm_atomic_helper.h> 25 26 26 27 #include "amdgpu.h" 27 28 #include "amdgpu_pm.h" ··· 41 40 #include "dce_virtual.h" 42 41 #include "ivsrcid/ivsrcid_vislands30.h" 43 42 #include "amdgpu_display.h" 43 + #include "amdgpu_vkms.h" 44 44 45 45 #define DCE_VIRTUAL_VBLANK_PERIOD 16666666 46 46 ··· 376 374 .force = dce_virtual_force, 377 375 }; 378 376 377 + const struct drm_mode_config_funcs dce_virtual_mode_funcs = { 378 + .fb_create = amdgpu_display_user_framebuffer_create, 379 + .atomic_check = drm_atomic_helper_check, 380 + .atomic_commit = drm_atomic_helper_commit, 381 + }; 382 + 379 383 static int dce_virtual_sw_init(void *handle) 380 384 { 381 385 int r, i; ··· 393 385 394 386 adev_to_drm(adev)->max_vblank_count = 0; 395 387 396 - adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs; 388 + adev_to_drm(adev)->mode_config.funcs = &dce_virtual_mode_funcs; 397 389 398 - adev_to_drm(adev)->mode_config.max_width = 16384; 399 - adev_to_drm(adev)->mode_config.max_height = 16384; 390 + adev_to_drm(adev)->mode_config.max_width = XRES_MAX; 391 + adev_to_drm(adev)->mode_config.max_height = YRES_MAX; 400 392 401 393 adev_to_drm(adev)->mode_config.preferred_depth = 24; 402 394 adev_to_drm(adev)->mode_config.prefer_shadow = 1; ··· 407 399 if (r) 408 400 return r; 409 401 410 - adev_to_drm(adev)->mode_config.max_width = 16384; 411 - adev_to_drm(adev)->mode_config.max_height = 16384; 402 + adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc, sizeof(struct amdgpu_vkms_output), GFP_KERNEL); 412 403 413 404 /* allocate crtcs, encoders, connectors */ 414 405 for (i = 0; i < adev->mode_info.num_crtc; i++) { 415 - r = dce_virtual_crtc_init(adev, i); 416 - if (r) 417 - return r; 418 - r = dce_virtual_connector_encoder_init(adev, i); 406 + r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i); 419 407 if (r) 420 408 return r; 421 409 } ··· 432 428 
hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer); 433 429 434 430 kfree(adev->mode_info.bios_hardcoded_edid); 431 + kfree(adev->amdgpu_vkms_output); 435 432 436 433 drm_kms_helper_poll_fini(adev_to_drm(adev)); 437 434