Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'topic/drm-misc-2015-08-13' of git://anongit.freedesktop.org/drm-intel into drm-next

Final drm-misc pull for 4.3:
- fbdev emulation Kconfig option for everyone thanks to Archit. It's not
everything yet but this is fairly tricky since it spans all drivers.
- vgaarb & vgaswitcheroo polish from Thierry
- some drm_irq.c cleanups (Thierry)
- struct_mutex crusade from me
- more fbdev panic handling removal
- various things all over in drm core&helpers

* tag 'topic/drm-misc-2015-08-13' of git://anongit.freedesktop.org/drm-intel: (65 commits)
drm/atomic: Use KMS VBLANK API
drm/irq: Document return values more consistently
drm/irq: Make pipe unsigned and name consistent
drm/irq: Check for valid VBLANK before dereference
drm/irq: Remove negative CRTC index special-case
drm/plane: Remove redundant extern
drm/plane: Use consistent data types for format count
vga_switcheroo: Remove unnecessary checks
vga_switcheroo: Wrap overly long lines
vga_switcheroo: Use pr_fmt()
vga_switcheroo: Cleanup header comment
vga_switcheroo: Use pr_*() instead of printk()
vgaarb: Fix a few checkpatch errors and warnings
vgaarb: Use vgaarb: prefix consistently in messages
vgaarb: Stop complaining about absent devices
drm/atomic: fix null pointer access to mode_fixup callback
drm/i915: Use CONFIG_DRM_FBDEV_EMULATION
drm/core: Set mode to NULL when connectors in a set drops to 0.
drm/atomic: Call ww_acquire_done after check phase is complete
drm/atomic: Paper over locking WARN in default_state_clear
...

+1163 -1107
+20
drivers/gpu/drm/Kconfig
··· 37 37 select FB 38 38 select FRAMEBUFFER_CONSOLE if !EXPERT 39 39 select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE 40 + select FB_SYS_FOPS 41 + select FB_SYS_FILLRECT 42 + select FB_SYS_COPYAREA 43 + select FB_SYS_IMAGEBLIT 44 + select FB_CFB_FILLRECT 45 + select FB_CFB_COPYAREA 46 + select FB_CFB_IMAGEBLIT 40 47 help 41 48 FBDEV helpers for KMS drivers. 49 + 50 + config DRM_FBDEV_EMULATION 51 + bool "Enable legacy fbdev support for your modesetting driver" 52 + depends on DRM 53 + select DRM_KMS_HELPER 54 + select DRM_KMS_FB_HELPER 55 + default y 56 + help 57 + Choose this option if you have a need for the legacy fbdev 58 + support. Note that this support also provides the linux console 59 + support on top of your modesetting driver. 60 + 61 + If in doubt, say "Y". 42 62 43 63 config DRM_LOAD_EDID_FIRMWARE 44 64 bool "Allow to specify an EDID data set instead of probing for it"
+1 -1
drivers/gpu/drm/Makefile
··· 23 23 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ 24 24 drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o 25 25 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o 26 - drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o 26 + drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o 27 27 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o 28 28 29 29 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
+14 -31
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
··· 53 53 .owner = THIS_MODULE, 54 54 .fb_check_var = drm_fb_helper_check_var, 55 55 .fb_set_par = drm_fb_helper_set_par, 56 - .fb_fillrect = cfb_fillrect, 57 - .fb_copyarea = cfb_copyarea, 58 - .fb_imageblit = cfb_imageblit, 56 + .fb_fillrect = drm_fb_helper_cfb_fillrect, 57 + .fb_copyarea = drm_fb_helper_cfb_copyarea, 58 + .fb_imageblit = drm_fb_helper_cfb_imageblit, 59 59 .fb_pan_display = drm_fb_helper_pan_display, 60 60 .fb_blank = drm_fb_helper_blank, 61 61 .fb_setcmap = drm_fb_helper_setcmap, ··· 179 179 struct drm_mode_fb_cmd2 mode_cmd; 180 180 struct drm_gem_object *gobj = NULL; 181 181 struct amdgpu_bo *rbo = NULL; 182 - struct device *device = &adev->pdev->dev; 183 182 int ret; 184 183 unsigned long tmp; 185 184 ··· 200 201 rbo = gem_to_amdgpu_bo(gobj); 201 202 202 203 /* okay we have an object now allocate the framebuffer */ 203 - info = framebuffer_alloc(0, device); 204 - if (info == NULL) { 205 - ret = -ENOMEM; 204 + info = drm_fb_helper_alloc_fbi(helper); 205 + if (IS_ERR(info)) { 206 + ret = PTR_ERR(info); 206 207 goto out_unref; 207 208 } 208 209 ··· 211 212 ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj); 212 213 if (ret) { 213 214 DRM_ERROR("failed to initialize framebuffer %d\n", ret); 214 - goto out_unref; 215 + goto out_destroy_fbi; 215 216 } 216 217 217 218 fb = &rfbdev->rfb.base; 218 219 219 220 /* setup helper */ 220 221 rfbdev->helper.fb = fb; 221 - rfbdev->helper.fbdev = info; 222 222 223 223 memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo)); 224 224 ··· 237 239 drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); 238 240 239 241 /* setup aperture base/size for vesafb takeover */ 240 - info->apertures = alloc_apertures(1); 241 - if (!info->apertures) { 242 - ret = -ENOMEM; 243 - goto out_unref; 244 - } 245 242 info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base; 246 243 info->apertures->ranges[0].size = adev->mc.aper_size; 247 244 ··· 244 251 245 252 if (info->screen_base == 
NULL) { 246 253 ret = -ENOSPC; 247 - goto out_unref; 248 - } 249 - 250 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 251 - if (ret) { 252 - ret = -ENOMEM; 253 - goto out_unref; 254 + goto out_destroy_fbi; 254 255 } 255 256 256 257 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); ··· 256 269 vga_switcheroo_client_fb_set(adev->ddev->pdev, info); 257 270 return 0; 258 271 272 + out_destroy_fbi: 273 + drm_fb_helper_release_fbi(helper); 259 274 out_unref: 260 275 if (rbo) { 261 276 ··· 279 290 280 291 static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev) 281 292 { 282 - struct fb_info *info; 283 293 struct amdgpu_framebuffer *rfb = &rfbdev->rfb; 284 294 285 - if (rfbdev->helper.fbdev) { 286 - info = rfbdev->helper.fbdev; 287 - 288 - unregister_framebuffer(info); 289 - if (info->cmap.len) 290 - fb_dealloc_cmap(&info->cmap); 291 - framebuffer_release(info); 292 - } 295 + drm_fb_helper_unregister_fbi(&rfbdev->helper); 296 + drm_fb_helper_release_fbi(&rfbdev->helper); 293 297 294 298 if (rfb->obj) { 295 299 amdgpufb_destroy_pinned_object(rfb->obj); ··· 377 395 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state) 378 396 { 379 397 if (adev->mode_info.rfbdev) 380 - fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state); 398 + drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper, 399 + state); 381 400 } 382 401 383 402 int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
+10 -23
drivers/gpu/drm/armada/armada_fbdev.c
··· 22 22 .owner = THIS_MODULE, 23 23 .fb_check_var = drm_fb_helper_check_var, 24 24 .fb_set_par = drm_fb_helper_set_par, 25 - .fb_fillrect = cfb_fillrect, 26 - .fb_copyarea = cfb_copyarea, 27 - .fb_imageblit = cfb_imageblit, 25 + .fb_fillrect = drm_fb_helper_cfb_fillrect, 26 + .fb_copyarea = drm_fb_helper_cfb_copyarea, 27 + .fb_imageblit = drm_fb_helper_cfb_imageblit, 28 28 .fb_pan_display = drm_fb_helper_pan_display, 29 29 .fb_blank = drm_fb_helper_blank, 30 30 .fb_setcmap = drm_fb_helper_setcmap, ··· 80 80 if (IS_ERR(dfb)) 81 81 return PTR_ERR(dfb); 82 82 83 - info = framebuffer_alloc(0, dev->dev); 84 - if (!info) { 85 - ret = -ENOMEM; 83 + info = drm_fb_helper_alloc_fbi(fbh); 84 + if (IS_ERR(info)) { 85 + ret = PTR_ERR(info); 86 86 goto err_fballoc; 87 - } 88 - 89 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 90 - if (ret) { 91 - ret = -ENOMEM; 92 - goto err_fbcmap; 93 87 } 94 88 95 89 strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id)); ··· 95 101 info->screen_size = obj->obj.size; 96 102 info->screen_base = ptr; 97 103 fbh->fb = &dfb->fb; 98 - fbh->fbdev = info; 104 + 99 105 drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); 100 106 drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); 101 107 ··· 105 111 106 112 return 0; 107 113 108 - err_fbcmap: 109 - framebuffer_release(info); 110 114 err_fballoc: 111 115 dfb->fb.funcs->destroy(&dfb->fb); 112 116 return ret; ··· 163 171 164 172 return 0; 165 173 err_fb_setup: 174 + drm_fb_helper_release_fbi(fbh); 166 175 drm_fb_helper_fini(fbh); 167 176 err_fb_helper: 168 177 priv->fbdev = NULL; ··· 184 191 struct drm_fb_helper *fbh = priv->fbdev; 185 192 186 193 if (fbh) { 187 - struct fb_info *info = fbh->fbdev; 188 - 189 - if (info) { 190 - unregister_framebuffer(info); 191 - if (info->cmap.len) 192 - fb_dealloc_cmap(&info->cmap); 193 - framebuffer_release(info); 194 - } 194 + drm_fb_helper_unregister_fbi(fbh); 195 + drm_fb_helper_release_fbi(fbh); 195 196 196 197 
drm_fb_helper_fini(fbh); 197 198
+17 -31
drivers/gpu/drm/ast/ast_fb.c
··· 125 125 const struct fb_fillrect *rect) 126 126 { 127 127 struct ast_fbdev *afbdev = info->par; 128 - sys_fillrect(info, rect); 128 + drm_fb_helper_sys_fillrect(info, rect); 129 129 ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width, 130 130 rect->height); 131 131 } ··· 134 134 const struct fb_copyarea *area) 135 135 { 136 136 struct ast_fbdev *afbdev = info->par; 137 - sys_copyarea(info, area); 137 + drm_fb_helper_sys_copyarea(info, area); 138 138 ast_dirty_update(afbdev, area->dx, area->dy, area->width, 139 139 area->height); 140 140 } ··· 143 143 const struct fb_image *image) 144 144 { 145 145 struct ast_fbdev *afbdev = info->par; 146 - sys_imageblit(info, image); 146 + drm_fb_helper_sys_imageblit(info, image); 147 147 ast_dirty_update(afbdev, image->dx, image->dy, image->width, 148 148 image->height); 149 149 } ··· 193 193 struct drm_framebuffer *fb; 194 194 struct fb_info *info; 195 195 int size, ret; 196 - struct device *device = &dev->pdev->dev; 197 196 void *sysram; 198 197 struct drm_gem_object *gobj = NULL; 199 198 struct ast_bo *bo = NULL; ··· 216 217 if (!sysram) 217 218 return -ENOMEM; 218 219 219 - info = framebuffer_alloc(0, device); 220 - if (!info) { 221 - ret = -ENOMEM; 222 - goto out; 220 + info = drm_fb_helper_alloc_fbi(helper); 221 + if (IS_ERR(info)) { 222 + ret = PTR_ERR(info); 223 + goto err_free_vram; 223 224 } 224 225 info->par = afbdev; 225 226 226 227 ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj); 227 228 if (ret) 228 - goto out; 229 + goto err_release_fbi; 229 230 230 231 afbdev->sysram = sysram; 231 232 afbdev->size = size; 232 233 233 234 fb = &afbdev->afb.base; 234 235 afbdev->helper.fb = fb; 235 - afbdev->helper.fbdev = info; 236 236 237 237 strcpy(info->fix.id, "astdrmfb"); 238 238 239 239 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 240 240 info->fbops = &astfb_ops; 241 241 242 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 243 - if (ret) { 244 - ret = -ENOMEM; 245 - goto out; 246 - } 247 - 248 - 
info->apertures = alloc_apertures(1); 249 - if (!info->apertures) { 250 - ret = -ENOMEM; 251 - goto out; 252 - } 253 242 info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0); 254 243 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); 255 244 ··· 253 266 fb->width, fb->height); 254 267 255 268 return 0; 256 - out: 269 + 270 + err_release_fbi: 271 + drm_fb_helper_release_fbi(helper); 272 + err_free_vram: 273 + vfree(afbdev->sysram); 257 274 return ret; 258 275 } 259 276 ··· 288 297 static void ast_fbdev_destroy(struct drm_device *dev, 289 298 struct ast_fbdev *afbdev) 290 299 { 291 - struct fb_info *info; 292 300 struct ast_framebuffer *afb = &afbdev->afb; 293 - if (afbdev->helper.fbdev) { 294 - info = afbdev->helper.fbdev; 295 - unregister_framebuffer(info); 296 - if (info->cmap.len) 297 - fb_dealloc_cmap(&info->cmap); 298 - framebuffer_release(info); 299 - } 301 + 302 + drm_fb_helper_unregister_fbi(&afbdev->helper); 303 + drm_fb_helper_release_fbi(&afbdev->helper); 300 304 301 305 if (afb->obj) { 302 306 drm_gem_object_unreference_unlocked(afb->obj); ··· 363 377 if (!ast->fbdev) 364 378 return; 365 379 366 - fb_set_suspend(ast->fbdev->helper.fbdev, state); 380 + drm_fb_helper_set_suspend(&ast->fbdev->helper, state); 367 381 }
+5 -11
drivers/gpu/drm/ast/ast_main.c
··· 571 571 uint64_t *offset) 572 572 { 573 573 struct drm_gem_object *obj; 574 - int ret; 575 574 struct ast_bo *bo; 576 575 577 - mutex_lock(&dev->struct_mutex); 578 576 obj = drm_gem_object_lookup(dev, file, handle); 579 - if (obj == NULL) { 580 - ret = -ENOENT; 581 - goto out_unlock; 582 - } 577 + if (obj == NULL) 578 + return -ENOENT; 583 579 584 580 bo = gem_to_ast_bo(obj); 585 581 *offset = ast_bo_mmap_offset(bo); 586 582 587 - drm_gem_object_unreference(obj); 588 - ret = 0; 589 - out_unlock: 590 - mutex_unlock(&dev->struct_mutex); 591 - return ret; 583 + drm_gem_object_unreference_unlocked(obj); 584 + 585 + return 0; 592 586 593 587 } 594 588
+2 -2
drivers/gpu/drm/bochs/bochs_drv.c
··· 109 109 110 110 if (bochs->fb.initialized) { 111 111 console_lock(); 112 - fb_set_suspend(bochs->fb.helper.fbdev, 1); 112 + drm_fb_helper_set_suspend(&bochs->fb.helper, 1); 113 113 console_unlock(); 114 114 } 115 115 ··· 126 126 127 127 if (bochs->fb.initialized) { 128 128 console_lock(); 129 - fb_set_suspend(bochs->fb.helper.fbdev, 0); 129 + drm_fb_helper_set_suspend(&bochs->fb.helper, 0); 130 130 console_unlock(); 131 131 } 132 132
+11 -25
drivers/gpu/drm/bochs/bochs_fbdev.c
··· 24 24 .owner = THIS_MODULE, 25 25 .fb_check_var = drm_fb_helper_check_var, 26 26 .fb_set_par = drm_fb_helper_set_par, 27 - .fb_fillrect = sys_fillrect, 28 - .fb_copyarea = sys_copyarea, 29 - .fb_imageblit = sys_imageblit, 27 + .fb_fillrect = drm_fb_helper_sys_fillrect, 28 + .fb_copyarea = drm_fb_helper_sys_copyarea, 29 + .fb_imageblit = drm_fb_helper_sys_imageblit, 30 30 .fb_pan_display = drm_fb_helper_pan_display, 31 31 .fb_blank = drm_fb_helper_blank, 32 32 .fb_setcmap = drm_fb_helper_setcmap, ··· 56 56 { 57 57 struct bochs_device *bochs = 58 58 container_of(helper, struct bochs_device, fb.helper); 59 - struct drm_device *dev = bochs->dev; 60 59 struct fb_info *info; 61 60 struct drm_framebuffer *fb; 62 61 struct drm_mode_fb_cmd2 mode_cmd; 63 - struct device *device = &dev->pdev->dev; 64 62 struct drm_gem_object *gobj = NULL; 65 63 struct bochs_bo *bo = NULL; 66 64 int size, ret; ··· 104 106 ttm_bo_unreserve(&bo->bo); 105 107 106 108 /* init fb device */ 107 - info = framebuffer_alloc(0, device); 108 - if (info == NULL) 109 - return -ENOMEM; 109 + info = drm_fb_helper_alloc_fbi(helper); 110 + if (IS_ERR(info)) 111 + return PTR_ERR(info); 110 112 111 113 info->par = &bochs->fb.helper; 112 114 113 115 ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj); 114 - if (ret) 116 + if (ret) { 117 + drm_fb_helper_release_fbi(helper); 115 118 return ret; 119 + } 116 120 117 121 bochs->fb.size = size; 118 122 119 123 /* setup helper */ 120 124 fb = &bochs->fb.gfb.base; 121 125 bochs->fb.helper.fb = fb; 122 - bochs->fb.helper.fbdev = info; 123 126 124 127 strcpy(info->fix.id, "bochsdrmfb"); 125 128 ··· 138 139 info->fix.smem_start = 0; 139 140 info->fix.smem_len = size; 140 141 141 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 142 - if (ret) { 143 - DRM_ERROR("%s: can't allocate color map\n", info->fix.id); 144 - return -ENOMEM; 145 - } 146 - 147 142 return 0; 148 143 } 149 144 150 145 static int bochs_fbdev_destroy(struct bochs_device *bochs) 151 146 { 
152 147 struct bochs_framebuffer *gfb = &bochs->fb.gfb; 153 - struct fb_info *info; 154 148 155 149 DRM_DEBUG_DRIVER("\n"); 156 150 157 - if (bochs->fb.helper.fbdev) { 158 - info = bochs->fb.helper.fbdev; 159 - 160 - unregister_framebuffer(info); 161 - if (info->cmap.len) 162 - fb_dealloc_cmap(&info->cmap); 163 - framebuffer_release(info); 164 - } 151 + drm_fb_helper_unregister_fbi(&bochs->fb.helper); 152 + drm_fb_helper_release_fbi(&bochs->fb.helper); 165 153 166 154 if (gfb->obj) { 167 155 drm_gem_object_unreference_unlocked(gfb->obj);
+4 -12
drivers/gpu/drm/bochs/bochs_mm.c
··· 454 454 uint32_t handle, uint64_t *offset) 455 455 { 456 456 struct drm_gem_object *obj; 457 - int ret; 458 457 struct bochs_bo *bo; 459 458 460 - mutex_lock(&dev->struct_mutex); 461 459 obj = drm_gem_object_lookup(dev, file, handle); 462 - if (obj == NULL) { 463 - ret = -ENOENT; 464 - goto out_unlock; 465 - } 460 + if (obj == NULL) 461 + return -ENOENT; 466 462 467 463 bo = gem_to_bochs_bo(obj); 468 464 *offset = bochs_bo_mmap_offset(bo); 469 465 470 - drm_gem_object_unreference(obj); 471 - ret = 0; 472 - out_unlock: 473 - mutex_unlock(&dev->struct_mutex); 474 - return ret; 475 - 466 + drm_gem_object_unreference_unlocked(obj); 467 + return 0; 476 468 } 477 469 478 470 /* ---------------------------------------------------------------------- */
+2 -2
drivers/gpu/drm/cirrus/cirrus_drv.c
··· 92 92 93 93 if (cdev->mode_info.gfbdev) { 94 94 console_lock(); 95 - fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1); 95 + drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 1); 96 96 console_unlock(); 97 97 } 98 98 ··· 109 109 110 110 if (cdev->mode_info.gfbdev) { 111 111 console_lock(); 112 - fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0); 112 + drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 0); 113 113 console_unlock(); 114 114 } 115 115
+8 -33
drivers/gpu/drm/cirrus/cirrus_fbdev.c
··· 98 98 const struct fb_fillrect *rect) 99 99 { 100 100 struct cirrus_fbdev *afbdev = info->par; 101 - sys_fillrect(info, rect); 101 + drm_fb_helper_sys_fillrect(info, rect); 102 102 cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width, 103 103 rect->height); 104 104 } ··· 107 107 const struct fb_copyarea *area) 108 108 { 109 109 struct cirrus_fbdev *afbdev = info->par; 110 - sys_copyarea(info, area); 110 + drm_fb_helper_sys_copyarea(info, area); 111 111 cirrus_dirty_update(afbdev, area->dx, area->dy, area->width, 112 112 area->height); 113 113 } ··· 116 116 const struct fb_image *image) 117 117 { 118 118 struct cirrus_fbdev *afbdev = info->par; 119 - sys_imageblit(info, image); 119 + drm_fb_helper_sys_imageblit(info, image); 120 120 cirrus_dirty_update(afbdev, image->dx, image->dy, image->width, 121 121 image->height); 122 122 } ··· 165 165 { 166 166 struct cirrus_fbdev *gfbdev = 167 167 container_of(helper, struct cirrus_fbdev, helper); 168 - struct drm_device *dev = gfbdev->helper.dev; 169 168 struct cirrus_device *cdev = gfbdev->helper.dev->dev_private; 170 169 struct fb_info *info; 171 170 struct drm_framebuffer *fb; 172 171 struct drm_mode_fb_cmd2 mode_cmd; 173 - struct device *device = &dev->pdev->dev; 174 172 void *sysram; 175 173 struct drm_gem_object *gobj = NULL; 176 174 struct cirrus_bo *bo = NULL; ··· 193 195 if (!sysram) 194 196 return -ENOMEM; 195 197 196 - info = framebuffer_alloc(0, device); 197 - if (info == NULL) 198 - return -ENOMEM; 198 + info = drm_fb_helper_alloc_fbi(helper); 199 + if (IS_ERR(info)) 200 + return PTR_ERR(info); 199 201 200 202 info->par = gfbdev; 201 203 ··· 214 216 215 217 /* setup helper */ 216 218 gfbdev->helper.fb = fb; 217 - gfbdev->helper.fbdev = info; 218 219 219 220 strcpy(info->fix.id, "cirrusdrmfb"); 220 - 221 221 222 222 info->flags = FBINFO_DEFAULT; 223 223 info->fbops = &cirrusfb_ops; ··· 225 229 sizes->fb_height); 226 230 227 231 /* setup aperture base/size for vesafb takeover */ 228 - info->apertures = 
alloc_apertures(1); 229 - if (!info->apertures) { 230 - ret = -ENOMEM; 231 - goto out_iounmap; 232 - } 233 232 info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base; 234 233 info->apertures->ranges[0].size = cdev->mc.vram_size; 235 234 ··· 237 246 info->fix.mmio_start = 0; 238 247 info->fix.mmio_len = 0; 239 248 240 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 241 - if (ret) { 242 - DRM_ERROR("%s: can't allocate color map\n", info->fix.id); 243 - ret = -ENOMEM; 244 - goto out_iounmap; 245 - } 246 - 247 249 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); 248 250 DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start); 249 251 DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len); ··· 244 260 DRM_INFO(" pitch is %d\n", fb->pitches[0]); 245 261 246 262 return 0; 247 - out_iounmap: 248 - return ret; 249 263 } 250 264 251 265 static int cirrus_fbdev_destroy(struct drm_device *dev, 252 266 struct cirrus_fbdev *gfbdev) 253 267 { 254 - struct fb_info *info; 255 268 struct cirrus_framebuffer *gfb = &gfbdev->gfb; 256 269 257 - if (gfbdev->helper.fbdev) { 258 - info = gfbdev->helper.fbdev; 259 - 260 - unregister_framebuffer(info); 261 - if (info->cmap.len) 262 - fb_dealloc_cmap(&info->cmap); 263 - framebuffer_release(info); 264 - } 270 + drm_fb_helper_unregister_fbi(&gfbdev->helper); 271 + drm_fb_helper_release_fbi(&gfbdev->helper); 265 272 266 273 if (gfb->obj) { 267 274 drm_gem_object_unreference_unlocked(gfb->obj);
+4 -11
drivers/gpu/drm/cirrus/cirrus_main.c
··· 293 293 uint64_t *offset) 294 294 { 295 295 struct drm_gem_object *obj; 296 - int ret; 297 296 struct cirrus_bo *bo; 298 297 299 - mutex_lock(&dev->struct_mutex); 300 298 obj = drm_gem_object_lookup(dev, file, handle); 301 - if (obj == NULL) { 302 - ret = -ENOENT; 303 - goto out_unlock; 304 - } 299 + if (obj == NULL) 300 + return -ENOENT; 305 301 306 302 bo = gem_to_cirrus_bo(obj); 307 303 *offset = cirrus_bo_mmap_offset(bo); 308 304 309 - drm_gem_object_unreference(obj); 310 - ret = 0; 311 - out_unlock: 312 - mutex_unlock(&dev->struct_mutex); 313 - return ret; 305 + drm_gem_object_unreference_unlocked(obj); 314 306 307 + return 0; 315 308 } 316 309 317 310 bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
+12 -3
drivers/gpu/drm/drm_atomic.c
··· 153 153 if (!connector) 154 154 continue; 155 155 156 - WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); 157 - 158 - connector->funcs->atomic_destroy_state(connector, 156 + /* 157 + * FIXME: Async commits can race with connector unplugging and 158 + * there's currently nothing that prevents cleanup up state for 159 + * deleted connectors. As long as the callback doesn't look at 160 + * the connector we'll be fine though, so make sure that's the 161 + * case by setting all connector pointers to NULL. 162 + */ 163 + state->connector_states[i]->connector = NULL; 164 + connector->funcs->atomic_destroy_state(NULL, 159 165 state->connector_states[i]); 160 166 state->connectors[i] = NULL; 161 167 state->connector_states[i] = NULL; ··· 1229 1223 } 1230 1224 } 1231 1225 } 1226 + 1227 + if (ret == 0) 1228 + ww_acquire_done(&state->acquire_ctx->ww_ctx); 1232 1229 1233 1230 return ret; 1234 1231 }
+3 -3
drivers/gpu/drm/drm_atomic_helper.c
··· 299 299 encoder->base.id, encoder->name); 300 300 return ret; 301 301 } 302 - } else { 302 + } else if (funcs->mode_fixup) { 303 303 ret = funcs->mode_fixup(encoder, &crtc_state->mode, 304 304 &crtc_state->adjusted_mode); 305 305 if (!ret) { ··· 958 958 continue; 959 959 960 960 old_crtc_state->enable = true; 961 - old_crtc_state->last_vblank_count = drm_vblank_count(dev, i); 961 + old_crtc_state->last_vblank_count = drm_crtc_vblank_count(crtc); 962 962 } 963 963 964 964 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { ··· 967 967 968 968 ret = wait_event_timeout(dev->vblank[i].queue, 969 969 old_crtc_state->last_vblank_count != 970 - drm_vblank_count(dev, i), 970 + drm_crtc_vblank_count(crtc), 971 971 msecs_to_jiffies(50)); 972 972 973 973 drm_crtc_vblank_put(crtc);
+4 -2
drivers/gpu/drm/drm_crtc.c
··· 1151 1151 int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, 1152 1152 unsigned long possible_crtcs, 1153 1153 const struct drm_plane_funcs *funcs, 1154 - const uint32_t *formats, uint32_t format_count, 1154 + const uint32_t *formats, unsigned int format_count, 1155 1155 enum drm_plane_type type) 1156 1156 { 1157 1157 struct drm_mode_config *config = &dev->mode_config; ··· 1225 1225 int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, 1226 1226 unsigned long possible_crtcs, 1227 1227 const struct drm_plane_funcs *funcs, 1228 - const uint32_t *formats, uint32_t format_count, 1228 + const uint32_t *formats, unsigned int format_count, 1229 1229 bool is_primary) 1230 1230 { 1231 1231 enum drm_plane_type type; ··· 5273 5273 if (encoder->funcs->reset) 5274 5274 encoder->funcs->reset(encoder); 5275 5275 5276 + mutex_lock(&dev->mode_config.mutex); 5276 5277 drm_for_each_connector(connector, dev) { 5277 5278 connector->status = connector_status_unknown; 5278 5279 5279 5280 if (connector->funcs->reset) 5280 5281 connector->funcs->reset(connector); 5281 5282 } 5283 + mutex_unlock(&dev->mode_config.mutex); 5282 5284 } 5283 5285 EXPORT_SYMBOL(drm_mode_config_reset); 5284 5286
+1 -1
drivers/gpu/drm/drm_edid.c
··· 3802 3802 struct drm_display_mode *mode; 3803 3803 struct drm_device *dev = connector->dev; 3804 3804 3805 - count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); 3805 + count = ARRAY_SIZE(drm_dmt_modes); 3806 3806 if (hdisplay < 0) 3807 3807 hdisplay = 0; 3808 3808 if (vdisplay < 0)
+11 -34
drivers/gpu/drm/drm_fb_cma_helper.c
··· 222 222 223 223 static struct fb_ops drm_fbdev_cma_ops = { 224 224 .owner = THIS_MODULE, 225 - .fb_fillrect = sys_fillrect, 226 - .fb_copyarea = sys_copyarea, 227 - .fb_imageblit = sys_imageblit, 225 + .fb_fillrect = drm_fb_helper_sys_fillrect, 226 + .fb_copyarea = drm_fb_helper_sys_copyarea, 227 + .fb_imageblit = drm_fb_helper_sys_imageblit, 228 228 .fb_check_var = drm_fb_helper_check_var, 229 229 .fb_set_par = drm_fb_helper_set_par, 230 230 .fb_blank = drm_fb_helper_blank, ··· 263 263 if (IS_ERR(obj)) 264 264 return -ENOMEM; 265 265 266 - fbi = framebuffer_alloc(0, dev->dev); 267 - if (!fbi) { 268 - dev_err(dev->dev, "Failed to allocate framebuffer info.\n"); 269 - ret = -ENOMEM; 266 + fbi = drm_fb_helper_alloc_fbi(helper); 267 + if (IS_ERR(fbi)) { 268 + ret = PTR_ERR(fbi); 270 269 goto err_drm_gem_cma_free_object; 271 270 } 272 271 ··· 273 274 if (IS_ERR(fbdev_cma->fb)) { 274 275 dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); 275 276 ret = PTR_ERR(fbdev_cma->fb); 276 - goto err_framebuffer_release; 277 + goto err_fb_info_destroy; 277 278 } 278 279 279 280 fb = &fbdev_cma->fb->fb; 280 281 helper->fb = fb; 281 - helper->fbdev = fbi; 282 282 283 283 fbi->par = helper; 284 284 fbi->flags = FBINFO_FLAG_DEFAULT; 285 285 fbi->fbops = &drm_fbdev_cma_ops; 286 - 287 - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); 288 - if (ret) { 289 - dev_err(dev->dev, "Failed to allocate color map.\n"); 290 - goto err_drm_fb_cma_destroy; 291 - } 292 286 293 287 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); 294 288 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); ··· 297 305 298 306 return 0; 299 307 300 - err_drm_fb_cma_destroy: 301 - drm_framebuffer_unregister_private(fb); 302 - drm_fb_cma_destroy(fb); 303 - err_framebuffer_release: 304 - framebuffer_release(fbi); 308 + err_fb_info_destroy: 309 + drm_fb_helper_release_fbi(helper); 305 310 err_drm_gem_cma_free_object: 306 311 drm_gem_cma_free_object(&obj->base); 307 312 return ret; ··· 374 
385 */ 375 386 void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma) 376 387 { 377 - if (fbdev_cma->fb_helper.fbdev) { 378 - struct fb_info *info; 379 - int ret; 380 - 381 - info = fbdev_cma->fb_helper.fbdev; 382 - ret = unregister_framebuffer(info); 383 - if (ret < 0) 384 - DRM_DEBUG_KMS("failed unregister_framebuffer()\n"); 385 - 386 - if (info->cmap.len) 387 - fb_dealloc_cmap(&info->cmap); 388 - 389 - framebuffer_release(info); 390 - } 388 + drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper); 389 + drm_fb_helper_release_fbi(&fbdev_cma->fb_helper); 391 390 392 391 if (fbdev_cma->fb) { 393 392 drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
+269 -67
drivers/gpu/drm/drm_fb_helper.c
··· 56 56 * Teardown is done with drm_fb_helper_fini(). 57 57 * 58 58 * At runtime drivers should restore the fbdev console by calling 59 - * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They 60 - * should also notify the fb helper code from updates to the output 59 + * drm_fb_helper_restore_fbdev_mode_unlocked() from their ->lastclose callback. 60 + * They should also notify the fb helper code from updates to the output 61 61 * configuration by calling drm_fb_helper_hotplug_event(). For easier 62 62 * integration with the output polling code in drm_crtc_helper.c the modeset 63 63 * code provides a ->output_poll_changed callback. ··· 168 168 } 169 169 set->num_connectors--; 170 170 171 - /* because i915 is pissy about this.. 171 + /* 172 172 * TODO maybe need to makes sure we set it back to !=NULL somewhere? 173 173 */ 174 - if (set->num_connectors == 0) 174 + if (set->num_connectors == 0) { 175 175 set->fb = NULL; 176 + drm_mode_destroy(connector->dev, set->mode); 177 + set->mode = NULL; 178 + } 176 179 } 177 180 178 181 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, ··· 357 354 } 358 355 return error; 359 356 } 360 - /** 361 - * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration 362 - * @fb_helper: fbcon to restore 363 - * 364 - * This should be called from driver's drm ->lastclose callback 365 - * when implementing an fbcon on top of kms using this helper. This ensures that 366 - * the user isn't greeted with a black screen when e.g. X dies. 367 - * 368 - * Use this variant if you need to bypass locking (panic), or already 369 - * hold all modeset locks. 
Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked() 370 - */ 371 - static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper) 372 - { 373 - return restore_fbdev_mode(fb_helper); 374 - } 375 357 376 358 /** 377 359 * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration ··· 386 398 } 387 399 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); 388 400 389 - /* 390 - * restore fbcon display for all kms driver's using this helper, used for sysrq 391 - * and panic handling. 392 - */ 393 - static bool drm_fb_helper_force_kernel_mode(void) 394 - { 395 - bool ret, error = false; 396 - struct drm_fb_helper *helper; 397 - 398 - if (list_empty(&kernel_fb_helper_list)) 399 - return false; 400 - 401 - list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { 402 - struct drm_device *dev = helper->dev; 403 - 404 - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 405 - continue; 406 - 407 - /* 408 - * NOTE: Use trylock mode to avoid deadlocks and sleeping in 409 - * panic context. 410 - */ 411 - if (__drm_modeset_lock_all(dev, true) != 0) { 412 - error = true; 413 - continue; 414 - } 415 - 416 - ret = drm_fb_helper_restore_fbdev_mode(helper); 417 - if (ret) 418 - error = true; 419 - 420 - drm_modeset_unlock_all(dev); 421 - } 422 - return error; 423 - } 424 - 425 401 static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) 426 402 { 427 403 struct drm_device *dev = fb_helper->dev; ··· 411 459 } 412 460 413 461 #ifdef CONFIG_MAGIC_SYSRQ 462 + /* 463 + * restore fbcon display for all kms driver's using this helper, used for sysrq 464 + * and panic handling. 
465 + */ 466 + static bool drm_fb_helper_force_kernel_mode(void) 467 + { 468 + bool ret, error = false; 469 + struct drm_fb_helper *helper; 470 + 471 + if (list_empty(&kernel_fb_helper_list)) 472 + return false; 473 + 474 + list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { 475 + struct drm_device *dev = helper->dev; 476 + 477 + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 478 + continue; 479 + 480 + drm_modeset_lock_all(dev); 481 + ret = restore_fbdev_mode(helper); 482 + if (ret) 483 + error = true; 484 + drm_modeset_unlock_all(dev); 485 + } 486 + return error; 487 + } 488 + 414 489 static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) 415 490 { 416 491 bool ret; ··· 468 489 struct drm_crtc *crtc; 469 490 struct drm_connector *connector; 470 491 int i, j; 471 - 472 - /* 473 - * fbdev->blank can be called from irq context in case of a panic. 474 - * Since we already have our own special panic handler which will 475 - * restore the fbdev console mode completely, just bail out early. 476 - */ 477 - if (oops_in_progress) 478 - return; 479 492 480 493 /* 481 494 * For each CRTC in this fb, turn the connectors on/off. ··· 502 531 */ 503 532 int drm_fb_helper_blank(int blank, struct fb_info *info) 504 533 { 534 + if (oops_in_progress) 535 + return -EBUSY; 536 + 505 537 switch (blank) { 506 538 /* Display: On; HSync: On, VSync: On */ 507 539 case FB_BLANK_UNBLANK: ··· 628 654 } 629 655 EXPORT_SYMBOL(drm_fb_helper_init); 630 656 657 + /** 658 + * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members 659 + * @fb_helper: driver-allocated fbdev helper 660 + * 661 + * A helper to alloc fb_info and the members cmap and apertures. Called 662 + * by the driver within the fb_probe fb_helper callback function. 
663 + * 664 + * RETURNS: 665 + * fb_info pointer if things went okay, pointer containing error code 666 + * otherwise 667 + */ 668 + struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) 669 + { 670 + struct device *dev = fb_helper->dev->dev; 671 + struct fb_info *info; 672 + int ret; 673 + 674 + info = framebuffer_alloc(0, dev); 675 + if (!info) 676 + return ERR_PTR(-ENOMEM); 677 + 678 + ret = fb_alloc_cmap(&info->cmap, 256, 0); 679 + if (ret) 680 + goto err_release; 681 + 682 + info->apertures = alloc_apertures(1); 683 + if (!info->apertures) { 684 + ret = -ENOMEM; 685 + goto err_free_cmap; 686 + } 687 + 688 + fb_helper->fbdev = info; 689 + 690 + return info; 691 + 692 + err_free_cmap: 693 + fb_dealloc_cmap(&info->cmap); 694 + err_release: 695 + framebuffer_release(info); 696 + return ERR_PTR(ret); 697 + } 698 + EXPORT_SYMBOL(drm_fb_helper_alloc_fbi); 699 + 700 + /** 701 + * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device 702 + * @fb_helper: driver-allocated fbdev helper 703 + * 704 + * A wrapper around unregister_framebuffer, to release the fb_info 705 + * framebuffer device 706 + */ 707 + void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper) 708 + { 709 + if (fb_helper && fb_helper->fbdev) 710 + unregister_framebuffer(fb_helper->fbdev); 711 + } 712 + EXPORT_SYMBOL(drm_fb_helper_unregister_fbi); 713 + 714 + /** 715 + * drm_fb_helper_release_fbi - dealloc fb_info and its members 716 + * @fb_helper: driver-allocated fbdev helper 717 + * 718 + * A helper to free memory taken by fb_info and the members cmap and 719 + * apertures 720 + */ 721 + void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper) 722 + { 723 + if (fb_helper) { 724 + struct fb_info *info = fb_helper->fbdev; 725 + 726 + if (info) { 727 + if (info->cmap.len) 728 + fb_dealloc_cmap(&info->cmap); 729 + framebuffer_release(info); 730 + } 731 + 732 + fb_helper->fbdev = NULL; 733 + } 734 + } 735 + EXPORT_SYMBOL(drm_fb_helper_release_fbi); 736 + 
631 737 void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) 632 738 { 633 739 if (!list_empty(&fb_helper->kernel_fb_list)) { ··· 721 667 722 668 } 723 669 EXPORT_SYMBOL(drm_fb_helper_fini); 670 + 671 + /** 672 + * drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer 673 + * @fb_helper: driver-allocated fbdev helper 674 + * 675 + * A wrapper around unlink_framebuffer implemented by fbdev core 676 + */ 677 + void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper) 678 + { 679 + if (fb_helper && fb_helper->fbdev) 680 + unlink_framebuffer(fb_helper->fbdev); 681 + } 682 + EXPORT_SYMBOL(drm_fb_helper_unlink_fbi); 683 + 684 + /** 685 + * drm_fb_helper_sys_read - wrapper around fb_sys_read 686 + * @info: fb_info struct pointer 687 + * @buf: userspace buffer to read from framebuffer memory 688 + * @count: number of bytes to read from framebuffer memory 689 + * @ppos: read offset within framebuffer memory 690 + * 691 + * A wrapper around fb_sys_read implemented by fbdev core 692 + */ 693 + ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, 694 + size_t count, loff_t *ppos) 695 + { 696 + return fb_sys_read(info, buf, count, ppos); 697 + } 698 + EXPORT_SYMBOL(drm_fb_helper_sys_read); 699 + 700 + /** 701 + * drm_fb_helper_sys_write - wrapper around fb_sys_write 702 + * @info: fb_info struct pointer 703 + * @buf: userspace buffer to write to framebuffer memory 704 + * @count: number of bytes to write to framebuffer memory 705 + * @ppos: write offset within framebuffer memory 706 + * 707 + * A wrapper around fb_sys_write implemented by fbdev core 708 + */ 709 + ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, 710 + size_t count, loff_t *ppos) 711 + { 712 + return fb_sys_write(info, buf, count, ppos); 713 + } 714 + EXPORT_SYMBOL(drm_fb_helper_sys_write); 715 + 716 + /** 717 + * drm_fb_helper_sys_fillrect - wrapper around sys_fillrect 718 + * @info: fbdev registered by the helper 719 + * @rect: info about rectangle 
to fill 720 + * 721 + * A wrapper around sys_fillrect implemented by fbdev core 722 + */ 723 + void drm_fb_helper_sys_fillrect(struct fb_info *info, 724 + const struct fb_fillrect *rect) 725 + { 726 + sys_fillrect(info, rect); 727 + } 728 + EXPORT_SYMBOL(drm_fb_helper_sys_fillrect); 729 + 730 + /** 731 + * drm_fb_helper_sys_copyarea - wrapper around sys_copyarea 732 + * @info: fbdev registered by the helper 733 + * @area: info about area to copy 734 + * 735 + * A wrapper around sys_copyarea implemented by fbdev core 736 + */ 737 + void drm_fb_helper_sys_copyarea(struct fb_info *info, 738 + const struct fb_copyarea *area) 739 + { 740 + sys_copyarea(info, area); 741 + } 742 + EXPORT_SYMBOL(drm_fb_helper_sys_copyarea); 743 + 744 + /** 745 + * drm_fb_helper_sys_imageblit - wrapper around sys_imageblit 746 + * @info: fbdev registered by the helper 747 + * @image: info about image to blit 748 + * 749 + * A wrapper around sys_imageblit implemented by fbdev core 750 + */ 751 + void drm_fb_helper_sys_imageblit(struct fb_info *info, 752 + const struct fb_image *image) 753 + { 754 + sys_imageblit(info, image); 755 + } 756 + EXPORT_SYMBOL(drm_fb_helper_sys_imageblit); 757 + 758 + /** 759 + * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect 760 + * @info: fbdev registered by the helper 761 + * @rect: info about rectangle to fill 762 + * 763 + * A wrapper around cfb_imageblit implemented by fbdev core 764 + */ 765 + void drm_fb_helper_cfb_fillrect(struct fb_info *info, 766 + const struct fb_fillrect *rect) 767 + { 768 + cfb_fillrect(info, rect); 769 + } 770 + EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect); 771 + 772 + /** 773 + * drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea 774 + * @info: fbdev registered by the helper 775 + * @area: info about area to copy 776 + * 777 + * A wrapper around cfb_copyarea implemented by fbdev core 778 + */ 779 + void drm_fb_helper_cfb_copyarea(struct fb_info *info, 780 + const struct fb_copyarea *area) 781 + { 782 + 
cfb_copyarea(info, area); 783 + } 784 + EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea); 785 + 786 + /** 787 + * drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit 788 + * @info: fbdev registered by the helper 789 + * @image: info about image to blit 790 + * 791 + * A wrapper around cfb_imageblit implemented by fbdev core 792 + */ 793 + void drm_fb_helper_cfb_imageblit(struct fb_info *info, 794 + const struct fb_image *image) 795 + { 796 + cfb_imageblit(info, image); 797 + } 798 + EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit); 799 + 800 + /** 801 + * drm_fb_helper_set_suspend - wrapper around fb_set_suspend 802 + * @fb_helper: driver-allocated fbdev helper 803 + * @state: desired state, zero to resume, non-zero to suspend 804 + * 805 + * A wrapper around fb_set_suspend implemented by fbdev core 806 + */ 807 + void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state) 808 + { 809 + if (fb_helper && fb_helper->fbdev) 810 + fb_set_suspend(fb_helper->fbdev, state); 811 + } 812 + EXPORT_SYMBOL(drm_fb_helper_set_suspend); 724 813 725 814 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, 726 815 u16 blue, u16 regno, struct fb_info *info) ··· 952 755 int i, j, rc = 0; 953 756 int start; 954 757 955 - if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { 758 + if (oops_in_progress) 956 759 return -EBUSY; 957 - } 760 + 761 + drm_modeset_lock_all(dev); 958 762 if (!drm_fb_helper_is_bound(fb_helper)) { 959 763 drm_modeset_unlock_all(dev); 960 764 return -EBUSY; ··· 1104 906 struct drm_fb_helper *fb_helper = info->par; 1105 907 struct fb_var_screeninfo *var = &info->var; 1106 908 909 + if (oops_in_progress) 910 + return -EBUSY; 911 + 1107 912 if (var->pixclock != 0) { 1108 913 DRM_ERROR("PIXEL CLOCK SET\n"); 1109 914 return -EINVAL; ··· 1132 931 int ret = 0; 1133 932 int i; 1134 933 1135 - if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { 934 + if (oops_in_progress) 1136 935 return -EBUSY; 1137 - } 936 + 937 + drm_modeset_lock_all(dev); 1138 
938 if (!drm_fb_helper_is_bound(fb_helper)) { 1139 939 drm_modeset_unlock_all(dev); 1140 940 return -EBUSY;
+1 -1
drivers/gpu/drm/drm_gem.c
··· 766 766 struct drm_gem_object *obj = (struct drm_gem_object *) kref; 767 767 struct drm_device *dev = obj->dev; 768 768 769 - BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 769 + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 770 770 771 771 if (dev->driver->gem_free_object != NULL) 772 772 dev->driver->gem_free_object(obj);
+1 -6
drivers/gpu/drm/drm_gem_cma_helper.c
··· 289 289 { 290 290 struct drm_gem_object *gem_obj; 291 291 292 - mutex_lock(&drm->struct_mutex); 293 - 294 292 gem_obj = drm_gem_object_lookup(drm, file_priv, handle); 295 293 if (!gem_obj) { 296 294 dev_err(drm->dev, "failed to lookup GEM object\n"); 297 - mutex_unlock(&drm->struct_mutex); 298 295 return -EINVAL; 299 296 } 300 297 301 298 *offset = drm_vma_node_offset_addr(&gem_obj->vma_node); 302 299 303 - drm_gem_object_unreference(gem_obj); 304 - 305 - mutex_unlock(&drm->struct_mutex); 300 + drm_gem_object_unreference_unlocked(gem_obj); 306 301 307 302 return 0; 308 303 }
+171 -159
drivers/gpu/drm/drm_irq.c
··· 43 43 #include <linux/export.h> 44 44 45 45 /* Access macro for slots in vblank timestamp ringbuffer. */ 46 - #define vblanktimestamp(dev, crtc, count) \ 47 - ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE]) 46 + #define vblanktimestamp(dev, pipe, count) \ 47 + ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE]) 48 48 49 49 /* Retry timestamp calculation up to 3 times to satisfy 50 50 * drm_timestamp_precision before giving up. ··· 57 57 #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000 58 58 59 59 static bool 60 - drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, 60 + drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, 61 61 struct timeval *tvblank, unsigned flags); 62 62 63 63 static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ ··· 107 107 /** 108 108 * drm_update_vblank_count - update the master vblank counter 109 109 * @dev: DRM device 110 - * @crtc: counter to update 110 + * @pipe: counter to update 111 111 * 112 112 * Call back into the driver to update the appropriate vblank counter 113 113 * (specified by @crtc). Deal with wraparound, if it occurred, and ··· 120 120 * Note: caller must hold dev->vbl_lock since this reads & writes 121 121 * device vblank fields. 122 122 */ 123 - static void drm_update_vblank_count(struct drm_device *dev, int crtc) 123 + static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe) 124 124 { 125 - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 125 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 126 126 u32 cur_vblank, diff; 127 127 bool rc; 128 128 struct timeval t_vblank; ··· 140 140 * corresponding vblank timestamp. 
141 141 */ 142 142 do { 143 - cur_vblank = dev->driver->get_vblank_counter(dev, crtc); 144 - rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0); 145 - } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc)); 143 + cur_vblank = dev->driver->get_vblank_counter(dev, pipe); 144 + rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0); 145 + } while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe)); 146 146 147 147 /* Deal with counter wrap */ 148 148 diff = cur_vblank - vblank->last; 149 149 if (cur_vblank < vblank->last) { 150 150 diff += dev->max_vblank_count + 1; 151 151 152 - DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", 153 - crtc, vblank->last, cur_vblank, diff); 152 + DRM_DEBUG("last_vblank[%u]=0x%x, cur_vblank=0x%x => diff=0x%x\n", 153 + pipe, vblank->last, cur_vblank, diff); 154 154 } 155 155 156 - DRM_DEBUG("updating vblank count on crtc %d, missed %d\n", 157 - crtc, diff); 156 + DRM_DEBUG("updating vblank count on crtc %u, missed %d\n", 157 + pipe, diff); 158 158 159 159 if (diff == 0) 160 160 return; ··· 167 167 if (!rc) 168 168 t_vblank = (struct timeval) {0, 0}; 169 169 170 - store_vblank(dev, crtc, diff, &t_vblank); 170 + store_vblank(dev, pipe, diff, &t_vblank); 171 171 } 172 172 173 173 /* ··· 176 176 * are preserved, even if there are any spurious vblank irq's after 177 177 * disable. 178 178 */ 179 - static void vblank_disable_and_save(struct drm_device *dev, int crtc) 179 + static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe) 180 180 { 181 - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 181 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 182 182 unsigned long irqflags; 183 183 u32 vblcount; 184 184 s64 diff_ns; ··· 206 206 * vblank interrupt is disabled. 
207 207 */ 208 208 if (!vblank->enabled && 209 - drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) { 210 - drm_update_vblank_count(dev, crtc); 209 + drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0)) { 210 + drm_update_vblank_count(dev, pipe); 211 211 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 212 212 return; 213 213 } ··· 218 218 * hardware potentially runtime suspended. 219 219 */ 220 220 if (vblank->enabled) { 221 - dev->driver->disable_vblank(dev, crtc); 221 + dev->driver->disable_vblank(dev, pipe); 222 222 vblank->enabled = false; 223 223 } 224 224 ··· 235 235 * delayed gpu counter increment. 236 236 */ 237 237 do { 238 - vblank->last = dev->driver->get_vblank_counter(dev, crtc); 239 - vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); 240 - } while (vblank->last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc); 238 + vblank->last = dev->driver->get_vblank_counter(dev, pipe); 239 + vblrc = drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0); 240 + } while (vblank->last != dev->driver->get_vblank_counter(dev, pipe) && (--count) && vblrc); 241 241 242 242 if (!count) 243 243 vblrc = 0; ··· 247 247 */ 248 248 vblcount = vblank->count; 249 249 diff_ns = timeval_to_ns(&tvblank) - 250 - timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); 250 + timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount)); 251 251 252 252 /* If there is at least 1 msec difference between the last stored 253 253 * timestamp and tvblank, then we are currently executing our ··· 262 262 * hope for the best. 
263 263 */ 264 264 if (vblrc && (abs64(diff_ns) > 1000000)) 265 - store_vblank(dev, crtc, 1, &tvblank); 265 + store_vblank(dev, pipe, 1, &tvblank); 266 266 267 267 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 268 268 } ··· 271 271 { 272 272 struct drm_vblank_crtc *vblank = (void *)arg; 273 273 struct drm_device *dev = vblank->dev; 274 + unsigned int pipe = vblank->pipe; 274 275 unsigned long irqflags; 275 - int crtc = vblank->crtc; 276 276 277 277 if (!dev->vblank_disable_allowed) 278 278 return; 279 279 280 280 spin_lock_irqsave(&dev->vbl_lock, irqflags); 281 281 if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) { 282 - DRM_DEBUG("disabling vblank on crtc %d\n", crtc); 283 - vblank_disable_and_save(dev, crtc); 282 + DRM_DEBUG("disabling vblank on crtc %u\n", pipe); 283 + vblank_disable_and_save(dev, pipe); 284 284 } 285 285 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 286 286 } ··· 293 293 */ 294 294 void drm_vblank_cleanup(struct drm_device *dev) 295 295 { 296 - int crtc; 296 + unsigned int pipe; 297 297 298 298 /* Bail if the driver didn't call drm_vblank_init() */ 299 299 if (dev->num_crtcs == 0) 300 300 return; 301 301 302 - for (crtc = 0; crtc < dev->num_crtcs; crtc++) { 303 - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 302 + for (pipe = 0; pipe < dev->num_crtcs; pipe++) { 303 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 304 304 305 305 WARN_ON(vblank->enabled && 306 306 drm_core_check_feature(dev, DRIVER_MODESET)); ··· 316 316 317 317 /** 318 318 * drm_vblank_init - initialize vblank support 319 - * @dev: drm_device 320 - * @num_crtcs: number of crtcs supported by @dev 319 + * @dev: DRM device 320 + * @num_crtcs: number of CRTCs supported by @dev 321 321 * 322 322 * This function initializes vblank support for @num_crtcs display pipelines. 323 323 * 324 324 * Returns: 325 325 * Zero on success or a negative error code on failure. 
326 326 */ 327 - int drm_vblank_init(struct drm_device *dev, int num_crtcs) 327 + int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs) 328 328 { 329 - int i, ret = -ENOMEM; 329 + int ret = -ENOMEM; 330 + unsigned int i; 330 331 331 332 spin_lock_init(&dev->vbl_lock); 332 333 spin_lock_init(&dev->vblank_time_lock); ··· 342 341 struct drm_vblank_crtc *vblank = &dev->vblank[i]; 343 342 344 343 vblank->dev = dev; 345 - vblank->crtc = i; 344 + vblank->pipe = i; 346 345 init_waitqueue_head(&vblank->queue); 347 346 setup_timer(&vblank->disable_timer, vblank_disable_fn, 348 347 (unsigned long)vblank); ··· 625 624 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 626 625 framedur_ns /= 2; 627 626 } else 628 - DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n", 627 + DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n", 629 628 crtc->base.id); 630 629 631 630 crtc->pixeldur_ns = pixeldur_ns; 632 631 crtc->linedur_ns = linedur_ns; 633 632 crtc->framedur_ns = framedur_ns; 634 633 635 - DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n", 634 + DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n", 636 635 crtc->base.id, mode->crtc_htotal, 637 636 mode->crtc_vtotal, mode->crtc_vdisplay); 638 - DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n", 637 + DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d, pixeldur %d\n", 639 638 crtc->base.id, dotclock, framedur_ns, 640 639 linedur_ns, pixeldur_ns); 641 640 } ··· 644 643 /** 645 644 * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper 646 645 * @dev: DRM device 647 - * @crtc: Which CRTC's vblank timestamp to retrieve 646 + * @pipe: index of CRTC whose vblank timestamp to retrieve 648 647 * @max_error: Desired maximum allowable error in timestamps (nanosecs) 649 648 * On return contains true maximum error of timestamp 650 649 * @vblank_time: Pointer to struct timeval which should receive the timestamp ··· 687 686 * 
DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval. 688 687 * 689 688 */ 690 - int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, 689 + int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 690 + unsigned int pipe, 691 691 int *max_error, 692 692 struct timeval *vblank_time, 693 693 unsigned flags, ··· 702 700 int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; 703 701 bool invbl; 704 702 705 - if (crtc < 0 || crtc >= dev->num_crtcs) { 706 - DRM_ERROR("Invalid crtc %d\n", crtc); 703 + if (pipe >= dev->num_crtcs) { 704 + DRM_ERROR("Invalid crtc %u\n", pipe); 707 705 return -EINVAL; 708 706 } 709 707 ··· 722 720 * Happens during initial modesetting of a crtc. 723 721 */ 724 722 if (framedur_ns == 0) { 725 - DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc); 723 + DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe); 726 724 return -EAGAIN; 727 725 } 728 726 ··· 738 736 * Get vertical and horizontal scanout position vpos, hpos, 739 737 * and bounding timestamps stime, etime, pre/post query. 740 738 */ 741 - vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos, 739 + vbl_status = dev->driver->get_scanout_position(dev, pipe, flags, &vpos, 742 740 &hpos, &stime, &etime); 743 741 744 742 /* Return as no-op if scanout query unsupported or failed. */ 745 743 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { 746 - DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", 747 - crtc, vbl_status); 744 + DRM_DEBUG("crtc %u : scanoutpos query failed [%d].\n", 745 + pipe, vbl_status); 748 746 return -EIO; 749 747 } 750 748 ··· 758 756 759 757 /* Noisy system timing? 
*/ 760 758 if (i == DRM_TIMESTAMP_MAXRETRIES) { 761 - DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n", 762 - crtc, duration_ns/1000, *max_error/1000, i); 759 + DRM_DEBUG("crtc %u: Noisy timestamp %d us > %d us [%d reps].\n", 760 + pipe, duration_ns/1000, *max_error/1000, i); 763 761 } 764 762 765 763 /* Return upper bound of timestamp precision error. */ ··· 792 790 etime = ktime_sub_ns(etime, delta_ns); 793 791 *vblank_time = ktime_to_timeval(etime); 794 792 795 - DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", 796 - crtc, (int)vbl_status, hpos, vpos, 793 + DRM_DEBUG("crtc %u : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", 794 + pipe, (int)vbl_status, hpos, vpos, 797 795 (long)tv_etime.tv_sec, (long)tv_etime.tv_usec, 798 796 (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, 799 797 duration_ns/1000, i); ··· 818 816 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent 819 817 * vblank interval 820 818 * @dev: DRM device 821 - * @crtc: which CRTC's vblank timestamp to retrieve 819 + * @pipe: index of CRTC whose vblank timestamp to retrieve 822 820 * @tvblank: Pointer to target struct timeval which should receive the timestamp 823 821 * @flags: Flags to pass to driver: 824 822 * 0 = Default, ··· 835 833 * True if timestamp is considered to be very precise, false otherwise. 836 834 */ 837 835 static bool 838 - drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, 836 + drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, 839 837 struct timeval *tvblank, unsigned flags) 840 838 { 841 839 int ret; ··· 845 843 846 844 /* Query driver if possible and precision timestamping enabled. 
*/ 847 845 if (dev->driver->get_vblank_timestamp && (max_error > 0)) { 848 - ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error, 846 + ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error, 849 847 tvblank, flags); 850 848 if (ret > 0) 851 849 return true; ··· 862 860 /** 863 861 * drm_vblank_count - retrieve "cooked" vblank counter value 864 862 * @dev: DRM device 865 - * @crtc: which counter to retrieve 863 + * @pipe: index of CRTC for which to retrieve the counter 866 864 * 867 865 * Fetches the "cooked" vblank count value that represents the number of 868 866 * vblank events since the system was booted, including lost events due to ··· 873 871 * Returns: 874 872 * The software vblank counter. 875 873 */ 876 - u32 drm_vblank_count(struct drm_device *dev, int crtc) 874 + u32 drm_vblank_count(struct drm_device *dev, int pipe) 877 875 { 878 - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 876 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 879 877 880 - if (WARN_ON(crtc >= dev->num_crtcs)) 878 + if (WARN_ON(pipe >= dev->num_crtcs)) 881 879 return 0; 880 + 882 881 return vblank->count; 883 882 } 884 883 EXPORT_SYMBOL(drm_vblank_count); ··· 904 901 EXPORT_SYMBOL(drm_crtc_vblank_count); 905 902 906 903 /** 907 - * drm_vblank_count_and_time - retrieve "cooked" vblank counter value 908 - * and the system timestamp corresponding to that vblank counter value. 909 - * 904 + * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the 905 + * system timestamp corresponding to that vblank counter value. 910 906 * @dev: DRM device 911 - * @crtc: which counter to retrieve 907 + * @pipe: index of CRTC whose counter to retrieve 912 908 * @vblanktime: Pointer to struct timeval to receive the vblank timestamp. 913 909 * 914 910 * Fetches the "cooked" vblank count value that represents the number of ··· 915 913 * modesetting activity. 
Returns corresponding system timestamp of the time 916 914 * of the vblank interval that corresponds to the current vblank counter value. 917 915 */ 918 - u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, 916 + u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, 919 917 struct timeval *vblanktime) 920 918 { 921 - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 919 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 922 920 u32 cur_vblank; 923 921 924 - if (WARN_ON(crtc >= dev->num_crtcs)) 922 + if (WARN_ON(pipe >= dev->num_crtcs)) 925 923 return 0; 926 924 927 925 /* ··· 932 930 do { 933 931 cur_vblank = vblank->count; 934 932 smp_rmb(); 935 - *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); 933 + *vblanktime = vblanktimestamp(dev, pipe, cur_vblank); 936 934 smp_rmb(); 937 935 } while (cur_vblank != vblank->count); 938 936 ··· 959 957 /** 960 958 * drm_send_vblank_event - helper to send vblank event after pageflip 961 959 * @dev: DRM device 962 - * @crtc: CRTC in question 960 + * @pipe: CRTC index 963 961 * @e: the event to send 964 962 * 965 963 * Updates sequence # and timestamp on event, and sends it to userspace. ··· 967 965 * 968 966 * This is the legacy version of drm_crtc_send_vblank_event(). 
969 967 */ 970 - void drm_send_vblank_event(struct drm_device *dev, int crtc, 971 - struct drm_pending_vblank_event *e) 968 + void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, 969 + struct drm_pending_vblank_event *e) 972 970 { 973 971 struct timeval now; 974 972 unsigned int seq; 975 973 976 - if (crtc >= 0) { 977 - seq = drm_vblank_count_and_time(dev, crtc, &now); 974 + if (dev->num_crtcs > 0) { 975 + seq = drm_vblank_count_and_time(dev, pipe, &now); 978 976 } else { 979 977 seq = 0; 980 978 981 979 now = get_drm_timestamp(); 982 980 } 983 - e->pipe = crtc; 981 + e->pipe = pipe; 984 982 send_vblank_event(dev, e, seq, &now); 985 983 } 986 984 EXPORT_SYMBOL(drm_send_vblank_event); ··· 1005 1003 /** 1006 1004 * drm_vblank_enable - enable the vblank interrupt on a CRTC 1007 1005 * @dev: DRM device 1008 - * @crtc: CRTC in question 1006 + * @pipe: CRTC index 1007 + * 1008 + * Returns: 1009 + * Zero on success or a negative error code on failure. 1009 1010 */ 1010 - static int drm_vblank_enable(struct drm_device *dev, int crtc) 1011 + static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe) 1011 1012 { 1012 - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1013 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1013 1014 int ret = 0; 1014 1015 1015 1016 assert_spin_locked(&dev->vbl_lock); ··· 1027 1022 * timestamps. Filtercode in drm_handle_vblank() will 1028 1023 * prevent double-accounting of same vblank interval. 
1029 1024 */ 1030 - ret = dev->driver->enable_vblank(dev, crtc); 1031 - DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); 1025 + ret = dev->driver->enable_vblank(dev, pipe); 1026 + DRM_DEBUG("enabling vblank on crtc %u, ret: %d\n", pipe, ret); 1032 1027 if (ret) 1033 1028 atomic_dec(&vblank->refcount); 1034 1029 else { 1035 1030 vblank->enabled = true; 1036 - drm_update_vblank_count(dev, crtc); 1031 + drm_update_vblank_count(dev, pipe); 1037 1032 } 1038 1033 } 1039 1034 ··· 1045 1040 /** 1046 1041 * drm_vblank_get - get a reference count on vblank events 1047 1042 * @dev: DRM device 1048 - * @crtc: which CRTC to own 1043 + * @pipe: index of CRTC to own 1049 1044 * 1050 1045 * Acquire a reference count on vblank events to avoid having them disabled 1051 1046 * while in use. ··· 1053 1048 * This is the legacy version of drm_crtc_vblank_get(). 1054 1049 * 1055 1050 * Returns: 1056 - * Zero on success, nonzero on failure. 1051 + * Zero on success or a negative error code on failure. 1057 1052 */ 1058 - int drm_vblank_get(struct drm_device *dev, int crtc) 1053 + int drm_vblank_get(struct drm_device *dev, unsigned int pipe) 1059 1054 { 1060 - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1055 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1061 1056 unsigned long irqflags; 1062 1057 int ret = 0; 1063 1058 1064 1059 if (!dev->num_crtcs) 1065 1060 return -EINVAL; 1066 1061 1067 - if (WARN_ON(crtc >= dev->num_crtcs)) 1062 + if (WARN_ON(pipe >= dev->num_crtcs)) 1068 1063 return -EINVAL; 1069 1064 1070 1065 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1071 1066 /* Going from 0->1 means we have to enable interrupts again */ 1072 1067 if (atomic_add_return(1, &vblank->refcount) == 1) { 1073 - ret = drm_vblank_enable(dev, crtc); 1068 + ret = drm_vblank_enable(dev, pipe); 1074 1069 } else { 1075 1070 if (!vblank->enabled) { 1076 1071 atomic_dec(&vblank->refcount); ··· 1093 1088 * This is the native kms version of drm_vblank_get(). 
1094 1089 * 1095 1090 * Returns: 1096 - * Zero on success, nonzero on failure. 1091 + * Zero on success or a negative error code on failure. 1097 1092 */ 1098 1093 int drm_crtc_vblank_get(struct drm_crtc *crtc) 1099 1094 { ··· 1102 1097 EXPORT_SYMBOL(drm_crtc_vblank_get); 1103 1098 1104 1099 /** 1105 - * drm_vblank_put - give up ownership of vblank events 1100 + * drm_vblank_put - release ownership of vblank events 1106 1101 * @dev: DRM device 1107 - * @crtc: which counter to give up 1102 + * @pipe: index of CRTC to release 1108 1103 * 1109 1104 * Release ownership of a given vblank counter, turning off interrupts 1110 1105 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. 1111 1106 * 1112 1107 * This is the legacy version of drm_crtc_vblank_put(). 1113 1108 */ 1114 - void drm_vblank_put(struct drm_device *dev, int crtc) 1109 + void drm_vblank_put(struct drm_device *dev, unsigned int pipe) 1115 1110 { 1116 - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1111 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1117 1112 1118 - if (WARN_ON(atomic_read(&vblank->refcount) == 0)) 1113 + if (WARN_ON(pipe >= dev->num_crtcs)) 1119 1114 return; 1120 1115 1121 - if (WARN_ON(crtc >= dev->num_crtcs)) 1116 + if (WARN_ON(atomic_read(&vblank->refcount) == 0)) 1122 1117 return; 1123 1118 1124 1119 /* Last user schedules interrupt disable */ ··· 1152 1147 /** 1153 1148 * drm_wait_one_vblank - wait for one vblank 1154 1149 * @dev: DRM device 1155 - * @crtc: crtc index 1150 + * @pipe: CRTC index 1156 1151 * 1157 1152 * This waits for one vblank to pass on @crtc, using the irq driver interfaces. 1158 1153 * It is a failure to call this when the vblank irq for @crtc is disabled, e.g. 1159 1154 * due to lack of driver support or because the crtc is off. 
1160 1155 */ 1161 - void drm_wait_one_vblank(struct drm_device *dev, int crtc) 1156 + void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe) 1162 1157 { 1158 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1163 1159 int ret; 1164 1160 u32 last; 1165 1161 1166 - ret = drm_vblank_get(dev, crtc); 1167 - if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret)) 1162 + if (WARN_ON(pipe >= dev->num_crtcs)) 1168 1163 return; 1169 1164 1170 - last = drm_vblank_count(dev, crtc); 1165 + ret = drm_vblank_get(dev, pipe); 1166 + if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", pipe, ret)) 1167 + return; 1171 1168 1172 - ret = wait_event_timeout(dev->vblank[crtc].queue, 1173 - last != drm_vblank_count(dev, crtc), 1169 + last = drm_vblank_count(dev, pipe); 1170 + 1171 + ret = wait_event_timeout(vblank->queue, 1172 + last != drm_vblank_count(dev, pipe), 1174 1173 msecs_to_jiffies(100)); 1175 1174 1176 - WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc); 1175 + WARN(ret == 0, "vblank wait timed out on crtc %i\n", pipe); 1177 1176 1178 - drm_vblank_put(dev, crtc); 1177 + drm_vblank_put(dev, pipe); 1179 1178 } 1180 1179 EXPORT_SYMBOL(drm_wait_one_vblank); 1181 1180 ··· 1200 1191 /** 1201 1192 * drm_vblank_off - disable vblank events on a CRTC 1202 1193 * @dev: DRM device 1203 - * @crtc: CRTC in question 1194 + * @pipe: CRTC index 1204 1195 * 1205 1196 * Drivers can use this function to shut down the vblank interrupt handling when 1206 1197 * disabling a crtc. This function ensures that the latest vblank frame count is ··· 1211 1202 * 1212 1203 * This is the legacy version of drm_crtc_vblank_off(). 
1213 1204 */ 1214 - void drm_vblank_off(struct drm_device *dev, int crtc) 1205 + void drm_vblank_off(struct drm_device *dev, unsigned int pipe) 1215 1206 { 1216 - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1207 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1217 1208 struct drm_pending_vblank_event *e, *t; 1218 1209 struct timeval now; 1219 1210 unsigned long irqflags; 1220 1211 unsigned int seq; 1221 1212 1222 - if (WARN_ON(crtc >= dev->num_crtcs)) 1213 + if (WARN_ON(pipe >= dev->num_crtcs)) 1223 1214 return; 1224 1215 1225 1216 spin_lock_irqsave(&dev->event_lock, irqflags); 1226 1217 1227 1218 spin_lock(&dev->vbl_lock); 1228 - vblank_disable_and_save(dev, crtc); 1219 + vblank_disable_and_save(dev, pipe); 1229 1220 wake_up(&vblank->queue); 1230 1221 1231 1222 /* ··· 1239 1230 spin_unlock(&dev->vbl_lock); 1240 1231 1241 1232 /* Send any queued vblank events, lest the natives grow disquiet */ 1242 - seq = drm_vblank_count_and_time(dev, crtc, &now); 1233 + seq = drm_vblank_count_and_time(dev, pipe, &now); 1243 1234 1244 1235 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { 1245 - if (e->pipe != crtc) 1236 + if (e->pipe != pipe) 1246 1237 continue; 1247 1238 DRM_DEBUG("Sending premature vblank event on disable: \ 1248 1239 wanted %d, current %d\n", 1249 1240 e->event.sequence, seq); 1250 1241 list_del(&e->base.link); 1251 - drm_vblank_put(dev, e->pipe); 1242 + drm_vblank_put(dev, pipe); 1252 1243 send_vblank_event(dev, e, seq, &now); 1253 1244 } 1254 1245 spin_unlock_irqrestore(&dev->event_lock, irqflags); ··· 1309 1300 /** 1310 1301 * drm_vblank_on - enable vblank events on a CRTC 1311 1302 * @dev: DRM device 1312 - * @crtc: CRTC in question 1303 + * @pipe: CRTC index 1313 1304 * 1314 1305 * This functions restores the vblank interrupt state captured with 1315 1306 * drm_vblank_off() again. Note that calls to drm_vblank_on() and ··· 1318 1309 * 1319 1310 * This is the legacy version of drm_crtc_vblank_on(). 
1320 1311 */ 1321 - void drm_vblank_on(struct drm_device *dev, int crtc) 1312 + void drm_vblank_on(struct drm_device *dev, unsigned int pipe) 1322 1313 { 1323 - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1314 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1324 1315 unsigned long irqflags; 1325 1316 1326 - if (WARN_ON(crtc >= dev->num_crtcs)) 1317 + if (WARN_ON(pipe >= dev->num_crtcs)) 1327 1318 return; 1328 1319 1329 1320 spin_lock_irqsave(&dev->vbl_lock, irqflags); ··· 1341 1332 * vblank counter value before and after a modeset 1342 1333 */ 1343 1334 vblank->last = 1344 - (dev->driver->get_vblank_counter(dev, crtc) - 1) & 1335 + (dev->driver->get_vblank_counter(dev, pipe) - 1) & 1345 1336 dev->max_vblank_count; 1346 1337 /* 1347 1338 * re-enable interrupts if there are users left, or the ··· 1349 1340 */ 1350 1341 if (atomic_read(&vblank->refcount) != 0 || 1351 1342 (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0)) 1352 - WARN_ON(drm_vblank_enable(dev, crtc)); 1343 + WARN_ON(drm_vblank_enable(dev, pipe)); 1353 1344 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1354 1345 } 1355 1346 EXPORT_SYMBOL(drm_vblank_on); ··· 1374 1365 /** 1375 1366 * drm_vblank_pre_modeset - account for vblanks across mode sets 1376 1367 * @dev: DRM device 1377 - * @crtc: CRTC in question 1368 + * @pipe: CRTC index 1378 1369 * 1379 1370 * Account for vblank events across mode setting events, which will likely 1380 1371 * reset the hardware frame counter. ··· 1394 1385 * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc 1395 1386 * again. 
1396 1387 */ 1397 - void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) 1388 + void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe) 1398 1389 { 1399 - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1390 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1400 1391 1401 1392 /* vblank is not initialized (IRQ not installed ?), or has been freed */ 1402 1393 if (!dev->num_crtcs) 1403 1394 return; 1404 1395 1405 - if (WARN_ON(crtc >= dev->num_crtcs)) 1396 + if (WARN_ON(pipe >= dev->num_crtcs)) 1406 1397 return; 1407 1398 1408 1399 /* ··· 1414 1405 */ 1415 1406 if (!vblank->inmodeset) { 1416 1407 vblank->inmodeset = 0x1; 1417 - if (drm_vblank_get(dev, crtc) == 0) 1408 + if (drm_vblank_get(dev, pipe) == 0) 1418 1409 vblank->inmodeset |= 0x2; 1419 1410 } 1420 1411 } ··· 1423 1414 /** 1424 1415 * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes 1425 1416 * @dev: DRM device 1426 - * @crtc: CRTC in question 1417 + * @pipe: CRTC index 1427 1418 * 1428 1419 * This function again drops the temporary vblank reference acquired in 1429 1420 * drm_vblank_pre_modeset. 
1430 1421 */ 1431 - void drm_vblank_post_modeset(struct drm_device *dev, int crtc) 1422 + void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe) 1432 1423 { 1433 - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1424 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1434 1425 unsigned long irqflags; 1435 1426 1436 1427 /* vblank is not initialized (IRQ not installed ?), or has been freed */ 1437 1428 if (!dev->num_crtcs) 1429 + return; 1430 + 1431 + if (WARN_ON(pipe >= dev->num_crtcs)) 1438 1432 return; 1439 1433 1440 1434 if (vblank->inmodeset) { ··· 1446 1434 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1447 1435 1448 1436 if (vblank->inmodeset & 0x2) 1449 - drm_vblank_put(dev, crtc); 1437 + drm_vblank_put(dev, pipe); 1450 1438 1451 1439 vblank->inmodeset = 0; 1452 1440 } ··· 1468 1456 struct drm_file *file_priv) 1469 1457 { 1470 1458 struct drm_modeset_ctl *modeset = data; 1471 - unsigned int crtc; 1459 + unsigned int pipe; 1472 1460 1473 1461 /* If drm_vblank_init() hasn't been called yet, just no-op */ 1474 1462 if (!dev->num_crtcs) ··· 1478 1466 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1479 1467 return 0; 1480 1468 1481 - crtc = modeset->crtc; 1482 - if (crtc >= dev->num_crtcs) 1469 + pipe = modeset->crtc; 1470 + if (pipe >= dev->num_crtcs) 1483 1471 return -EINVAL; 1484 1472 1485 1473 switch (modeset->cmd) { 1486 1474 case _DRM_PRE_MODESET: 1487 - drm_vblank_pre_modeset(dev, crtc); 1475 + drm_vblank_pre_modeset(dev, pipe); 1488 1476 break; 1489 1477 case _DRM_POST_MODESET: 1490 - drm_vblank_post_modeset(dev, crtc); 1478 + drm_vblank_post_modeset(dev, pipe); 1491 1479 break; 1492 1480 default: 1493 1481 return -EINVAL; ··· 1496 1484 return 0; 1497 1485 } 1498 1486 1499 - static int drm_queue_vblank_event(struct drm_device *dev, int pipe, 1487 + static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, 1500 1488 union drm_wait_vblank *vblwait, 1501 1489 struct drm_file *file_priv) 1502 1490 { ··· 1550 
1538 vblwait->reply.sequence = vblwait->request.sequence; 1551 1539 } 1552 1540 1553 - DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n", 1541 + DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n", 1554 1542 vblwait->request.sequence, seq, pipe); 1555 1543 1556 1544 trace_drm_vblank_event_queued(current->pid, pipe, ··· 1599 1587 struct drm_vblank_crtc *vblank; 1600 1588 union drm_wait_vblank *vblwait = data; 1601 1589 int ret; 1602 - unsigned int flags, seq, crtc, high_crtc; 1590 + unsigned int flags, seq, pipe, high_pipe; 1603 1591 1604 1592 if (!dev->irq_enabled) 1605 1593 return -EINVAL; ··· 1618 1606 } 1619 1607 1620 1608 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; 1621 - high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK); 1622 - if (high_crtc) 1623 - crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT; 1609 + high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK); 1610 + if (high_pipe) 1611 + pipe = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT; 1624 1612 else 1625 - crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; 1626 - if (crtc >= dev->num_crtcs) 1613 + pipe = flags & _DRM_VBLANK_SECONDARY ? 
1 : 0; 1614 + if (pipe >= dev->num_crtcs) 1627 1615 return -EINVAL; 1628 1616 1629 - vblank = &dev->vblank[crtc]; 1617 + vblank = &dev->vblank[pipe]; 1630 1618 1631 - ret = drm_vblank_get(dev, crtc); 1619 + ret = drm_vblank_get(dev, pipe); 1632 1620 if (ret) { 1633 1621 DRM_DEBUG("failed to acquire vblank counter, %d\n", ret); 1634 1622 return ret; 1635 1623 } 1636 - seq = drm_vblank_count(dev, crtc); 1624 + seq = drm_vblank_count(dev, pipe); 1637 1625 1638 1626 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { 1639 1627 case _DRM_VBLANK_RELATIVE: ··· 1650 1638 /* must hold on to the vblank ref until the event fires 1651 1639 * drm_vblank_put will be called asynchronously 1652 1640 */ 1653 - return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); 1641 + return drm_queue_vblank_event(dev, pipe, vblwait, file_priv); 1654 1642 } 1655 1643 1656 1644 if ((flags & _DRM_VBLANK_NEXTONMISS) && ··· 1658 1646 vblwait->request.sequence = seq + 1; 1659 1647 } 1660 1648 1661 - DRM_DEBUG("waiting on vblank count %d, crtc %d\n", 1662 - vblwait->request.sequence, crtc); 1649 + DRM_DEBUG("waiting on vblank count %d, crtc %u\n", 1650 + vblwait->request.sequence, pipe); 1663 1651 vblank->last_wait = vblwait->request.sequence; 1664 1652 DRM_WAIT_ON(ret, vblank->queue, 3 * HZ, 1665 - (((drm_vblank_count(dev, crtc) - 1653 + (((drm_vblank_count(dev, pipe) - 1666 1654 vblwait->request.sequence) <= (1 << 23)) || 1667 1655 !vblank->enabled || 1668 1656 !dev->irq_enabled)); ··· 1670 1658 if (ret != -EINTR) { 1671 1659 struct timeval now; 1672 1660 1673 - vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now); 1661 + vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now); 1674 1662 vblwait->reply.tval_sec = now.tv_sec; 1675 1663 vblwait->reply.tval_usec = now.tv_usec; 1676 1664 ··· 1681 1669 } 1682 1670 1683 1671 done: 1684 - drm_vblank_put(dev, crtc); 1672 + drm_vblank_put(dev, pipe); 1685 1673 return ret; 1686 1674 } 1687 1675 1688 - static void 
drm_handle_vblank_events(struct drm_device *dev, int crtc) 1676 + static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe) 1689 1677 { 1690 1678 struct drm_pending_vblank_event *e, *t; 1691 1679 struct timeval now; ··· 1693 1681 1694 1682 assert_spin_locked(&dev->event_lock); 1695 1683 1696 - seq = drm_vblank_count_and_time(dev, crtc, &now); 1684 + seq = drm_vblank_count_and_time(dev, pipe, &now); 1697 1685 1698 1686 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { 1699 - if (e->pipe != crtc) 1687 + if (e->pipe != pipe) 1700 1688 continue; 1701 1689 if ((seq - e->event.sequence) > (1<<23)) 1702 1690 continue; ··· 1705 1693 e->event.sequence, seq); 1706 1694 1707 1695 list_del(&e->base.link); 1708 - drm_vblank_put(dev, e->pipe); 1696 + drm_vblank_put(dev, pipe); 1709 1697 send_vblank_event(dev, e, seq, &now); 1710 1698 } 1711 1699 1712 - trace_drm_vblank_event(crtc, seq); 1700 + trace_drm_vblank_event(pipe, seq); 1713 1701 } 1714 1702 1715 1703 /** 1716 1704 * drm_handle_vblank - handle a vblank event 1717 1705 * @dev: DRM device 1718 - * @crtc: where this event occurred 1706 + * @pipe: index of CRTC where this event occurred 1719 1707 * 1720 1708 * Drivers should call this routine in their vblank interrupt handlers to 1721 1709 * update the vblank counter and send any signals that may be pending. 1722 1710 * 1723 1711 * This is the legacy version of drm_crtc_handle_vblank(). 
1724 1712 */ 1725 - bool drm_handle_vblank(struct drm_device *dev, int crtc) 1713 + bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe) 1726 1714 { 1727 - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1715 + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1728 1716 u32 vblcount; 1729 1717 s64 diff_ns; 1730 1718 struct timeval tvblank; ··· 1733 1721 if (WARN_ON_ONCE(!dev->num_crtcs)) 1734 1722 return false; 1735 1723 1736 - if (WARN_ON(crtc >= dev->num_crtcs)) 1724 + if (WARN_ON(pipe >= dev->num_crtcs)) 1737 1725 return false; 1738 1726 1739 1727 spin_lock_irqsave(&dev->event_lock, irqflags); ··· 1757 1745 1758 1746 /* Get current timestamp and count. */ 1759 1747 vblcount = vblank->count; 1760 - drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); 1748 + drm_get_last_vbltimestamp(dev, pipe, &tvblank, DRM_CALLED_FROM_VBLIRQ); 1761 1749 1762 1750 /* Compute time difference to timestamp of last vblank */ 1763 1751 diff_ns = timeval_to_ns(&tvblank) - 1764 - timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); 1752 + timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount)); 1765 1753 1766 1754 /* Update vblank timestamp and count if at least 1767 1755 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds ··· 1773 1761 * ignore those for accounting. 1774 1762 */ 1775 1763 if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) 1776 - store_vblank(dev, crtc, 1, &tvblank); 1764 + store_vblank(dev, pipe, 1, &tvblank); 1777 1765 else 1778 - DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", 1779 - crtc, (int) diff_ns); 1766 + DRM_DEBUG("crtc %u: Redundant vblirq ignored. diff_ns = %d\n", 1767 + pipe, (int) diff_ns); 1780 1768 1781 1769 spin_unlock(&dev->vblank_time_lock); 1782 1770 1783 1771 wake_up(&vblank->queue); 1784 - drm_handle_vblank_events(dev, crtc); 1772 + drm_handle_vblank_events(dev, pipe); 1785 1773 1786 1774 spin_unlock_irqrestore(&dev->event_lock, irqflags); 1787 1775
+11 -41
drivers/gpu/drm/drm_modeset_lock.c
··· 55 55 * drm_modeset_acquire_fini(&ctx); 56 56 */ 57 57 58 - 59 58 /** 60 - * __drm_modeset_lock_all - internal helper to grab all modeset locks 61 - * @dev: DRM device 62 - * @trylock: trylock mode for atomic contexts 59 + * drm_modeset_lock_all - take all modeset locks 60 + * @dev: drm device 63 61 * 64 - * This is a special version of drm_modeset_lock_all() which can also be used in 65 - * atomic contexts. Then @trylock must be set to true. 66 - * 67 - * Returns: 68 - * 0 on success or negative error code on failure. 62 + * This function takes all modeset locks, suitable where a more fine-grained 63 + * scheme isn't (yet) implemented. Locks must be dropped with 64 + * drm_modeset_unlock_all. 69 65 */ 70 - int __drm_modeset_lock_all(struct drm_device *dev, 71 - bool trylock) 66 + void drm_modeset_lock_all(struct drm_device *dev) 72 67 { 73 68 struct drm_mode_config *config = &dev->mode_config; 74 69 struct drm_modeset_acquire_ctx *ctx; 75 70 int ret; 76 71 77 - ctx = kzalloc(sizeof(*ctx), 78 - trylock ? 
GFP_ATOMIC : GFP_KERNEL); 79 - if (!ctx) 80 - return -ENOMEM; 72 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 73 + if (WARN_ON(!ctx)) 74 + return; 81 75 82 - if (trylock) { 83 - if (!mutex_trylock(&config->mutex)) { 84 - ret = -EBUSY; 85 - goto out; 86 - } 87 - } else { 88 - mutex_lock(&config->mutex); 89 - } 76 + mutex_lock(&config->mutex); 90 77 91 78 drm_modeset_acquire_init(ctx, 0); 92 - ctx->trylock_only = trylock; 93 79 94 80 retry: 95 81 ret = drm_modeset_lock(&config->connection_mutex, ctx); ··· 94 108 95 109 drm_warn_on_modeset_not_all_locked(dev); 96 110 97 - return 0; 111 + return; 98 112 99 113 fail: 100 114 if (ret == -EDEADLK) { ··· 102 116 goto retry; 103 117 } 104 118 105 - out: 106 119 kfree(ctx); 107 - return ret; 108 - } 109 - EXPORT_SYMBOL(__drm_modeset_lock_all); 110 - 111 - /** 112 - * drm_modeset_lock_all - take all modeset locks 113 - * @dev: drm device 114 - * 115 - * This function takes all modeset locks, suitable where a more fine-grained 116 - * scheme isn't (yet) implemented. Locks must be dropped with 117 - * drm_modeset_unlock_all. 118 - */ 119 - void drm_modeset_lock_all(struct drm_device *dev) 120 - { 121 - WARN_ON(__drm_modeset_lock_all(dev, false) != 0); 122 120 } 123 121 EXPORT_SYMBOL(drm_modeset_lock_all); 124 122
+12 -35
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
··· 65 65 static struct fb_ops exynos_drm_fb_ops = { 66 66 .owner = THIS_MODULE, 67 67 .fb_mmap = exynos_drm_fb_mmap, 68 - .fb_fillrect = cfb_fillrect, 69 - .fb_copyarea = cfb_copyarea, 70 - .fb_imageblit = cfb_imageblit, 68 + .fb_fillrect = drm_fb_helper_cfb_fillrect, 69 + .fb_copyarea = drm_fb_helper_cfb_copyarea, 70 + .fb_imageblit = drm_fb_helper_cfb_imageblit, 71 71 .fb_check_var = drm_fb_helper_check_var, 72 72 .fb_set_par = drm_fb_helper_set_par, 73 73 .fb_blank = drm_fb_helper_blank, ··· 142 142 143 143 mutex_lock(&dev->struct_mutex); 144 144 145 - fbi = framebuffer_alloc(0, &pdev->dev); 146 - if (!fbi) { 145 + fbi = drm_fb_helper_alloc_fbi(helper); 146 + if (IS_ERR(fbi)) { 147 147 DRM_ERROR("failed to allocate fb info.\n"); 148 - ret = -ENOMEM; 148 + ret = PTR_ERR(fbi); 149 149 goto out; 150 150 } 151 151 ··· 165 165 166 166 if (IS_ERR(exynos_gem_obj)) { 167 167 ret = PTR_ERR(exynos_gem_obj); 168 - goto err_release_framebuffer; 168 + goto err_release_fbi; 169 169 } 170 170 171 171 exynos_fbdev->exynos_gem_obj = exynos_gem_obj; ··· 178 178 goto err_destroy_gem; 179 179 } 180 180 181 - helper->fbdev = fbi; 182 - 183 181 fbi->par = helper; 184 182 fbi->flags = FBINFO_FLAG_DEFAULT; 185 183 fbi->fbops = &exynos_drm_fb_ops; 186 184 187 - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); 188 - if (ret) { 189 - DRM_ERROR("failed to allocate cmap.\n"); 190 - goto err_destroy_framebuffer; 191 - } 192 - 193 185 ret = exynos_drm_fbdev_update(helper, sizes, helper->fb); 194 186 if (ret < 0) 195 - goto err_dealloc_cmap; 187 + goto err_destroy_framebuffer; 196 188 197 189 mutex_unlock(&dev->struct_mutex); 198 190 return ret; 199 191 200 - err_dealloc_cmap: 201 - fb_dealloc_cmap(&fbi->cmap); 202 192 err_destroy_framebuffer: 203 193 drm_framebuffer_cleanup(helper->fb); 204 194 err_destroy_gem: 205 195 exynos_drm_gem_destroy(exynos_gem_obj); 206 - err_release_framebuffer: 207 - framebuffer_release(fbi); 196 + err_release_fbi: 197 + drm_fb_helper_release_fbi(helper); 208 198 209 199 
/* 210 200 * if failed, all resources allocated above would be released by ··· 302 312 } 303 313 } 304 314 305 - /* release linux framebuffer */ 306 - if (fb_helper->fbdev) { 307 - struct fb_info *info; 308 - int ret; 309 - 310 - info = fb_helper->fbdev; 311 - ret = unregister_framebuffer(info); 312 - if (ret < 0) 313 - DRM_DEBUG_KMS("failed unregister_framebuffer()\n"); 314 - 315 - if (info->cmap.len) 316 - fb_dealloc_cmap(&info->cmap); 317 - 318 - framebuffer_release(info); 319 - } 315 + drm_fb_helper_unregister_fbi(fb_helper); 316 + drm_fb_helper_release_fbi(fb_helper); 320 317 321 318 drm_fb_helper_fini(fb_helper); 322 319 }
+3 -3
drivers/gpu/drm/gma500/accel_2d.c
··· 276 276 break; 277 277 default: 278 278 /* software fallback */ 279 - cfb_copyarea(info, a); 279 + drm_fb_helper_cfb_copyarea(info, a); 280 280 return; 281 281 } 282 282 283 283 if (!gma_power_begin(dev, false)) { 284 - cfb_copyarea(info, a); 284 + drm_fb_helper_cfb_copyarea(info, a); 285 285 return; 286 286 } 287 287 psb_accel_2d_copy(dev_priv, ··· 308 308 /* Avoid the 8 pixel erratum */ 309 309 if (region->width == 8 || region->height == 8 || 310 310 (info->flags & FBINFO_HWACCEL_DISABLED)) 311 - return cfb_copyarea(info, region); 311 + return drm_fb_helper_cfb_copyarea(info, region); 312 312 313 313 psbfb_copyarea_accel(info, region); 314 314 }
+16 -32
drivers/gpu/drm/gma500/framebuffer.c
··· 194 194 .fb_set_par = drm_fb_helper_set_par, 195 195 .fb_blank = drm_fb_helper_blank, 196 196 .fb_setcolreg = psbfb_setcolreg, 197 - .fb_fillrect = cfb_fillrect, 197 + .fb_fillrect = drm_fb_helper_cfb_fillrect, 198 198 .fb_copyarea = psbfb_copyarea, 199 - .fb_imageblit = cfb_imageblit, 199 + .fb_imageblit = drm_fb_helper_cfb_imageblit, 200 200 .fb_mmap = psbfb_mmap, 201 201 .fb_sync = psbfb_sync, 202 202 .fb_ioctl = psbfb_ioctl, ··· 208 208 .fb_set_par = drm_fb_helper_set_par, 209 209 .fb_blank = drm_fb_helper_blank, 210 210 .fb_setcolreg = psbfb_setcolreg, 211 - .fb_fillrect = cfb_fillrect, 212 - .fb_copyarea = cfb_copyarea, 213 - .fb_imageblit = cfb_imageblit, 211 + .fb_fillrect = drm_fb_helper_cfb_fillrect, 212 + .fb_copyarea = drm_fb_helper_cfb_copyarea, 213 + .fb_imageblit = drm_fb_helper_cfb_imageblit, 214 214 .fb_pan_display = psbfb_pan, 215 215 .fb_mmap = psbfb_mmap, 216 216 .fb_ioctl = psbfb_ioctl, ··· 222 222 .fb_set_par = drm_fb_helper_set_par, 223 223 .fb_blank = drm_fb_helper_blank, 224 224 .fb_setcolreg = psbfb_setcolreg, 225 - .fb_fillrect = cfb_fillrect, 226 - .fb_copyarea = cfb_copyarea, 227 - .fb_imageblit = cfb_imageblit, 225 + .fb_fillrect = drm_fb_helper_cfb_fillrect, 226 + .fb_copyarea = drm_fb_helper_cfb_copyarea, 227 + .fb_imageblit = drm_fb_helper_cfb_imageblit, 228 228 .fb_mmap = psbfb_mmap, 229 229 .fb_ioctl = psbfb_ioctl, 230 230 }; ··· 343 343 struct drm_framebuffer *fb; 344 344 struct psb_framebuffer *psbfb = &fbdev->pfb; 345 345 struct drm_mode_fb_cmd2 mode_cmd; 346 - struct device *device = &dev->pdev->dev; 347 346 int size; 348 347 int ret; 349 348 struct gtt_range *backing; ··· 408 409 409 410 mutex_lock(&dev->struct_mutex); 410 411 411 - info = framebuffer_alloc(0, device); 412 - if (!info) { 413 - ret = -ENOMEM; 412 + info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper); 413 + if (IS_ERR(info)) { 414 + ret = PTR_ERR(info); 414 415 goto out_err1; 415 416 } 416 417 info->par = fbdev; ··· 425 426 psbfb->fbdev = info; 426 427 
427 428 fbdev->psb_fb_helper.fb = fb; 428 - fbdev->psb_fb_helper.fbdev = info; 429 429 430 430 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 431 431 strcpy(info->fix.id, "psbdrmfb"); ··· 438 440 } else /* Software */ 439 441 info->fbops = &psbfb_unaccel_ops; 440 442 441 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 442 - if (ret) { 443 - ret = -ENOMEM; 444 - goto out_unref; 445 - } 446 - 447 443 info->fix.smem_start = dev->mode_config.fb_base; 448 444 info->fix.smem_len = size; 449 445 info->fix.ywrapstep = gtt_roll; ··· 448 456 info->screen_size = size; 449 457 450 458 if (dev_priv->gtt.stolen_size) { 451 - info->apertures = alloc_apertures(1); 452 - if (!info->apertures) { 453 - ret = -ENOMEM; 454 - goto out_unref; 455 - } 456 459 info->apertures->ranges[0].base = dev->mode_config.fb_base; 457 460 info->apertures->ranges[0].size = dev_priv->gtt.stolen_size; 458 461 } ··· 470 483 psb_gtt_free_range(dev, backing); 471 484 else 472 485 drm_gem_object_unreference(&backing->gem); 486 + 487 + drm_fb_helper_release_fbi(&fbdev->psb_fb_helper); 473 488 out_err1: 474 489 mutex_unlock(&dev->struct_mutex); 475 490 psb_gtt_free_range(dev, backing); ··· 559 570 560 571 static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev) 561 572 { 562 - struct fb_info *info; 563 573 struct psb_framebuffer *psbfb = &fbdev->pfb; 564 574 565 - if (fbdev->psb_fb_helper.fbdev) { 566 - info = fbdev->psb_fb_helper.fbdev; 567 - unregister_framebuffer(info); 568 - if (info->cmap.len) 569 - fb_dealloc_cmap(&info->cmap); 570 - framebuffer_release(info); 571 - } 575 + drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper); 576 + drm_fb_helper_release_fbi(&fbdev->psb_fb_helper); 577 + 572 578 drm_fb_helper_fini(&fbdev->psb_fb_helper); 573 579 drm_framebuffer_unregister_private(&psbfb->base); 574 580 drm_framebuffer_cleanup(&psbfb->base);
-15
drivers/gpu/drm/i915/Kconfig
··· 36 36 i810 driver instead, and the Atom z5xx series has an entirely 37 37 different implementation. 38 38 39 - config DRM_I915_FBDEV 40 - bool "Enable legacy fbdev support for the modesetting intel driver" 41 - depends on DRM_I915 42 - select DRM_KMS_FB_HELPER 43 - select FB_CFB_FILLRECT 44 - select FB_CFB_COPYAREA 45 - select FB_CFB_IMAGEBLIT 46 - default y 47 - help 48 - Choose this option if you have a need for the legacy fbdev 49 - support. Note that this support also provide the linux console 50 - support on top of the intel modesetting driver. 51 - 52 - If in doubt, say "Y". 53 - 54 39 config DRM_I915_PRELIMINARY_HW_SUPPORT 55 40 bool "Enable preliminary support for prerelease Intel hardware by default" 56 41 depends on DRM_I915
+1 -1
drivers/gpu/drm/i915/Makefile
··· 59 59 intel_sideband.o \ 60 60 intel_sprite.o 61 61 i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o 62 - i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o 62 + i915-$(CONFIG_DRM_FBDEV_EMULATION) += intel_fbdev.o 63 63 64 64 # modesetting output/encoder code 65 65 i915-y += dvo_ch7017.o \
+1 -1
drivers/gpu/drm/i915/i915_debugfs.c
··· 1868 1868 struct intel_framebuffer *fb; 1869 1869 struct drm_framebuffer *drm_fb; 1870 1870 1871 - #ifdef CONFIG_DRM_I915_FBDEV 1871 + #ifdef CONFIG_DRM_FBDEV_EMULATION 1872 1872 struct drm_i915_private *dev_priv = dev->dev_private; 1873 1873 1874 1874 ifbdev = dev_priv->fbdev;
+1 -1
drivers/gpu/drm/i915/i915_drv.h
··· 1854 1854 1855 1855 struct drm_i915_gem_object *vlv_pctx; 1856 1856 1857 - #ifdef CONFIG_DRM_I915_FBDEV 1857 + #ifdef CONFIG_DRM_FBDEV_EMULATION 1858 1858 /* list of fbdev register on this device */ 1859 1859 struct intel_fbdev *fbdev; 1860 1860 struct work_struct fbdev_suspend_work;
+3 -3
drivers/gpu/drm/i915/intel_display.c
··· 10221 10221 mode_fits_in_fbdev(struct drm_device *dev, 10222 10222 struct drm_display_mode *mode) 10223 10223 { 10224 - #ifdef CONFIG_DRM_I915_FBDEV 10224 + #ifdef CONFIG_DRM_FBDEV_EMULATION 10225 10225 struct drm_i915_private *dev_priv = dev->dev_private; 10226 10226 struct drm_i915_gem_object *obj; 10227 10227 struct drm_framebuffer *fb; ··· 13681 13681 struct intel_plane *primary; 13682 13682 struct intel_plane_state *state; 13683 13683 const uint32_t *intel_primary_formats; 13684 - int num_formats; 13684 + unsigned int num_formats; 13685 13685 13686 13686 primary = kzalloc(sizeof(*primary), GFP_KERNEL); 13687 13687 if (primary == NULL) ··· 14475 14475 return intel_framebuffer_create(dev, mode_cmd, obj); 14476 14476 } 14477 14477 14478 - #ifndef CONFIG_DRM_I915_FBDEV 14478 + #ifndef CONFIG_DRM_FBDEV_EMULATION 14479 14479 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) 14480 14480 { 14481 14481 }
+2 -2
drivers/gpu/drm/i915/intel_dp_mst.c
··· 395 395 396 396 static void intel_connector_add_to_fbdev(struct intel_connector *connector) 397 397 { 398 - #ifdef CONFIG_DRM_I915_FBDEV 398 + #ifdef CONFIG_DRM_FBDEV_EMULATION 399 399 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 400 400 drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base); 401 401 #endif ··· 403 403 404 404 static void intel_connector_remove_from_fbdev(struct intel_connector *connector) 405 405 { 406 - #ifdef CONFIG_DRM_I915_FBDEV 406 + #ifdef CONFIG_DRM_FBDEV_EMULATION 407 407 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 408 408 drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base); 409 409 #endif
+1 -1
drivers/gpu/drm/i915/intel_drv.h
··· 1203 1203 1204 1204 1205 1205 /* legacy fbdev emulation in intel_fbdev.c */ 1206 - #ifdef CONFIG_DRM_I915_FBDEV 1206 + #ifdef CONFIG_DRM_FBDEV_EMULATION 1207 1207 extern int intel_fbdev_init(struct drm_device *dev); 1208 1208 extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie); 1209 1209 extern void intel_fbdev_fini(struct drm_device *dev);
+12 -49
drivers/gpu/drm/i915/intel_fbdev.c
··· 55 55 ret = drm_fb_helper_set_par(info); 56 56 57 57 if (ret == 0) { 58 - /* 59 - * FIXME: fbdev presumes that all callbacks also work from 60 - * atomic contexts and relies on that for emergency oops 61 - * printing. KMS totally doesn't do that and the locking here is 62 - * by far not the only place this goes wrong. Ignore this for 63 - * now until we solve this for real. 64 - */ 65 58 mutex_lock(&fb_helper->dev->struct_mutex); 66 59 intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); 67 60 mutex_unlock(&fb_helper->dev->struct_mutex); ··· 73 80 ret = drm_fb_helper_blank(blank, info); 74 81 75 82 if (ret == 0) { 76 - /* 77 - * FIXME: fbdev presumes that all callbacks also work from 78 - * atomic contexts and relies on that for emergency oops 79 - * printing. KMS totally doesn't do that and the locking here is 80 - * by far not the only place this goes wrong. Ignore this for 81 - * now until we solve this for real. 82 - */ 83 83 mutex_lock(&fb_helper->dev->struct_mutex); 84 84 intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); 85 85 mutex_unlock(&fb_helper->dev->struct_mutex); ··· 92 106 ret = drm_fb_helper_pan_display(var, info); 93 107 94 108 if (ret == 0) { 95 - /* 96 - * FIXME: fbdev presumes that all callbacks also work from 97 - * atomic contexts and relies on that for emergency oops 98 - * printing. KMS totally doesn't do that and the locking here is 99 - * by far not the only place this goes wrong. Ignore this for 100 - * now until we solve this for real. 
101 - */ 102 109 mutex_lock(&fb_helper->dev->struct_mutex); 103 110 intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); 104 111 mutex_unlock(&fb_helper->dev->struct_mutex); ··· 104 125 .owner = THIS_MODULE, 105 126 .fb_check_var = drm_fb_helper_check_var, 106 127 .fb_set_par = intel_fbdev_set_par, 107 - .fb_fillrect = cfb_fillrect, 108 - .fb_copyarea = cfb_copyarea, 109 - .fb_imageblit = cfb_imageblit, 128 + .fb_fillrect = drm_fb_helper_cfb_fillrect, 129 + .fb_copyarea = drm_fb_helper_cfb_copyarea, 130 + .fb_imageblit = drm_fb_helper_cfb_imageblit, 110 131 .fb_pan_display = intel_fbdev_pan_display, 111 132 .fb_blank = intel_fbdev_blank, 112 133 .fb_setcmap = drm_fb_helper_setcmap, ··· 215 236 obj = intel_fb->obj; 216 237 size = obj->base.size; 217 238 218 - info = framebuffer_alloc(0, &dev->pdev->dev); 219 - if (!info) { 220 - ret = -ENOMEM; 239 + info = drm_fb_helper_alloc_fbi(helper); 240 + if (IS_ERR(info)) { 241 + ret = PTR_ERR(info); 221 242 goto out_unpin; 222 243 } 223 244 ··· 226 247 fb = &ifbdev->fb->base; 227 248 228 249 ifbdev->helper.fb = fb; 229 - ifbdev->helper.fbdev = info; 230 250 231 251 strcpy(info->fix.id, "inteldrmfb"); 232 252 233 253 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 234 254 info->fbops = &intelfb_ops; 235 255 236 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 237 - if (ret) { 238 - ret = -ENOMEM; 239 - goto out_unpin; 240 - } 241 256 /* setup aperture base/size for vesafb takeover */ 242 - info->apertures = alloc_apertures(1); 243 - if (!info->apertures) { 244 - ret = -ENOMEM; 245 - goto out_unpin; 246 - } 247 257 info->apertures->ranges[0].base = dev->mode_config.fb_base; 248 258 info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; 249 259 ··· 244 276 size); 245 277 if (!info->screen_base) { 246 278 ret = -ENOSPC; 247 - goto out_unpin; 279 + goto out_destroy_fbi; 248 280 } 249 281 info->screen_size = size; 250 282 ··· 271 303 vga_switcheroo_client_fb_set(dev->pdev, info); 272 304 return 0; 273 305 306 + 
out_destroy_fbi: 307 + drm_fb_helper_release_fbi(helper); 274 308 out_unpin: 275 309 i915_gem_object_ggtt_unpin(obj); 276 310 drm_gem_object_unreference(&obj->base); ··· 514 544 static void intel_fbdev_destroy(struct drm_device *dev, 515 545 struct intel_fbdev *ifbdev) 516 546 { 517 - if (ifbdev->helper.fbdev) { 518 - struct fb_info *info = ifbdev->helper.fbdev; 519 547 520 - unregister_framebuffer(info); 521 - iounmap(info->screen_base); 522 - if (info->cmap.len) 523 - fb_dealloc_cmap(&info->cmap); 524 - 525 - framebuffer_release(info); 526 - } 548 + drm_fb_helper_unregister_fbi(&ifbdev->helper); 549 + drm_fb_helper_release_fbi(&ifbdev->helper); 527 550 528 551 drm_fb_helper_fini(&ifbdev->helper); 529 552 ··· 765 802 if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen) 766 803 memset_io(info->screen_base, 0, info->screen_size); 767 804 768 - fb_set_suspend(info, state); 805 + drm_fb_helper_set_suspend(&ifbdev->helper, state); 769 806 console_unlock(); 770 807 } 771 808
+10 -12
drivers/gpu/drm/mgag200/mgag200_cursor.c
··· 70 70 BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev); 71 71 BUG_ON(pixels_current == pixels_prev); 72 72 73 + obj = drm_gem_object_lookup(dev, file_priv, handle); 74 + if (!obj) 75 + return -ENOENT; 76 + 73 77 ret = mgag200_bo_reserve(pixels_1, true); 74 78 if (ret) { 75 79 WREG8(MGA_CURPOSXL, 0); 76 80 WREG8(MGA_CURPOSXH, 0); 77 - return ret; 81 + goto out_unref; 78 82 } 79 83 ret = mgag200_bo_reserve(pixels_2, true); 80 84 if (ret) { 81 85 WREG8(MGA_CURPOSXL, 0); 82 86 WREG8(MGA_CURPOSXH, 0); 83 87 mgag200_bo_unreserve(pixels_1); 84 - return ret; 88 + goto out_unreserve1; 85 89 } 86 90 87 91 if (!handle) { ··· 109 105 goto out1; 110 106 } 111 107 } 112 - 113 - mutex_lock(&dev->struct_mutex); 114 - obj = drm_gem_object_lookup(dev, file_priv, handle); 115 - if (!obj) { 116 - mutex_unlock(&dev->struct_mutex); 117 - ret = -ENOENT; 118 - goto out1; 119 - } 120 - drm_gem_object_unreference(obj); 121 - mutex_unlock(&dev->struct_mutex); 122 108 123 109 bo = gem_to_mga_bo(obj); 124 110 ret = mgag200_bo_reserve(bo, true); ··· 246 252 if (ret) 247 253 mga_hide_cursor(mdev); 248 254 mgag200_bo_unreserve(pixels_1); 255 + out_unreserve1: 249 256 mgag200_bo_unreserve(pixels_2); 257 + out_unref: 258 + drm_gem_object_unreference_unlocked(obj); 259 + 250 260 return ret; 251 261 } 252 262
+8 -31
drivers/gpu/drm/mgag200/mgag200_fb.c
··· 101 101 const struct fb_fillrect *rect) 102 102 { 103 103 struct mga_fbdev *mfbdev = info->par; 104 - sys_fillrect(info, rect); 104 + drm_fb_helper_sys_fillrect(info, rect); 105 105 mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width, 106 106 rect->height); 107 107 } ··· 110 110 const struct fb_copyarea *area) 111 111 { 112 112 struct mga_fbdev *mfbdev = info->par; 113 - sys_copyarea(info, area); 113 + drm_fb_helper_sys_copyarea(info, area); 114 114 mga_dirty_update(mfbdev, area->dx, area->dy, area->width, 115 115 area->height); 116 116 } ··· 119 119 const struct fb_image *image) 120 120 { 121 121 struct mga_fbdev *mfbdev = info->par; 122 - sys_imageblit(info, image); 122 + drm_fb_helper_sys_imageblit(info, image); 123 123 mga_dirty_update(mfbdev, image->dx, image->dy, image->width, 124 124 image->height); 125 125 } ··· 166 166 struct fb_info *info; 167 167 struct drm_framebuffer *fb; 168 168 struct drm_gem_object *gobj = NULL; 169 - struct device *device = &dev->pdev->dev; 170 169 int ret; 171 170 void *sysram; 172 171 int size; ··· 188 189 if (!sysram) 189 190 return -ENOMEM; 190 191 191 - info = framebuffer_alloc(0, device); 192 - if (info == NULL) 193 - return -ENOMEM; 192 + info = drm_fb_helper_alloc_fbi(helper); 193 + if (IS_ERR(info)) 194 + return PTR_ERR(info); 194 195 195 196 info->par = mfbdev; 196 197 ··· 205 206 206 207 /* setup helper */ 207 208 mfbdev->helper.fb = fb; 208 - mfbdev->helper.fbdev = info; 209 - 210 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 211 - if (ret) { 212 - DRM_ERROR("%s: can't allocate color map\n", info->fix.id); 213 - ret = -ENOMEM; 214 - goto out; 215 - } 216 209 217 210 strcpy(info->fix.id, "mgadrmfb"); 218 211 ··· 212 221 info->fbops = &mgag200fb_ops; 213 222 214 223 /* setup aperture base/size for vesafb takeover */ 215 - info->apertures = alloc_apertures(1); 216 - if (!info->apertures) { 217 - ret = -ENOMEM; 218 - goto out; 219 - } 220 224 info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base; 221 225 
info->apertures->ranges[0].size = mdev->mc.vram_size; 222 226 ··· 226 240 DRM_DEBUG_KMS("allocated %dx%d\n", 227 241 fb->width, fb->height); 228 242 return 0; 229 - out: 230 - return ret; 231 243 } 232 244 233 245 static int mga_fbdev_destroy(struct drm_device *dev, 234 246 struct mga_fbdev *mfbdev) 235 247 { 236 - struct fb_info *info; 237 248 struct mga_framebuffer *mfb = &mfbdev->mfb; 238 249 239 - if (mfbdev->helper.fbdev) { 240 - info = mfbdev->helper.fbdev; 241 - 242 - unregister_framebuffer(info); 243 - if (info->cmap.len) 244 - fb_dealloc_cmap(&info->cmap); 245 - framebuffer_release(info); 246 - } 250 + drm_fb_helper_unregister_fbi(&mfbdev->helper); 251 + drm_fb_helper_release_fbi(&mfbdev->helper); 247 252 248 253 if (mfb->obj) { 249 254 drm_gem_object_unreference_unlocked(mfb->obj);
+4 -12
drivers/gpu/drm/mgag200/mgag200_main.c
··· 345 345 uint64_t *offset) 346 346 { 347 347 struct drm_gem_object *obj; 348 - int ret; 349 348 struct mgag200_bo *bo; 350 349 351 - mutex_lock(&dev->struct_mutex); 352 350 obj = drm_gem_object_lookup(dev, file, handle); 353 - if (obj == NULL) { 354 - ret = -ENOENT; 355 - goto out_unlock; 356 - } 351 + if (obj == NULL) 352 + return -ENOENT; 357 353 358 354 bo = gem_to_mga_bo(obj); 359 355 *offset = mgag200_bo_mmap_offset(bo); 360 356 361 - drm_gem_object_unreference(obj); 362 - ret = 0; 363 - out_unlock: 364 - mutex_unlock(&dev->struct_mutex); 365 - return ret; 366 - 357 + drm_gem_object_unreference_unlocked(obj); 358 + return 0; 367 359 }
+10 -24
drivers/gpu/drm/msm/msm_fbdev.c
··· 43 43 /* Note: to properly handle manual update displays, we wrap the 44 44 * basic fbdev ops which write to the framebuffer 45 45 */ 46 - .fb_read = fb_sys_read, 47 - .fb_write = fb_sys_write, 48 - .fb_fillrect = sys_fillrect, 49 - .fb_copyarea = sys_copyarea, 50 - .fb_imageblit = sys_imageblit, 46 + .fb_read = drm_fb_helper_sys_read, 47 + .fb_write = drm_fb_helper_sys_write, 48 + .fb_fillrect = drm_fb_helper_sys_fillrect, 49 + .fb_copyarea = drm_fb_helper_sys_copyarea, 50 + .fb_imageblit = drm_fb_helper_sys_imageblit, 51 51 .fb_mmap = msm_fbdev_mmap, 52 52 53 53 .fb_check_var = drm_fb_helper_check_var, ··· 144 144 goto fail_unlock; 145 145 } 146 146 147 - fbi = framebuffer_alloc(0, dev->dev); 148 - if (!fbi) { 147 + fbi = drm_fb_helper_alloc_fbi(helper); 148 + if (IS_ERR(fbi)) { 149 149 dev_err(dev->dev, "failed to allocate fb info\n"); 150 - ret = -ENOMEM; 150 + ret = PTR_ERR(fbi); 151 151 goto fail_unlock; 152 152 } 153 153 ··· 155 155 156 156 fbdev->fb = fb; 157 157 helper->fb = fb; 158 - helper->fbdev = fbi; 159 158 160 159 fbi->par = helper; 161 160 fbi->flags = FBINFO_DEFAULT; 162 161 fbi->fbops = &msm_fb_ops; 163 162 164 163 strcpy(fbi->fix.id, "msm"); 165 - 166 - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); 167 - if (ret) { 168 - ret = -ENOMEM; 169 - goto fail_unlock; 170 - } 171 164 172 165 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); 173 166 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); ··· 184 191 fail: 185 192 186 193 if (ret) { 187 - framebuffer_release(fbi); 188 194 if (fb) { 189 195 drm_framebuffer_unregister_private(fb); 190 196 drm_framebuffer_remove(fb); ··· 258 266 struct msm_drm_private *priv = dev->dev_private; 259 267 struct drm_fb_helper *helper = priv->fbdev; 260 268 struct msm_fbdev *fbdev; 261 - struct fb_info *fbi; 262 269 263 270 DBG(); 264 271 265 - fbi = helper->fbdev; 266 - 267 - /* only cleanup framebuffer if it is present */ 268 - if (fbi) { 269 - unregister_framebuffer(fbi); 270 - 
framebuffer_release(fbi); 271 - } 272 + drm_fb_helper_unregister_fbi(helper); 273 + drm_fb_helper_release_fbi(helper); 272 274 273 275 drm_fb_helper_fini(helper); 274 276
+1 -1
drivers/gpu/drm/nouveau/dispnv04/overlay.c
··· 261 261 { 262 262 struct nouveau_drm *drm = nouveau_drm(device); 263 263 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); 264 - int num_formats = ARRAY_SIZE(formats); 264 + unsigned int num_formats = ARRAY_SIZE(formats); 265 265 int ret; 266 266 267 267 if (!plane)
+12 -27
drivers/gpu/drm/nouveau/nouveau_fbcon.c
··· 84 84 85 85 if (ret != -ENODEV) 86 86 nouveau_fbcon_gpu_lockup(info); 87 - cfb_fillrect(info, rect); 87 + drm_fb_helper_cfb_fillrect(info, rect); 88 88 } 89 89 90 90 static void ··· 116 116 117 117 if (ret != -ENODEV) 118 118 nouveau_fbcon_gpu_lockup(info); 119 - cfb_copyarea(info, image); 119 + drm_fb_helper_cfb_copyarea(info, image); 120 120 } 121 121 122 122 static void ··· 148 148 149 149 if (ret != -ENODEV) 150 150 nouveau_fbcon_gpu_lockup(info); 151 - cfb_imageblit(info, image); 151 + drm_fb_helper_cfb_imageblit(info, image); 152 152 } 153 153 154 154 static int ··· 197 197 .owner = THIS_MODULE, 198 198 .fb_check_var = drm_fb_helper_check_var, 199 199 .fb_set_par = drm_fb_helper_set_par, 200 - .fb_fillrect = cfb_fillrect, 201 - .fb_copyarea = cfb_copyarea, 202 - .fb_imageblit = cfb_imageblit, 200 + .fb_fillrect = drm_fb_helper_cfb_fillrect, 201 + .fb_copyarea = drm_fb_helper_cfb_copyarea, 202 + .fb_imageblit = drm_fb_helper_cfb_imageblit, 203 203 .fb_pan_display = drm_fb_helper_pan_display, 204 204 .fb_blank = drm_fb_helper_blank, 205 205 .fb_setcmap = drm_fb_helper_setcmap, ··· 319 319 struct nouveau_channel *chan; 320 320 struct nouveau_bo *nvbo; 321 321 struct drm_mode_fb_cmd2 mode_cmd; 322 - struct pci_dev *pdev = dev->pdev; 323 322 int size, ret; 324 323 325 324 mode_cmd.width = sizes->surface_width; ··· 364 365 365 366 mutex_lock(&dev->struct_mutex); 366 367 367 - info = framebuffer_alloc(0, &pdev->dev); 368 - if (!info) { 369 - ret = -ENOMEM; 368 + info = drm_fb_helper_alloc_fbi(helper); 369 + if (IS_ERR(info)) { 370 + ret = PTR_ERR(info); 370 371 goto out_unlock; 371 372 } 372 373 info->skip_vt_switch = 1; 373 - 374 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 375 - if (ret) { 376 - ret = -ENOMEM; 377 - framebuffer_release(info); 378 - goto out_unlock; 379 - } 380 374 381 375 info->par = fbcon; 382 376 ··· 380 388 381 389 /* setup helper */ 382 390 fbcon->helper.fb = fb; 383 - fbcon->helper.fbdev = info; 384 391 385 392 strcpy(info->fix.id, 
"nouveaufb"); 386 393 if (!chan) ··· 441 450 nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon) 442 451 { 443 452 struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb; 444 - struct fb_info *info; 445 453 446 - if (fbcon->helper.fbdev) { 447 - info = fbcon->helper.fbdev; 448 - unregister_framebuffer(info); 449 - if (info->cmap.len) 450 - fb_dealloc_cmap(&info->cmap); 451 - framebuffer_release(info); 452 - } 454 + drm_fb_helper_unregister_fbi(&fbcon->helper); 455 + drm_fb_helper_release_fbi(&fbcon->helper); 453 456 454 457 if (nouveau_fb->nvbo) { 455 458 nouveau_bo_unmap(nouveau_fb->nvbo); ··· 481 496 console_lock(); 482 497 if (state == FBINFO_STATE_RUNNING) 483 498 nouveau_fbcon_accel_restore(dev); 484 - fb_set_suspend(drm->fbcon->helper.fbdev, state); 499 + drm_fb_helper_set_suspend(&drm->fbcon->helper, state); 485 500 if (state != FBINFO_STATE_RUNNING) 486 501 nouveau_fbcon_accel_save_disable(dev); 487 502 console_unlock();
-2
drivers/gpu/drm/nouveau/nouveau_ttm.c
··· 424 424 void 425 425 nouveau_ttm_fini(struct nouveau_drm *drm) 426 426 { 427 - mutex_lock(&drm->dev->struct_mutex); 428 427 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM); 429 428 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT); 430 - mutex_unlock(&drm->dev->struct_mutex); 431 429 432 430 ttm_bo_device_release(&drm->ttm.bdev); 433 431
+4 -2
drivers/gpu/drm/omapdrm/omap_crtc.c
··· 388 388 copy_timings_drm_to_omap(&omap_crtc->timings, mode); 389 389 } 390 390 391 - static void omap_crtc_atomic_begin(struct drm_crtc *crtc) 391 + static void omap_crtc_atomic_begin(struct drm_crtc *crtc, 392 + struct drm_crtc_state *old_crtc_state) 392 393 { 393 394 } 394 395 395 - static void omap_crtc_atomic_flush(struct drm_crtc *crtc) 396 + static void omap_crtc_atomic_flush(struct drm_crtc *crtc, 397 + struct drm_crtc_state *old_crtc_state) 396 398 { 397 399 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 398 400
+13 -25
drivers/gpu/drm/omapdrm/omap_fbdev.c
··· 86 86 /* Note: to properly handle manual update displays, we wrap the 87 87 * basic fbdev ops which write to the framebuffer 88 88 */ 89 - .fb_read = fb_sys_read, 90 - .fb_write = fb_sys_write, 91 - .fb_fillrect = sys_fillrect, 92 - .fb_copyarea = sys_copyarea, 93 - .fb_imageblit = sys_imageblit, 89 + .fb_read = drm_fb_helper_sys_read, 90 + .fb_write = drm_fb_helper_sys_write, 91 + .fb_fillrect = drm_fb_helper_sys_fillrect, 92 + .fb_copyarea = drm_fb_helper_sys_copyarea, 93 + .fb_imageblit = drm_fb_helper_sys_imageblit, 94 94 95 95 .fb_check_var = drm_fb_helper_check_var, 96 96 .fb_set_par = drm_fb_helper_set_par, ··· 179 179 180 180 mutex_lock(&dev->struct_mutex); 181 181 182 - fbi = framebuffer_alloc(0, dev->dev); 183 - if (!fbi) { 182 + fbi = drm_fb_helper_alloc_fbi(helper); 183 + if (IS_ERR(fbi)) { 184 184 dev_err(dev->dev, "failed to allocate fb info\n"); 185 - ret = -ENOMEM; 185 + ret = PTR_ERR(fbi); 186 186 goto fail_unlock; 187 187 } 188 188 ··· 190 190 191 191 fbdev->fb = fb; 192 192 helper->fb = fb; 193 - helper->fbdev = fbi; 194 193 195 194 fbi->par = helper; 196 195 fbi->flags = FBINFO_DEFAULT; 197 196 fbi->fbops = &omap_fb_ops; 198 197 199 198 strcpy(fbi->fix.id, MODULE_NAME); 200 - 201 - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); 202 - if (ret) { 203 - ret = -ENOMEM; 204 - goto fail_unlock; 205 - } 206 199 207 200 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); 208 201 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); ··· 229 236 fail: 230 237 231 238 if (ret) { 232 - if (fbi) 233 - framebuffer_release(fbi); 239 + 240 + drm_fb_helper_release_fbi(helper); 241 + 234 242 if (fb) { 235 243 drm_framebuffer_unregister_private(fb); 236 244 drm_framebuffer_remove(fb); ··· 306 312 struct omap_drm_private *priv = dev->dev_private; 307 313 struct drm_fb_helper *helper = priv->fbdev; 308 314 struct omap_fbdev *fbdev; 309 - struct fb_info *fbi; 310 315 311 316 DBG(); 312 317 313 - fbi = helper->fbdev; 314 - 315 - /* only cleanup 
framebuffer if it is present */ 316 - if (fbi) { 317 - unregister_framebuffer(fbi); 318 - framebuffer_release(fbi); 319 - } 318 + drm_fb_helper_unregister_fbi(helper); 319 + drm_fb_helper_release_fbi(helper); 320 320 321 321 drm_fb_helper_fini(helper); 322 322
+13 -27
drivers/gpu/drm/qxl/qxl_fb.c
··· 197 197 { 198 198 struct qxl_fbdev *qfbdev = info->par; 199 199 200 - sys_fillrect(info, rect); 200 + drm_fb_helper_sys_fillrect(info, rect); 201 201 qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width, 202 202 rect->height); 203 203 } ··· 207 207 { 208 208 struct qxl_fbdev *qfbdev = info->par; 209 209 210 - sys_copyarea(info, area); 210 + drm_fb_helper_sys_copyarea(info, area); 211 211 qxl_dirty_update(qfbdev, area->dx, area->dy, area->width, 212 212 area->height); 213 213 } ··· 217 217 { 218 218 struct qxl_fbdev *qfbdev = info->par; 219 219 220 - sys_imageblit(info, image); 220 + drm_fb_helper_sys_imageblit(info, image); 221 221 qxl_dirty_update(qfbdev, image->dx, image->dy, image->width, 222 222 image->height); 223 223 } ··· 345 345 struct drm_mode_fb_cmd2 mode_cmd; 346 346 struct drm_gem_object *gobj = NULL; 347 347 struct qxl_bo *qbo = NULL; 348 - struct device *device = &qdev->pdev->dev; 349 348 int ret; 350 349 int size; 351 350 int bpp = sizes->surface_bpp; ··· 373 374 shadow); 374 375 size = mode_cmd.pitches[0] * mode_cmd.height; 375 376 376 - info = framebuffer_alloc(0, device); 377 - if (info == NULL) { 378 - ret = -ENOMEM; 377 + info = drm_fb_helper_alloc_fbi(&qfbdev->helper); 378 + if (IS_ERR(info)) { 379 + ret = PTR_ERR(info); 379 380 goto out_unref; 380 381 } 381 382 ··· 387 388 388 389 /* setup helper with fb data */ 389 390 qfbdev->helper.fb = fb; 390 - qfbdev->helper.fbdev = info; 391 + 391 392 qfbdev->shadow = shadow; 392 393 strcpy(info->fix.id, "qxldrmfb"); 393 394 ··· 409 410 sizes->fb_height); 410 411 411 412 /* setup aperture base/size for vesafb takeover */ 412 - info->apertures = alloc_apertures(1); 413 - if (!info->apertures) { 414 - ret = -ENOMEM; 415 - goto out_unref; 416 - } 417 413 info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base; 418 414 info->apertures->ranges[0].size = qdev->vram_size; 419 415 ··· 417 423 418 424 if (info->screen_base == NULL) { 419 425 ret = -ENOSPC; 420 - goto out_unref; 421 - } 422 - 
423 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 424 - if (ret) { 425 - ret = -ENOMEM; 426 - goto out_unref; 426 + goto out_destroy_fbi; 427 427 } 428 428 429 429 info->fbdefio = &qxl_defio; ··· 429 441 DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height); 430 442 return 0; 431 443 444 + out_destroy_fbi: 445 + drm_fb_helper_release_fbi(&qfbdev->helper); 432 446 out_unref: 433 447 if (qbo) { 434 448 ret = qxl_bo_reserve(qbo, false); ··· 469 479 470 480 static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev) 471 481 { 472 - struct fb_info *info; 473 482 struct qxl_framebuffer *qfb = &qfbdev->qfb; 474 483 475 - if (qfbdev->helper.fbdev) { 476 - info = qfbdev->helper.fbdev; 484 + drm_fb_helper_unregister_fbi(&qfbdev->helper); 485 + drm_fb_helper_release_fbi(&qfbdev->helper); 477 486 478 - unregister_framebuffer(info); 479 - framebuffer_release(info); 480 - } 481 487 if (qfb->obj) { 482 488 qxlfb_destroy_pinned_object(qfb->obj); 483 489 qfb->obj = NULL; ··· 543 557 544 558 void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state) 545 559 { 546 - fb_set_suspend(qdev->mode_info.qfbdev->helper.fbdev, state); 560 + drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state); 547 561 } 548 562 549 563 bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
+1 -3
drivers/gpu/drm/qxl/qxl_object.c
··· 272 272 return; 273 273 dev_err(qdev->dev, "Userspace still has active objects !\n"); 274 274 list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) { 275 - mutex_lock(&qdev->ddev->struct_mutex); 276 275 dev_err(qdev->dev, "%p %p %lu %lu force free\n", 277 276 &bo->gem_base, bo, (unsigned long)bo->gem_base.size, 278 277 *((unsigned long *)&bo->gem_base.refcount)); ··· 279 280 list_del_init(&bo->list); 280 281 mutex_unlock(&qdev->gem.mutex); 281 282 /* this should unref the ttm bo */ 282 - drm_gem_object_unreference(&bo->gem_base); 283 - mutex_unlock(&qdev->ddev->struct_mutex); 283 + drm_gem_object_unreference_unlocked(&bo->gem_base); 284 284 } 285 285 } 286 286
+12 -30
drivers/gpu/drm/radeon/radeon_fb.c
··· 82 82 .owner = THIS_MODULE, 83 83 .fb_check_var = drm_fb_helper_check_var, 84 84 .fb_set_par = radeon_fb_helper_set_par, 85 - .fb_fillrect = cfb_fillrect, 86 - .fb_copyarea = cfb_copyarea, 87 - .fb_imageblit = cfb_imageblit, 85 + .fb_fillrect = drm_fb_helper_cfb_fillrect, 86 + .fb_copyarea = drm_fb_helper_cfb_copyarea, 87 + .fb_imageblit = drm_fb_helper_cfb_imageblit, 88 88 .fb_pan_display = drm_fb_helper_pan_display, 89 89 .fb_blank = drm_fb_helper_blank, 90 90 .fb_setcmap = drm_fb_helper_setcmap, ··· 227 227 struct drm_mode_fb_cmd2 mode_cmd; 228 228 struct drm_gem_object *gobj = NULL; 229 229 struct radeon_bo *rbo = NULL; 230 - struct device *device = &rdev->pdev->dev; 231 230 int ret; 232 231 unsigned long tmp; 233 232 ··· 249 250 rbo = gem_to_radeon_bo(gobj); 250 251 251 252 /* okay we have an object now allocate the framebuffer */ 252 - info = framebuffer_alloc(0, device); 253 - if (info == NULL) { 254 - ret = -ENOMEM; 253 + info = drm_fb_helper_alloc_fbi(helper); 254 + if (IS_ERR(info)) { 255 + ret = PTR_ERR(info); 255 256 goto out_unref; 256 257 } 257 258 ··· 261 262 ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); 262 263 if (ret) { 263 264 DRM_ERROR("failed to initialize framebuffer %d\n", ret); 264 - goto out_unref; 265 + goto out_destroy_fbi; 265 266 } 266 267 267 268 fb = &rfbdev->rfb.base; 268 269 269 270 /* setup helper */ 270 271 rfbdev->helper.fb = fb; 271 - rfbdev->helper.fbdev = info; 272 272 273 273 memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); 274 274 ··· 287 289 drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); 288 290 289 291 /* setup aperture base/size for vesafb takeover */ 290 - info->apertures = alloc_apertures(1); 291 - if (!info->apertures) { 292 - ret = -ENOMEM; 293 - goto out_unref; 294 - } 295 292 info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; 296 293 info->apertures->ranges[0].size = rdev->mc.aper_size; 297 294 ··· 294 301 295 302 if (info->screen_base 
== NULL) { 296 303 ret = -ENOSPC; 297 - goto out_unref; 298 - } 299 - 300 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 301 - if (ret) { 302 - ret = -ENOMEM; 303 - goto out_unref; 304 + goto out_destroy_fbi; 304 305 } 305 306 306 307 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); ··· 306 319 vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); 307 320 return 0; 308 321 322 + out_destroy_fbi: 323 + drm_fb_helper_release_fbi(helper); 309 324 out_unref: 310 325 if (rbo) { 311 326 ··· 328 339 329 340 static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) 330 341 { 331 - struct fb_info *info; 332 342 struct radeon_framebuffer *rfb = &rfbdev->rfb; 333 343 334 - if (rfbdev->helper.fbdev) { 335 - info = rfbdev->helper.fbdev; 336 - 337 - unregister_framebuffer(info); 338 - if (info->cmap.len) 339 - fb_dealloc_cmap(&info->cmap); 340 - framebuffer_release(info); 341 - } 344 + drm_fb_helper_unregister_fbi(&rfbdev->helper); 345 + drm_fb_helper_release_fbi(&rfbdev->helper); 342 346 343 347 if (rfb->obj) { 344 348 radeonfb_destroy_pinned_object(rfb->obj);
+12 -35
drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
··· 37 37 static struct fb_ops rockchip_drm_fbdev_ops = { 38 38 .owner = THIS_MODULE, 39 39 .fb_mmap = rockchip_fbdev_mmap, 40 - .fb_fillrect = cfb_fillrect, 41 - .fb_copyarea = cfb_copyarea, 42 - .fb_imageblit = cfb_imageblit, 40 + .fb_fillrect = drm_fb_helper_cfb_fillrect, 41 + .fb_copyarea = drm_fb_helper_cfb_copyarea, 42 + .fb_imageblit = drm_fb_helper_cfb_imageblit, 43 43 .fb_check_var = drm_fb_helper_check_var, 44 44 .fb_set_par = drm_fb_helper_set_par, 45 45 .fb_blank = drm_fb_helper_blank, ··· 77 77 78 78 private->fbdev_bo = &rk_obj->base; 79 79 80 - fbi = framebuffer_alloc(0, dev->dev); 81 - if (!fbi) { 82 - dev_err(dev->dev, "Failed to allocate framebuffer info.\n"); 83 - ret = -ENOMEM; 80 + fbi = drm_fb_helper_alloc_fbi(helper); 81 + if (IS_ERR(fbi)) { 82 + dev_err(dev->dev, "Failed to create framebuffer info.\n"); 83 + ret = PTR_ERR(fbi); 84 84 goto err_rockchip_gem_free_object; 85 85 } 86 86 ··· 89 89 if (IS_ERR(helper->fb)) { 90 90 dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); 91 91 ret = PTR_ERR(helper->fb); 92 - goto err_framebuffer_release; 92 + goto err_release_fbi; 93 93 } 94 - 95 - helper->fbdev = fbi; 96 94 97 95 fbi->par = helper; 98 96 fbi->flags = FBINFO_FLAG_DEFAULT; 99 97 fbi->fbops = &rockchip_drm_fbdev_ops; 100 - 101 - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); 102 - if (ret) { 103 - dev_err(dev->dev, "Failed to allocate color map.\n"); 104 - goto err_drm_framebuffer_unref; 105 - } 106 98 107 99 fb = helper->fb; 108 100 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); ··· 116 124 117 125 return 0; 118 126 119 - err_drm_framebuffer_unref: 120 - drm_framebuffer_unreference(helper->fb); 121 - err_framebuffer_release: 122 - framebuffer_release(fbi); 127 + err_release_fbi: 128 + drm_fb_helper_release_fbi(helper); 123 129 err_rockchip_gem_free_object: 124 130 rockchip_gem_free_object(&rk_obj->base); 125 131 return ret; ··· 180 190 181 191 helper = &private->fbdev_helper; 182 192 183 - if (helper->fbdev) { 184 - struct 
fb_info *info; 185 - int ret; 186 - 187 - info = helper->fbdev; 188 - ret = unregister_framebuffer(info); 189 - if (ret < 0) 190 - DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n", 191 - ret); 192 - 193 - if (info->cmap.len) 194 - fb_dealloc_cmap(&info->cmap); 195 - 196 - framebuffer_release(info); 197 - } 193 + drm_fb_helper_unregister_fbi(helper); 194 + drm_fb_helper_release_fbi(helper); 198 195 199 196 if (helper->fb) 200 197 drm_framebuffer_unreference(helper->fb);
+4 -8
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
··· 200 200 struct drm_gem_object *obj; 201 201 int ret; 202 202 203 - mutex_lock(&dev->struct_mutex); 204 - 205 203 obj = drm_gem_object_lookup(dev, file_priv, handle); 206 204 if (!obj) { 207 205 DRM_ERROR("failed to lookup gem object.\n"); 208 - ret = -EINVAL; 209 - goto unlock; 206 + return -EINVAL; 210 207 } 211 208 212 209 ret = drm_gem_create_mmap_offset(obj); ··· 214 217 DRM_DEBUG_KMS("offset = 0x%llx\n", *offset); 215 218 216 219 out: 217 - drm_gem_object_unreference(obj); 218 - unlock: 219 - mutex_unlock(&dev->struct_mutex); 220 - return ret; 220 + drm_gem_object_unreference_unlocked(obj); 221 + 222 + return 0; 221 223 } 222 224 223 225 /*
+9 -26
drivers/gpu/drm/tegra/fb.c
··· 184 184 #ifdef CONFIG_DRM_TEGRA_FBDEV 185 185 static struct fb_ops tegra_fb_ops = { 186 186 .owner = THIS_MODULE, 187 - .fb_fillrect = sys_fillrect, 188 - .fb_copyarea = sys_copyarea, 189 - .fb_imageblit = sys_imageblit, 187 + .fb_fillrect = drm_fb_helper_sys_fillrect, 188 + .fb_copyarea = drm_fb_helper_sys_copyarea, 189 + .fb_imageblit = drm_fb_helper_sys_imageblit, 190 190 .fb_check_var = drm_fb_helper_check_var, 191 191 .fb_set_par = drm_fb_helper_set_par, 192 192 .fb_blank = drm_fb_helper_blank, ··· 224 224 if (IS_ERR(bo)) 225 225 return PTR_ERR(bo); 226 226 227 - info = framebuffer_alloc(0, drm->dev); 228 - if (!info) { 227 + info = drm_fb_helper_alloc_fbi(helper); 228 + if (IS_ERR(info)) { 229 229 dev_err(drm->dev, "failed to allocate framebuffer info\n"); 230 230 drm_gem_object_unreference_unlocked(&bo->gem); 231 - return -ENOMEM; 231 + return PTR_ERR(info); 232 232 } 233 233 234 234 fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1); ··· 247 247 info->par = helper; 248 248 info->flags = FBINFO_FLAG_DEFAULT; 249 249 info->fbops = &tegra_fb_ops; 250 - 251 - err = fb_alloc_cmap(&info->cmap, 256, 0); 252 - if (err < 0) { 253 - dev_err(drm->dev, "failed to allocate color map: %d\n", err); 254 - goto destroy; 255 - } 256 250 257 251 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 258 252 drm_fb_helper_fill_var(info, helper, fb->width, fb->height); ··· 276 282 drm_framebuffer_unregister_private(fb); 277 283 tegra_fb_destroy(fb); 278 284 release: 279 - framebuffer_release(info); 285 + drm_fb_helper_release_fbi(helper); 280 286 return err; 281 287 } 282 288 ··· 341 347 342 348 static void tegra_fbdev_exit(struct tegra_fbdev *fbdev) 343 349 { 344 - struct fb_info *info = fbdev->base.fbdev; 345 350 346 - if (info) { 347 - int err; 348 - 349 - err = unregister_framebuffer(info); 350 - if (err < 0) 351 - DRM_DEBUG_KMS("failed to unregister framebuffer\n"); 352 - 353 - if (info->cmap.len) 354 - fb_dealloc_cmap(&info->cmap); 355 - 356 - framebuffer_release(info); 
357 - } 351 + drm_fb_helper_unregister_fbi(&fbdev->base); 352 + drm_fb_helper_release_fbi(&fbdev->base); 358 353 359 354 if (fbdev->fb) { 360 355 drm_framebuffer_unregister_private(&fbdev->fb->base);
+2 -2
drivers/gpu/drm/ttm/ttm_tt.c
··· 340 340 swap_storage = shmem_file_setup("ttm swap", 341 341 ttm->num_pages << PAGE_SHIFT, 342 342 0); 343 - if (unlikely(IS_ERR(swap_storage))) { 343 + if (IS_ERR(swap_storage)) { 344 344 pr_err("Failed allocating swap storage\n"); 345 345 return PTR_ERR(swap_storage); 346 346 } ··· 354 354 if (unlikely(from_page == NULL)) 355 355 continue; 356 356 to_page = shmem_read_mapping_page(swap_space, i); 357 - if (unlikely(IS_ERR(to_page))) { 357 + if (IS_ERR(to_page)) { 358 358 ret = PTR_ERR(to_page); 359 359 goto out_err; 360 360 }
+12 -29
drivers/gpu/drm/udl/udl_fb.c
··· 288 288 { 289 289 struct udl_fbdev *ufbdev = info->par; 290 290 291 - sys_fillrect(info, rect); 291 + drm_fb_helper_sys_fillrect(info, rect); 292 292 293 293 udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width, 294 294 rect->height); ··· 298 298 { 299 299 struct udl_fbdev *ufbdev = info->par; 300 300 301 - sys_copyarea(info, region); 301 + drm_fb_helper_sys_copyarea(info, region); 302 302 303 303 udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width, 304 304 region->height); ··· 308 308 { 309 309 struct udl_fbdev *ufbdev = info->par; 310 310 311 - sys_imageblit(info, image); 311 + drm_fb_helper_sys_imageblit(info, image); 312 312 313 313 udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width, 314 314 image->height); ··· 476 476 container_of(helper, struct udl_fbdev, helper); 477 477 struct drm_device *dev = ufbdev->helper.dev; 478 478 struct fb_info *info; 479 - struct device *device = dev->dev; 480 479 struct drm_framebuffer *fb; 481 480 struct drm_mode_fb_cmd2 mode_cmd; 482 481 struct udl_gem_object *obj; ··· 505 506 goto out_gfree; 506 507 } 507 508 508 - info = framebuffer_alloc(0, device); 509 - if (!info) { 510 - ret = -ENOMEM; 509 + info = drm_fb_helper_alloc_fbi(helper); 510 + if (IS_ERR(info)) { 511 + ret = PTR_ERR(info); 511 512 goto out_gfree; 512 513 } 513 514 info->par = ufbdev; 514 515 515 516 ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj); 516 517 if (ret) 517 - goto out_gfree; 518 + goto out_destroy_fbi; 518 519 519 520 fb = &ufbdev->ufb.base; 520 521 521 522 ufbdev->helper.fb = fb; 522 - ufbdev->helper.fbdev = info; 523 523 524 524 strcpy(info->fix.id, "udldrmfb"); 525 525 ··· 531 533 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 532 534 drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height); 533 535 534 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 535 - if (ret) { 536 - ret = -ENOMEM; 537 - goto out_gfree; 538 - } 539 - 540 - 541 536 
DRM_DEBUG_KMS("allocated %dx%d vmal %p\n", 542 537 fb->width, fb->height, 543 538 ufbdev->ufb.obj->vmapping); 544 539 545 540 return ret; 541 + out_destroy_fbi: 542 + drm_fb_helper_release_fbi(helper); 546 543 out_gfree: 547 544 drm_gem_object_unreference(&ufbdev->ufb.obj->base); 548 545 out: ··· 551 558 static void udl_fbdev_destroy(struct drm_device *dev, 552 559 struct udl_fbdev *ufbdev) 553 560 { 554 - struct fb_info *info; 555 - if (ufbdev->helper.fbdev) { 556 - info = ufbdev->helper.fbdev; 557 - unregister_framebuffer(info); 558 - if (info->cmap.len) 559 - fb_dealloc_cmap(&info->cmap); 560 - framebuffer_release(info); 561 - } 561 + drm_fb_helper_unregister_fbi(&ufbdev->helper); 562 + drm_fb_helper_release_fbi(&ufbdev->helper); 562 563 drm_fb_helper_fini(&ufbdev->helper); 563 564 drm_framebuffer_unregister_private(&ufbdev->ufb.base); 564 565 drm_framebuffer_cleanup(&ufbdev->ufb.base); ··· 618 631 return; 619 632 620 633 ufbdev = udl->fbdev; 621 - if (ufbdev->helper.fbdev) { 622 - struct fb_info *info; 623 - info = ufbdev->helper.fbdev; 624 - unlink_framebuffer(info); 625 - } 634 + drm_fb_helper_unlink_fbi(&ufbdev->helper); 626 635 } 627 636 628 637 struct drm_framebuffer *
+9 -23
drivers/gpu/drm/virtio/virtgpu_fb.c
··· 173 173 const struct fb_fillrect *rect) 174 174 { 175 175 struct virtio_gpu_fbdev *vfbdev = info->par; 176 - sys_fillrect(info, rect); 176 + drm_fb_helper_sys_fillrect(info, rect); 177 177 virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy, 178 178 rect->width, rect->height); 179 179 schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); ··· 183 183 const struct fb_copyarea *area) 184 184 { 185 185 struct virtio_gpu_fbdev *vfbdev = info->par; 186 - sys_copyarea(info, area); 186 + drm_fb_helper_sys_copyarea(info, area); 187 187 virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy, 188 188 area->width, area->height); 189 189 schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); ··· 193 193 const struct fb_image *image) 194 194 { 195 195 struct virtio_gpu_fbdev *vfbdev = info->par; 196 - sys_imageblit(info, image); 196 + drm_fb_helper_sys_imageblit(info, image); 197 197 virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy, 198 198 image->width, image->height); 199 199 schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); ··· 230 230 struct drm_framebuffer *fb; 231 231 struct drm_mode_fb_cmd2 mode_cmd = {}; 232 232 struct virtio_gpu_object *obj; 233 - struct device *device = vgdev->dev; 234 233 uint32_t resid, format, size; 235 234 int ret; 236 235 ··· 316 317 if (ret) 317 318 goto err_obj_attach; 318 319 319 - info = framebuffer_alloc(0, device); 320 - if (!info) { 321 - ret = -ENOMEM; 320 + info = drm_fb_helper_alloc_fbi(helper); 321 + if (IS_ERR(info)) { 322 + ret = PTR_ERR(info); 322 323 goto err_fb_alloc; 323 - } 324 - 325 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 326 - if (ret) { 327 - ret = -ENOMEM; 328 - goto err_fb_alloc_cmap; 329 324 } 330 325 331 326 info->par = helper; ··· 332 339 fb = &vfbdev->vgfb.base; 333 340 334 341 vfbdev->helper.fb = fb; 335 - vfbdev->helper.fbdev = info; 336 342 337 343 strcpy(info->fix.id, "virtiodrmfb"); 338 344 info->flags = FBINFO_DEFAULT; ··· 
349 357 return 0; 350 358 351 359 err_fb_init: 352 - fb_dealloc_cmap(&info->cmap); 353 - err_fb_alloc_cmap: 354 - framebuffer_release(info); 360 + drm_fb_helper_release_fbi(helper); 355 361 err_fb_alloc: 356 362 virtio_gpu_cmd_resource_inval_backing(vgdev, resid); 357 363 err_obj_attach: ··· 361 371 static int virtio_gpu_fbdev_destroy(struct drm_device *dev, 362 372 struct virtio_gpu_fbdev *vgfbdev) 363 373 { 364 - struct fb_info *info; 365 374 struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb; 366 375 367 - if (vgfbdev->helper.fbdev) { 368 - info = vgfbdev->helper.fbdev; 376 + drm_fb_helper_unregister_fbi(&vgfbdev->helper); 377 + drm_fb_helper_release_fbi(&vgfbdev->helper); 369 378 370 - unregister_framebuffer(info); 371 - framebuffer_release(info); 372 - } 373 379 if (vgfb->obj) 374 380 vgfb->obj = NULL; 375 381 drm_fb_helper_fini(&vgfbdev->helper);
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
··· 159 159 160 160 if (dev_priv->has_mob) { 161 161 uctx->man = vmw_cmdbuf_res_man_create(dev_priv); 162 - if (unlikely(IS_ERR(uctx->man))) { 162 + if (IS_ERR(uctx->man)) { 163 163 ret = PTR_ERR(uctx->man); 164 164 uctx->man = NULL; 165 165 goto out_err;
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 1054 1054 return -EINVAL; 1055 1055 1056 1056 vmaster = vmw_master_check(dev, file_priv, flags); 1057 - if (unlikely(IS_ERR(vmaster))) { 1057 + if (IS_ERR(vmaster)) { 1058 1058 ret = PTR_ERR(vmaster); 1059 1059 1060 1060 if (ret != -ERESTARTSYS)
+56 -39
drivers/gpu/vga/vga_switcheroo.c
··· 6 6 * Licensed under GPLv2 7 7 * 8 8 * vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs 9 - 10 - Switcher interface - methods require for ATPX and DCM 11 - - switchto - this throws the output MUX switch 12 - - discrete_set_power - sets the power state for the discrete card 13 - 14 - GPU driver interface 15 - - set_gpu_state - this should do the equiv of s/r for the card 16 - - this should *not* set the discrete power state 17 - - switch_check - check if the device is in a position to switch now 9 + * 10 + * Switcher interface - methods require for ATPX and DCM 11 + * - switchto - this throws the output MUX switch 12 + * - discrete_set_power - sets the power state for the discrete card 13 + * 14 + * GPU driver interface 15 + * - set_gpu_state - this should do the equiv of s/r for the card 16 + * - this should *not* set the discrete power state 17 + * - switch_check - check if the device is in a position to switch now 18 18 */ 19 + 20 + #define pr_fmt(fmt) "vga_switcheroo: " fmt 19 21 20 22 #include <linux/module.h> 21 23 #include <linux/seq_file.h> ··· 113 111 114 112 vgasr_priv.handler = handler; 115 113 if (vga_switcheroo_ready()) { 116 - printk(KERN_INFO "vga_switcheroo: enabled\n"); 114 + pr_info("enabled\n"); 117 115 vga_switcheroo_enable(); 118 116 } 119 117 mutex_unlock(&vgasr_mutex); ··· 126 124 mutex_lock(&vgasr_mutex); 127 125 vgasr_priv.handler = NULL; 128 126 if (vgasr_priv.active) { 129 - pr_info("vga_switcheroo: disabled\n"); 127 + pr_info("disabled\n"); 130 128 vga_switcheroo_debugfs_fini(&vgasr_priv); 131 129 vgasr_priv.active = false; 132 130 } ··· 157 155 vgasr_priv.registered_clients++; 158 156 159 157 if (vga_switcheroo_ready()) { 160 - printk(KERN_INFO "vga_switcheroo: enabled\n"); 158 + pr_info("enabled\n"); 161 159 vga_switcheroo_enable(); 162 160 } 163 161 mutex_unlock(&vgasr_mutex); ··· 169 167 bool driver_power_control) 170 168 { 171 169 return register_client(pdev, ops, -1, 172 - pdev == 
vga_default_device(), driver_power_control); 170 + pdev == vga_default_device(), 171 + driver_power_control); 173 172 } 174 173 EXPORT_SYMBOL(vga_switcheroo_register_client); 175 174 ··· 186 183 find_client_from_pci(struct list_head *head, struct pci_dev *pdev) 187 184 { 188 185 struct vga_switcheroo_client *client; 186 + 189 187 list_for_each_entry(client, head, list) 190 188 if (client->pdev == pdev) 191 189 return client; ··· 197 193 find_client_from_id(struct list_head *head, int client_id) 198 194 { 199 195 struct vga_switcheroo_client *client; 196 + 200 197 list_for_each_entry(client, head, list) 201 198 if (client->id == client_id) 202 199 return client; ··· 208 203 find_active_client(struct list_head *head) 209 204 { 210 205 struct vga_switcheroo_client *client; 206 + 211 207 list_for_each_entry(client, head, list) 212 208 if (client->active && client_is_vga(client)) 213 209 return client; ··· 241 235 kfree(client); 242 236 } 243 237 if (vgasr_priv.active && vgasr_priv.registered_clients < 2) { 244 - printk(KERN_INFO "vga_switcheroo: disabled\n"); 238 + pr_info("disabled\n"); 245 239 vga_switcheroo_debugfs_fini(&vgasr_priv); 246 240 vgasr_priv.active = false; 247 241 } ··· 266 260 { 267 261 struct vga_switcheroo_client *client; 268 262 int i = 0; 263 + 269 264 mutex_lock(&vgasr_mutex); 270 265 list_for_each_entry(client, &vgasr_priv.clients, list) { 271 266 seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i, 272 - client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD", 267 + client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : 268 + "IGD", 273 269 client_is_vga(client) ? "" : "-Audio", 274 270 client->active ? '+' : ' ', 275 271 client->driver_power_control ? 
"Dyn" : "", ··· 355 347 356 348 if (new_client->fb_info) { 357 349 struct fb_event event; 350 + 358 351 console_lock(); 359 352 event.info = new_client->fb_info; 360 353 fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event); ··· 384 375 385 376 list_for_each_entry(client, &vgasr_priv.clients, list) { 386 377 if (!client->ops->can_switch(client->pdev)) { 387 - printk(KERN_ERR "vga_switcheroo: client %x refused switch\n", client->id); 378 + pr_err("client %x refused switch\n", client->id); 388 379 return false; 389 380 } 390 381 } ··· 493 484 if (can_switch) { 494 485 ret = vga_switchto_stage1(client); 495 486 if (ret) 496 - printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret); 487 + pr_err("switching failed stage 1 %d\n", ret); 497 488 498 489 ret = vga_switchto_stage2(client); 499 490 if (ret) 500 - printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret); 491 + pr_err("switching failed stage 2 %d\n", ret); 501 492 502 493 } else { 503 - printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id); 494 + pr_info("setting delayed switch to client %d\n", client->id); 504 495 vgasr_priv.delayed_switch_active = true; 505 496 vgasr_priv.delayed_client_id = client_id; 506 497 507 498 ret = vga_switchto_stage1(client); 508 499 if (ret) 509 - printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret); 500 + pr_err("delayed switching stage 1 failed %d\n", ret); 510 501 } 511 502 512 503 out: ··· 525 516 526 517 static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv) 527 518 { 528 - if (priv->switch_file) { 529 - debugfs_remove(priv->switch_file); 530 - priv->switch_file = NULL; 531 - } 532 - if (priv->debugfs_root) { 533 - debugfs_remove(priv->debugfs_root); 534 - priv->debugfs_root = NULL; 535 - } 519 + debugfs_remove(priv->switch_file); 520 + priv->switch_file = NULL; 521 + 522 + debugfs_remove(priv->debugfs_root); 523 + priv->debugfs_root = NULL; 536 524 } 537 525 538 526 static int 
vga_switcheroo_debugfs_init(struct vgasr_priv *priv) 539 527 { 528 + static const char mp[] = "/sys/kernel/debug"; 529 + 540 530 /* already initialised */ 541 531 if (priv->debugfs_root) 542 532 return 0; 543 533 priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL); 544 534 545 535 if (!priv->debugfs_root) { 546 - printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n"); 536 + pr_err("Cannot create %s/vgaswitcheroo\n", mp); 547 537 goto fail; 548 538 } 549 539 550 540 priv->switch_file = debugfs_create_file("switch", 0644, 551 - priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops); 541 + priv->debugfs_root, NULL, 542 + &vga_switcheroo_debugfs_fops); 552 543 if (!priv->switch_file) { 553 - printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n"); 544 + pr_err("cannot create %s/vgaswitcheroo/switch\n", mp); 554 545 goto fail; 555 546 } 556 547 return 0; ··· 569 560 if (!vgasr_priv.delayed_switch_active) 570 561 goto err; 571 562 572 - printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id); 563 + pr_info("processing delayed switch to %d\n", 564 + vgasr_priv.delayed_client_id); 573 565 574 566 client = find_client_from_id(&vgasr_priv.clients, 575 567 vgasr_priv.delayed_client_id); ··· 579 569 580 570 ret = vga_switchto_stage2(client); 581 571 if (ret) 582 - printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret); 572 + pr_err("delayed switching failed stage 2 %d\n", ret); 583 573 584 574 vgasr_priv.delayed_switch_active = false; 585 575 err = 0; ··· 589 579 } 590 580 EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); 591 581 592 - static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switcheroo_state state) 582 + static void vga_switcheroo_power_switch(struct pci_dev *pdev, 583 + enum vga_switcheroo_state state) 593 584 { 594 585 struct vga_switcheroo_client *client; 595 586 ··· 609 598 610 599 /* force a PCI 
device to a certain state - mainly to turn off audio clients */ 611 600 612 - void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) 601 + void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, 602 + enum vga_switcheroo_state dynamic) 613 603 { 614 604 struct vga_switcheroo_client *client; 615 605 ··· 656 644 657 645 /* this version is for the case where the power switch is separate 658 646 to the device being powered down. */ 659 - int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) 647 + int vga_switcheroo_init_domain_pm_ops(struct device *dev, 648 + struct dev_pm_domain *domain) 660 649 { 661 650 /* copy over all the bus versions */ 662 651 if (dev->bus && dev->bus->pm) { ··· 688 675 /* we need to check if we have to switch back on the video 689 676 device so the audio device can come back */ 690 677 list_for_each_entry(client, &vgasr_priv.clients, list) { 691 - if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && client_is_vga(client)) { 678 + if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && 679 + client_is_vga(client)) { 692 680 found = client; 693 681 ret = pm_runtime_get_sync(&client->pdev->dev); 694 682 if (ret) { ··· 709 695 return ret; 710 696 } 711 697 712 - int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) 698 + int 699 + vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, 700 + struct dev_pm_domain *domain) 713 701 { 714 702 /* copy over all the bus versions */ 715 703 if (dev->bus && dev->bus->pm) { 716 704 domain->ops = *dev->bus->pm; 717 - domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio; 705 + domain->ops.runtime_resume = 706 + vga_switcheroo_runtime_resume_hdmi_audio; 718 707 719 708 dev->pm_domain = domain; 720 709 return 0;
+82 -60
drivers/gpu/vga/vgaarb.c
··· 29 29 * 30 30 */ 31 31 32 + #define pr_fmt(fmt) "vgaarb: " fmt 33 + 32 34 #include <linux/module.h> 33 35 #include <linux/kernel.h> 34 36 #include <linux/pci.h> ··· 136 134 { 137 135 return vga_default; 138 136 } 139 - 140 137 EXPORT_SYMBOL_GPL(vga_default_device); 141 138 142 139 void vga_set_default_device(struct pci_dev *pdev) ··· 299 298 300 299 pci_set_vga_state(vgadev->pdev, true, pci_bits, flags); 301 300 302 - if (!vgadev->bridge_has_one_vga) { 301 + if (!vgadev->bridge_has_one_vga) 303 302 vga_irq_set_state(vgadev, true); 304 - } 303 + 305 304 vgadev->owns |= wants; 306 305 lock_them: 307 306 vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK); ··· 453 452 } 454 453 EXPORT_SYMBOL(vga_put); 455 454 456 - /* Rules for using a bridge to control a VGA descendant decoding: 457 - if a bridge has only one VGA descendant then it can be used 458 - to control the VGA routing for that device. 459 - It should always use the bridge closest to the device to control it. 460 - If a bridge has a direct VGA descendant, but also have a sub-bridge 461 - VGA descendant then we cannot use that bridge to control the direct VGA descendant. 462 - So for every device we register, we need to iterate all its parent bridges 463 - so we can invalidate any devices using them properly. 464 - */ 455 + /* 456 + * Rules for using a bridge to control a VGA descendant decoding: if a bridge 457 + * has only one VGA descendant then it can be used to control the VGA routing 458 + * for that device. It should always use the bridge closest to the device to 459 + * control it. If a bridge has a direct VGA descendant, but also have a sub- 460 + * bridge VGA descendant then we cannot use that bridge to control the direct 461 + * VGA descendant. So for every device we register, we need to iterate all 462 + * its parent bridges so we can invalidate any devices using them properly. 
463 + */ 465 464 static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev) 466 465 { 467 466 struct vga_device *same_bridge_vgadev; ··· 485 484 486 485 /* see if the share a bridge with this device */ 487 486 if (new_bridge == bridge) { 488 - /* if their direct parent bridge is the same 489 - as any bridge of this device then it can't be used 490 - for that device */ 487 + /* 488 + * If their direct parent bridge is the same 489 + * as any bridge of this device then it can't 490 + * be used for that device. 491 + */ 491 492 same_bridge_vgadev->bridge_has_one_vga = false; 492 493 } 493 494 494 - /* now iterate the previous devices bridge hierarchy */ 495 - /* if the new devices parent bridge is in the other devices 496 - hierarchy then we can't use it to control this device */ 495 + /* 496 + * Now iterate the previous devices bridge hierarchy. 497 + * If the new devices parent bridge is in the other 498 + * devices hierarchy then we can't use it to control 499 + * this device 500 + */ 497 501 while (bus) { 498 502 bridge = bus->self; 499 - if (bridge) { 500 - if (bridge == vgadev->pdev->bus->self) 501 - vgadev->bridge_has_one_vga = false; 502 - } 503 + 504 + if (bridge && bridge == vgadev->pdev->bus->self) 505 + vgadev->bridge_has_one_vga = false; 506 + 503 507 bus = bus->parent; 504 508 } 505 509 } ··· 533 527 /* Allocate structure */ 534 528 vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL); 535 529 if (vgadev == NULL) { 536 - pr_err("vgaarb: failed to allocate pci device\n"); 537 - /* What to do on allocation failure ? For now, let's 538 - * just do nothing, I'm not sure there is anything saner 539 - * to be done 530 + pr_err("failed to allocate pci device\n"); 531 + /* 532 + * What to do on allocation failure ? For now, let's just do 533 + * nothing, I'm not sure there is anything saner to be done. 
540 534 */ 541 535 return false; 542 536 } ··· 572 566 bridge = bus->self; 573 567 if (bridge) { 574 568 u16 l; 575 - pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, 576 - &l); 569 + 570 + pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &l); 577 571 if (!(l & PCI_BRIDGE_CTL_VGA)) { 578 572 vgadev->owns = 0; 579 573 break; ··· 587 581 */ 588 582 if (vga_default == NULL && 589 583 ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) { 590 - pr_info("vgaarb: setting as boot device: PCI:%s\n", 591 - pci_name(pdev)); 584 + pr_info("setting as boot device: PCI:%s\n", pci_name(pdev)); 592 585 vga_set_default_device(pdev); 593 586 } 594 587 ··· 596 591 /* Add to the list */ 597 592 list_add(&vgadev->list, &vga_list); 598 593 vga_count++; 599 - pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n", 594 + pr_info("device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n", 600 595 pci_name(pdev), 601 596 vga_iostate_to_str(vgadev->decodes), 602 597 vga_iostate_to_str(vgadev->owns), ··· 656 651 decodes_unlocked = vgadev->locks & decodes_removed; 657 652 vgadev->decodes = new_decodes; 658 653 659 - pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n", 654 + pr_info("device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n", 660 655 pci_name(vgadev->pdev), 661 656 vga_iostate_to_str(old_decodes), 662 657 vga_iostate_to_str(vgadev->decodes), ··· 678 673 if (!(old_decodes & VGA_RSRC_LEGACY_MASK) && 679 674 new_decodes & VGA_RSRC_LEGACY_MASK) 680 675 vga_decode_count++; 681 - pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); 676 + pr_debug("decoding count now is: %d\n", vga_decode_count); 682 677 } 683 678 684 - static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) 679 + static void __vga_set_legacy_decoding(struct pci_dev *pdev, 680 + unsigned int decodes, 681 + bool userspace) 685 682 { 686 683 struct vga_device *vgadev; 687 684 unsigned long flags; ··· 719 
712 /* call with NULL to unregister */ 720 713 int vga_client_register(struct pci_dev *pdev, void *cookie, 721 714 void (*irq_set_state)(void *cookie, bool state), 722 - unsigned int (*set_vga_decode)(void *cookie, bool decode)) 715 + unsigned int (*set_vga_decode)(void *cookie, 716 + bool decode)) 723 717 { 724 718 int ret = -ENODEV; 725 719 struct vga_device *vgadev; ··· 840 832 return 1; 841 833 } 842 834 843 - static ssize_t vga_arb_read(struct file *file, char __user * buf, 835 + static ssize_t vga_arb_read(struct file *file, char __user *buf, 844 836 size_t count, loff_t *ppos) 845 837 { 846 838 struct vga_arb_private *priv = file->private_data; ··· 907 899 * TODO: To avoid parsing inside kernel and to improve the speed we may 908 900 * consider use ioctl here 909 901 */ 910 - static ssize_t vga_arb_write(struct file *file, const char __user * buf, 902 + static ssize_t vga_arb_write(struct file *file, const char __user *buf, 911 903 size_t count, loff_t *ppos) 912 904 { 913 905 struct vga_arb_private *priv = file->private_data; ··· 1083 1075 ret_val = -EPROTO; 1084 1076 goto done; 1085 1077 } 1086 - pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, 1078 + pr_debug("%s ==> %x:%x:%x.%x\n", curr_pos, 1087 1079 domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 1088 1080 1089 1081 pdev = pci_get_domain_bus_and_slot(domain, bus, devfn); 1090 - pr_debug("vgaarb: pdev %p\n", pdev); 1082 + pr_debug("pdev %p\n", pdev); 1091 1083 if (!pdev) { 1092 - pr_err("vgaarb: invalid PCI address %x:%x:%x\n", 1084 + pr_err("invalid PCI address %x:%x:%x\n", 1093 1085 domain, bus, devfn); 1094 1086 ret_val = -ENODEV; 1095 1087 goto done; ··· 1097 1089 } 1098 1090 1099 1091 vgadev = vgadev_find(pdev); 1100 - pr_debug("vgaarb: vgadev %p\n", vgadev); 1092 + pr_debug("vgadev %p\n", vgadev); 1101 1093 if (vgadev == NULL) { 1102 - pr_err("vgaarb: this pci device is not a vga device\n"); 1103 - pci_dev_put(pdev); 1094 + if (pdev) { 1095 + pr_err("this pci device is not a vga device\n"); 1096 
+ pci_dev_put(pdev); 1097 + } 1098 + 1104 1099 ret_val = -ENODEV; 1105 1100 goto done; 1106 1101 } ··· 1120 1109 } 1121 1110 } 1122 1111 if (i == MAX_USER_CARDS) { 1123 - pr_err("vgaarb: maximum user cards (%d) number reached!\n", 1112 + pr_err("maximum user cards (%d) number reached!\n", 1124 1113 MAX_USER_CARDS); 1125 1114 pci_dev_put(pdev); 1126 1115 /* XXX: which value to return? */ ··· 1136 1125 } else if (strncmp(curr_pos, "decodes ", 8) == 0) { 1137 1126 curr_pos += 8; 1138 1127 remaining -= 8; 1139 - pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv); 1128 + pr_debug("client 0x%p called 'decodes'\n", priv); 1140 1129 1141 1130 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { 1142 1131 ret_val = -EPROTO; ··· 1161 1150 return ret_val; 1162 1151 } 1163 1152 1164 - static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait) 1153 + static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait) 1165 1154 { 1166 1155 struct vga_arb_private *priv = file->private_data; 1167 1156 ··· 1257 1246 else 1258 1247 new_state = true; 1259 1248 if (vgadev->set_vga_decode) { 1260 - new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state); 1249 + new_decodes = vgadev->set_vga_decode(vgadev->cookie, 1250 + new_state); 1261 1251 vga_update_device_decodes(vgadev, new_decodes); 1262 1252 } 1263 1253 } ··· 1312 1300 1313 1301 rc = misc_register(&vga_arb_device); 1314 1302 if (rc < 0) 1315 - pr_err("vgaarb: error %d registering device\n", rc); 1303 + pr_err("error %d registering device\n", rc); 1316 1304 1317 1305 bus_register_notifier(&pci_bus_type, &pci_notifier); 1318 1306 ··· 1324 1312 PCI_ANY_ID, pdev)) != NULL) 1325 1313 vga_arbiter_add_pci_device(pdev); 1326 1314 1327 - pr_info("vgaarb: loaded\n"); 1315 + pr_info("loaded\n"); 1328 1316 1329 1317 list_for_each_entry(vgadev, &vga_list, list) { 1330 1318 #if defined(CONFIG_X86) || defined(CONFIG_IA64) 1331 - /* Override I/O based detection done by vga_arbiter_add_pci_device() 1332 - * as 
it may take the wrong device (e.g. on Apple system under EFI). 1319 + /* 1320 + * Override vga_arbiter_add_pci_device()'s I/O based detection 1321 + * as it may take the wrong device (e.g. on Apple system under 1322 + * EFI). 1333 1323 * 1334 - * Select the device owning the boot framebuffer if there is one. 1324 + * Select the device owning the boot framebuffer if there is 1325 + * one. 1335 1326 */ 1336 - resource_size_t start, end; 1327 + resource_size_t start, end, limit; 1328 + unsigned long flags; 1337 1329 int i; 1330 + 1331 + limit = screen_info.lfb_base + screen_info.lfb_size; 1338 1332 1339 1333 /* Does firmware framebuffer belong to us? */ 1340 1334 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 1341 - if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM)) 1335 + flags = pci_resource_flags(vgadev->pdev, i); 1336 + 1337 + if ((flags & IORESOURCE_MEM) == 0) 1342 1338 continue; 1343 1339 1344 1340 start = pci_resource_start(vgadev->pdev, i); ··· 1355 1335 if (!start || !end) 1356 1336 continue; 1357 1337 1358 - if (screen_info.lfb_base < start || 1359 - (screen_info.lfb_base + screen_info.lfb_size) >= end) 1338 + if (screen_info.lfb_base < start || limit >= end) 1360 1339 continue; 1340 + 1361 1341 if (!vga_default_device()) 1362 - pr_info("vgaarb: setting as boot device: PCI:%s\n", 1342 + pr_info("setting as boot device: PCI:%s\n", 1363 1343 pci_name(vgadev->pdev)); 1364 1344 else if (vgadev->pdev != vga_default_device()) 1365 - pr_info("vgaarb: overriding boot device: PCI:%s\n", 1345 + pr_info("overriding boot device: PCI:%s\n", 1366 1346 pci_name(vgadev->pdev)); 1367 1347 vga_set_default_device(vgadev->pdev); 1368 1348 } 1369 1349 #endif 1370 1350 if (vgadev->bridge_has_one_vga) 1371 - pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev)); 1351 + pr_info("bridge control possible %s\n", 1352 + pci_name(vgadev->pdev)); 1372 1353 else 1373 - pr_info("vgaarb: no bridge control possible %s\n", pci_name(vgadev->pdev)); 1354 + pr_info("no 
bridge control possible %s\n", 1355 + pci_name(vgadev->pdev)); 1374 1356 } 1375 1357 return rc; 1376 1358 }
-2
drivers/video/Kconfig
··· 22 22 source "drivers/gpu/host1x/Kconfig" 23 23 source "drivers/gpu/ipu-v3/Kconfig" 24 24 25 - menu "Direct Rendering Manager" 26 25 source "drivers/gpu/drm/Kconfig" 27 - endmenu 28 26 29 27 menu "Frame buffer Devices" 30 28 source "drivers/video/fbdev/Kconfig"
+16 -16
include/drm/drmP.h
··· 681 681 682 682 struct drm_pending_vblank_event { 683 683 struct drm_pending_event base; 684 - int pipe; 684 + unsigned int pipe; 685 685 struct drm_event_vblank event; 686 686 }; 687 687 ··· 700 700 /* for wraparound handling */ 701 701 u32 last_wait; /* Last vblank seqno waited per CRTC */ 702 702 unsigned int inmodeset; /* Display driver is setting mode */ 703 - int crtc; /* crtc index */ 703 + unsigned int pipe; /* crtc index */ 704 704 bool enabled; /* so we don't call enable more than 705 705 once per disable */ 706 706 }; ··· 920 920 extern int drm_irq_install(struct drm_device *dev, int irq); 921 921 extern int drm_irq_uninstall(struct drm_device *dev); 922 922 923 - extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); 923 + extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); 924 924 extern int drm_wait_vblank(struct drm_device *dev, void *data, 925 925 struct drm_file *filp); 926 - extern u32 drm_vblank_count(struct drm_device *dev, int crtc); 926 + extern u32 drm_vblank_count(struct drm_device *dev, int pipe); 927 927 extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc); 928 - extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, 928 + extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, 929 929 struct timeval *vblanktime); 930 - extern void drm_send_vblank_event(struct drm_device *dev, int crtc, 931 - struct drm_pending_vblank_event *e); 930 + extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, 931 + struct drm_pending_vblank_event *e); 932 932 extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, 933 933 struct drm_pending_vblank_event *e); 934 - extern bool drm_handle_vblank(struct drm_device *dev, int crtc); 934 + extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); 935 935 extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); 936 - extern int drm_vblank_get(struct drm_device *dev, int crtc); 937 - extern 
void drm_vblank_put(struct drm_device *dev, int crtc); 936 + extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe); 937 + extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe); 938 938 extern int drm_crtc_vblank_get(struct drm_crtc *crtc); 939 939 extern void drm_crtc_vblank_put(struct drm_crtc *crtc); 940 - extern void drm_wait_one_vblank(struct drm_device *dev, int crtc); 940 + extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe); 941 941 extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); 942 - extern void drm_vblank_off(struct drm_device *dev, int crtc); 943 - extern void drm_vblank_on(struct drm_device *dev, int crtc); 942 + extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe); 943 + extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe); 944 944 extern void drm_crtc_vblank_off(struct drm_crtc *crtc); 945 945 extern void drm_crtc_vblank_reset(struct drm_crtc *crtc); 946 946 extern void drm_crtc_vblank_on(struct drm_crtc *crtc); 947 947 extern void drm_vblank_cleanup(struct drm_device *dev); 948 948 949 949 extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 950 - int crtc, int *max_error, 950 + unsigned int pipe, int *max_error, 951 951 struct timeval *vblank_time, 952 952 unsigned flags, 953 953 const struct drm_crtc *refcrtc, ··· 968 968 } 969 969 970 970 /* Modesetting support */ 971 - extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); 972 - extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); 971 + extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe); 972 + extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe); 973 973 974 974 /* Stub support (drm_stub.h) */ 975 975 extern struct drm_master *drm_master_get(struct drm_master *master);
+3 -3
include/drm/drm_crtc.h
··· 865 865 866 866 uint32_t possible_crtcs; 867 867 uint32_t *format_types; 868 - uint32_t format_count; 868 + unsigned int format_count; 869 869 bool format_default; 870 870 871 871 struct drm_crtc *crtc; ··· 1270 1270 unsigned long possible_crtcs, 1271 1271 const struct drm_plane_funcs *funcs, 1272 1272 const uint32_t *formats, 1273 - uint32_t format_count, 1273 + unsigned int format_count, 1274 1274 enum drm_plane_type type); 1275 1275 extern int drm_plane_init(struct drm_device *dev, 1276 1276 struct drm_plane *plane, 1277 1277 unsigned long possible_crtcs, 1278 1278 const struct drm_plane_funcs *funcs, 1279 - const uint32_t *formats, uint32_t format_count, 1279 + const uint32_t *formats, unsigned int format_count, 1280 1280 bool is_primary); 1281 1281 extern void drm_plane_cleanup(struct drm_plane *plane); 1282 1282 extern unsigned int drm_plane_index(struct drm_plane *plane);
+212
include/drm/drm_fb_helper.h
··· 122 122 bool delayed_hotplug; 123 123 }; 124 124 125 + #ifdef CONFIG_DRM_FBDEV_EMULATION 125 126 void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, 126 127 const struct drm_fb_helper_funcs *funcs); 127 128 int drm_fb_helper_init(struct drm_device *dev, ··· 137 136 struct fb_info *info); 138 137 139 138 bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); 139 + 140 + struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper); 141 + void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper); 142 + void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper); 140 143 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, 141 144 uint32_t fb_width, uint32_t fb_height); 142 145 void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, 143 146 uint32_t depth); 147 + 148 + void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper); 149 + 150 + ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, 151 + size_t count, loff_t *ppos); 152 + ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, 153 + size_t count, loff_t *ppos); 154 + 155 + void drm_fb_helper_sys_fillrect(struct fb_info *info, 156 + const struct fb_fillrect *rect); 157 + void drm_fb_helper_sys_copyarea(struct fb_info *info, 158 + const struct fb_copyarea *area); 159 + void drm_fb_helper_sys_imageblit(struct fb_info *info, 160 + const struct fb_image *image); 161 + 162 + void drm_fb_helper_cfb_fillrect(struct fb_info *info, 163 + const struct fb_fillrect *rect); 164 + void drm_fb_helper_cfb_copyarea(struct fb_info *info, 165 + const struct fb_copyarea *area); 166 + void drm_fb_helper_cfb_imageblit(struct fb_info *info, 167 + const struct fb_image *image); 168 + 169 + void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state); 144 170 145 171 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); 146 172 ··· 186 158 
int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); 187 159 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, 188 160 struct drm_connector *connector); 161 + #else 162 + static inline void drm_fb_helper_prepare(struct drm_device *dev, 163 + struct drm_fb_helper *helper, 164 + const struct drm_fb_helper_funcs *funcs) 165 + { 166 + } 167 + 168 + static inline int drm_fb_helper_init(struct drm_device *dev, 169 + struct drm_fb_helper *helper, int crtc_count, 170 + int max_conn) 171 + { 172 + return 0; 173 + } 174 + 175 + static inline void drm_fb_helper_fini(struct drm_fb_helper *helper) 176 + { 177 + } 178 + 179 + static inline int drm_fb_helper_blank(int blank, struct fb_info *info) 180 + { 181 + return 0; 182 + } 183 + 184 + static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, 185 + struct fb_info *info) 186 + { 187 + return 0; 188 + } 189 + 190 + static inline int drm_fb_helper_set_par(struct fb_info *info) 191 + { 192 + return 0; 193 + } 194 + 195 + static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var, 196 + struct fb_info *info) 197 + { 198 + return 0; 199 + } 200 + 201 + static inline bool 202 + drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) 203 + { 204 + return true; 205 + } 206 + 207 + static inline struct fb_info * 208 + drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) 209 + { 210 + return NULL; 211 + } 212 + 213 + static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper) 214 + { 215 + } 216 + static inline void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper) 217 + { 218 + } 219 + 220 + static inline void drm_fb_helper_fill_var(struct fb_info *info, 221 + struct drm_fb_helper *fb_helper, 222 + uint32_t fb_width, uint32_t fb_height) 223 + { 224 + } 225 + 226 + static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, 227 + uint32_t depth) 228 + { 229 + } 230 
+ 231 + static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap, 232 + struct fb_info *info) 233 + { 234 + return 0; 235 + } 236 + 237 + static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper) 238 + { 239 + } 240 + 241 + static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, 242 + char __user *buf, size_t count, 243 + loff_t *ppos) 244 + { 245 + return -ENODEV; 246 + } 247 + 248 + static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info, 249 + const char __user *buf, 250 + size_t count, loff_t *ppos) 251 + { 252 + return -ENODEV; 253 + } 254 + 255 + static inline void drm_fb_helper_sys_fillrect(struct fb_info *info, 256 + const struct fb_fillrect *rect) 257 + { 258 + } 259 + 260 + static inline void drm_fb_helper_sys_copyarea(struct fb_info *info, 261 + const struct fb_copyarea *area) 262 + { 263 + } 264 + 265 + static inline void drm_fb_helper_sys_imageblit(struct fb_info *info, 266 + const struct fb_image *image) 267 + { 268 + } 269 + 270 + static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info, 271 + const struct fb_fillrect *rect) 272 + { 273 + } 274 + 275 + static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info, 276 + const struct fb_copyarea *area) 277 + { 278 + } 279 + 280 + static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info, 281 + const struct fb_image *image) 282 + { 283 + } 284 + 285 + static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, 286 + int state) 287 + { 288 + } 289 + 290 + static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) 291 + { 292 + return 0; 293 + } 294 + 295 + static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, 296 + int bpp_sel) 297 + { 298 + return 0; 299 + } 300 + 301 + static inline int 302 + drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) 303 + { 304 + return 0; 305 + } 306 + 307 + static inline int drm_fb_helper_debug_enter(struct fb_info 
*info) 308 + { 309 + return 0; 310 + } 311 + 312 + static inline int drm_fb_helper_debug_leave(struct fb_info *info) 313 + { 314 + return 0; 315 + } 316 + 317 + static inline struct drm_display_mode * 318 + drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, 319 + int width, int height) 320 + { 321 + return NULL; 322 + } 323 + 324 + static inline struct drm_display_mode * 325 + drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, 326 + int width, int height) 327 + { 328 + return NULL; 329 + } 330 + 331 + static inline int 332 + drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, 333 + struct drm_connector *connector) 334 + { 335 + return 0; 336 + } 337 + 338 + static inline int 339 + drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, 340 + struct drm_connector *connector) 341 + { 342 + return 0; 343 + } 344 + #endif 189 345 #endif
-1
include/drm/drm_modeset_lock.h
··· 130 130 struct drm_plane; 131 131 132 132 void drm_modeset_lock_all(struct drm_device *dev); 133 - int __drm_modeset_lock_all(struct drm_device *dev, bool trylock); 134 133 void drm_modeset_unlock_all(struct drm_device *dev); 135 134 void drm_modeset_lock_crtc(struct drm_crtc *crtc, 136 135 struct drm_plane *plane);
+22 -23
include/drm/drm_plane_helper.h
··· 43 43 * planes. 44 44 */ 45 45 46 - extern int drm_crtc_init(struct drm_device *dev, 47 - struct drm_crtc *crtc, 48 - const struct drm_crtc_funcs *funcs); 46 + int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, 47 + const struct drm_crtc_funcs *funcs); 49 48 50 49 /** 51 50 * drm_plane_helper_funcs - helper operations for CRTCs ··· 78 79 plane->helper_private = funcs; 79 80 } 80 81 81 - extern int drm_plane_helper_check_update(struct drm_plane *plane, 82 - struct drm_crtc *crtc, 83 - struct drm_framebuffer *fb, 84 - struct drm_rect *src, 85 - struct drm_rect *dest, 86 - const struct drm_rect *clip, 87 - int min_scale, 88 - int max_scale, 89 - bool can_position, 90 - bool can_update_disabled, 91 - bool *visible); 92 - extern int drm_primary_helper_update(struct drm_plane *plane, 93 - struct drm_crtc *crtc, 94 - struct drm_framebuffer *fb, 95 - int crtc_x, int crtc_y, 96 - unsigned int crtc_w, unsigned int crtc_h, 97 - uint32_t src_x, uint32_t src_y, 98 - uint32_t src_w, uint32_t src_h); 99 - extern int drm_primary_helper_disable(struct drm_plane *plane); 100 - extern void drm_primary_helper_destroy(struct drm_plane *plane); 82 + int drm_plane_helper_check_update(struct drm_plane *plane, 83 + struct drm_crtc *crtc, 84 + struct drm_framebuffer *fb, 85 + struct drm_rect *src, 86 + struct drm_rect *dest, 87 + const struct drm_rect *clip, 88 + int min_scale, 89 + int max_scale, 90 + bool can_position, 91 + bool can_update_disabled, 92 + bool *visible); 93 + int drm_primary_helper_update(struct drm_plane *plane, 94 + struct drm_crtc *crtc, 95 + struct drm_framebuffer *fb, 96 + int crtc_x, int crtc_y, 97 + unsigned int crtc_w, unsigned int crtc_h, 98 + uint32_t src_x, uint32_t src_y, 99 + uint32_t src_w, uint32_t src_h); 100 + int drm_primary_helper_disable(struct drm_plane *plane); 101 + void drm_primary_helper_destroy(struct drm_plane *plane); 101 102 extern const struct drm_plane_funcs drm_primary_helper_funcs; 102 103 103 104 int 
drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,