Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'drm-init-cleanup' of git://people.freedesktop.org/~danvet/drm into drm-next

Next pull request, this time more of the drm de-midlayering work. The big
thing is that this patch series here removes everything from drm_bus except
the set_busid callback. Thierry has a few more patches on top of this to
make that one optional too.

With that we can ditch all the non-pci drm_bus implementations, which
Thierry has already done for the fake tegra host1x drm_bus.

Reviewed by Thierry, Laurent and David and now also survived some testing
on my intel boxes to make sure the irq fumble is fixed correctly ;-) The
last minute rebase was just to add the r-b tags from Thierry for the 2
patches I've redone.

* 'drm-init-cleanup' of git://people.freedesktop.org/~danvet/drm:
drm/<drivers>: don't set driver->dev_priv_size to 0
drm: Remove dev->kdriver
drm: remove drm_bus->get_name
drm: rip out dev->devname
drm: inline drm_pci_set_unique
drm: remove bus->get_irq implementations
drm: pass the irq explicitly to drm_irq_install
drm/irq: Look up the pci irq directly in the drm_control ioctl
drm/irq: track the irq installed in drm_irq_install in dev->irq
drm: rename dev->count_lock to dev->buf_lock
drm: Rip out totally bogus vga_switcheroo->can_switch locking
drm: kill drm_bus->bus_type
drm: remove drm_dev_to_irq from drivers
drm/irq: remove cargo-culted locking from irq_install/uninstall
drm/irq: drm_control is a legacy ioctl, so pci devices only
drm/pci: fold in irq_by_busid support
drm/irq: simplify irq checks in drm_wait_vblank

+158 -267
+1 -9
Documentation/DocBook/drm.tmpl
··· 342 342 <sect4> 343 343 <title>Managed IRQ Registration</title> 344 344 <para> 345 - Both the <function>drm_irq_install</function> and 346 - <function>drm_irq_uninstall</function> functions get the device IRQ by 347 - calling <function>drm_dev_to_irq</function>. This inline function will 348 - call a bus-specific operation to retrieve the IRQ number. For platform 349 - devices, <function>platform_get_irq</function>(..., 0) is used to 350 - retrieve the IRQ number. 351 - </para> 352 - <para> 353 345 <function>drm_irq_install</function> starts by calling the 354 346 <methodname>irq_preinstall</methodname> driver operation. The operation 355 347 is optional and must make sure that the interrupt will not get fired by 356 348 clearing all pending interrupt flags or disabling the interrupt. 357 349 </para> 358 350 <para> 359 - The IRQ will then be requested by a call to 351 + The passed-in IRQ will then be requested by a call to 360 352 <function>request_irq</function>. If the DRIVER_IRQ_SHARED driver 361 353 feature flag is set, a shared (IRQF_SHARED) IRQ handler will be 362 354 requested.
+1 -1
drivers/gpu/drm/armada/armada_drv.c
··· 173 173 if (ret) 174 174 goto err_kms; 175 175 176 - ret = drm_irq_install(dev); 176 + ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0)); 177 177 if (ret) 178 178 goto err_kms; 179 179
-1
drivers/gpu/drm/ast/ast_drv.c
··· 198 198 199 199 static struct drm_driver driver = { 200 200 .driver_features = DRIVER_MODESET | DRIVER_GEM, 201 - .dev_priv_size = 0, 202 201 203 202 .load = ast_driver_load, 204 203 .unload = ast_driver_unload,
+16 -16
drivers/gpu/drm/drm_bufs.c
··· 656 656 DRM_DEBUG("zone invalid\n"); 657 657 return -EINVAL; 658 658 } 659 - spin_lock(&dev->count_lock); 659 + spin_lock(&dev->buf_lock); 660 660 if (dev->buf_use) { 661 - spin_unlock(&dev->count_lock); 661 + spin_unlock(&dev->buf_lock); 662 662 return -EBUSY; 663 663 } 664 664 atomic_inc(&dev->buf_alloc); 665 - spin_unlock(&dev->count_lock); 665 + spin_unlock(&dev->buf_lock); 666 666 667 667 mutex_lock(&dev->struct_mutex); 668 668 entry = &dma->bufs[order]; ··· 805 805 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; 806 806 total = PAGE_SIZE << page_order; 807 807 808 - spin_lock(&dev->count_lock); 808 + spin_lock(&dev->buf_lock); 809 809 if (dev->buf_use) { 810 - spin_unlock(&dev->count_lock); 810 + spin_unlock(&dev->buf_lock); 811 811 return -EBUSY; 812 812 } 813 813 atomic_inc(&dev->buf_alloc); 814 - spin_unlock(&dev->count_lock); 814 + spin_unlock(&dev->buf_lock); 815 815 816 816 mutex_lock(&dev->struct_mutex); 817 817 entry = &dma->bufs[order]; ··· 1015 1015 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 1016 1016 return -EINVAL; 1017 1017 1018 - spin_lock(&dev->count_lock); 1018 + spin_lock(&dev->buf_lock); 1019 1019 if (dev->buf_use) { 1020 - spin_unlock(&dev->count_lock); 1020 + spin_unlock(&dev->buf_lock); 1021 1021 return -EBUSY; 1022 1022 } 1023 1023 atomic_inc(&dev->buf_alloc); 1024 - spin_unlock(&dev->count_lock); 1024 + spin_unlock(&dev->buf_lock); 1025 1025 1026 1026 mutex_lock(&dev->struct_mutex); 1027 1027 entry = &dma->bufs[order]; ··· 1175 1175 * \param arg pointer to a drm_buf_info structure. 1176 1176 * \return zero on success or a negative number on failure. 1177 1177 * 1178 - * Increments drm_device::buf_use while holding the drm_device::count_lock 1178 + * Increments drm_device::buf_use while holding the drm_device::buf_lock 1179 1179 * lock, preventing of allocating more buffers after this call. Information 1180 1180 * about each requested buffer is then copied into user space. 
1181 1181 */ ··· 1196 1196 if (!dma) 1197 1197 return -EINVAL; 1198 1198 1199 - spin_lock(&dev->count_lock); 1199 + spin_lock(&dev->buf_lock); 1200 1200 if (atomic_read(&dev->buf_alloc)) { 1201 - spin_unlock(&dev->count_lock); 1201 + spin_unlock(&dev->buf_lock); 1202 1202 return -EBUSY; 1203 1203 } 1204 1204 ++dev->buf_use; /* Can't allocate more after this call */ 1205 - spin_unlock(&dev->count_lock); 1205 + spin_unlock(&dev->buf_lock); 1206 1206 1207 1207 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { 1208 1208 if (dma->bufs[i].buf_count) ··· 1381 1381 if (!dma) 1382 1382 return -EINVAL; 1383 1383 1384 - spin_lock(&dev->count_lock); 1384 + spin_lock(&dev->buf_lock); 1385 1385 if (atomic_read(&dev->buf_alloc)) { 1386 - spin_unlock(&dev->count_lock); 1386 + spin_unlock(&dev->buf_lock); 1387 1387 return -EBUSY; 1388 1388 } 1389 1389 dev->buf_use++; /* Can't allocate more after this call */ 1390 - spin_unlock(&dev->count_lock); 1390 + spin_unlock(&dev->buf_lock); 1391 1391 1392 1392 if (request->count >= dma->buf_count) { 1393 1393 if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
+2 -4
drivers/gpu/drm/drm_info.c
··· 47 47 struct drm_minor *minor = node->minor; 48 48 struct drm_device *dev = minor->dev; 49 49 struct drm_master *master = minor->master; 50 - const char *bus_name; 51 50 if (!master) 52 51 return 0; 53 52 54 - bus_name = dev->driver->bus->get_name(dev); 55 53 if (master->unique) { 56 54 seq_printf(m, "%s %s %s\n", 57 - bus_name, 55 + dev->driver->name, 58 56 dev_name(dev->dev), master->unique); 59 57 } else { 60 58 seq_printf(m, "%s %s\n", 61 - bus_name, dev_name(dev->dev)); 59 + dev->driver->name, dev_name(dev->dev)); 62 60 } 63 61 return 0; 64 62 }
+7 -6
drivers/gpu/drm/drm_ioctl.c
··· 72 72 drm_unset_busid(struct drm_device *dev, 73 73 struct drm_master *master) 74 74 { 75 - kfree(dev->devname); 76 - dev->devname = NULL; 77 - 78 75 kfree(master->unique); 79 76 master->unique = NULL; 80 77 master->unique_len = 0; ··· 90 93 * Copies the bus id from userspace into drm_device::unique, and verifies that 91 94 * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated 92 95 * in interface version 1.1 and will return EBUSY when setversion has requested 93 - * version 1.1 or greater. 96 + * version 1.1 or greater. Also note that KMS is all version 1.1 and later and 97 + * UMS was only ever supported on pci devices. 94 98 */ 95 99 int drm_setunique(struct drm_device *dev, void *data, 96 100 struct drm_file *file_priv) ··· 106 108 if (!u->unique_len || u->unique_len > 1024) 107 109 return -EINVAL; 108 110 109 - if (!dev->driver->bus->set_unique) 111 + if (drm_core_check_feature(dev, DRIVER_MODESET)) 112 + return 0; 113 + 114 + if (WARN_ON(!dev->pdev)) 110 115 return -EINVAL; 111 116 112 - ret = dev->driver->bus->set_unique(dev, master, u); 117 + ret = drm_pci_set_unique(dev, master, u); 113 118 if (ret) 114 119 goto err; 115 120
+35 -70
drivers/gpu/drm/drm_irq.c
··· 56 56 */ 57 57 #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000 58 58 59 - /** 60 - * Get interrupt from bus id. 61 - * 62 - * \param inode device inode. 63 - * \param file_priv DRM file private. 64 - * \param cmd command. 65 - * \param arg user argument, pointing to a drm_irq_busid structure. 66 - * \return zero on success or a negative number on failure. 67 - * 68 - * Finds the PCI device with the specified bus id and gets its IRQ number. 69 - * This IOCTL is deprecated, and will now return EINVAL for any busid not equal 70 - * to that of the device that this DRM instance attached to. 71 - */ 72 - int drm_irq_by_busid(struct drm_device *dev, void *data, 73 - struct drm_file *file_priv) 74 - { 75 - struct drm_irq_busid *p = data; 76 - 77 - if (!dev->driver->bus->irq_by_busid) 78 - return -EINVAL; 79 - 80 - if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 81 - return -EINVAL; 82 - 83 - return dev->driver->bus->irq_by_busid(dev, p); 84 - } 85 - 86 59 /* 87 60 * Clear vblank timestamp buffer for a crtc. 88 61 */ ··· 242 269 * \c irq_preinstall() and \c irq_postinstall() functions 243 270 * before and after the installation. 
244 271 */ 245 - int drm_irq_install(struct drm_device *dev) 272 + int drm_irq_install(struct drm_device *dev, int irq) 246 273 { 247 274 int ret; 248 275 unsigned long sh_flags = 0; 249 - char *irqname; 250 276 251 277 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 252 278 return -EINVAL; 253 279 254 - if (drm_dev_to_irq(dev) == 0) 280 + if (irq == 0) 255 281 return -EINVAL; 256 - 257 - mutex_lock(&dev->struct_mutex); 258 282 259 283 /* Driver must have been initialized */ 260 - if (!dev->dev_private) { 261 - mutex_unlock(&dev->struct_mutex); 284 + if (!dev->dev_private) 262 285 return -EINVAL; 263 - } 264 286 265 - if (dev->irq_enabled) { 266 - mutex_unlock(&dev->struct_mutex); 287 + if (dev->irq_enabled) 267 288 return -EBUSY; 268 - } 269 289 dev->irq_enabled = true; 270 - mutex_unlock(&dev->struct_mutex); 271 290 272 - DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); 291 + DRM_DEBUG("irq=%d\n", irq); 273 292 274 293 /* Before installing handler */ 275 294 if (dev->driver->irq_preinstall) ··· 271 306 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) 272 307 sh_flags = IRQF_SHARED; 273 308 274 - if (dev->devname) 275 - irqname = dev->devname; 276 - else 277 - irqname = dev->driver->name; 278 - 279 - ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler, 280 - sh_flags, irqname, dev); 309 + ret = request_irq(irq, dev->driver->irq_handler, 310 + sh_flags, dev->driver->name, dev); 281 311 282 312 if (ret < 0) { 283 - mutex_lock(&dev->struct_mutex); 284 313 dev->irq_enabled = false; 285 - mutex_unlock(&dev->struct_mutex); 286 314 return ret; 287 315 } 288 316 ··· 287 329 ret = dev->driver->irq_postinstall(dev); 288 330 289 331 if (ret < 0) { 290 - mutex_lock(&dev->struct_mutex); 291 332 dev->irq_enabled = false; 292 - mutex_unlock(&dev->struct_mutex); 293 333 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 294 334 vga_client_register(dev->pdev, NULL, NULL, NULL); 295 - free_irq(drm_dev_to_irq(dev), dev); 335 + free_irq(irq, dev); 336 + } else { 337 + 
dev->irq = irq; 296 338 } 297 339 298 340 return ret; ··· 315 357 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 316 358 return -EINVAL; 317 359 318 - mutex_lock(&dev->struct_mutex); 319 360 irq_enabled = dev->irq_enabled; 320 361 dev->irq_enabled = false; 321 - mutex_unlock(&dev->struct_mutex); 322 362 323 363 /* 324 364 * Wake up any waiters so they don't hang. ··· 335 379 if (!irq_enabled) 336 380 return -EINVAL; 337 381 338 - DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); 382 + DRM_DEBUG("irq=%d\n", dev->irq); 339 383 340 384 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 341 385 vga_client_register(dev->pdev, NULL, NULL, NULL); ··· 343 387 if (dev->driver->irq_uninstall) 344 388 dev->driver->irq_uninstall(dev); 345 389 346 - free_irq(drm_dev_to_irq(dev), dev); 390 + free_irq(dev->irq, dev); 347 391 348 392 return 0; 349 393 } ··· 364 408 struct drm_file *file_priv) 365 409 { 366 410 struct drm_control *ctl = data; 411 + int ret = 0, irq; 367 412 368 413 /* if we haven't irq we fallback for compatibility reasons - 369 414 * this used to be a separate function in drm_dma.h 370 415 */ 371 416 417 + if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 418 + return 0; 419 + if (drm_core_check_feature(dev, DRIVER_MODESET)) 420 + return 0; 421 + /* UMS was only ever support on pci devices. 
*/ 422 + if (WARN_ON(!dev->pdev)) 423 + return -EINVAL; 372 424 373 425 switch (ctl->func) { 374 426 case DRM_INST_HANDLER: 375 - if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 376 - return 0; 377 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 378 - return 0; 427 + irq = dev->pdev->irq; 428 + 379 429 if (dev->if_version < DRM_IF_VERSION(1, 2) && 380 - ctl->irq != drm_dev_to_irq(dev)) 430 + ctl->irq != irq) 381 431 return -EINVAL; 382 - return drm_irq_install(dev); 432 + mutex_lock(&dev->struct_mutex); 433 + ret = drm_irq_install(dev, irq); 434 + mutex_unlock(&dev->struct_mutex); 435 + 436 + return ret; 383 437 case DRM_UNINST_HANDLER: 384 - if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 385 - return 0; 386 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 387 - return 0; 388 - return drm_irq_uninstall(dev); 438 + mutex_lock(&dev->struct_mutex); 439 + ret = drm_irq_uninstall(dev); 440 + mutex_unlock(&dev->struct_mutex); 441 + 442 + return ret; 389 443 default: 390 444 return -EINVAL; 391 445 } ··· 1126 1160 int ret; 1127 1161 unsigned int flags, seq, crtc, high_crtc; 1128 1162 1129 - if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 1130 - if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled)) 1131 - return -EINVAL; 1163 + if (!dev->irq_enabled) 1164 + return -EINVAL; 1132 1165 1133 1166 if (vblwait->request.type & _DRM_VBLANK_SIGNAL) 1134 1167 return -EINVAL;
+47 -46
drivers/gpu/drm/drm_pci.c
··· 137 137 return pci_domain_nr(dev->pdev->bus); 138 138 } 139 139 140 - static int drm_pci_get_irq(struct drm_device *dev) 141 - { 142 - return dev->pdev->irq; 143 - } 144 - 145 - static const char *drm_pci_get_name(struct drm_device *dev) 146 - { 147 - struct pci_driver *pdriver = dev->driver->kdriver.pci; 148 - return pdriver->name; 149 - } 150 - 151 140 static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) 152 141 { 153 142 int len, ret; 154 - struct pci_driver *pdriver = dev->driver->kdriver.pci; 155 143 master->unique_len = 40; 156 144 master->unique_size = master->unique_len; 157 145 master->unique = kmalloc(master->unique_size, GFP_KERNEL); ··· 161 173 } else 162 174 master->unique_len = len; 163 175 164 - dev->devname = 165 - kmalloc(strlen(pdriver->name) + 166 - master->unique_len + 2, GFP_KERNEL); 167 - 168 - if (dev->devname == NULL) { 169 - ret = -ENOMEM; 170 - goto err; 171 - } 172 - 173 - sprintf(dev->devname, "%s@%s", pdriver->name, 174 - master->unique); 175 - 176 176 return 0; 177 177 err: 178 178 return ret; 179 179 } 180 180 181 - static int drm_pci_set_unique(struct drm_device *dev, 182 - struct drm_master *master, 183 - struct drm_unique *u) 181 + int drm_pci_set_unique(struct drm_device *dev, 182 + struct drm_master *master, 183 + struct drm_unique *u) 184 184 { 185 185 int domain, bus, slot, func, ret; 186 - const char *bus_name; 187 186 188 187 master->unique_len = u->unique_len; 189 188 master->unique_size = u->unique_len + 1; ··· 186 211 } 187 212 188 213 master->unique[master->unique_len] = '\0'; 189 - 190 - bus_name = dev->driver->bus->get_name(dev); 191 - dev->devname = kmalloc(strlen(bus_name) + 192 - strlen(master->unique) + 2, GFP_KERNEL); 193 - if (!dev->devname) { 194 - ret = -ENOMEM; 195 - goto err; 196 - } 197 - 198 - sprintf(dev->devname, "%s@%s", bus_name, 199 - master->unique); 200 214 201 215 /* Return error if the busid submitted doesn't match the device's actual 202 216 * busid. 
··· 211 247 return ret; 212 248 } 213 249 214 - 215 250 static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) 216 251 { 217 252 if ((p->busnum >> 8) != drm_get_pci_domain(dev) || ··· 223 260 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, 224 261 p->irq); 225 262 return 0; 263 + } 264 + 265 + /** 266 + * Get interrupt from bus id. 267 + * 268 + * \param inode device inode. 269 + * \param file_priv DRM file private. 270 + * \param cmd command. 271 + * \param arg user argument, pointing to a drm_irq_busid structure. 272 + * \return zero on success or a negative number on failure. 273 + * 274 + * Finds the PCI device with the specified bus id and gets its IRQ number. 275 + * This IOCTL is deprecated, and will now return EINVAL for any busid not equal 276 + * to that of the device that this DRM instance attached to. 277 + */ 278 + int drm_irq_by_busid(struct drm_device *dev, void *data, 279 + struct drm_file *file_priv) 280 + { 281 + struct drm_irq_busid *p = data; 282 + 283 + if (drm_core_check_feature(dev, DRIVER_MODESET)) 284 + return -EINVAL; 285 + 286 + /* UMS was only ever support on PCI devices. 
*/ 287 + if (WARN_ON(!dev->pdev)) 288 + return -EINVAL; 289 + 290 + if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 291 + return -EINVAL; 292 + 293 + return drm_pci_irq_by_busid(dev, p); 226 294 } 227 295 228 296 static void drm_pci_agp_init(struct drm_device *dev) ··· 281 287 } 282 288 283 289 static struct drm_bus drm_pci_bus = { 284 - .bus_type = DRIVER_BUS_PCI, 285 - .get_irq = drm_pci_get_irq, 286 - .get_name = drm_pci_get_name, 287 290 .set_busid = drm_pci_set_busid, 288 - .set_unique = drm_pci_set_unique, 289 - .irq_by_busid = drm_pci_irq_by_busid, 290 291 }; 291 292 292 293 /** ··· 364 375 365 376 DRM_DEBUG("\n"); 366 377 367 - driver->kdriver.pci = pdriver; 368 378 driver->bus = &drm_pci_bus; 369 379 370 380 if (driver->driver_features & DRIVER_MODESET) ··· 441 453 } 442 454 443 455 void drm_pci_agp_destroy(struct drm_device *dev) {} 456 + 457 + int drm_irq_by_busid(struct drm_device *dev, void *data, 458 + struct drm_file *file_priv) 459 + { 460 + return -EINVAL; 461 + } 462 + 463 + int drm_pci_set_unique(struct drm_device *dev, 464 + struct drm_master *master, 465 + struct drm_unique *u) 466 + { 467 + return -EINVAL; 468 + } 444 469 #endif 445 470 446 471 EXPORT_SYMBOL(drm_pci_init);
-25
drivers/gpu/drm/drm_platform.c
··· 68 68 return ret; 69 69 } 70 70 71 - static int drm_platform_get_irq(struct drm_device *dev) 72 - { 73 - return platform_get_irq(dev->platformdev, 0); 74 - } 75 - 76 - static const char *drm_platform_get_name(struct drm_device *dev) 77 - { 78 - return dev->platformdev->name; 79 - } 80 - 81 71 static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master) 82 72 { 83 73 int len, ret, id; ··· 96 106 goto err; 97 107 } 98 108 99 - dev->devname = 100 - kmalloc(strlen(dev->platformdev->name) + 101 - master->unique_len + 2, GFP_KERNEL); 102 - 103 - if (dev->devname == NULL) { 104 - ret = -ENOMEM; 105 - goto err; 106 - } 107 - 108 - sprintf(dev->devname, "%s@%s", dev->platformdev->name, 109 - master->unique); 110 109 return 0; 111 110 err: 112 111 return ret; 113 112 } 114 113 115 114 static struct drm_bus drm_platform_bus = { 116 - .bus_type = DRIVER_BUS_PLATFORM, 117 - .get_irq = drm_platform_get_irq, 118 - .get_name = drm_platform_get_name, 119 115 .set_busid = drm_platform_set_busid, 120 116 }; 121 117 ··· 121 145 { 122 146 DRM_DEBUG("\n"); 123 147 124 - driver->kdriver.platform_device = platform_device; 125 148 driver->bus = &drm_platform_bus; 126 149 return drm_get_platform_dev(platform_device, driver); 127 150 }
+1 -6
drivers/gpu/drm/drm_stub.c
··· 169 169 master->unique_len = 0; 170 170 } 171 171 172 - kfree(dev->devname); 173 - dev->devname = NULL; 174 - 175 172 list_for_each_entry_safe(pt, next, &master->magicfree, head) { 176 173 list_del(&pt->head); 177 174 drm_ht_remove_item(&master->magiclist, &pt->hash_item); ··· 569 572 INIT_LIST_HEAD(&dev->maplist); 570 573 INIT_LIST_HEAD(&dev->vblank_event_list); 571 574 572 - spin_lock_init(&dev->count_lock); 575 + spin_lock_init(&dev->buf_lock); 573 576 spin_lock_init(&dev->event_lock); 574 577 mutex_init(&dev->struct_mutex); 575 578 mutex_init(&dev->ctxlist_mutex); ··· 647 650 drm_minor_free(dev, DRM_MINOR_LEGACY); 648 651 drm_minor_free(dev, DRM_MINOR_RENDER); 649 652 drm_minor_free(dev, DRM_MINOR_CONTROL); 650 - 651 - kfree(dev->devname); 652 653 653 654 mutex_destroy(&dev->master_mutex); 654 655 kfree(dev);
-14
drivers/gpu/drm/drm_usb.c
··· 36 36 } 37 37 EXPORT_SYMBOL(drm_get_usb_dev); 38 38 39 - static int drm_usb_get_irq(struct drm_device *dev) 40 - { 41 - return 0; 42 - } 43 - 44 - static const char *drm_usb_get_name(struct drm_device *dev) 45 - { 46 - return "USB"; 47 - } 48 - 49 39 static int drm_usb_set_busid(struct drm_device *dev, 50 40 struct drm_master *master) 51 41 { ··· 43 53 } 44 54 45 55 static struct drm_bus drm_usb_bus = { 46 - .bus_type = DRIVER_BUS_USB, 47 - .get_irq = drm_usb_get_irq, 48 - .get_name = drm_usb_get_name, 49 56 .set_busid = drm_usb_set_busid, 50 57 }; 51 58 ··· 51 64 int res; 52 65 DRM_DEBUG("\n"); 53 66 54 - driver->kdriver.usb = udriver; 55 67 driver->bus = &drm_usb_bus; 56 68 57 69 res = usb_register(udriver);
+1 -1
drivers/gpu/drm/gma500/psb_drv.c
··· 354 354 PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R); 355 355 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 356 356 357 - drm_irq_install(dev); 357 + drm_irq_install(dev, dev->pdev->irq); 358 358 359 359 dev->vblank_disable_allowed = true; 360 360 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+7 -6
drivers/gpu/drm/i915/i915_dma.c
··· 1280 1280 static bool i915_switcheroo_can_switch(struct pci_dev *pdev) 1281 1281 { 1282 1282 struct drm_device *dev = pci_get_drvdata(pdev); 1283 - bool can_switch; 1284 1283 1285 - spin_lock(&dev->count_lock); 1286 - can_switch = (dev->open_count == 0); 1287 - spin_unlock(&dev->count_lock); 1288 - return can_switch; 1284 + /* 1285 + * FIXME: open_count is protected by drm_global_mutex but that would lead to 1286 + * locking inversion with the driver load path. And the access here is 1287 + * completely racy anyway. So don't bother with locking for now. 1288 + */ 1289 + return dev->open_count == 0; 1289 1290 } 1290 1291 1291 1292 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { ··· 1330 1329 1331 1330 intel_power_domains_init_hw(dev_priv); 1332 1331 1333 - ret = drm_irq_install(dev); 1332 + ret = drm_irq_install(dev, dev->pdev->irq); 1334 1333 if (ret) 1335 1334 goto cleanup_gem_stolen; 1336 1335
+7 -2
drivers/gpu/drm/i915/i915_drv.c
··· 574 574 mutex_unlock(&dev->struct_mutex); 575 575 576 576 /* We need working interrupts for modeset enabling ... */ 577 - drm_irq_install(dev); 577 + drm_irq_install(dev, dev->pdev->irq); 578 578 579 579 intel_modeset_init_hw(dev); 580 580 ··· 746 746 return ret; 747 747 } 748 748 749 + /* 750 + * FIXME: This is horribly race against concurrent pageflip and 751 + * vblank wait ioctls since they can observe dev->irqs_disabled 752 + * being false when they shouldn't be able to. 753 + */ 749 754 drm_irq_uninstall(dev); 750 - drm_irq_install(dev); 755 + drm_irq_install(dev, dev->pdev->irq); 751 756 752 757 /* rps/rc6 re-init is necessary to restore state lost after the 753 758 * reset and the re-install of drm irq. Skip for ironlake per
+4 -3
drivers/gpu/drm/i915/i915_gem.c
··· 4523 4523 } 4524 4524 4525 4525 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list)); 4526 - mutex_unlock(&dev->struct_mutex); 4527 4526 4528 - ret = drm_irq_install(dev); 4527 + ret = drm_irq_install(dev, dev->pdev->irq); 4529 4528 if (ret) 4530 4529 goto cleanup_ringbuffer; 4530 + mutex_unlock(&dev->struct_mutex); 4531 4531 4532 4532 return 0; 4533 4533 4534 4534 cleanup_ringbuffer: 4535 - mutex_lock(&dev->struct_mutex); 4536 4535 i915_gem_cleanup_ringbuffer(dev); 4537 4536 dev_priv->ums.mm_suspended = 1; 4538 4537 mutex_unlock(&dev->struct_mutex); ··· 4546 4547 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4547 4548 return 0; 4548 4549 4550 + mutex_lock(&dev->struct_mutex); 4549 4551 drm_irq_uninstall(dev); 4552 + mutex_unlock(&dev->struct_mutex); 4550 4553 4551 4554 return i915_gem_suspend(dev); 4552 4555 }
+1 -1
drivers/gpu/drm/mga/mga_state.c
··· 1020 1020 1021 1021 switch (param->param) { 1022 1022 case MGA_PARAM_IRQ_NR: 1023 - value = drm_dev_to_irq(dev); 1023 + value = dev->pdev->irq; 1024 1024 break; 1025 1025 case MGA_PARAM_CARD_TYPE: 1026 1026 value = dev_priv->chipset;
+1 -1
drivers/gpu/drm/msm/msm_drv.c
··· 288 288 } 289 289 290 290 pm_runtime_get_sync(dev->dev); 291 - ret = drm_irq_install(dev); 291 + ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0)); 292 292 pm_runtime_put_sync(dev->dev); 293 293 if (ret < 0) { 294 294 dev_err(dev->dev, "failed to install IRQ handler\n");
+6 -5
drivers/gpu/drm/nouveau/nouveau_vga.c
··· 64 64 nouveau_switcheroo_can_switch(struct pci_dev *pdev) 65 65 { 66 66 struct drm_device *dev = pci_get_drvdata(pdev); 67 - bool can_switch; 68 67 69 - spin_lock(&dev->count_lock); 70 - can_switch = (dev->open_count == 0); 71 - spin_unlock(&dev->count_lock); 72 - return can_switch; 68 + /* 69 + * FIXME: open_count is protected by drm_global_mutex but that would lead to 70 + * locking inversion with the driver load path. And the access here is 71 + * completely racy anyway. So don't bother with locking for now. 72 + */ 73 + return dev->open_count == 0; 73 74 } 74 75 75 76 static const struct vga_switcheroo_client_ops
-1
drivers/gpu/drm/qxl/qxl_drv.c
··· 214 214 static struct drm_driver qxl_driver = { 215 215 .driver_features = DRIVER_GEM | DRIVER_MODESET | 216 216 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 217 - .dev_priv_size = 0, 218 217 .load = qxl_driver_load, 219 218 .unload = qxl_driver_unload, 220 219
+1 -1
drivers/gpu/drm/qxl/qxl_irq.c
··· 87 87 atomic_set(&qdev->irq_received_cursor, 0); 88 88 atomic_set(&qdev->irq_received_io_cmd, 0); 89 89 qdev->irq_received_error = 0; 90 - ret = drm_irq_install(qdev->ddev); 90 + ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq); 91 91 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK; 92 92 if (unlikely(ret != 0)) { 93 93 DRM_ERROR("Failed installing irq: %d\n", ret);
+1 -1
drivers/gpu/drm/r128/r128_state.c
··· 1594 1594 1595 1595 switch (param->param) { 1596 1596 case R128_PARAM_IRQ_NR: 1597 - value = drm_dev_to_irq(dev); 1597 + value = dev->pdev->irq; 1598 1598 break; 1599 1599 default: 1600 1600 return -EINVAL;
+6 -5
drivers/gpu/drm/radeon/radeon_device.c
··· 1125 1125 static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) 1126 1126 { 1127 1127 struct drm_device *dev = pci_get_drvdata(pdev); 1128 - bool can_switch; 1129 1128 1130 - spin_lock(&dev->count_lock); 1131 - can_switch = (dev->open_count == 0); 1132 - spin_unlock(&dev->count_lock); 1133 - return can_switch; 1129 + /* 1130 + * FIXME: open_count is protected by drm_global_mutex but that would lead to 1131 + * locking inversion with the driver load path. And the access here is 1132 + * completely racy anyway. So don't bother with locking for now. 1133 + */ 1134 + return dev->open_count == 0; 1134 1135 } 1135 1136 1136 1137 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
-1
drivers/gpu/drm/radeon/radeon_drv.c
··· 519 519 DRIVER_USE_AGP | 520 520 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | 521 521 DRIVER_PRIME | DRIVER_RENDER, 522 - .dev_priv_size = 0, 523 522 .load = radeon_driver_load_kms, 524 523 .open = radeon_driver_open_kms, 525 524 .preclose = radeon_driver_preclose_kms,
+1 -1
drivers/gpu/drm/radeon/radeon_irq_kms.c
··· 287 287 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func); 288 288 289 289 rdev->irq.installed = true; 290 - r = drm_irq_install(rdev->ddev); 290 + r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); 291 291 if (r) { 292 292 rdev->irq.installed = false; 293 293 flush_work(&rdev->hotplug_work);
+1 -1
drivers/gpu/drm/radeon/radeon_state.c
··· 3054 3054 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) 3055 3055 value = 0; 3056 3056 else 3057 - value = drm_dev_to_irq(dev); 3057 + value = dev->pdev->irq; 3058 3058 break; 3059 3059 case RADEON_PARAM_GART_BASE: 3060 3060 value = dev_priv->gart_vm_start;
+1 -1
drivers/gpu/drm/shmobile/shmob_drm_drv.c
··· 185 185 goto done; 186 186 } 187 187 188 - ret = drm_irq_install(dev); 188 + ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0)); 189 189 if (ret < 0) { 190 190 dev_err(&pdev->dev, "failed to install IRQ handler\n"); 191 191 goto done;
-11
drivers/gpu/drm/tegra/bus.c
··· 12 12 struct drm_master *master) 13 13 { 14 14 const char *device = dev_name(dev->dev); 15 - const char *driver = dev->driver->name; 16 15 const char *bus = dev->dev->bus->name; 17 - int length; 18 16 19 17 master->unique_len = strlen(bus) + 1 + strlen(device); 20 18 master->unique_size = master->unique_len; ··· 23 25 24 26 snprintf(master->unique, master->unique_len + 1, "%s:%s", bus, device); 25 27 26 - length = strlen(driver) + 1 + master->unique_len; 27 - 28 - dev->devname = kmalloc(length + 1, GFP_KERNEL); 29 - if (!dev->devname) 30 - return -ENOMEM; 31 - 32 - snprintf(dev->devname, length + 1, "%s@%s", driver, master->unique); 33 - 34 28 return 0; 35 29 } 36 30 37 31 static struct drm_bus drm_host1x_bus = { 38 - .bus_type = DRIVER_BUS_HOST1X, 39 32 .set_busid = drm_host1x_set_busid, 40 33 }; 41 34
+1 -1
drivers/gpu/drm/tilcdc/tilcdc_drv.c
··· 268 268 } 269 269 270 270 pm_runtime_get_sync(dev->dev); 271 - ret = drm_irq_install(dev); 271 + ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0)); 272 272 pm_runtime_put_sync(dev->dev); 273 273 if (ret < 0) { 274 274 dev_err(dev->dev, "failed to install IRQ handler\n");
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 806 806 } 807 807 808 808 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { 809 - ret = drm_irq_install(dev); 809 + ret = drm_irq_install(dev, dev->pdev->irq); 810 810 if (ret != 0) { 811 811 DRM_ERROR("Failed installing irq: %d\n", ret); 812 812 goto out_no_irq;
+8 -25
include/drm/drmP.h
··· 143 143 #define DRIVER_PRIME 0x4000 144 144 #define DRIVER_RENDER 0x8000 145 145 146 - #define DRIVER_BUS_PCI 0x1 147 - #define DRIVER_BUS_PLATFORM 0x2 148 - #define DRIVER_BUS_USB 0x3 149 - #define DRIVER_BUS_HOST1X 0x4 150 - 151 146 /***********************************************************************/ 152 147 /** \name Begin the DRM... */ 153 148 /*@{*/ ··· 726 731 #define DRM_SCANOUTPOS_ACCURATE (1 << 2) 727 732 728 733 struct drm_bus { 729 - int bus_type; 730 - int (*get_irq)(struct drm_device *dev); 731 - const char *(*get_name)(struct drm_device *dev); 732 734 int (*set_busid)(struct drm_device *dev, struct drm_master *master); 733 - int (*set_unique)(struct drm_device *dev, struct drm_master *master, 734 - struct drm_unique *unique); 735 - int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p); 736 735 }; 737 736 738 737 /** ··· 963 974 const struct drm_ioctl_desc *ioctls; 964 975 int num_ioctls; 965 976 const struct file_operations *fops; 966 - union { 967 - struct pci_driver *pci; 968 - struct platform_device *platform_device; 969 - struct usb_driver *usb; 970 - } kdriver; 971 977 struct drm_bus *bus; 972 978 973 979 /* List of devices hanging off this driver with stealth attach. 
*/ ··· 1042 1058 */ 1043 1059 struct drm_device { 1044 1060 struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ 1045 - char *devname; /**< For /proc/interrupts */ 1046 1061 int if_version; /**< Highest interface version set */ 1047 1062 1048 1063 /** \name Lifetime Management */ ··· 1059 1076 1060 1077 /** \name Locks */ 1061 1078 /*@{ */ 1062 - spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ 1063 1079 struct mutex struct_mutex; /**< For others */ 1064 1080 struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */ 1065 1081 /*@} */ 1066 1082 1067 1083 /** \name Usage Counters */ 1068 1084 /*@{ */ 1069 - int open_count; /**< Outstanding files open */ 1085 + int open_count; /**< Outstanding files open, protected by drm_global_mutex. */ 1086 + spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */ 1070 1087 int buf_use; /**< Buffers in use -- cannot alloc */ 1071 1088 atomic_t buf_alloc; /**< Buffer allocation in progress */ 1072 1089 /*@} */ ··· 1097 1114 /** \name Context support */ 1098 1115 /*@{ */ 1099 1116 bool irq_enabled; /**< True if irq handler is enabled */ 1117 + int irq; 1118 + 1100 1119 __volatile__ long context_flag; /**< Context swapping flag */ 1101 1120 int last_context; /**< Last current context */ 1102 1121 /*@} */ ··· 1169 1184 int feature) 1170 1185 { 1171 1186 return ((dev->driver->driver_features & feature) ? 
1 : 0); 1172 - } 1173 - 1174 - static inline int drm_dev_to_irq(struct drm_device *dev) 1175 - { 1176 - return dev->driver->bus->get_irq(dev); 1177 1187 } 1178 1188 1179 1189 static inline void drm_device_set_unplugged(struct drm_device *dev) ··· 1343 1363 /* IRQ support (drm_irq.h) */ 1344 1364 extern int drm_control(struct drm_device *dev, void *data, 1345 1365 struct drm_file *file_priv); 1346 - extern int drm_irq_install(struct drm_device *dev); 1366 + extern int drm_irq_install(struct drm_device *dev, int irq); 1347 1367 extern int drm_irq_uninstall(struct drm_device *dev); 1348 1368 1349 1369 extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); ··· 1502 1522 size_t align); 1503 1523 extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); 1504 1524 extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); 1525 + extern int drm_pci_set_unique(struct drm_device *dev, 1526 + struct drm_master *master, 1527 + struct drm_unique *u); 1505 1528 1506 1529 /* sysfs support (drm_sysfs.c) */ 1507 1530 struct drm_sysfs_class;