
Merge tag 'topic/drm-misc-2016-04-29' of git://anongit.freedesktop.org/drm-intel into drm-next

- prep work for struct_mutex-less gem_free_object
- more invasive/tricky mst fixes from Lyude for broken hw. I discussed
this with Ville/Jani and we all agreed more soaking in -next would be
really good this late in the -rc cycle. They're cc: stable too to make
sure they're not getting lost. Feel free to cherry-pick those four if
you disagree.
- a few small things all over

* tag 'topic/drm-misc-2016-04-29' of git://anongit.freedesktop.org/drm-intel:
drm/atomic: Add missing drm_crtc_internal.h include
drm/dp: Allow signals to interrupt drm_aux-dev reads/writes
drm: Quiet down drm_mode_getresources
drm: Quiet down drm_mode_getconnector
drm: Protect dev->filelist with its own mutex
drm: Make drm_vm_open/close_locked private to drm_vm.c
drm: Hide master MAP cleanup in drm_bufs.c
drm: Forbid legacy MAP functions for DRIVER_MODESET
drm: Push struct_mutex into ->master_destroy
drm: Move drm_getmap into drm_bufs.c and give it a legacy prefix
drm: Put legacy lastclose work into drm_legacy_dev_reinit
drm: Give drm_agp_clear drm_legacy_ prefix
drm/sysfs: Annotate lockless show functions with READ_ONCE
MAINTAINERS: Update the files list for the GMA500 DRM driver
drm: rcar-du: Fix compilation warning
drm/i915: Get rid of intel_dp_dpcd_read_wake()
drm/dp_helper: Perform throw-away read before actual read in drm_dp_dpcd_read()
drm/dp_helper: Retry aux transactions on all errors
drm/dp_helper: Always wait before retrying native aux transactions

+236 -221
+1 -2
MAINTAINERS
···
 L:	dri-devel@lists.freedesktop.org
 T:	git git://github.com/patjak/drm-gma500
 S:	Maintained
-F:	drivers/gpu/drm/gma500
-F:	include/drm/gma500*
+F:	drivers/gpu/drm/gma500/

 DRM DRIVERS FOR NVIDIA TEGRA
 M:	Thierry Reding <thierry.reding@gmail.com>
+5 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
···
 	struct drm_device *ddev = adev->ddev;
 	struct drm_file *file;

-	mutex_lock(&ddev->struct_mutex);
+	mutex_lock(&ddev->filelist_mutex);

 	list_for_each_entry(file, &ddev->filelist, lhead) {
 		struct drm_gem_object *gobj;
···
 		spin_lock(&file->table_lock);
 		idr_for_each_entry(&file->object_idr, gobj, handle) {
 			WARN_ONCE(1, "And also active allocations!\n");
-			drm_gem_object_unreference(gobj);
+			drm_gem_object_unreference_unlocked(gobj);
 		}
 		idr_destroy(&file->object_idr);
 		spin_unlock(&file->table_lock);
 	}

-	mutex_unlock(&ddev->struct_mutex);
+	mutex_unlock(&ddev->filelist_mutex);
 }

 /*
···
 	struct drm_file *file;
 	int r;

-	r = mutex_lock_interruptible(&dev->struct_mutex);
+	r = mutex_lock_interruptible(&dev->filelist_mutex);
 	if (r)
 		return r;
···
 		spin_unlock(&file->table_lock);
 	}

-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->filelist_mutex);
 	return 0;
 }
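The amdgpu hunks are the consumer side of the new locking scheme: every walk of dev->filelist now nests under the dedicated filelist_mutex instead of struct_mutex, and a GEM reference dropped inside such a walk must use the _unlocked variant, since struct_mutex is no longer held. A minimal sketch of the pattern, assuming only a valid struct drm_device (example_count_clients is a hypothetical helper, not part of this series):

#include <drm/drmP.h>

static int example_count_clients(struct drm_device *dev)
{
	struct drm_file *file;
	int count = 0;

	/* dev->filelist is now protected by its own mutex, not struct_mutex */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry(file, &dev->filelist, lhead)
		count++;
	mutex_unlock(&dev->filelist_mutex);

	return count;
}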
+2 -2
drivers/gpu/drm/drm_agpsupport.c
···
 }

 /**
- * drm_agp_clear - Clear AGP resource list
+ * drm_legacy_agp_clear - Clear AGP resource list
  * @dev: DRM device
  *
  * Iterate over all AGP resources and remove them. But keep the AGP head
···
  * resources from getting destroyed. Drivers are responsible of cleaning them up
  * during device shutdown.
  */
-void drm_agp_clear(struct drm_device *dev)
+void drm_legacy_agp_clear(struct drm_device *dev)
 {
 	struct drm_agp_mem *entry, *tempe;

+2
drivers/gpu/drm/drm_atomic.c
···
 #include <drm/drm_mode.h>
 #include <drm/drm_plane_helper.h>

+#include "drm_crtc_internal.h"
+
 /**
  * drm_atomic_state_default_release -
  * release memory initialized by drm_atomic_state_init
+86 -5
drivers/gpu/drm/drm_bufs.c
···
 	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
 		return -EPERM;

+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	err = drm_addmap_core(dev, map->offset, map->size, map->type,
 			      map->flags, &maplist);
···
 	 * it's not a real mtrr index anymore.)
 	 */
 	map->mtrr = -1;
+
+	return 0;
+}
+
+/*
+ * Get a mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping with the specified offset and copies its information
+ * into userspace
+ */
+int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	struct drm_map *map = data;
+	struct drm_map_list *r_list = NULL;
+	struct list_head *list;
+	int idx;
+	int i;
+
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	idx = map->offset;
+	if (idx < 0)
+		return -EINVAL;
+
+	i = 0;
+	mutex_lock(&dev->struct_mutex);
+	list_for_each(list, &dev->maplist) {
+		if (i == idx) {
+			r_list = list_entry(list, struct drm_map_list, head);
+			break;
+		}
+		i++;
+	}
+	if (!r_list || !r_list->map) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	map->offset = r_list->map->offset;
+	map->size = r_list->map->size;
+	map->type = r_list->map->type;
+	map->flags = r_list->map->flags;
+	map->handle = (void *)(unsigned long) r_list->user_token;
+	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
+
+	mutex_unlock(&dev->struct_mutex);

 	return 0;
 }
···
 }
 EXPORT_SYMBOL(drm_legacy_rmmap_locked);

-int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
+void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
 {
-	int ret;
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;

 	mutex_lock(&dev->struct_mutex);
-	ret = drm_legacy_rmmap_locked(dev, map);
+	drm_legacy_rmmap_locked(dev, map);
 	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
 }
 EXPORT_SYMBOL(drm_legacy_rmmap);
+
+void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
+{
+	struct drm_map_list *r_list, *list_temp;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+		if (r_list->master == master) {
+			drm_legacy_rmmap_locked(dev, r_list->map);
+			r_list = NULL;
+		}
+	}
+	mutex_unlock(&dev->struct_mutex);
+}

 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
  * the last close of the device, and this is necessary for cleanup when things
···
 	struct drm_local_map *map = NULL;
 	struct drm_map_list *r_list;
 	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;

 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(r_list, &dev->maplist, head) {
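Note the guard that now opens each legacy MAP entry point: a modesetting driver that has not opted in to DRIVER_KMS_LEGACY_CONTEXT is refused outright. As a sketch, any further legacy ioctl following this series would start the same way (example_legacy_ioctl is a hypothetical name, not from the patch):

#include <drm/drmP.h>

static int example_legacy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	/* KMS drivers get -EINVAL unless they explicitly kept legacy
	 * context support; only UMS drivers fall through. */
	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	/* ... legacy-only work, typically under dev->struct_mutex ... */
	return 0;
}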
-12
drivers/gpu/drm/drm_crtc.c
···
 	copied = 0;
 	crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
 	drm_for_each_crtc(crtc, dev) {
-		DRM_DEBUG_KMS("[CRTC:%d:%s]\n",
-			      crtc->base.id, crtc->name);
 		if (put_user(crtc->base.id, crtc_id + copied)) {
 			ret = -EFAULT;
 			goto out;
···
 	copied = 0;
 	encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
 	drm_for_each_encoder(encoder, dev) {
-		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
-			      encoder->name);
 		if (put_user(encoder->base.id, encoder_id +
 			     copied)) {
 			ret = -EFAULT;
···
 	copied = 0;
 	connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
 	drm_for_each_connector(connector, dev) {
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-			      connector->base.id,
-			      connector->name);
 		if (put_user(connector->base.id,
 			     connector_id + copied)) {
 			ret = -EFAULT;
···
 		}
 	}
 	card_res->count_connectors = connector_count;
-
-	DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
-		      card_res->count_connectors, card_res->count_encoders);

 out:
 	mutex_unlock(&dev->mode_config.mutex);
···
 		return -EINVAL;

 	memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
-
-	DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);

 	mutex_lock(&dev->mode_config.mutex);
+12
drivers/gpu/drm/drm_dp_aux_dev.c
···
 		uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
 		ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));

+		if (signal_pending(current)) {
+			res = num_bytes_processed ?
+				num_bytes_processed : -ERESTARTSYS;
+			goto out;
+		}
+
 		res = drm_dp_dpcd_read(aux_dev->aux, *offset, localbuf, todo);
 		if (res <= 0) {
 			res = num_bytes_processed ? num_bytes_processed : res;
···
 	while (bytes_pending > 0) {
 		uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
 		ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
+
+		if (signal_pending(current)) {
+			res = num_bytes_processed ?
+				num_bytes_processed : -ERESTARTSYS;
+			goto out;
+		}

 		if (__copy_from_user(localbuf,
 				     buf + num_bytes_processed, todo)) {
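Both hunks insert the same check at the top of each chunked transfer loop: before starting another AUX transaction (at most DP_AUX_MAX_PAYLOAD_BYTES per chunk), bail out if a signal is pending, reporting partial progress if any bytes already moved and -ERESTARTSYS otherwise. A hedged, self-contained sketch of that loop shape (example_chunked_copy and the fixed 16-byte chunk size are illustrative only):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>

static ssize_t example_chunked_copy(char *dst, const char *src,
				    size_t bytes_pending)
{
	ssize_t num_bytes_processed = 0;

	while (bytes_pending > 0) {
		size_t todo = min_t(size_t, bytes_pending, 16);

		/* Between chunks is the safe point to give up: report
		 * progress if we made any, else let the syscall restart. */
		if (signal_pending(current))
			return num_bytes_processed ?
				num_bytes_processed : -ERESTARTSYS;

		memcpy(dst + num_bytes_processed,
		       src + num_bytes_processed, todo);
		num_bytes_processed += todo;
		bytes_pending -= todo;
	}

	return num_bytes_processed;
}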
+43 -23
drivers/gpu/drm/drm_dp_helper.c
···
 			 unsigned int offset, void *buffer, size_t size)
 {
 	struct drm_dp_aux_msg msg;
-	unsigned int retry;
-	int err = 0;
+	unsigned int retry, native_reply;
+	int err = 0, ret = 0;

 	memset(&msg, 0, sizeof(msg));
 	msg.address = offset;
···
 	 * sufficient, bump to 32 which makes Dell 4k monitors happier.
 	 */
 	for (retry = 0; retry < 32; retry++) {
-
-		err = aux->transfer(aux, &msg);
-		if (err < 0) {
-			if (err == -EBUSY)
-				continue;
-
-			goto unlock;
+		if (ret != 0 && ret != -ETIMEDOUT) {
+			usleep_range(AUX_RETRY_INTERVAL,
+				     AUX_RETRY_INTERVAL + 100);
 		}

+		ret = aux->transfer(aux, &msg);

-		switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
-		case DP_AUX_NATIVE_REPLY_ACK:
-			if (err < size)
-				err = -EPROTO;
-			goto unlock;
+		if (ret > 0) {
+			native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
+			if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
+				if (ret == size)
+					goto unlock;

-		case DP_AUX_NATIVE_REPLY_NACK:
-			err = -EIO;
-			goto unlock;
-
-		case DP_AUX_NATIVE_REPLY_DEFER:
-			usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
-			break;
+				ret = -EPROTO;
+			} else
+				ret = -EIO;
 		}
+
+		/*
+		 * We want the error we return to be the error we received on
+		 * the first transaction, since we may get a different error the
+		 * next time we retry
+		 */
+		if (!err)
+			err = ret;
 	}

 	DRM_DEBUG_KMS("too many retries, giving up\n");
-	err = -EIO;
+	ret = err;

 unlock:
 	mutex_unlock(&aux->hw_mutex);
-	return err;
+	return ret;
 }

 /**
···
 ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
 			 void *buffer, size_t size)
 {
+	int ret;
+
+	/*
+	 * HP ZR24w corrupts the first DPCD access after entering power save
+	 * mode. Eg. on a read, the entire buffer will be filled with the same
+	 * byte. Do a throw away read to avoid corrupting anything we care
+	 * about. Afterwards things will work correctly until the monitor
+	 * gets woken up and subsequently re-enters power save mode.
+	 *
+	 * The user pressing any button on the monitor is enough to wake it
+	 * up, so there is no particularly good place to do the workaround.
+	 * We just have to do it before any DPCD access and hope that the
+	 * monitor doesn't power down exactly after the throw away read.
+	 */
+	ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
+				 1);
+	if (ret != 1)
+		return ret;
+
 	return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
 				  size);
 }
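From a caller's point of view the helper now hides both the quirk handling and the retry policy: the throw-away DP_DPCD_REV read wakes a drowsy sink, up to 32 transactions are attempted with a wait before each retry (except after timeouts, which already consumed time), and on total failure the error of the first attempt is returned rather than whatever the last retry happened to produce. A hedged usage sketch (example_probe_dpcd_rev is illustrative, not from the patch):

#include <drm/drm_dp_helper.h>

static int example_probe_dpcd_rev(struct drm_dp_aux *aux, u8 *rev)
{
	ssize_t ret;

	/* One call covers the wake-up read and the full retry loop; a
	 * short read already comes back as a negative error code. */
	ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, rev, 1);
	if (ret < 0)
		return ret;

	return 0;
}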
+2 -9
drivers/gpu/drm/drm_drv.c
···
 {
 	struct drm_master *master = container_of(kref, struct drm_master, refcount);
 	struct drm_device *dev = master->minor->dev;
-	struct drm_map_list *r_list, *list_temp;

-	mutex_lock(&dev->struct_mutex);
 	if (dev->driver->master_destroy)
 		dev->driver->master_destroy(dev, master);

-	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
-		if (r_list->master == master) {
-			drm_legacy_rmmap_locked(dev, r_list->map);
-			r_list = NULL;
-		}
-	}
-	mutex_unlock(&dev->struct_mutex);
+	drm_legacy_master_rmmaps(dev, master);

 	idr_destroy(&master->magic_map);
 	kfree(master->unique);
···
 	spin_lock_init(&dev->buf_lock);
 	spin_lock_init(&dev->event_lock);
 	mutex_init(&dev->struct_mutex);
+	mutex_init(&dev->filelist_mutex);
 	mutex_init(&dev->ctxlist_mutex);
 	mutex_init(&dev->master_mutex);
+25 -26
drivers/gpu/drm/drm_fops.c
···
 	}
 	mutex_unlock(&dev->master_mutex);

-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev->filelist_mutex);
 	list_add(&priv->lhead, &dev->filelist);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->filelist_mutex);

 #ifdef __alpha__
 	/*
···
  */
 static void drm_legacy_dev_reinit(struct drm_device *dev)
 {
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+	mutex_lock(&dev->struct_mutex);
+
+	drm_legacy_agp_clear(dev);
+
+	drm_legacy_sg_cleanup(dev);
+	drm_legacy_vma_flush(dev);
+	drm_legacy_dma_takedown(dev);
+
+	mutex_unlock(&dev->struct_mutex);

 	dev->sigdata.lock = NULL;

 	dev->context_flag = 0;
 	dev->last_context = 0;
 	dev->if_version = 0;
+
+	DRM_DEBUG("lastclose completed\n");
 }

 /*
···
  *
  * \sa drm_device
  */
-int drm_lastclose(struct drm_device * dev)
+void drm_lastclose(struct drm_device * dev)
 {
 	DRM_DEBUG("\n");

···
 		dev->driver->lastclose(dev);
 	DRM_DEBUG("driver lastclose completed\n");

-	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_irq_uninstall(dev);
-
-	mutex_lock(&dev->struct_mutex);
-
-	drm_agp_clear(dev);
-
-	drm_legacy_sg_cleanup(dev);
-	drm_legacy_vma_flush(dev);
-	drm_legacy_dma_takedown(dev);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	drm_legacy_dev_reinit(dev);
-
-	DRM_DEBUG("lastclose completed\n");
-	return 0;
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_legacy_dev_reinit(dev);
 }

 /**
···
 	struct drm_file *file_priv = filp->private_data;
 	struct drm_minor *minor = file_priv->minor;
 	struct drm_device *dev = minor->dev;
-	int retcode = 0;

 	mutex_lock(&drm_global_mutex);

 	DRM_DEBUG("open_count = %d\n", dev->open_count);

-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev->filelist_mutex);
 	list_del(&file_priv->lhead);
+	mutex_unlock(&dev->filelist_mutex);
+
+	mutex_lock(&dev->struct_mutex);
 	if (file_priv->magic)
 		idr_remove(&file_priv->master->magic_map, file_priv->magic);
 	mutex_unlock(&dev->struct_mutex);
···
 	 */

 	if (!--dev->open_count) {
-		retcode = drm_lastclose(dev);
+		drm_lastclose(dev);
 		if (drm_device_is_unplugged(dev))
 			drm_put_dev(dev);
 	}
···

 	drm_minor_release(minor);

-	return retcode;
+	return 0;
 }
 EXPORT_SYMBOL(drm_release);
+2 -2
drivers/gpu/drm/drm_info.c
···
 	/* dev->filelist is sorted youngest first, but we want to present
 	 * oldest first (i.e. kernel, servers, clients), so walk backwardss.
 	 */
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev->filelist_mutex);
 	list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
 		struct task_struct *task;

···
 			   priv->magic);
 		rcu_read_unlock();
 	}
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->filelist_mutex);
 	return 0;
 }
+1 -3
drivers/gpu/drm/drm_internal.h
···

 /* drm_fops.c */
 extern struct mutex drm_global_mutex;
-int drm_lastclose(struct drm_device *dev);
+void drm_lastclose(struct drm_device *dev);

 /* drm_pci.c */
 int drm_pci_set_unique(struct drm_device *dev,
···

 /* drm_vm.c */
 int drm_vma_info(struct seq_file *m, void *data);
-void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
-void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);

 /* drm_prime.c */
 int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+1 -53
drivers/gpu/drm/drm_ioctl.c
···
 }

 /*
- * Get a mapping information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_map structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping with the specified offset and copies its information
- * into userspace
- */
-static int drm_getmap(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_map *map = data;
-	struct drm_map_list *r_list = NULL;
-	struct list_head *list;
-	int idx;
-	int i;
-
-	idx = map->offset;
-	if (idx < 0)
-		return -EINVAL;
-
-	i = 0;
-	mutex_lock(&dev->struct_mutex);
-	list_for_each(list, &dev->maplist) {
-		if (i == idx) {
-			r_list = list_entry(list, struct drm_map_list, head);
-			break;
-		}
-		i++;
-	}
-	if (!r_list || !r_list->map) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-
-	map->offset = r_list->map->offset;
-	map->size = r_list->map->size;
-	map->type = r_list->map->type;
-	map->flags = r_list->map->flags;
-	map->handle = (void *)(unsigned long) r_list->user_token;
-	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-/*
  * Get client information.
  *
  * \param inode device inode.
···
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+2
drivers/gpu/drm/drm_legacy.h
···

 #define DRM_MAP_HASH_OFFSET 0x10000000

+int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
 int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
 int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
 int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
+1 -1
drivers/gpu/drm/drm_pci.c
···
 {
 	if (dev->agp) {
 		arch_phys_wc_del(dev->agp->agp_mtrr);
-		drm_agp_clear(dev);
+		drm_legacy_agp_clear(dev);
 		kfree(dev->agp);
 		dev->agp = NULL;
 	}
+8 -3
drivers/gpu/drm/drm_sysfs.c
···
 			   char *buf)
 {
 	struct drm_connector *connector = to_drm_connector(device);
+	enum drm_connector_status status;
+
+	status = READ_ONCE(connector->status);

 	return snprintf(buf, PAGE_SIZE, "%s\n",
-			drm_get_connector_status_name(connector->status));
+			drm_get_connector_status_name(status));
 }

 static ssize_t dpms_show(struct device *device,
···
 			    char *buf)
 {
 	struct drm_connector *connector = to_drm_connector(device);
+	bool enabled;

-	return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
-			"disabled");
+	enabled = READ_ONCE(connector->encoder);
+
+	return snprintf(buf, PAGE_SIZE, enabled ? "enabled\n" : "disabled\n");
 }

 static ssize_t edid_show(struct file *filp, struct kobject *kobj,
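These sysfs show functions run without any connector lock, so the patch snapshots the hot-pluggable fields through READ_ONCE: the compiler must emit exactly one load, and every later use in the function sees the same value even if a concurrent hotplug rewrites connector->status mid-call. The same shape applies to any lockless read of a field that a writer updates under its own lock; a hedged generic sketch (struct example_state and example_flag_show are illustrative, not DRM code):

#include <linux/device.h>
#include <linux/compiler.h>

struct example_state {
	bool enabled;	/* written elsewhere under the driver's lock */
};

static ssize_t example_flag_show(struct device *device,
				 struct device_attribute *attr, char *buf)
{
	struct example_state *st = dev_get_drvdata(device);
	bool on = READ_ONCE(st->enabled);	/* one untorn load */

	return snprintf(buf, PAGE_SIZE, on ? "on\n" : "off\n");
}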
+4 -12
drivers/gpu/drm/drm_vm.c
···
 	.close = drm_vm_close,
 };

-/**
- * \c open method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Create a new drm_vma_entry structure as the \p vma private data entry and
- * add it to drm_device::vmalist.
- */
-void drm_vm_open_locked(struct drm_device *dev,
-			struct vm_area_struct *vma)
+static void drm_vm_open_locked(struct drm_device *dev,
+			       struct vm_area_struct *vma)
 {
 	struct drm_vma_entry *vma_entry;

···
 	mutex_unlock(&dev->struct_mutex);
 }

-void drm_vm_close_locked(struct drm_device *dev,
-			 struct vm_area_struct *vma)
+static void drm_vm_close_locked(struct drm_device *dev,
+				struct vm_area_struct *vma)
 {
 	struct drm_vma_entry *pt, *temp;
+10 -2
drivers/gpu/drm/i915/i915_debugfs.c
···

 	seq_putc(m, '\n');
 	print_batch_pool_stats(m, dev_priv);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	mutex_lock(&dev->filelist_mutex);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct file_stats stats;
 		struct task_struct *task;
···
 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 		rcu_read_unlock();
 	}
-
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->filelist_mutex);

 	return 0;
 }
···
 	else if (INTEL_INFO(dev)->gen >= 6)
 		gen6_ppgtt_info(m, dev);

+	mutex_lock(&dev->filelist_mutex);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
 		struct task_struct *task;
···
 		idr_for_each(&file_priv->context_idr, per_file_ctx,
 			     (void *)(unsigned long)m);
 	}
+	mutex_unlock(&dev->filelist_mutex);

 out_put:
 	intel_runtime_pm_put(dev_priv);
···
 		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+	mutex_lock(&dev->filelist_mutex);
 	spin_lock(&dev_priv->rps.client_lock);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
···
 		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
 	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
 	spin_unlock(&dev_priv->rps.client_lock);
+	mutex_unlock(&dev->filelist_mutex);

 	return 0;
 }
+23 -58
drivers/gpu/drm/i915/intel_dp.c
···
 }

 /*
- * Native read with retry for link status and receiver capability reads for
- * cases where the sink may still be asleep.
- *
- * Sinks are *supposed* to come up within 1ms from an off state, but we're also
- * supposed to retry 3 times per the spec.
- */
-static ssize_t
-intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
-			void *buffer, size_t size)
-{
-	ssize_t ret;
-	int i;
-
-	/*
-	 * Sometime we just get the same incorrect byte repeated
-	 * over the entire buffer. Doing just one throw away read
-	 * initially seems to "solve" it.
-	 */
-	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
-
-	for (i = 0; i < 3; i++) {
-		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
-		if (ret == size)
-			return ret;
-		msleep(1);
-	}
-
-	return ret;
-}
-
-/*
  * Fetch AUX CH registers 0x202 - 0x207 which contain
  * link status information
  */
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
-	return intel_dp_dpcd_read_wake(&intel_dp->aux,
-				       DP_LANE0_1_STATUS,
-				       link_status,
-				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
+	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
+				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
 }

 /* These are source-specific values. */
···
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint8_t rev;

-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
-				    sizeof(intel_dp->dpcd)) < 0)
+	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
+			     sizeof(intel_dp->dpcd)) < 0)
 		return false; /* aux transfer failed */

 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
···
 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
 		return false; /* DPCD not present */

-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
-				    &intel_dp->sink_count, 1) < 0)
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
+			     &intel_dp->sink_count, 1) < 0)
 		return false;

 	/*
···
 	/* Check if the panel supports PSR */
 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
 	if (is_edp(intel_dp)) {
-		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
-					intel_dp->psr_dpcd,
-					sizeof(intel_dp->psr_dpcd));
+		drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
+				 intel_dp->psr_dpcd,
+				 sizeof(intel_dp->psr_dpcd));
 		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
 			dev_priv->psr.sink_support = true;
 			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
···
 			uint8_t frame_sync_cap;

 			dev_priv->psr.sink_support = true;
-			intel_dp_dpcd_read_wake(&intel_dp->aux,
-					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
-					&frame_sync_cap, 1);
+			drm_dp_dpcd_read(&intel_dp->aux,
+					 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+					 &frame_sync_cap, 1);
 			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
 			/* PSR2 needs frame sync as well */
 			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
···
 	/* Intermediate frequency support */
 	if (is_edp(intel_dp) &&
 	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
-	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
+	    (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
 	    (rev >= 0x03)) { /* eDp v1.4 or higher */
 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
 		int i;

-		intel_dp_dpcd_read_wake(&intel_dp->aux,
-				DP_SUPPORTED_LINK_RATES,
-				sink_rates,
-				sizeof(sink_rates));
+		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+				 sink_rates, sizeof(sink_rates));

 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
 			int val = le16_to_cpu(sink_rates[i]);
···
 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
 		return true; /* no per-port downstream info */

-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
-				    intel_dp->downstream_ports,
-				    DP_MAX_DOWNSTREAM_PORTS) < 0)
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
+			     intel_dp->downstream_ports,
+			     DP_MAX_DOWNSTREAM_PORTS) < 0)
 		return false; /* downstream port status fetch failed */

 	return true;
···
 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
 		return;

-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
 			      buf[0], buf[1], buf[2]);

-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
 			      buf[0], buf[1], buf[2]);
 }
···
 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
 		return false;

-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
 		if (buf[0] & DP_MST_CAP) {
 			DRM_DEBUG_KMS("Sink is MST capable\n");
 			intel_dp->is_mst = true;
···
 static bool
 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {
-	return intel_dp_dpcd_read_wake(&intel_dp->aux,
+	return drm_dp_dpcd_read(&intel_dp->aux,
 				       DP_DEVICE_SERVICE_IRQ_VECTOR,
 				       sink_irq_vector, 1) == 1;
 }
···
 {
 	int ret;

-	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
+	ret = drm_dp_dpcd_read(&intel_dp->aux,
 				      DP_SINK_COUNT_ESI,
 				      sink_irq_vector, 14);
 	if (ret != 14)
+1
include/drm/drmP.h
···
 	atomic_t buf_alloc;		/**< Buffer allocation in progress */
 	/*@} */

+	struct mutex filelist_mutex;
 	struct list_head filelist;

 	/** \name Memory management */
+2 -2
include/drm/drm_agpsupport.h
···
 			   uint32_t type);

 struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-void drm_agp_clear(struct drm_device *dev);
+void drm_legacy_agp_clear(struct drm_device *dev);
 int drm_agp_acquire(struct drm_device *dev);
 int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv);
···
 	return NULL;
 }

-static inline void drm_agp_clear(struct drm_device *dev)
+static inline void drm_legacy_agp_clear(struct drm_device *dev)
 {
 }
+3 -1
include/drm/drm_legacy.h
···
 int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
 		      unsigned int size, enum drm_map_type type,
 		      enum drm_map_flags flags, struct drm_local_map **map_p);
-int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
+void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
 int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
+void drm_legacy_master_rmmaps(struct drm_device *dev,
+			      struct drm_master *master);
 struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
 int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);