Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/vmwgfx: Kill unneeded legacy security features

At one point, the GPU command verifier and user-space handle manager
couldn't properly protect GPU clients from accessing each other's data.
Instead there was an elaborate mechanism to make sure only the active
master's primary clients could render. The other clients were either
put to sleep or even killed (if the master had exited). VRAM was
evicted on master switch. With the advent of render-node functionality,
we relaxed the VRAM eviction, but the other mechanisms stayed in place.

Now that the GPU command verifier and ttm object manager properly
isolate primary clients from different master realms we can remove the
master switch related code and drop those legacy features.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>

+3 -312
-100
drivers/gpu/drm/vmwgfx/ttm_lock.c
··· 29 29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 30 30 */ 31 31 32 - #include <drm/ttm/ttm_module.h> 33 32 #include <linux/atomic.h> 34 33 #include <linux/errno.h> 35 34 #include <linux/wait.h> ··· 48 49 init_waitqueue_head(&lock->queue); 49 50 lock->rw = 0; 50 51 lock->flags = 0; 51 - lock->kill_takers = false; 52 - lock->signal = SIGKILL; 53 52 } 54 53 55 54 void ttm_read_unlock(struct ttm_lock *lock) ··· 63 66 bool locked = false; 64 67 65 68 spin_lock(&lock->lock); 66 - if (unlikely(lock->kill_takers)) { 67 - send_sig(lock->signal, current, 0); 68 - spin_unlock(&lock->lock); 69 - return false; 70 - } 71 69 if (lock->rw >= 0 && lock->flags == 0) { 72 70 ++lock->rw; 73 71 locked = true; ··· 90 98 *locked = false; 91 99 92 100 spin_lock(&lock->lock); 93 - if (unlikely(lock->kill_takers)) { 94 - send_sig(lock->signal, current, 0); 95 - spin_unlock(&lock->lock); 96 - return false; 97 - } 98 101 if (lock->rw >= 0 && lock->flags == 0) { 99 102 ++lock->rw; 100 103 block = false; ··· 134 147 bool locked = false; 135 148 136 149 spin_lock(&lock->lock); 137 - if (unlikely(lock->kill_takers)) { 138 - send_sig(lock->signal, current, 0); 139 - spin_unlock(&lock->lock); 140 - return false; 141 - } 142 150 if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) { 143 151 lock->rw = -1; 144 152 lock->flags &= ~TTM_WRITE_LOCK_PENDING; ··· 162 180 wait_event(lock->queue, __ttm_write_lock(lock)); 163 181 164 182 return ret; 165 - } 166 - 167 - static int __ttm_vt_unlock(struct ttm_lock *lock) 168 - { 169 - int ret = 0; 170 - 171 - spin_lock(&lock->lock); 172 - if (unlikely(!(lock->flags & TTM_VT_LOCK))) 173 - ret = -EINVAL; 174 - lock->flags &= ~TTM_VT_LOCK; 175 - wake_up_all(&lock->queue); 176 - spin_unlock(&lock->lock); 177 - 178 - return ret; 179 - } 180 - 181 - static void ttm_vt_lock_remove(struct ttm_base_object **p_base) 182 - { 183 - struct ttm_base_object *base = *p_base; 184 - struct ttm_lock *lock = container_of(base, struct ttm_lock, 
base); 185 - int ret; 186 - 187 - *p_base = NULL; 188 - ret = __ttm_vt_unlock(lock); 189 - BUG_ON(ret != 0); 190 - } 191 - 192 - static bool __ttm_vt_lock(struct ttm_lock *lock) 193 - { 194 - bool locked = false; 195 - 196 - spin_lock(&lock->lock); 197 - if (lock->rw == 0) { 198 - lock->flags &= ~TTM_VT_LOCK_PENDING; 199 - lock->flags |= TTM_VT_LOCK; 200 - locked = true; 201 - } else { 202 - lock->flags |= TTM_VT_LOCK_PENDING; 203 - } 204 - spin_unlock(&lock->lock); 205 - return locked; 206 - } 207 - 208 - int ttm_vt_lock(struct ttm_lock *lock, 209 - bool interruptible, 210 - struct ttm_object_file *tfile) 211 - { 212 - int ret = 0; 213 - 214 - if (interruptible) { 215 - ret = wait_event_interruptible(lock->queue, 216 - __ttm_vt_lock(lock)); 217 - if (unlikely(ret != 0)) { 218 - spin_lock(&lock->lock); 219 - lock->flags &= ~TTM_VT_LOCK_PENDING; 220 - wake_up_all(&lock->queue); 221 - spin_unlock(&lock->lock); 222 - return ret; 223 - } 224 - } else 225 - wait_event(lock->queue, __ttm_vt_lock(lock)); 226 - 227 - /* 228 - * Add a base-object, the destructor of which will 229 - * make sure the lock is released if the client dies 230 - * while holding it. 231 - */ 232 - 233 - ret = ttm_base_object_init(tfile, &lock->base, false, 234 - ttm_lock_type, &ttm_vt_lock_remove, NULL); 235 - if (ret) 236 - (void)__ttm_vt_unlock(lock); 237 - else 238 - lock->vt_holder = tfile; 239 - 240 - return ret; 241 - } 242 - 243 - int ttm_vt_unlock(struct ttm_lock *lock) 244 - { 245 - return ttm_ref_object_base_unref(lock->vt_holder, 246 - lock->base.handle, TTM_REF_USAGE); 247 183 } 248 184 249 185 void ttm_suspend_unlock(struct ttm_lock *lock)
-30
drivers/gpu/drm/vmwgfx/ttm_lock.h
··· 63 63 * @lock: Spinlock protecting some lock members. 64 64 * @rw: Read-write lock counter. Protected by @lock. 65 65 * @flags: Lock state. Protected by @lock. 66 - * @kill_takers: Boolean whether to kill takers of the lock. 67 - * @signal: Signal to send when kill_takers is true. 68 66 */ 69 67 70 68 struct ttm_lock { ··· 71 73 spinlock_t lock; 72 74 int32_t rw; 73 75 uint32_t flags; 74 - bool kill_takers; 75 - int signal; 76 - struct ttm_object_file *vt_holder; 77 76 }; 78 77 79 78 ··· 214 219 * -ERESTARTSYS If interrupted by a signal and interruptible is true. 215 220 */ 216 221 extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); 217 - 218 - /** 219 - * ttm_lock_set_kill 220 - * 221 - * @lock: Pointer to a struct ttm_lock 222 - * @val: Boolean whether to kill processes taking the lock. 223 - * @signal: Signal to send to the process taking the lock. 224 - * 225 - * The kill-when-taking-lock functionality is used to kill processes that keep 226 - * on using the TTM functionality when its resources has been taken down, for 227 - * example when the X server exits. A typical sequence would look like this: 228 - * - X server takes lock in write mode. 229 - * - ttm_lock_set_kill() is called with @val set to true. 230 - * - As part of X server exit, TTM resources are taken down. 231 - * - X server releases the lock on file release. 232 - * - Another dri client wants to render, takes the lock and is killed. 233 - * 234 - */ 235 - static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, 236 - int signal) 237 - { 238 - lock->kill_takers = val; 239 - if (val) 240 - lock->signal = signal; 241 - } 242 222 243 223 #endif
+2 -161
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 254 254 static int vmw_assume_16bpp; 255 255 256 256 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 257 - static void vmw_master_init(struct vmw_master *); 258 257 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 259 258 void *ptr); 260 259 ··· 763 764 DRM_INFO("MMIO at 0x%08x size is %u kiB\n", 764 765 dev_priv->mmio_start, dev_priv->mmio_size / 1024); 765 766 766 - vmw_master_init(&dev_priv->fbdev_master); 767 - ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 768 - dev_priv->active_master = &dev_priv->fbdev_master; 769 - 770 767 dev_priv->mmio_virt = memremap(dev_priv->mmio_start, 771 768 dev_priv->mmio_size, MEMREMAP_WB); 772 769 ··· 1004 1009 static void vmw_postclose(struct drm_device *dev, 1005 1010 struct drm_file *file_priv) 1006 1011 { 1007 - struct vmw_fpriv *vmw_fp; 1008 - 1009 - vmw_fp = vmw_fpriv(file_priv); 1010 - 1011 - if (vmw_fp->locked_master) { 1012 - struct vmw_master *vmaster = 1013 - vmw_master(vmw_fp->locked_master); 1014 - 1015 - ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 1016 - ttm_vt_unlock(&vmaster->lock); 1017 - drm_master_put(&vmw_fp->locked_master); 1018 - } 1012 + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1019 1013 1020 1014 ttm_object_file_release(&vmw_fp->tfile); 1021 1015 kfree(vmw_fp); ··· 1033 1049 return ret; 1034 1050 } 1035 1051 1036 - static struct vmw_master *vmw_master_check(struct drm_device *dev, 1037 - struct drm_file *file_priv, 1038 - unsigned int flags) 1039 - { 1040 - int ret; 1041 - struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1042 - struct vmw_master *vmaster; 1043 - 1044 - if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH)) 1045 - return NULL; 1046 - 1047 - ret = mutex_lock_interruptible(&dev->master_mutex); 1048 - if (unlikely(ret != 0)) 1049 - return ERR_PTR(-ERESTARTSYS); 1050 - 1051 - if (drm_is_current_master(file_priv)) { 1052 - mutex_unlock(&dev->master_mutex); 1053 - return NULL; 1054 - } 1055 - 1056 - /* 
1057 - * Check if we were previously master, but now dropped. In that 1058 - * case, allow at least render node functionality. 1059 - */ 1060 - if (vmw_fp->locked_master) { 1061 - mutex_unlock(&dev->master_mutex); 1062 - 1063 - if (flags & DRM_RENDER_ALLOW) 1064 - return NULL; 1065 - 1066 - DRM_ERROR("Dropped master trying to access ioctl that " 1067 - "requires authentication.\n"); 1068 - return ERR_PTR(-EACCES); 1069 - } 1070 - mutex_unlock(&dev->master_mutex); 1071 - 1072 - /* 1073 - * Take the TTM lock. Possibly sleep waiting for the authenticating 1074 - * master to become master again, or for a SIGTERM if the 1075 - * authenticating master exits. 1076 - */ 1077 - vmaster = vmw_master(file_priv->master); 1078 - ret = ttm_read_lock(&vmaster->lock, true); 1079 - if (unlikely(ret != 0)) 1080 - vmaster = ERR_PTR(ret); 1081 - 1082 - return vmaster; 1083 - } 1084 - 1085 1052 static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, 1086 1053 unsigned long arg, 1087 1054 long (*ioctl_func)(struct file *, unsigned int, ··· 1041 1106 struct drm_file *file_priv = filp->private_data; 1042 1107 struct drm_device *dev = file_priv->minor->dev; 1043 1108 unsigned int nr = DRM_IOCTL_NR(cmd); 1044 - struct vmw_master *vmaster; 1045 1109 unsigned int flags; 1046 - long ret; 1047 1110 1048 1111 /* 1049 1112 * Do extra checking on driver private ioctls. 
··· 1067 1134 } else if (!drm_ioctl_flags(nr, &flags)) 1068 1135 return -EINVAL; 1069 1136 1070 - vmaster = vmw_master_check(dev, file_priv, flags); 1071 - if (IS_ERR(vmaster)) { 1072 - ret = PTR_ERR(vmaster); 1073 - 1074 - if (ret != -ERESTARTSYS) 1075 - DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n", 1076 - nr, ret); 1077 - return ret; 1078 - } 1079 - 1080 - ret = ioctl_func(filp, cmd, arg); 1081 - if (vmaster) 1082 - ttm_read_unlock(&vmaster->lock); 1083 - 1084 - return ret; 1137 + return ioctl_func(filp, cmd, arg); 1085 1138 1086 1139 out_io_encoding: 1087 1140 DRM_ERROR("Invalid command format, ioctl %d\n", ··· 1090 1171 } 1091 1172 #endif 1092 1173 1093 - static void vmw_master_init(struct vmw_master *vmaster) 1094 - { 1095 - ttm_lock_init(&vmaster->lock); 1096 - } 1097 - 1098 - static int vmw_master_create(struct drm_device *dev, 1099 - struct drm_master *master) 1100 - { 1101 - struct vmw_master *vmaster; 1102 - 1103 - vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); 1104 - if (unlikely(!vmaster)) 1105 - return -ENOMEM; 1106 - 1107 - vmw_master_init(vmaster); 1108 - ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 1109 - master->driver_priv = vmaster; 1110 - 1111 - return 0; 1112 - } 1113 - 1114 - static void vmw_master_destroy(struct drm_device *dev, 1115 - struct drm_master *master) 1116 - { 1117 - struct vmw_master *vmaster = vmw_master(master); 1118 - 1119 - master->driver_priv = NULL; 1120 - kfree(vmaster); 1121 - } 1122 - 1123 1174 static int vmw_master_set(struct drm_device *dev, 1124 1175 struct drm_file *file_priv, 1125 1176 bool from_open) 1126 1177 { 1127 - struct vmw_private *dev_priv = vmw_priv(dev); 1128 - struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1129 - struct vmw_master *active = dev_priv->active_master; 1130 - struct vmw_master *vmaster = vmw_master(file_priv->master); 1131 - int ret = 0; 1132 - 1133 - if (active) { 1134 - BUG_ON(active != &dev_priv->fbdev_master); 1135 - ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); 1136 
- if (unlikely(ret != 0)) 1137 - return ret; 1138 - 1139 - ttm_lock_set_kill(&active->lock, true, SIGTERM); 1140 - dev_priv->active_master = NULL; 1141 - } 1142 - 1143 - ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); 1144 - if (!from_open) { 1145 - ttm_vt_unlock(&vmaster->lock); 1146 - BUG_ON(vmw_fp->locked_master != file_priv->master); 1147 - drm_master_put(&vmw_fp->locked_master); 1148 - } 1149 - 1150 - dev_priv->active_master = vmaster; 1151 - 1152 1178 /* 1153 1179 * Inform a new master that the layout may have changed while 1154 1180 * it was gone. ··· 1108 1244 struct drm_file *file_priv) 1109 1245 { 1110 1246 struct vmw_private *dev_priv = vmw_priv(dev); 1111 - struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1112 - struct vmw_master *vmaster = vmw_master(file_priv->master); 1113 - int ret; 1114 1247 1115 - /** 1116 - * Make sure the master doesn't disappear while we have 1117 - * it locked. 1118 - */ 1119 - 1120 - vmw_fp->locked_master = drm_master_get(file_priv->master); 1121 - ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); 1122 1248 vmw_kms_legacy_hotspot_clear(dev_priv); 1123 - if (unlikely((ret != 0))) { 1124 - DRM_ERROR("Unable to lock TTM at VT switch.\n"); 1125 - drm_master_put(&vmw_fp->locked_master); 1126 - } 1127 - 1128 - ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); 1129 - 1130 1249 if (!dev_priv->enable_fb) 1131 1250 vmw_svga_disable(dev_priv); 1132 - 1133 - dev_priv->active_master = &dev_priv->fbdev_master; 1134 - ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 1135 - ttm_vt_unlock(&dev_priv->fbdev_master.lock); 1136 1251 } 1137 1252 1138 1253 /** ··· 1389 1546 .disable_vblank = vmw_disable_vblank, 1390 1547 .ioctls = vmw_ioctls, 1391 1548 .num_ioctls = ARRAY_SIZE(vmw_ioctls), 1392 - .master_create = vmw_master_create, 1393 - .master_destroy = vmw_master_destroy, 1394 1549 .master_set = vmw_master_set, 1395 1550 .master_drop = vmw_master_drop, 1396 1551 .open = vmw_driver_open,
+1 -15
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 81 81 #define VMW_RES_SHADER ttm_driver_type4 82 82 83 83 struct vmw_fpriv { 84 - struct drm_master *locked_master; 85 84 struct ttm_object_file *tfile; 86 85 bool gb_aware; /* user-space is guest-backed aware */ 87 86 }; ··· 375 376 struct vmw_legacy_display; 376 377 struct vmw_overlay; 377 378 378 - struct vmw_master { 379 - struct ttm_lock lock; 380 - }; 381 - 382 379 struct vmw_vga_topology_state { 383 380 uint32_t width; 384 381 uint32_t height; ··· 532 537 spinlock_t svga_lock; 533 538 534 539 /** 535 - * Master management. 540 + * PM management. 536 541 */ 537 - 538 - struct vmw_master *active_master; 539 - struct vmw_master fbdev_master; 540 542 struct notifier_block pm_nb; 541 543 bool refuse_hibernation; 542 544 bool suspend_locked; ··· 597 605 static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv) 598 606 { 599 607 return (struct vmw_fpriv *)file_priv->driver_priv; 600 - } 601 - 602 - static inline struct vmw_master *vmw_master(struct drm_master *master) 603 - { 604 - return (struct vmw_master *) master->driver_priv; 605 608 } 606 609 607 610 /* ··· 998 1011 int vmw_kms_write_svga(struct vmw_private *vmw_priv, 999 1012 unsigned width, unsigned height, unsigned pitch, 1000 1013 unsigned bpp, unsigned depth); 1001 - void vmw_kms_idle_workqueues(struct vmw_master *vmaster); 1002 1014 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, 1003 1015 uint32_t pitch, 1004 1016 uint32_t height);
-6
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 915 915 if (unlikely(drm_is_render_client(file_priv))) 916 916 require_exist = true; 917 917 918 - if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) { 919 - DRM_ERROR("Locked master refused legacy " 920 - "surface reference.\n"); 921 - return -EACCES; 922 - } 923 - 924 918 handle = u_handle; 925 919 } 926 920