Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/vmwgfx: Fix the driver for large dma addresses

With dma compliance / IOMMU support added to the driver in kernel 3.13,
the dma addresses can exceed 44 bits, which is what we support in
32-bit mode and with GMR1.
So in 32-bit mode and optionally in 64-bit mode, restrict the dma
addresses to 44 bits, and strip the old GMR1 code.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Cc: stable@vger.kernel.org

+39 -169
+36 -11
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 189 189 static int vmw_force_iommu; 190 190 static int vmw_restrict_iommu; 191 191 static int vmw_force_coherent; 192 + static int vmw_restrict_dma_mask; 192 193 193 194 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 194 195 static void vmw_master_init(struct vmw_master *); ··· 204 203 module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); 205 204 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); 206 205 module_param_named(force_coherent, vmw_force_coherent, int, 0600); 206 + MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); 207 + module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); 207 208 208 209 209 210 static void vmw_print_capabilities(uint32_t capabilities) ··· 513 510 return 0; 514 511 } 515 512 513 + /** 514 + * vmw_dma_masks - set required page- and dma masks 515 + * 516 + * @dev_priv: Pointer to struct vmw_private 517 + * 518 + * With 32-bit we can only handle 32 bit PFNs. Optionally set that 519 + * restriction also for 64-bit systems. 
520 + */ 521 + #ifdef CONFIG_INTEL_IOMMU 522 + static int vmw_dma_masks(struct vmw_private *dev_priv) 523 + { 524 + struct drm_device *dev = dev_priv->dev; 525 + 526 + if (intel_iommu_enabled && 527 + (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { 528 + DRM_INFO("Restricting DMA addresses to 44 bits.\n"); 529 + return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); 530 + } 531 + return 0; 532 + } 533 + #else 534 + static int vmw_dma_masks(struct vmw_private *dev_priv) 535 + { 536 + return 0; 537 + } 538 + #endif 539 + 516 540 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 517 541 { 518 542 struct vmw_private *dev_priv; ··· 608 578 609 579 vmw_get_initial_size(dev_priv); 610 580 611 - if (dev_priv->capabilities & SVGA_CAP_GMR) { 612 - dev_priv->max_gmr_descriptors = 613 - vmw_read(dev_priv, 614 - SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH); 581 + if (dev_priv->capabilities & SVGA_CAP_GMR2) { 615 582 dev_priv->max_gmr_ids = 616 583 vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); 617 - } 618 - if (dev_priv->capabilities & SVGA_CAP_GMR2) { 619 584 dev_priv->max_gmr_pages = 620 585 vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); 621 586 dev_priv->memory_size = ··· 624 599 dev_priv->memory_size = 512*1024*1024; 625 600 } 626 601 602 + ret = vmw_dma_masks(dev_priv); 603 + if (unlikely(ret != 0)) 604 + goto out_err0; 605 + 627 606 mutex_unlock(&dev_priv->hw_mutex); 628 607 629 608 vmw_print_capabilities(dev_priv->capabilities); 630 609 631 - if (dev_priv->capabilities & SVGA_CAP_GMR) { 610 + if (dev_priv->capabilities & SVGA_CAP_GMR2) { 632 611 DRM_INFO("Max GMR ids is %u\n", 633 612 (unsigned)dev_priv->max_gmr_ids); 634 - DRM_INFO("Max GMR descriptors is %u\n", 635 - (unsigned)dev_priv->max_gmr_descriptors); 636 - } 637 - if (dev_priv->capabilities & SVGA_CAP_GMR2) { 638 613 DRM_INFO("Max number of GMR pages is %u\n", 639 614 (unsigned)dev_priv->max_gmr_pages); 640 615 DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
-1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 290 290 __le32 __iomem *mmio_virt; 291 291 int mmio_mtrr; 292 292 uint32_t capabilities; 293 - uint32_t max_gmr_descriptors; 294 293 uint32_t max_gmr_ids; 295 294 uint32_t max_gmr_pages; 296 295 uint32_t memory_size;
+3 -157
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
··· 125 125 } 126 126 127 127 128 - static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma, 129 - struct list_head *desc_pages) 130 - { 131 - struct page *page, *next; 132 - struct svga_guest_mem_descriptor *page_virtual; 133 - unsigned int desc_per_page = PAGE_SIZE / 134 - sizeof(struct svga_guest_mem_descriptor) - 1; 135 - 136 - if (list_empty(desc_pages)) 137 - return; 138 - 139 - list_for_each_entry_safe(page, next, desc_pages, lru) { 140 - list_del_init(&page->lru); 141 - 142 - if (likely(desc_dma != DMA_ADDR_INVALID)) { 143 - dma_unmap_page(dev, desc_dma, PAGE_SIZE, 144 - DMA_TO_DEVICE); 145 - } 146 - 147 - page_virtual = kmap_atomic(page); 148 - desc_dma = (dma_addr_t) 149 - le32_to_cpu(page_virtual[desc_per_page].ppn) << 150 - PAGE_SHIFT; 151 - kunmap_atomic(page_virtual); 152 - 153 - __free_page(page); 154 - } 155 - } 156 - 157 - /** 158 - * FIXME: Adjust to the ttm lowmem / highmem storage to minimize 159 - * the number of used descriptors. 160 - * 161 - */ 162 - 163 - static int vmw_gmr_build_descriptors(struct device *dev, 164 - struct list_head *desc_pages, 165 - struct vmw_piter *iter, 166 - unsigned long num_pages, 167 - dma_addr_t *first_dma) 168 - { 169 - struct page *page; 170 - struct svga_guest_mem_descriptor *page_virtual = NULL; 171 - struct svga_guest_mem_descriptor *desc_virtual = NULL; 172 - unsigned int desc_per_page; 173 - unsigned long prev_pfn; 174 - unsigned long pfn; 175 - int ret; 176 - dma_addr_t desc_dma; 177 - 178 - desc_per_page = PAGE_SIZE / 179 - sizeof(struct svga_guest_mem_descriptor) - 1; 180 - 181 - while (likely(num_pages != 0)) { 182 - page = alloc_page(__GFP_HIGHMEM); 183 - if (unlikely(page == NULL)) { 184 - ret = -ENOMEM; 185 - goto out_err; 186 - } 187 - 188 - list_add_tail(&page->lru, desc_pages); 189 - page_virtual = kmap_atomic(page); 190 - desc_virtual = page_virtual - 1; 191 - prev_pfn = ~(0UL); 192 - 193 - while (likely(num_pages != 0)) { 194 - pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT; 
195 - 196 - if (pfn != prev_pfn + 1) { 197 - 198 - if (desc_virtual - page_virtual == 199 - desc_per_page - 1) 200 - break; 201 - 202 - (++desc_virtual)->ppn = cpu_to_le32(pfn); 203 - desc_virtual->num_pages = cpu_to_le32(1); 204 - } else { 205 - uint32_t tmp = 206 - le32_to_cpu(desc_virtual->num_pages); 207 - desc_virtual->num_pages = cpu_to_le32(tmp + 1); 208 - } 209 - prev_pfn = pfn; 210 - --num_pages; 211 - vmw_piter_next(iter); 212 - } 213 - 214 - (++desc_virtual)->ppn = DMA_PAGE_INVALID; 215 - desc_virtual->num_pages = cpu_to_le32(0); 216 - kunmap_atomic(page_virtual); 217 - } 218 - 219 - desc_dma = 0; 220 - list_for_each_entry_reverse(page, desc_pages, lru) { 221 - page_virtual = kmap_atomic(page); 222 - page_virtual[desc_per_page].ppn = cpu_to_le32 223 - (desc_dma >> PAGE_SHIFT); 224 - kunmap_atomic(page_virtual); 225 - desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE, 226 - DMA_TO_DEVICE); 227 - 228 - if (unlikely(dma_mapping_error(dev, desc_dma))) 229 - goto out_err; 230 - } 231 - *first_dma = desc_dma; 232 - 233 - return 0; 234 - out_err: 235 - vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages); 236 - return ret; 237 - } 238 - 239 - static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv, 240 - int gmr_id, dma_addr_t desc_dma) 241 - { 242 - mutex_lock(&dev_priv->hw_mutex); 243 - 244 - vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); 245 - wmb(); 246 - vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT); 247 - mb(); 248 - 249 - mutex_unlock(&dev_priv->hw_mutex); 250 - 251 - } 252 - 253 128 int vmw_gmr_bind(struct vmw_private *dev_priv, 254 129 const struct vmw_sg_table *vsgt, 255 130 unsigned long num_pages, 256 131 int gmr_id) 257 132 { 258 - struct list_head desc_pages; 259 - dma_addr_t desc_dma = 0; 260 - struct device *dev = dev_priv->dev->dev; 261 133 struct vmw_piter data_iter; 262 - int ret; 263 134 264 135 vmw_piter_start(&data_iter, vsgt, 0); 265 136 266 137 if (unlikely(!vmw_piter_next(&data_iter))) 267 138 return 
0; 268 139 269 - if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) 270 - return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); 271 - 272 - if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR))) 140 + if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2))) 273 141 return -EINVAL; 274 142 275 - if (vsgt->num_regions > dev_priv->max_gmr_descriptors) 276 - return -EINVAL; 277 - 278 - INIT_LIST_HEAD(&desc_pages); 279 - 280 - ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter, 281 - num_pages, &desc_dma); 282 - if (unlikely(ret != 0)) 283 - return ret; 284 - 285 - vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma); 286 - vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages); 287 - 288 - return 0; 143 + return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); 289 144 } 290 145 291 146 292 147 void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) 293 148 { 294 - if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) { 149 + if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) 295 150 vmw_gmr2_unbind(dev_priv, gmr_id); 296 - return; 297 - } 298 - 299 - mutex_lock(&dev_priv->hw_mutex); 300 - vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); 301 - wmb(); 302 - vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0); 303 - mb(); 304 - mutex_unlock(&dev_priv->hw_mutex); 305 151 }