Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: make mappable struct resource centric

Now that we are using struct resource to track the stolen region, it is
more convenient if we track the mappable region in a resource as well.

v2: prefer iomap and gmadr naming scheme
prefer DEFINE_RES_MEM

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171211151822.20953-8-matthew.auld@intel.com

Authored by Matthew Auld and committed by Joonas Lahtinen
73ebd503 17a05345

+37 -28
+1 -1
drivers/gpu/drm/i915/gvt/gvt.h
··· 348 348 349 349 /* Aperture/GM space definitions for GVT device */ 350 350 #define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end) 351 - #define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base) 351 + #define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start) 352 352 353 353 #define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total) 354 354 #define gvt_ggtt_sz(gvt) \
+1 -1
drivers/gpu/drm/i915/i915_drv.c
··· 726 726 if (!ap) 727 727 return -ENOMEM; 728 728 729 - ap->ranges[0].base = ggtt->mappable_base; 729 + ap->ranges[0].base = ggtt->gmadr.start; 730 730 ap->ranges[0].size = ggtt->mappable_end; 731 731 732 732 primary =
+4 -4
drivers/gpu/drm/i915/i915_gem.c
··· 1116 1116 page_base += offset & PAGE_MASK; 1117 1117 } 1118 1118 1119 - if (gtt_user_read(&ggtt->mappable, page_base, page_offset, 1119 + if (gtt_user_read(&ggtt->iomap, page_base, page_offset, 1120 1120 user_data, page_length)) { 1121 1121 ret = -EFAULT; 1122 1122 break; ··· 1324 1324 * If the object is non-shmem backed, we retry again with the 1325 1325 * path that handles page fault. 1326 1326 */ 1327 - if (ggtt_write(&ggtt->mappable, page_base, page_offset, 1327 + if (ggtt_write(&ggtt->iomap, page_base, page_offset, 1328 1328 user_data, page_length)) { 1329 1329 ret = -EFAULT; 1330 1330 break; ··· 1967 1967 /* Finally, remap it using the new GTT offset */ 1968 1968 ret = remap_io_mapping(area, 1969 1969 area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT), 1970 - (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT, 1970 + (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, 1971 1971 min_t(u64, vma->size, area->vm_end - area->vm_start), 1972 - &ggtt->mappable); 1972 + &ggtt->iomap); 1973 1973 if (ret) 1974 1974 goto err_fence; 1975 1975
+1 -1
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 1012 1012 offset += page << PAGE_SHIFT; 1013 1013 } 1014 1014 1015 - vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable, 1015 + vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap, 1016 1016 offset); 1017 1017 cache->page = page; 1018 1018 cache->vaddr = (unsigned long)vaddr;
+19 -10
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 2912 2912 mutex_unlock(&dev_priv->drm.struct_mutex); 2913 2913 2914 2914 arch_phys_wc_del(ggtt->mtrr); 2915 - io_mapping_fini(&ggtt->mappable); 2915 + io_mapping_fini(&ggtt->iomap); 2916 2916 } 2917 2917 2918 2918 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) ··· 3288 3288 int err; 3289 3289 3290 3290 /* TODO: We're not aware of mappable constraints on gen8 yet */ 3291 - ggtt->mappable_base = pci_resource_start(pdev, 2); 3292 - ggtt->mappable_end = pci_resource_len(pdev, 2); 3291 + ggtt->gmadr = 3292 + (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2), 3293 + pci_resource_len(pdev, 2)); 3294 + ggtt->mappable_end = resource_size(&ggtt->gmadr); 3293 3295 3294 3296 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); 3295 3297 if (!err) ··· 3345 3343 u16 snb_gmch_ctl; 3346 3344 int err; 3347 3345 3348 - ggtt->mappable_base = pci_resource_start(pdev, 2); 3349 - ggtt->mappable_end = pci_resource_len(pdev, 2); 3346 + ggtt->gmadr = 3347 + (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2), 3348 + pci_resource_len(pdev, 2)); 3349 + ggtt->mappable_end = resource_size(&ggtt->gmadr); 3350 3350 3351 3351 /* 64/512MB is the current min/max we actually know of, but this is just 3352 3352 * a coarse sanity check. ··· 3401 3397 static int i915_gmch_probe(struct i915_ggtt *ggtt) 3402 3398 { 3403 3399 struct drm_i915_private *dev_priv = ggtt->base.i915; 3400 + phys_addr_t gmadr_base; 3404 3401 int ret; 3405 3402 3406 3403 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL); ··· 3411 3406 } 3412 3407 3413 3408 intel_gtt_get(&ggtt->base.total, 3414 - &ggtt->mappable_base, 3409 + &gmadr_base, 3415 3410 &ggtt->mappable_end); 3411 + 3412 + ggtt->gmadr = 3413 + (struct resource) DEFINE_RES_MEM(gmadr_base, 3414 + ggtt->mappable_end); 3416 3415 3417 3416 ggtt->do_idle_maps = needs_idle_maps(dev_priv); 3418 3417 ggtt->base.insert_page = i915_ggtt_insert_page; ··· 3485 3476 /* GMADR is the PCI mmio aperture into the global GTT. 
*/ 3486 3477 DRM_INFO("Memory usable by graphics device = %lluM\n", 3487 3478 ggtt->base.total >> 20); 3488 - DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20); 3479 + DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20); 3489 3480 DRM_DEBUG_DRIVER("GTT stolen size = %lluM\n", 3490 3481 (u64)resource_size(&intel_graphics_stolen_res) >> 20); 3491 3482 if (intel_vtd_active()) ··· 3516 3507 ggtt->base.mm.color_adjust = i915_gtt_color_adjust; 3517 3508 mutex_unlock(&dev_priv->drm.struct_mutex); 3518 3509 3519 - if (!io_mapping_init_wc(&dev_priv->ggtt.mappable, 3520 - dev_priv->ggtt.mappable_base, 3510 + if (!io_mapping_init_wc(&dev_priv->ggtt.iomap, 3511 + dev_priv->ggtt.gmadr.start, 3521 3512 dev_priv->ggtt.mappable_end)) { 3522 3513 ret = -EIO; 3523 3514 goto out_gtt_cleanup; 3524 3515 } 3525 3516 3526 - ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end); 3517 + ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end); 3527 3518 3528 3519 /* 3529 3520 * Initialise stolen early so that we may reserve preallocated
+2 -2
drivers/gpu/drm/i915/i915_gem_gtt.h
··· 368 368 */ 369 369 struct i915_ggtt { 370 370 struct i915_address_space base; 371 - struct io_mapping mappable; /* Mapping to our CPU mappable region */ 372 371 373 - phys_addr_t mappable_base; /* PA of our GMADR */ 372 + struct io_mapping iomap; /* Mapping to our CPU mappable region */ 373 + struct resource gmadr; /* GMADR resource */ 374 374 u64 mappable_end; /* End offset that we can CPU map */ 375 375 376 376 /* Stolen memory is segmented in hardware with different portions
+1 -1
drivers/gpu/drm/i915/i915_gpu_error.c
··· 956 956 ggtt->base.insert_page(&ggtt->base, dma, slot, 957 957 I915_CACHE_NONE, 0); 958 958 959 - s = io_mapping_map_atomic_wc(&ggtt->mappable, slot); 959 + s = io_mapping_map_atomic_wc(&ggtt->iomap, slot); 960 960 ret = compress_page(&compress, (void __force *)s, dst); 961 961 io_mapping_unmap_atomic(s); 962 962
+1 -1
drivers/gpu/drm/i915/i915_vma.c
··· 311 311 312 312 ptr = vma->iomap; 313 313 if (ptr == NULL) { 314 - ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable, 314 + ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap, 315 315 vma->node.start, 316 316 vma->node.size); 317 317 if (ptr == NULL) {
+1 -1
drivers/gpu/drm/i915/intel_display.c
··· 14595 14595 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT; 14596 14596 } 14597 14597 14598 - dev->mode_config.fb_base = ggtt->mappable_base; 14598 + dev->mode_config.fb_base = ggtt->gmadr.start; 14599 14599 14600 14600 DRM_DEBUG_KMS("%d display pipe%s available.\n", 14601 14601 INTEL_INFO(dev_priv)->num_pipes,
+2 -2
drivers/gpu/drm/i915/intel_overlay.c
··· 219 219 if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) 220 220 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; 221 221 else 222 - regs = io_mapping_map_wc(&dev_priv->ggtt.mappable, 222 + regs = io_mapping_map_wc(&dev_priv->ggtt.iomap, 223 223 overlay->flip_addr, 224 224 PAGE_SIZE); 225 225 ··· 1508 1508 regs = (struct overlay_registers __iomem *) 1509 1509 overlay->reg_bo->phys_handle->vaddr; 1510 1510 else 1511 - regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable, 1511 + regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap, 1512 1512 overlay->flip_addr); 1513 1513 1514 1514 return regs;
+2 -2
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
··· 1074 1074 i915_gem_object_get_dma_address(obj, 0), 1075 1075 offset, I915_CACHE_NONE, 0); 1076 1076 1077 - vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset); 1077 + vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset); 1078 1078 iowrite32(n, vaddr + n); 1079 1079 io_mapping_unmap_atomic(vaddr); 1080 1080 ··· 1092 1092 i915_gem_object_get_dma_address(obj, 0), 1093 1093 offset, I915_CACHE_NONE, 0); 1094 1094 1095 - vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset); 1095 + vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset); 1096 1096 val = ioread32(vaddr + n); 1097 1097 io_mapping_unmap_atomic(vaddr); 1098 1098
+2 -2
drivers/gpu/drm/i915/selftests/mock_gtt.c
··· 110 110 111 111 ggtt->base.i915 = i915; 112 112 113 - ggtt->mappable_base = 0; 114 - ggtt->mappable_end = 2048 * PAGE_SIZE; 113 + ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE); 114 + ggtt->mappable_end = resource_size(&ggtt->gmadr); 115 115 ggtt->base.total = 4096 * PAGE_SIZE; 116 116 117 117 ggtt->base.clear_range = nop_clear_range;