Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/gma500: Rename struct gtt_range to struct psb_gem_object

struct gtt_range represents a GEM object. Rename the structure to struct
psb_gem_object and update all users. No functional changes.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211015084053.13708-11-tzimmermann@suse.de

+123 -117
+4 -5
drivers/gpu/drm/gma500/framebuffer.c
··· 81 81 struct drm_framebuffer *fb = vma->vm_private_data; 82 82 struct drm_device *dev = fb->dev; 83 83 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 84 - struct gtt_range *gtt = to_gtt_range(fb->obj[0]); 84 + struct psb_gem_object *pobj = to_psb_gem_object(fb->obj[0]); 85 85 int page_num; 86 86 int i; 87 87 unsigned long address; 88 88 vm_fault_t ret = VM_FAULT_SIGBUS; 89 89 unsigned long pfn; 90 - unsigned long phys_addr = (unsigned long)dev_priv->stolen_base + 91 - gtt->offset; 90 + unsigned long phys_addr = (unsigned long)dev_priv->stolen_base + pobj->offset; 92 91 93 92 page_num = vma_pages(vma); 94 93 address = vmf->address - (vmf->pgoff << PAGE_SHIFT); ··· 241 242 struct drm_mode_fb_cmd2 mode_cmd; 242 243 int size; 243 244 int ret; 244 - struct gtt_range *backing; 245 + struct psb_gem_object *backing; 245 246 struct drm_gem_object *obj; 246 247 u32 bpp, depth; 247 248 ··· 263 264 backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE); 264 265 if (IS_ERR(backing)) 265 266 return PTR_ERR(backing); 266 - obj = &backing->gem; 267 + obj = &backing->base; 267 268 268 269 memset(dev_priv->vram_addr + backing->offset, 0, size); 269 270
+54 -52
drivers/gpu/drm/gma500/gem.c
··· 21 21 #include "gem.h" 22 22 #include "psb_drv.h" 23 23 24 - int psb_gem_pin(struct gtt_range *gt) 24 + int psb_gem_pin(struct psb_gem_object *pobj) 25 25 { 26 - struct drm_device *dev = gt->gem.dev; 26 + struct drm_gem_object *obj = &pobj->base; 27 + struct drm_device *dev = obj->dev; 27 28 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 28 29 u32 gpu_base = dev_priv->gtt.gatt_start; 29 30 struct page **pages; ··· 33 32 34 33 mutex_lock(&dev_priv->gtt_mutex); 35 34 36 - if (gt->in_gart || gt->stolen) 35 + if (pobj->in_gart || pobj->stolen) 37 36 goto out; /* already mapped */ 38 37 39 - pages = drm_gem_get_pages(&gt->gem); 38 + pages = drm_gem_get_pages(obj); 40 39 if (IS_ERR(pages)) { 41 40 ret = PTR_ERR(pages); 42 41 goto err_mutex_unlock; 43 42 } 44 43 45 - npages = gt->gem.size / PAGE_SIZE; 44 + npages = obj->size / PAGE_SIZE; 46 45 47 46 set_pages_array_wc(pages, npages); 48 47 49 - psb_gtt_insert_pages(dev_priv, &gt->resource, pages); 48 + psb_gtt_insert_pages(dev_priv, &pobj->resource, pages); 50 49 psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), pages, 51 - (gpu_base + gt->offset), npages, 0, 0, 50 + (gpu_base + pobj->offset), npages, 0, 0, 52 51 PSB_MMU_CACHED_MEMORY); 53 52 54 - gt->npage = npages; 55 - gt->pages = pages; 53 + pobj->npage = npages; 54 + pobj->pages = pages; 56 55 57 56 out: 58 - ++gt->in_gart; 57 + ++pobj->in_gart; 59 58 mutex_unlock(&dev_priv->gtt_mutex); 60 59 61 60 return 0; ··· 65 64 return ret; 66 65 } 67 66 68 - void psb_gem_unpin(struct gtt_range *gt) 67 + void psb_gem_unpin(struct psb_gem_object *pobj) 69 68 { 70 - struct drm_device *dev = gt->gem.dev; 69 + struct drm_gem_object *obj = &pobj->base; 70 + struct drm_device *dev = obj->dev; 71 71 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 72 72 u32 gpu_base = dev_priv->gtt.gatt_start; 73 73 74 74 mutex_lock(&dev_priv->gtt_mutex); 75 75 76 - WARN_ON(!gt->in_gart); 76 + WARN_ON(!pobj->in_gart); 77 77 78 - --gt->in_gart; 78 + --pobj->in_gart; 79 79
80 - if (gt->in_gart || gt->stolen) 80 + if (pobj->in_gart || pobj->stolen) 81 81 goto out; 82 82 83 83 psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu), 84 - (gpu_base + gt->offset), gt->npage, 0, 0); 85 - psb_gtt_remove_pages(dev_priv, &gt->resource); 84 + (gpu_base + pobj->offset), pobj->npage, 0, 0); 85 + psb_gtt_remove_pages(dev_priv, &pobj->resource); 86 86 87 87 /* Reset caching flags */ 88 - set_pages_array_wb(gt->pages, gt->npage); 88 + set_pages_array_wb(pobj->pages, pobj->npage); 89 89 90 - drm_gem_put_pages(&gt->gem, gt->pages, true, false); 91 - gt->pages = NULL; 92 - gt->npage = 0; 90 + drm_gem_put_pages(obj, pobj->pages, true, false); 91 + pobj->pages = NULL; 92 + pobj->npage = 0; 93 93 94 94 out: 95 95 mutex_unlock(&dev_priv->gtt_mutex); ··· 100 98 101 99 static void psb_gem_free_object(struct drm_gem_object *obj) 102 100 { 103 - struct gtt_range *gt = to_gtt_range(obj); 101 + struct psb_gem_object *pobj = to_psb_gem_object(obj); 104 102 105 103 drm_gem_object_release(obj); 106 104 107 105 /* Undo the mmap pin if we are destroying the object */ 108 - if (gt->mmapping) 109 - psb_gem_unpin(gt); 106 + if (pobj->mmapping) 107 + psb_gem_unpin(pobj); 110 108 111 - WARN_ON(gt->in_gart && !gt->stolen); 109 + WARN_ON(pobj->in_gart && !pobj->stolen); 112 110 113 - release_resource(&gt->resource); 114 - kfree(gt); 111 + release_resource(&pobj->resource); 112 + kfree(pobj); 115 113 } 116 114 117 115 static const struct vm_operations_struct psb_gem_vm_ops = { ··· 125 123 .vm_ops = &psb_gem_vm_ops, 126 124 }; 127 125 128 - struct gtt_range * 126 + struct psb_gem_object * 129 127 psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align) 130 128 { 131 129 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 132 - struct gtt_range *gt; 130 + struct psb_gem_object *pobj; 133 131 struct drm_gem_object *obj; 134 132 int ret; 135 133 136 134 size = roundup(size, PAGE_SIZE); 137 135 138 - gt = kzalloc(sizeof(*gt), GFP_KERNEL); 139 - if (!gt) 136 + pobj = kzalloc(sizeof(*pobj), GFP_KERNEL); 137 + if (!pobj) 140 138 return ERR_PTR(-ENOMEM); 141 - obj = &gt->gem; 139 + obj = &pobj->base; 142 140
143 141 /* GTT resource */ 144 142 145 - ret = psb_gtt_allocate_resource(dev_priv, &gt->resource, name, size, align, stolen, 146 - &gt->offset); 143 + ret = psb_gtt_allocate_resource(dev_priv, &pobj->resource, name, size, align, stolen, 144 + &pobj->offset); 147 145 if (ret) 148 146 goto err_kfree; 149 147 150 148 if (stolen) { 151 - gt->stolen = true; 152 - gt->in_gart = 1; 149 + pobj->stolen = true; 150 + pobj->in_gart = 1; 153 151 } 154 152 155 153 /* GEM object */ ··· 167 165 mapping_set_gfp_mask(obj->filp->f_mapping, GFP_KERNEL | __GFP_DMA32); 168 166 } 169 167 170 - return gt; 168 + return pobj; 171 169 172 170 err_release_resource: 173 - release_resource(&gt->resource); 171 + release_resource(&pobj->resource); 174 172 err_kfree: 175 - kfree(gt); 173 + kfree(pobj); 176 174 return ERR_PTR(ret); 177 175 } 178 176 ··· 190 188 struct drm_mode_create_dumb *args) 191 189 { 192 190 size_t pitch, size; 193 - struct gtt_range *gt; 191 + struct psb_gem_object *pobj; 194 192 struct drm_gem_object *obj; 195 193 u32 handle; 196 194 int ret; ··· 203 201 if (!size) 204 202 return -EINVAL; 205 203 206 - gt = psb_gem_create(dev, size, "gem", false, PAGE_SIZE); 207 - if (IS_ERR(gt)) 208 - return PTR_ERR(gt); 209 - obj = &gt->gem; 204 + pobj = psb_gem_create(dev, size, "gem", false, PAGE_SIZE); 205 + if (IS_ERR(pobj)) 206 + return PTR_ERR(pobj); 207 + obj = &pobj->base; 210 208 211 209 ret = drm_gem_handle_create(file, obj, &handle); 212 210 if (ret) ··· 245 243 { 246 244 struct vm_area_struct *vma = vmf->vma; 247 245 struct drm_gem_object *obj; 248 - struct gtt_range *r; 246 + struct psb_gem_object *pobj; 249 247 int err; 250 248 vm_fault_t ret; 251 249 unsigned long pfn; ··· 257 255 dev = obj->dev; 258 256 dev_priv = to_drm_psb_private(dev); 259 257 260 - r = to_gtt_range(obj); 258 + pobj = to_psb_gem_object(obj); 261 259
262 260 /* Make sure we don't parallel update on a fault, nor move or remove 263 261 something from beneath our feet */ ··· 265 263 266 264 /* For now the mmap pins the object and it stays pinned. As things 267 265 stand that will do us no harm */ 268 - if (r->mmapping == 0) { 269 - err = psb_gem_pin(r); 266 + if (pobj->mmapping == 0) { 267 + err = psb_gem_pin(pobj); 270 268 if (err < 0) { 271 269 dev_err(dev->dev, "gma500: pin failed: %d\n", err); 272 270 ret = vmf_error(err); 273 271 goto fail; 274 272 } 275 - r->mmapping = 1; 273 + pobj->mmapping = 1; 276 274 } 277 275 278 276 /* Page relative to the VMA start - we must calculate this ourselves 280 278 page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; 282 280 /* CPU view of the page, don't go via the GART for CPU writes */ 283 - if (r->stolen) 284 - pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT; 281 + if (pobj->stolen) 282 + pfn = (dev_priv->stolen_base + pobj->offset) >> PAGE_SHIFT; 285 283 else 286 - pfn = page_to_pfn(r->pages[page_offset]); 284 + pfn = page_to_pfn(pobj->pages[page_offset]); 287 285 ret = vmf_insert_pfn(vma, vmf->address, pfn); 288 286 fail: 289 287 mutex_unlock(&dev_priv->mmap_mutex);
+22 -3
drivers/gpu/drm/gma500/gem.h
··· 8 8 #ifndef _GEM_H 9 9 #define _GEM_H 10 10 11 + #include <linux/kernel.h> 12 + 11 13 #include <drm/drm_gem.h> 12 14 13 15 struct drm_device; 14 16 15 - struct gtt_range * 17 + struct psb_gem_object { 18 + struct drm_gem_object base; 19 + 20 + struct resource resource; /* GTT resource for our allocation */ 21 + u32 offset; /* GTT offset of our object */ 22 + int in_gart; /* Currently in the GART (ref ct) */ 23 + bool stolen; /* Backed from stolen RAM */ 24 + bool mmapping; /* Is mmappable */ 25 + struct page **pages; /* Backing pages if present */ 26 + int npage; /* Number of backing pages */ 27 + }; 28 + 29 + static inline struct psb_gem_object *to_psb_gem_object(struct drm_gem_object *obj) 30 + { 31 + return container_of(obj, struct psb_gem_object, base); 32 + } 33 + 34 + struct psb_gem_object * 16 35 psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align); 17 36 18 - int psb_gem_pin(struct gtt_range *gt); 19 - void psb_gem_unpin(struct gtt_range *gt); 37 + int psb_gem_pin(struct psb_gem_object *pobj); 38 + void psb_gem_unpin(struct psb_gem_object *pobj); 20 39 21 40 #endif
+25 -25
drivers/gpu/drm/gma500/gma_display.c
··· 55 55 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 56 56 struct gma_crtc *gma_crtc = to_gma_crtc(crtc); 57 57 struct drm_framebuffer *fb = crtc->primary->fb; 58 - struct gtt_range *gtt; 58 + struct psb_gem_object *pobj; 59 59 int pipe = gma_crtc->pipe; 60 60 const struct psb_offset *map = &dev_priv->regmap[pipe]; 61 61 unsigned long start, offset; ··· 71 71 goto gma_pipe_cleaner; 72 72 } 73 73 74 - gtt = to_gtt_range(fb->obj[0]); 74 + pobj = to_psb_gem_object(fb->obj[0]); 75 75 76 76 /* We are displaying this buffer, make sure it is actually loaded 77 77 into the GTT */ 78 - ret = psb_gem_pin(gtt); 78 + ret = psb_gem_pin(pobj); 79 79 if (ret < 0) 80 80 goto gma_pipe_set_base_exit; 81 - start = gtt->offset; 81 + start = pobj->offset; 82 82 offset = y * fb->pitches[0] + x * fb->format->cpp[0]; 83 83 84 84 REG_WRITE(map->stride, fb->pitches[0]); ··· 126 126 gma_pipe_cleaner: 127 127 /* If there was a previous display we can now unpin it */ 128 128 if (old_fb) 129 - psb_gem_unpin(to_gtt_range(old_fb->obj[0])); 129 + psb_gem_unpin(to_psb_gem_object(old_fb->obj[0])); 130 130 131 131 gma_pipe_set_base_exit: 132 132 gma_power_end(dev); ··· 332 332 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
333 333 uint32_t temp; 334 334 size_t addr = 0; 335 - struct gtt_range *gt; 336 - struct gtt_range *cursor_gt = gma_crtc->cursor_gt; 335 + struct psb_gem_object *pobj; 336 + struct psb_gem_object *cursor_pobj = gma_crtc->cursor_pobj; 337 337 struct drm_gem_object *obj; 338 338 void *tmp_dst, *tmp_src; 339 339 int ret = 0, i, cursor_pages; ··· 349 349 350 350 /* Unpin the old GEM object */ 351 351 if (gma_crtc->cursor_obj) { 352 - gt = to_gtt_range(gma_crtc->cursor_obj); 353 - psb_gem_unpin(gt); 352 + pobj = to_psb_gem_object(gma_crtc->cursor_obj); 353 + psb_gem_unpin(pobj); 354 354 drm_gem_object_put(gma_crtc->cursor_obj); 355 355 gma_crtc->cursor_obj = NULL; 356 356 } ··· 375 375 goto unref_cursor; 376 376 } 377 377 378 - gt = to_gtt_range(obj); 378 + pobj = to_psb_gem_object(obj); 379 379 380 380 /* Pin the memory into the GTT */ 381 - ret = psb_gem_pin(gt); 381 + ret = psb_gem_pin(pobj); 382 382 if (ret) { 383 383 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); 384 384 goto unref_cursor; 385 385 } 386 386 387 387 if (dev_priv->ops->cursor_needs_phys) { 388 - if (cursor_gt == NULL) { 388 + if (!cursor_pobj) { 389 389 dev_err(dev->dev, "No hardware cursor mem available"); 390 390 ret = -ENOMEM; 391 391 goto unref_cursor; 392 392 } 393 393 394 394 /* Prevent overflow */ 395 - if (gt->npage > 4) 395 + if (pobj->npage > 4) 396 396 cursor_pages = 4; 397 397 else 398 - cursor_pages = gt->npage; 398 + cursor_pages = pobj->npage; 399 399 400 400 /* Copy the cursor to cursor mem */ 401 - tmp_dst = dev_priv->vram_addr + cursor_gt->offset; 401 + tmp_dst = dev_priv->vram_addr + cursor_pobj->offset; 402 402 for (i = 0; i < cursor_pages; i++) { 403 - tmp_src = kmap(gt->pages[i]); 403 + tmp_src = kmap(pobj->pages[i]); 404 404 memcpy(tmp_dst, tmp_src, PAGE_SIZE); 405 - kunmap(gt->pages[i]); 405 + kunmap(pobj->pages[i]); 406 406 tmp_dst += PAGE_SIZE; 407 407 } 408 408 409 409 addr = gma_crtc->cursor_addr; 410 410 } else { 411 - addr = gt->offset; 
411 + addr = pobj->offset; 412 412 gma_crtc->cursor_addr = addr; 413 413 } 414 414 ··· 425 425 426 426 /* unpin the old bo */ 427 427 if (gma_crtc->cursor_obj) { 428 - gt = to_gtt_range(gma_crtc->cursor_obj); 429 - psb_gem_unpin(gt); 428 + pobj = to_psb_gem_object(gma_crtc->cursor_obj); 429 + psb_gem_unpin(pobj); 430 430 drm_gem_object_put(gma_crtc->cursor_obj); 431 431 } 432 432 ··· 483 483 484 484 void gma_crtc_disable(struct drm_crtc *crtc) 485 485 { 486 - struct gtt_range *gt; 486 + struct psb_gem_object *pobj; 487 487 const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 488 488 489 489 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); 490 490 491 491 if (crtc->primary->fb) { 492 - gt = to_gtt_range(crtc->primary->fb->obj[0]); 493 - psb_gem_unpin(gt); 492 + pobj = to_psb_gem_object(crtc->primary->fb->obj[0]); 493 + psb_gem_unpin(pobj); 494 494 } 495 495 } 496 496 ··· 498 498 { 499 499 struct gma_crtc *gma_crtc = to_gma_crtc(crtc); 500 500 501 - if (gma_crtc->cursor_gt) 502 - drm_gem_object_put(&gma_crtc->cursor_gt->gem); 501 + if (gma_crtc->cursor_pobj) 502 + drm_gem_object_put(&gma_crtc->cursor_pobj->base); 503 503 504 504 kfree(gma_crtc->crtc_state); 505 505 drm_crtc_cleanup(crtc);
+8 -7
drivers/gpu/drm/gma500/gtt.c
··· 7 7 * Alan Cox <alan@linux.intel.com> 8 8 */ 9 9 10 + #include "gem.h" /* TODO: for struct psb_gem_object, see psb_gtt_restore() */ 10 11 #include "psb_drv.h" 11 12 12 13 ··· 303 302 { 304 303 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 305 304 struct resource *r = dev_priv->gtt_mem->child; 306 - struct gtt_range *range; 305 + struct psb_gem_object *pobj; 307 306 unsigned int restored = 0, total = 0, size = 0; 308 307 309 308 /* On resume, the gtt_mutex is already initialized */ ··· 313 312 while (r != NULL) { 314 313 /* 315 314 * TODO: GTT restoration needs a refactoring, so that we don't have to touch 316 - * struct gtt_range here. The type represents a GEM object and is not 317 - * related to the GTT itself. 315 + * struct psb_gem_object here. The type represents a GEM object and is 316 + * not related to the GTT itself. 318 317 */ 319 - range = container_of(r, struct gtt_range, resource); 320 - if (range->pages) { 321 - psb_gtt_insert_pages(dev_priv, &range->resource, range->pages); 322 - size += range->resource.end - range->resource.start; 318 + pobj = container_of(r, struct psb_gem_object, resource); 319 + if (pobj->pages) { 320 + psb_gtt_insert_pages(dev_priv, &pobj->resource, pobj->pages); 321 + size += pobj->resource.end - pobj->resource.start; 323 322 restored++; 324 323 } 325 324 r = r->sibling;
-15
drivers/gpu/drm/gma500/gtt.h
··· 28 28 /* Exported functions */ 29 29 extern int psb_gtt_init(struct drm_device *dev, int resume); 30 30 extern void psb_gtt_takedown(struct drm_device *dev); 31 - 32 - /* Each gtt_range describes an allocation in the GTT area */ 33 - struct gtt_range { 34 - struct resource resource; /* Resource for our allocation */ 35 - u32 offset; /* GTT offset of our object */ 36 - struct drm_gem_object gem; /* GEM high level stuff */ 37 - int in_gart; /* Currently in the GART (ref ct) */ 38 - bool stolen; /* Backed from stolen RAM */ 39 - bool mmapping; /* Is mmappable */ 40 - struct page **pages; /* Backing pages if present */ 41 - int npage; /* Number of backing pages */ 42 - }; 43 - 44 - #define to_gtt_range(x) container_of(x, struct gtt_range, gem) 45 - 46 31 extern int psb_gtt_restore(struct drm_device *dev); 47 32 48 33 int psb_gtt_allocate_resource(struct drm_psb_private *pdev, struct resource *res,
+2 -1
drivers/gpu/drm/gma500/oaktrail_crtc.c
··· 10 10 #include <drm/drm_fourcc.h> 11 11 12 12 #include "framebuffer.h" 13 + #include "gem.h" 13 14 #include "gma_display.h" 14 15 #include "power.h" 15 16 #include "psb_drv.h" ··· 609 608 if (!gma_power_begin(dev, true)) 610 609 return 0; 611 610 612 - start = to_gtt_range(fb->obj[0])->offset; 611 + start = to_psb_gem_object(fb->obj[0])->offset; 613 612 offset = y * fb->pitches[0] + x * fb->format->cpp[0]; 614 613 615 614 REG_WRITE(map->stride, fb->pitches[0]);
+7 -8
drivers/gpu/drm/gma500/psb_intel_display.c
··· 455 455 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 456 456 u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR }; 457 457 u32 base[3] = { CURABASE, CURBBASE, CURCBASE }; 458 - struct gtt_range *cursor_gt; 458 + struct psb_gem_object *cursor_pobj; 459 459 460 460 if (dev_priv->ops->cursor_needs_phys) { 461 461 /* Allocate 4 pages of stolen mem for a hardware cursor. That 462 462 * is enough for the 64 x 64 ARGB cursors we support. 463 463 */ 464 - cursor_gt = psb_gem_create(dev, 4 * PAGE_SIZE, "cursor", true, PAGE_SIZE); 465 - if (IS_ERR(cursor_gt)) { 466 - gma_crtc->cursor_gt = NULL; 464 + cursor_pobj = psb_gem_create(dev, 4 * PAGE_SIZE, "cursor", true, PAGE_SIZE); 465 + if (IS_ERR(cursor_pobj)) { 466 + gma_crtc->cursor_pobj = NULL; 467 467 goto out; 468 468 } 469 - gma_crtc->cursor_gt = cursor_gt; 470 - gma_crtc->cursor_addr = dev_priv->stolen_base + 471 - cursor_gt->offset; 469 + gma_crtc->cursor_pobj = cursor_pobj; 470 + gma_crtc->cursor_addr = dev_priv->stolen_base + cursor_pobj->offset; 472 471 } else { 473 - gma_crtc->cursor_gt = NULL; 472 + gma_crtc->cursor_pobj = NULL; 474 473 } 475 474 476 475 out:
+1 -1
drivers/gpu/drm/gma500/psb_intel_drv.h
··· 140 140 int pipe; 141 141 int plane; 142 142 uint32_t cursor_addr; 143 - struct gtt_range *cursor_gt; 143 + struct psb_gem_object *cursor_pobj; 144 144 u8 lut_adj[256]; 145 145 struct psb_intel_framebuffer *fbdev_fb; 146 146 /* a mode_set for fbdev users on this crtc */