drm: update drm_memory_debug.h

Update drm_memory_debug.h from DRM CVS: expand the old DRM() name-mangling
macro into plain drm_-prefixed symbols throughout, reflow the indent-mangled
drm_mem_stats[] initializer to one entry per line, add a drm_device_t
argument to drm_alloc_agp(), and switch the stats lock to an explicit
SPIN_LOCK_UNLOCKED initializer.

From: Jon Smirl <jonsmirl@gmail.com>
Signed-off-by: Dave Airlie <airlied@linux.ie>

Authored and committed by Dave Airlie (commits f0c408b5, 1e7d5190)

+134 -135
drivers/char/drm/drm_memory_debug.h
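
The diff below is almost entirely mechanical: every call through the old
DRM() name-mangling macro is spelled out as a plain drm_-prefixed
identifier, and the indent-mangled drm_mem_stats[] initializer is reflowed
to one entry per line. For readers unfamiliar with the old convention, a
rough sketch of what DRM() did follows; the macro itself lived in the
shared drmP.h-era headers rather than in this file, so the exact definition
shown here is an assumption:

	/*
	 * Assumed shape of the old name-mangling macro: token-paste a
	 * prefix onto each shared symbol (per-driver builds used the
	 * driver name; the core build effectively used "drm"):
	 */
	#define DRM(x) drm_##x

	/* A pre-patch call site such as */
	pt = DRM(alloc) (size, area);
	/* therefore expanded to exactly what the patch now writes out: */
	pt = drm_alloc(size, area);

Beyond the rename, two substantive changes are visible: drm_alloc_agp()
gains a drm_device_t *dev parameter, and the stats lock drops
DEFINE_SPINLOCK() for an explicit SPIN_LOCK_UNLOCKED initializer (see the
note after the diff).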
 /**
- * \file drm_memory.h
+ * \file drm_memory_debug.h
  * Memory management wrappers for DRM.
  *
  * \author Rickard E. (Rik) Faith <faith@valinux.com>
···
 	unsigned long bytes_freed;
 } drm_mem_stats_t;
 
-static DEFINE_SPINLOCK(DRM(mem_lock));
-static unsigned long DRM(ram_available) = 0;	/* In pages */
-static unsigned long DRM(ram_used) = 0;
-static drm_mem_stats_t DRM(mem_stats)[] =
+static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
+static unsigned long drm_ram_available = 0;	/* In pages */
+static unsigned long drm_ram_used = 0;
+static drm_mem_stats_t drm_mem_stats[] =
 {
-	[DRM_MEM_DMA] = {
-		"dmabufs"},[DRM_MEM_SAREA] = {
-		"sareas"},[DRM_MEM_DRIVER] = {
-		"driver"},[DRM_MEM_MAGIC] = {
-		"magic"},[DRM_MEM_IOCTLS] = {
-		"ioctltab"},[DRM_MEM_MAPS] = {
-		"maplist"},[DRM_MEM_VMAS] = {
-		"vmalist"},[DRM_MEM_BUFS] = {
-		"buflist"},[DRM_MEM_SEGS] = {
-		"seglist"},[DRM_MEM_PAGES] = {
-		"pagelist"},[DRM_MEM_FILES] = {
-		"files"},[DRM_MEM_QUEUES] = {
-		"queues"},[DRM_MEM_CMDS] = {
-		"commands"},[DRM_MEM_MAPPINGS] = {
-		"mappings"},[DRM_MEM_BUFLISTS] = {
-		"buflists"},[DRM_MEM_AGPLISTS] = {
-		"agplist"},[DRM_MEM_SGLISTS] = {
-		"sglist"},[DRM_MEM_TOTALAGP] = {
-		"totalagp"},[DRM_MEM_BOUNDAGP] = {
-		"boundagp"},[DRM_MEM_CTXBITMAP] = {
-		"ctxbitmap"},[DRM_MEM_CTXLIST] = {
-		"ctxlist"},[DRM_MEM_STUB] = {
-		"stub"}, {
-		NULL, 0,}	/* Last entry must be null */
+	[DRM_MEM_DMA] = {"dmabufs"},
+	[DRM_MEM_SAREA] = {"sareas"},
+	[DRM_MEM_DRIVER] = {"driver"},
+	[DRM_MEM_MAGIC] = {"magic"},
+	[DRM_MEM_IOCTLS] = {"ioctltab"},
+	[DRM_MEM_MAPS] = {"maplist"},
+	[DRM_MEM_VMAS] = {"vmalist"},
+	[DRM_MEM_BUFS] = {"buflist"},
+	[DRM_MEM_SEGS] = {"seglist"},
+	[DRM_MEM_PAGES] = {"pagelist"},
+	[DRM_MEM_FILES] = {"files"},
+	[DRM_MEM_QUEUES] = {"queues"},
+	[DRM_MEM_CMDS] = {"commands"},
+	[DRM_MEM_MAPPINGS] = {"mappings"},
+	[DRM_MEM_BUFLISTS] = {"buflists"},
+	[DRM_MEM_AGPLISTS] = {"agplist"},
+	[DRM_MEM_SGLISTS] = {"sglist"},
+	[DRM_MEM_TOTALAGP] = {"totalagp"},
+	[DRM_MEM_BOUNDAGP] = {"boundagp"},
+	[DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
+	[DRM_MEM_CTXLIST] = {"ctxlist"},
+	[DRM_MEM_STUB] = {"stub"},
+	{NULL, 0,}	/* Last entry must be null */
 };
 
-void DRM(mem_init) (void) {
+void drm_mem_init (void) {
 	drm_mem_stats_t *mem;
 	struct sysinfo si;
 
-	for (mem = DRM(mem_stats); mem->name; ++mem) {
+	for (mem = drm_mem_stats; mem->name; ++mem) {
 		mem->succeed_count = 0;
 		mem->free_count = 0;
 		mem->fail_count = 0;
···
 	}
 
 	si_meminfo(&si);
-	DRM(ram_available) = si.totalram;
-	DRM(ram_used) = 0;
+	drm_ram_available = si.totalram;
+	drm_ram_used = 0;
 }
 
 /* drm_mem_info is called whenever a process reads /dev/drm/mem. */
 
-static int DRM(_mem_info) (char *buf, char **start, off_t offset,
+static int drm__mem_info (char *buf, char **start, off_t offset,
 			   int request, int *eof, void *data) {
 	drm_mem_stats_t *pt;
 	int len = 0;
···
 		       " | allocs bytes\n\n");
 	DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
 		       "system", 0, 0, 0,
-		       DRM(ram_available) << (PAGE_SHIFT - 10));
+		       drm_ram_available << (PAGE_SHIFT - 10));
 	DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
-		       "locked", 0, 0, 0, DRM(ram_used) >> 10);
+		       "locked", 0, 0, 0, drm_ram_used >> 10);
 	DRM_PROC_PRINT("\n");
-	for (pt = DRM(mem_stats); pt->name; pt++) {
+	for (pt = drm_mem_stats; pt->name; pt++) {
 		DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
 			       pt->name,
 			       pt->succeed_count,
···
 	return len - offset;
 }
 
-int DRM(mem_info) (char *buf, char **start, off_t offset,
+int drm_mem_info (char *buf, char **start, off_t offset,
 		   int len, int *eof, void *data) {
 	int ret;
 
-	spin_lock(&DRM(mem_lock));
-	ret = DRM(_mem_info) (buf, start, offset, len, eof, data);
-	spin_unlock(&DRM(mem_lock));
+	spin_lock(&drm_mem_lock);
+	ret = drm__mem_info (buf, start, offset, len, eof, data);
+	spin_unlock(&drm_mem_lock);
 	return ret;
 }
 
-void *DRM(alloc) (size_t size, int area) {
+void *drm_alloc (size_t size, int area) {
 	void *pt;
 
 	if (!size) {
···
 	}
 
 	if (!(pt = kmalloc(size, GFP_KERNEL))) {
-		spin_lock(&DRM(mem_lock));
-		++DRM(mem_stats)[area].fail_count;
-		spin_unlock(&DRM(mem_lock));
+		spin_lock(&drm_mem_lock);
+		++drm_mem_stats[area].fail_count;
+		spin_unlock(&drm_mem_lock);
 		return NULL;
 	}
-	spin_lock(&DRM(mem_lock));
-	++DRM(mem_stats)[area].succeed_count;
-	DRM(mem_stats)[area].bytes_allocated += size;
-	spin_unlock(&DRM(mem_lock));
+	spin_lock(&drm_mem_lock);
+	++drm_mem_stats[area].succeed_count;
+	drm_mem_stats[area].bytes_allocated += size;
+	spin_unlock(&drm_mem_lock);
 	return pt;
 }
 
-void *DRM(calloc) (size_t nmemb, size_t size, int area) {
+void *drm_calloc (size_t nmemb, size_t size, int area) {
 	void *addr;
 
-	addr = DRM(alloc) (nmemb * size, area);
+	addr = drm_alloc (nmemb * size, area);
 	if (addr != NULL)
 		memset((void *)addr, 0, size * nmemb);
 
 	return addr;
 }
 
-void *DRM(realloc) (void *oldpt, size_t oldsize, size_t size, int area) {
+void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) {
 	void *pt;
 
-	if (!(pt = DRM(alloc) (size, area)))
+	if (!(pt = drm_alloc (size, area)))
 		return NULL;
 	if (oldpt && oldsize) {
 		memcpy(pt, oldpt, oldsize);
-		DRM(free) (oldpt, oldsize, area);
+		drm_free (oldpt, oldsize, area);
 	}
 	return pt;
 }
 
-void DRM(free) (void *pt, size_t size, int area) {
+void drm_free (void *pt, size_t size, int area) {
 	int alloc_count;
 	int free_count;
 
···
 		DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
 	else
 		kfree(pt);
-	spin_lock(&DRM(mem_lock));
-	DRM(mem_stats)[area].bytes_freed += size;
-	free_count = ++DRM(mem_stats)[area].free_count;
-	alloc_count = DRM(mem_stats)[area].succeed_count;
-	spin_unlock(&DRM(mem_lock));
+	spin_lock(&drm_mem_lock);
+	drm_mem_stats[area].bytes_freed += size;
+	free_count = ++drm_mem_stats[area].free_count;
+	alloc_count = drm_mem_stats[area].succeed_count;
+	spin_unlock(&drm_mem_lock);
 	if (free_count > alloc_count) {
 		DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
 			      free_count, alloc_count);
 	}
 }
 
-unsigned long DRM(alloc_pages) (int order, int area) {
+unsigned long drm_alloc_pages (int order, int area) {
 	unsigned long address;
 	unsigned long bytes = PAGE_SIZE << order;
 	unsigned long addr;
 	unsigned int sz;
 
-	spin_lock(&DRM(mem_lock));
-	if ((DRM(ram_used) >> PAGE_SHIFT)
-	    > (DRM_RAM_PERCENT * DRM(ram_available)) / 100) {
-		spin_unlock(&DRM(mem_lock));
+	spin_lock(&drm_mem_lock);
+	if ((drm_ram_used >> PAGE_SHIFT)
+	    > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
+		spin_unlock(&drm_mem_lock);
 		return 0;
 	}
-	spin_unlock(&DRM(mem_lock));
+	spin_unlock(&drm_mem_lock);
 
 	address = __get_free_pages(GFP_KERNEL, order);
 	if (!address) {
-		spin_lock(&DRM(mem_lock));
-		++DRM(mem_stats)[area].fail_count;
-		spin_unlock(&DRM(mem_lock));
+		spin_lock(&drm_mem_lock);
+		++drm_mem_stats[area].fail_count;
+		spin_unlock(&drm_mem_lock);
 		return 0;
 	}
-	spin_lock(&DRM(mem_lock));
-	++DRM(mem_stats)[area].succeed_count;
-	DRM(mem_stats)[area].bytes_allocated += bytes;
-	DRM(ram_used) += bytes;
-	spin_unlock(&DRM(mem_lock));
+	spin_lock(&drm_mem_lock);
+	++drm_mem_stats[area].succeed_count;
+	drm_mem_stats[area].bytes_allocated += bytes;
+	drm_ram_used += bytes;
+	spin_unlock(&drm_mem_lock);
 
 	/* Zero outside the lock */
 	memset((void *)address, 0, bytes);
···
 	return address;
 }
 
-void DRM(free_pages) (unsigned long address, int order, int area) {
+void drm_free_pages (unsigned long address, int order, int area) {
 	unsigned long bytes = PAGE_SIZE << order;
 	int alloc_count;
 	int free_count;
···
 		free_pages(address, order);
 	}
 
-	spin_lock(&DRM(mem_lock));
-	free_count = ++DRM(mem_stats)[area].free_count;
-	alloc_count = DRM(mem_stats)[area].succeed_count;
-	DRM(mem_stats)[area].bytes_freed += bytes;
-	DRM(ram_used) -= bytes;
-	spin_unlock(&DRM(mem_lock));
+	spin_lock(&drm_mem_lock);
+	free_count = ++drm_mem_stats[area].free_count;
+	alloc_count = drm_mem_stats[area].succeed_count;
+	drm_mem_stats[area].bytes_freed += bytes;
+	drm_ram_used -= bytes;
+	spin_unlock(&drm_mem_lock);
 	if (free_count > alloc_count) {
 		DRM_MEM_ERROR(area,
 			      "Excess frees: %d frees, %d allocs\n",
···
 	}
 }
 
-void *DRM(ioremap) (unsigned long offset, unsigned long size,
+void *drm_ioremap (unsigned long offset, unsigned long size,
 		    drm_device_t * dev) {
 	void *pt;
 
···
 	}
 
 	if (!(pt = drm_ioremap(offset, size, dev))) {
-		spin_lock(&DRM(mem_lock));
-		++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
-		spin_unlock(&DRM(mem_lock));
+		spin_lock(&drm_mem_lock);
+		++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
+		spin_unlock(&drm_mem_lock);
 		return NULL;
 	}
-	spin_lock(&DRM(mem_lock));
-	++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
-	DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
-	spin_unlock(&DRM(mem_lock));
+	spin_lock(&drm_mem_lock);
+	++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+	drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
+	spin_unlock(&drm_mem_lock);
 	return pt;
 }
 
-void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size,
+void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
 			    drm_device_t * dev) {
 	void *pt;
 
···
 	}
 
 	if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
-		spin_lock(&DRM(mem_lock));
-		++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
-		spin_unlock(&DRM(mem_lock));
+		spin_lock(&drm_mem_lock);
+		++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
+		spin_unlock(&drm_mem_lock);
 		return NULL;
 	}
-	spin_lock(&DRM(mem_lock));
-	++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
-	DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
-	spin_unlock(&DRM(mem_lock));
+	spin_lock(&drm_mem_lock);
+	++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+	drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
+	spin_unlock(&drm_mem_lock);
 	return pt;
 }
 
-void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
+void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
 	int alloc_count;
 	int free_count;
 
···
 	else
 		drm_ioremapfree(pt, size, dev);
 
-	spin_lock(&DRM(mem_lock));
-	DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
-	free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
-	alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
-	spin_unlock(&DRM(mem_lock));
+	spin_lock(&drm_mem_lock);
+	drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
+	free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
+	alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+	spin_unlock(&drm_mem_lock);
 	if (free_count > alloc_count) {
 		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
 			      "Excess frees: %d frees, %d allocs\n",
···
 
 #if __OS_HAS_AGP
 
-DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) {
+DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
 	DRM_AGP_MEM *handle;
 
 	if (!pages) {
···
 		return NULL;
 	}
 
-	if ((handle = DRM(agp_allocate_memory) (pages, type))) {
-		spin_lock(&DRM(mem_lock));
-		++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
-		DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
+	if ((handle = drm_agp_allocate_memory (pages, type))) {
+		spin_lock(&drm_mem_lock);
+		++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+		drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
 		    += pages << PAGE_SHIFT;
-		spin_unlock(&DRM(mem_lock));
+		spin_unlock(&drm_mem_lock);
 		return handle;
 	}
-	spin_lock(&DRM(mem_lock));
-	++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count;
-	spin_unlock(&DRM(mem_lock));
+	spin_lock(&drm_mem_lock);
+	++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
+	spin_unlock(&drm_mem_lock);
 	return NULL;
 }
 
-int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
+int drm_free_agp (DRM_AGP_MEM * handle, int pages) {
 	int alloc_count;
 	int free_count;
 	int retval = -EINVAL;
···
 		return retval;
 	}
 
-	if (DRM(agp_free_memory) (handle)) {
-		spin_lock(&DRM(mem_lock));
-		free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
-		alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
-		DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
+	if (drm_agp_free_memory (handle)) {
+		spin_lock(&drm_mem_lock);
+		free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
+		alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+		drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
 		    += pages << PAGE_SHIFT;
-		spin_unlock(&DRM(mem_lock));
+		spin_unlock(&drm_mem_lock);
 		if (free_count > alloc_count) {
 			DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
 				      "Excess frees: %d frees, %d allocs\n",
···
 	return retval;
 }
 
-int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) {
+int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) {
 	int retcode = -EINVAL;
 
 	if (!handle) {
···
 		return retcode;
 	}
 
-	if (!(retcode = DRM(agp_bind_memory) (handle, start))) {
-		spin_lock(&DRM(mem_lock));
-		++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
-		DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
+	if (!(retcode = drm_agp_bind_memory (handle, start))) {
+		spin_lock(&drm_mem_lock);
+		++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+		drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
 		    += handle->page_count << PAGE_SHIFT;
-		spin_unlock(&DRM(mem_lock));
+		spin_unlock(&drm_mem_lock);
 		return retcode;
 	}
-	spin_lock(&DRM(mem_lock));
-	++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count;
-	spin_unlock(&DRM(mem_lock));
+	spin_lock(&drm_mem_lock);
+	++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
+	spin_unlock(&drm_mem_lock);
 	return retcode;
 }
 
-int DRM(unbind_agp) (DRM_AGP_MEM * handle) {
+int drm_unbind_agp (DRM_AGP_MEM * handle) {
 	int alloc_count;
 	int free_count;
 	int retcode = -EINVAL;
···
 		return retcode;
 	}
 
-	if ((retcode = DRM(agp_unbind_memory) (handle)))
+	if ((retcode = drm_agp_unbind_memory (handle)))
 		return retcode;
-	spin_lock(&DRM(mem_lock));
-	free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
-	alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
-	DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
+	spin_lock(&drm_mem_lock);
+	free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
+	alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+	drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
 	    += handle->page_count << PAGE_SHIFT;
-	spin_unlock(&DRM(mem_lock));
+	spin_unlock(&drm_mem_lock);
 	if (free_count > alloc_count) {
 		DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
 			      "Excess frees: %d frees, %d allocs\n",
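
A note on the spinlock hunk: replacing DEFINE_SPINLOCK() with an explicit
SPIN_LOCK_UNLOCKED assignment looks like a step backwards. The likely
reason (an assumption; the commit message does not say) is that the DRM CVS
tree this file is synced from also had to build on kernels predating the
DEFINE_SPINLOCK() helper. On a 2.6-era kernel the two declarations are
equivalent:

	/* Equivalent ways to declare an unlocked spinlock on kernels of
	 * this era; only one form would appear in a real file: */
	static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;	/* as in this patch */
	static DEFINE_SPINLOCK(drm_mem_lock);			/* the form removed here */

SPIN_LOCK_UNLOCKED was later deprecated and removed from the kernel
entirely, so DEFINE_SPINLOCK() is the spelling that survived in mainline.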