Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm: Remove memory debugging infrastructure.

It hasn't been used in ages, and having the user tell you how much
memory is being freed at free time is a recipe for disaster even if it
was ever used.

Signed-off-by: Eric Anholt <eric@anholt.net>

+324 -884
+6 -6
drivers/gpu/drm/drm_agpsupport.c
··· 203 203 204 204 if (!dev->agp || !dev->agp->acquired) 205 205 return -EINVAL; 206 - if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS))) 206 + if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL))) 207 207 return -ENOMEM; 208 208 209 209 memset(entry, 0, sizeof(*entry)); ··· 211 211 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; 212 212 type = (u32) request->type; 213 213 if (!(memory = drm_alloc_agp(dev, pages, type))) { 214 - drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 214 + kfree(entry); 215 215 return -ENOMEM; 216 216 } 217 217 ··· 369 369 list_del(&entry->head); 370 370 371 371 drm_free_agp(entry->memory, entry->pages); 372 - drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 372 + kfree(entry); 373 373 return 0; 374 374 } 375 375 EXPORT_SYMBOL(drm_agp_free); ··· 397 397 { 398 398 struct drm_agp_head *head = NULL; 399 399 400 - if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS))) 400 + if (!(head = kmalloc(sizeof(*head), GFP_KERNEL))) 401 401 return NULL; 402 402 memset((void *)head, 0, sizeof(*head)); 403 403 head->bridge = agp_find_bridge(dev->pdev); 404 404 if (!head->bridge) { 405 405 if (!(head->bridge = agp_backend_acquire(dev->pdev))) { 406 - drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS); 406 + kfree(head); 407 407 return NULL; 408 408 } 409 409 agp_copy_info(head->bridge, &head->agp_info); ··· 412 412 agp_copy_info(head->bridge, &head->agp_info); 413 413 } 414 414 if (head->agp_info.chipset == NOT_SUPPORTED) { 415 - drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS); 415 + kfree(head); 416 416 return NULL; 417 417 } 418 418 INIT_LIST_HEAD(&head->memory);
+2 -2
drivers/gpu/drm/drm_auth.c
··· 79 79 struct drm_device *dev = master->minor->dev; 80 80 DRM_DEBUG("%d\n", magic); 81 81 82 - entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC); 82 + entry = kmalloc(sizeof(*entry), GFP_KERNEL); 83 83 if (!entry) 84 84 return -ENOMEM; 85 85 memset(entry, 0, sizeof(*entry)); ··· 120 120 list_del(&pt->head); 121 121 mutex_unlock(&dev->struct_mutex); 122 122 123 - drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); 123 + kfree(pt); 124 124 125 125 return 0; 126 126 }
+56 -84
drivers/gpu/drm/drm_bufs.c
··· 151 151 unsigned long user_token; 152 152 int ret; 153 153 154 - map = drm_alloc(sizeof(*map), DRM_MEM_MAPS); 154 + map = kmalloc(sizeof(*map), GFP_KERNEL); 155 155 if (!map) 156 156 return -ENOMEM; 157 157 ··· 165 165 * when processes fork. 166 166 */ 167 167 if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { 168 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 168 + kfree(map); 169 169 return -EINVAL; 170 170 } 171 171 DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n", ··· 179 179 map->size = PAGE_ALIGN(map->size); 180 180 181 181 if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) { 182 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 182 + kfree(map); 183 183 return -EINVAL; 184 184 } 185 185 map->mtrr = -1; ··· 191 191 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) 192 192 if (map->offset + (map->size-1) < map->offset || 193 193 map->offset < virt_to_phys(high_memory)) { 194 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 194 + kfree(map); 195 195 return -EINVAL; 196 196 } 197 197 #endif ··· 212 212 list->map->size = map->size; 213 213 } 214 214 215 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 215 + kfree(map); 216 216 *maplist = list; 217 217 return 0; 218 218 } ··· 227 227 if (map->type == _DRM_REGISTERS) { 228 228 map->handle = ioremap(map->offset, map->size); 229 229 if (!map->handle) { 230 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 230 + kfree(map); 231 231 return -ENOMEM; 232 232 } 233 233 } ··· 243 243 list->map->size = map->size; 244 244 } 245 245 246 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 246 + kfree(map); 247 247 *maplist = list; 248 248 return 0; 249 249 } ··· 251 251 DRM_DEBUG("%lu %d %p\n", 252 252 map->size, drm_order(map->size), map->handle); 253 253 if (!map->handle) { 254 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 254 + kfree(map); 255 255 return -ENOMEM; 256 256 } 257 257 map->offset = (unsigned long)map->handle; ··· 
259 259 /* Prevent a 2nd X Server from creating a 2nd lock */ 260 260 if (dev->primary->master->lock.hw_lock != NULL) { 261 261 vfree(map->handle); 262 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 262 + kfree(map); 263 263 return -EBUSY; 264 264 } 265 265 dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ ··· 270 270 int valid = 0; 271 271 272 272 if (!drm_core_has_AGP(dev)) { 273 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 273 + kfree(map); 274 274 return -EINVAL; 275 275 } 276 276 #ifdef __alpha__ ··· 303 303 } 304 304 } 305 305 if (!list_empty(&dev->agp->memory) && !valid) { 306 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 306 + kfree(map); 307 307 return -EPERM; 308 308 } 309 309 DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n", ··· 316 316 } 317 317 case _DRM_SCATTER_GATHER: 318 318 if (!dev->sg) { 319 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 319 + kfree(map); 320 320 return -EINVAL; 321 321 } 322 322 map->offset += (unsigned long)dev->sg->virtual; ··· 328 328 * need to point to a 64bit variable first. 
*/ 329 329 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); 330 330 if (!dmah) { 331 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 331 + kfree(map); 332 332 return -ENOMEM; 333 333 } 334 334 map->handle = dmah->vaddr; ··· 336 336 kfree(dmah); 337 337 break; 338 338 default: 339 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 339 + kfree(map); 340 340 return -EINVAL; 341 341 } 342 342 343 - list = drm_alloc(sizeof(*list), DRM_MEM_MAPS); 343 + list = kmalloc(sizeof(*list), GFP_KERNEL); 344 344 if (!list) { 345 345 if (map->type == _DRM_REGISTERS) 346 346 iounmap(map->handle); 347 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 347 + kfree(map); 348 348 return -EINVAL; 349 349 } 350 350 memset(list, 0, sizeof(*list)); ··· 362 362 if (ret) { 363 363 if (map->type == _DRM_REGISTERS) 364 364 iounmap(map->handle); 365 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 366 - drm_free(list, sizeof(*list), DRM_MEM_MAPS); 365 + kfree(map); 366 + kfree(list); 367 367 mutex_unlock(&dev->struct_mutex); 368 368 return ret; 369 369 } ··· 448 448 list_del(&r_list->head); 449 449 drm_ht_remove_key(&dev->map_hash, 450 450 r_list->user_token >> PAGE_SHIFT); 451 - drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS); 451 + kfree(r_list); 452 452 found = 1; 453 453 break; 454 454 } ··· 491 491 DRM_ERROR("tried to rmmap GEM object\n"); 492 492 break; 493 493 } 494 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 494 + kfree(map); 495 495 496 496 return 0; 497 497 } ··· 582 582 drm_pci_free(dev, entry->seglist[i]); 583 583 } 584 584 } 585 - drm_free(entry->seglist, 586 - entry->seg_count * 587 - sizeof(*entry->seglist), DRM_MEM_SEGS); 585 + kfree(entry->seglist); 588 586 589 587 entry->seg_count = 0; 590 588 } 591 589 592 590 if (entry->buf_count) { 593 591 for (i = 0; i < entry->buf_count; i++) { 594 - if (entry->buflist[i].dev_private) { 595 - drm_free(entry->buflist[i].dev_private, 596 - entry->buflist[i].dev_priv_size, 597 - DRM_MEM_BUFS); 598 - } 592 + kfree(entry->buflist[i].dev_private); 
599 593 } 600 - drm_free(entry->buflist, 601 - entry->buf_count * 602 - sizeof(*entry->buflist), DRM_MEM_BUFS); 594 + kfree(entry->buflist); 603 595 604 596 entry->buf_count = 0; 605 597 } ··· 690 698 return -EINVAL; 691 699 } 692 700 693 - entry->buflist = drm_alloc(count * sizeof(*entry->buflist), 694 - DRM_MEM_BUFS); 701 + entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL); 695 702 if (!entry->buflist) { 696 703 mutex_unlock(&dev->struct_mutex); 697 704 atomic_dec(&dev->buf_alloc); ··· 720 729 buf->file_priv = NULL; 721 730 722 731 buf->dev_priv_size = dev->driver->dev_priv_size; 723 - buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); 732 + buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL); 724 733 if (!buf->dev_private) { 725 734 /* Set count correctly so we free the proper amount. */ 726 735 entry->buf_count = count; ··· 740 749 741 750 DRM_DEBUG("byte_count: %d\n", byte_count); 742 751 743 - temp_buflist = drm_realloc(dma->buflist, 744 - dma->buf_count * sizeof(*dma->buflist), 745 - (dma->buf_count + entry->buf_count) 746 - * sizeof(*dma->buflist), DRM_MEM_BUFS); 752 + temp_buflist = krealloc(dma->buflist, 753 + (dma->buf_count + entry->buf_count) * 754 + sizeof(*dma->buflist), GFP_KERNEL); 747 755 if (!temp_buflist) { 748 756 /* Free the entry because it isn't valid */ 749 757 drm_cleanup_buf_error(dev, entry); ··· 844 854 return -EINVAL; 845 855 } 846 856 847 - entry->buflist = drm_alloc(count * sizeof(*entry->buflist), 848 - DRM_MEM_BUFS); 857 + entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL); 849 858 if (!entry->buflist) { 850 859 mutex_unlock(&dev->struct_mutex); 851 860 atomic_dec(&dev->buf_alloc); ··· 852 863 } 853 864 memset(entry->buflist, 0, count * sizeof(*entry->buflist)); 854 865 855 - entry->seglist = drm_alloc(count * sizeof(*entry->seglist), 856 - DRM_MEM_SEGS); 866 + entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL); 857 867 if (!entry->seglist) { 858 - 
drm_free(entry->buflist, 859 - count * sizeof(*entry->buflist), DRM_MEM_BUFS); 868 + kfree(entry->buflist); 860 869 mutex_unlock(&dev->struct_mutex); 861 870 atomic_dec(&dev->buf_alloc); 862 871 return -ENOMEM; ··· 864 877 /* Keep the original pagelist until we know all the allocations 865 878 * have succeeded 866 879 */ 867 - temp_pagelist = drm_alloc((dma->page_count + (count << page_order)) 868 - * sizeof(*dma->pagelist), DRM_MEM_PAGES); 880 + temp_pagelist = kmalloc((dma->page_count + (count << page_order)) * 881 + sizeof(*dma->pagelist), GFP_KERNEL); 869 882 if (!temp_pagelist) { 870 - drm_free(entry->buflist, 871 - count * sizeof(*entry->buflist), DRM_MEM_BUFS); 872 - drm_free(entry->seglist, 873 - count * sizeof(*entry->seglist), DRM_MEM_SEGS); 883 + kfree(entry->buflist); 884 + kfree(entry->seglist); 874 885 mutex_unlock(&dev->struct_mutex); 875 886 atomic_dec(&dev->buf_alloc); 876 887 return -ENOMEM; ··· 892 907 entry->buf_count = count; 893 908 entry->seg_count = count; 894 909 drm_cleanup_buf_error(dev, entry); 895 - drm_free(temp_pagelist, 896 - (dma->page_count + (count << page_order)) 897 - * sizeof(*dma->pagelist), DRM_MEM_PAGES); 910 + kfree(temp_pagelist); 898 911 mutex_unlock(&dev->struct_mutex); 899 912 atomic_dec(&dev->buf_alloc); 900 913 return -ENOMEM; ··· 923 940 buf->file_priv = NULL; 924 941 925 942 buf->dev_priv_size = dev->driver->dev_priv_size; 926 - buf->dev_private = drm_alloc(buf->dev_priv_size, 927 - DRM_MEM_BUFS); 943 + buf->dev_private = kmalloc(buf->dev_priv_size, 944 + GFP_KERNEL); 928 945 if (!buf->dev_private) { 929 946 /* Set count correctly so we free the proper amount. 
*/ 930 947 entry->buf_count = count; 931 948 entry->seg_count = count; 932 949 drm_cleanup_buf_error(dev, entry); 933 - drm_free(temp_pagelist, 934 - (dma->page_count + 935 - (count << page_order)) 936 - * sizeof(*dma->pagelist), 937 - DRM_MEM_PAGES); 950 + kfree(temp_pagelist); 938 951 mutex_unlock(&dev->struct_mutex); 939 952 atomic_dec(&dev->buf_alloc); 940 953 return -ENOMEM; ··· 943 964 byte_count += PAGE_SIZE << page_order; 944 965 } 945 966 946 - temp_buflist = drm_realloc(dma->buflist, 947 - dma->buf_count * sizeof(*dma->buflist), 948 - (dma->buf_count + entry->buf_count) 949 - * sizeof(*dma->buflist), DRM_MEM_BUFS); 967 + temp_buflist = krealloc(dma->buflist, 968 + (dma->buf_count + entry->buf_count) * 969 + sizeof(*dma->buflist), GFP_KERNEL); 950 970 if (!temp_buflist) { 951 971 /* Free the entry because it isn't valid */ 952 972 drm_cleanup_buf_error(dev, entry); 953 - drm_free(temp_pagelist, 954 - (dma->page_count + (count << page_order)) 955 - * sizeof(*dma->pagelist), DRM_MEM_PAGES); 973 + kfree(temp_pagelist); 956 974 mutex_unlock(&dev->struct_mutex); 957 975 atomic_dec(&dev->buf_alloc); 958 976 return -ENOMEM; ··· 964 988 * with the new one. 
965 989 */ 966 990 if (dma->page_count) { 967 - drm_free(dma->pagelist, 968 - dma->page_count * sizeof(*dma->pagelist), 969 - DRM_MEM_PAGES); 991 + kfree(dma->pagelist); 970 992 } 971 993 dma->pagelist = temp_pagelist; 972 994 ··· 1060 1086 return -EINVAL; 1061 1087 } 1062 1088 1063 - entry->buflist = drm_alloc(count * sizeof(*entry->buflist), 1064 - DRM_MEM_BUFS); 1089 + entry->buflist = kmalloc(count * sizeof(*entry->buflist), 1090 + GFP_KERNEL); 1065 1091 if (!entry->buflist) { 1066 1092 mutex_unlock(&dev->struct_mutex); 1067 1093 atomic_dec(&dev->buf_alloc); ··· 1092 1118 buf->file_priv = NULL; 1093 1119 1094 1120 buf->dev_priv_size = dev->driver->dev_priv_size; 1095 - buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); 1121 + buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL); 1096 1122 if (!buf->dev_private) { 1097 1123 /* Set count correctly so we free the proper amount. */ 1098 1124 entry->buf_count = count; ··· 1113 1139 1114 1140 DRM_DEBUG("byte_count: %d\n", byte_count); 1115 1141 1116 - temp_buflist = drm_realloc(dma->buflist, 1117 - dma->buf_count * sizeof(*dma->buflist), 1118 - (dma->buf_count + entry->buf_count) 1119 - * sizeof(*dma->buflist), DRM_MEM_BUFS); 1142 + temp_buflist = krealloc(dma->buflist, 1143 + (dma->buf_count + entry->buf_count) * 1144 + sizeof(*dma->buflist), GFP_KERNEL); 1120 1145 if (!temp_buflist) { 1121 1146 /* Free the entry because it isn't valid */ 1122 1147 drm_cleanup_buf_error(dev, entry); ··· 1221 1248 return -EINVAL; 1222 1249 } 1223 1250 1224 - entry->buflist = drm_alloc(count * sizeof(*entry->buflist), 1225 - DRM_MEM_BUFS); 1251 + entry->buflist = kmalloc(count * sizeof(*entry->buflist), 1252 + GFP_KERNEL); 1226 1253 if (!entry->buflist) { 1227 1254 mutex_unlock(&dev->struct_mutex); 1228 1255 atomic_dec(&dev->buf_alloc); ··· 1252 1279 buf->file_priv = NULL; 1253 1280 1254 1281 buf->dev_priv_size = dev->driver->dev_priv_size; 1255 - buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); 1282 
+ buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL); 1256 1283 if (!buf->dev_private) { 1257 1284 /* Set count correctly so we free the proper amount. */ 1258 1285 entry->buf_count = count; ··· 1272 1299 1273 1300 DRM_DEBUG("byte_count: %d\n", byte_count); 1274 1301 1275 - temp_buflist = drm_realloc(dma->buflist, 1276 - dma->buf_count * sizeof(*dma->buflist), 1277 - (dma->buf_count + entry->buf_count) 1278 - * sizeof(*dma->buflist), DRM_MEM_BUFS); 1302 + temp_buflist = krealloc(dma->buflist, 1303 + (dma->buf_count + entry->buf_count) * 1304 + sizeof(*dma->buflist), GFP_KERNEL); 1279 1305 if (!temp_buflist) { 1280 1306 /* Free the entry because it isn't valid */ 1281 1307 drm_cleanup_buf_error(dev, entry);
+2 -2
drivers/gpu/drm/drm_context.c
··· 341 341 } 342 342 } 343 343 344 - ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST); 344 + ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL); 345 345 if (!ctx_entry) { 346 346 DRM_DEBUG("out of memory\n"); 347 347 return -ENOMEM; ··· 456 456 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { 457 457 if (pos->handle == ctx->handle) { 458 458 list_del(&pos->head); 459 - drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); 459 + kfree(pos); 460 460 --dev->ctx_count; 461 461 } 462 462 }
+3 -6
drivers/gpu/drm/drm_debugfs.c
··· 100 100 (dev->driver->driver_features & features) != features) 101 101 continue; 102 102 103 - tmp = drm_alloc(sizeof(struct drm_info_node), 104 - _DRM_DRIVER); 103 + tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 105 104 ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, 106 105 root, tmp, &drm_debugfs_fops); 107 106 if (!ent) { 108 107 DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n", 109 108 name, files[i].name); 110 - drm_free(tmp, sizeof(struct drm_info_node), 111 - _DRM_DRIVER); 109 + kfree(tmp); 112 110 ret = -1; 113 111 goto fail; 114 112 } ··· 194 196 if (tmp->info_ent == &files[i]) { 195 197 debugfs_remove(tmp->dent); 196 198 list_del(pos); 197 - drm_free(tmp, sizeof(struct drm_info_node), 198 - _DRM_DRIVER); 199 + kfree(tmp); 199 200 } 200 201 } 201 202 }
+7 -24
drivers/gpu/drm/drm_dma.c
··· 47 47 { 48 48 int i; 49 49 50 - dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER); 50 + dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL); 51 51 if (!dev->dma) 52 52 return -ENOMEM; 53 53 ··· 88 88 drm_pci_free(dev, dma->bufs[i].seglist[j]); 89 89 } 90 90 } 91 - drm_free(dma->bufs[i].seglist, 92 - dma->bufs[i].seg_count 93 - * sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS); 91 + kfree(dma->bufs[i].seglist); 94 92 } 95 93 if (dma->bufs[i].buf_count) { 96 94 for (j = 0; j < dma->bufs[i].buf_count; j++) { 97 - if (dma->bufs[i].buflist[j].dev_private) { 98 - drm_free(dma->bufs[i].buflist[j]. 99 - dev_private, 100 - dma->bufs[i].buflist[j]. 101 - dev_priv_size, DRM_MEM_BUFS); 102 - } 95 + kfree(dma->bufs[i].buflist[j].dev_private); 103 96 } 104 - drm_free(dma->bufs[i].buflist, 105 - dma->bufs[i].buf_count * 106 - sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS); 97 + kfree(dma->bufs[i].buflist); 107 98 } 108 99 } 109 100 110 - if (dma->buflist) { 111 - drm_free(dma->buflist, 112 - dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS); 113 - } 114 - 115 - if (dma->pagelist) { 116 - drm_free(dma->pagelist, 117 - dma->page_count * sizeof(*dma->pagelist), 118 - DRM_MEM_PAGES); 119 - } 120 - drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER); 101 + kfree(dma->buflist); 102 + kfree(dma->pagelist); 103 + kfree(dev->dma); 121 104 dev->dma = NULL; 122 105 } 123 106
+11 -14
drivers/gpu/drm/drm_drawable.c
··· 85 85 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 86 86 return -EINVAL; 87 87 } 88 - drm_free(info->rects, info->num_rects * sizeof(struct drm_clip_rect), 89 - DRM_MEM_BUFS); 90 - drm_free(info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); 88 + kfree(info->rects); 89 + kfree(info); 91 90 92 91 idr_remove(&dev->drw_idr, draw->handle); 93 92 ··· 105 106 106 107 info = idr_find(&dev->drw_idr, update->handle); 107 108 if (!info) { 108 - info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); 109 + info = kzalloc(sizeof(*info), GFP_KERNEL); 109 110 if (!info) 110 111 return -ENOMEM; 111 112 if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) { 112 113 DRM_ERROR("No such drawable %d\n", update->handle); 113 - drm_free(info, sizeof(*info), DRM_MEM_BUFS); 114 + kfree(info); 114 115 return -EINVAL; 115 116 } 116 117 } ··· 120 121 if (update->num == 0) 121 122 rects = NULL; 122 123 else if (update->num != info->num_rects) { 123 - rects = drm_alloc(update->num * sizeof(struct drm_clip_rect), 124 - DRM_MEM_BUFS); 124 + rects = kmalloc(update->num * 125 + sizeof(struct drm_clip_rect), 126 + GFP_KERNEL); 125 127 } else 126 128 rects = info->rects; 127 129 ··· 145 145 spin_lock_irqsave(&dev->drw_lock, irqflags); 146 146 147 147 if (rects != info->rects) { 148 - drm_free(info->rects, info->num_rects * 149 - sizeof(struct drm_clip_rect), DRM_MEM_BUFS); 148 + kfree(info->rects); 150 149 } 151 150 152 151 info->rects = rects; ··· 165 166 166 167 error: 167 168 if (rects != info->rects) 168 - drm_free(rects, update->num * sizeof(struct drm_clip_rect), 169 - DRM_MEM_BUFS); 169 + kfree(rects); 170 170 171 171 return err; 172 172 } ··· 184 186 struct drm_drawable_info *info = p; 185 187 186 188 if (info) { 187 - drm_free(info->rects, info->num_rects * 188 - sizeof(struct drm_clip_rect), DRM_MEM_BUFS); 189 - drm_free(info, sizeof(*info), DRM_MEM_BUFS); 189 + kfree(info->rects); 190 + kfree(info); 190 191 } 191 192 192 193 return 0;
+5 -13
drivers/gpu/drm/drm_drv.c
··· 189 189 if (entry->bound) 190 190 drm_unbind_agp(entry->memory); 191 191 drm_free_agp(entry->memory, entry->pages); 192 - drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 192 + kfree(entry); 193 193 } 194 194 INIT_LIST_HEAD(&dev->agp->memory); 195 195 ··· 208 208 /* Clear vma list (only built for debugging) */ 209 209 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { 210 210 list_del(&vma->head); 211 - drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); 211 + kfree(vma); 212 212 } 213 213 214 214 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { 215 215 for (i = 0; i < dev->queue_count; i++) { 216 - if (dev->queuelist[i]) { 217 - drm_free(dev->queuelist[i], 218 - sizeof(*dev->queuelist[0]), 219 - DRM_MEM_QUEUES); 220 - dev->queuelist[i] = NULL; 221 - } 216 + kfree(dev->queuelist[i]); 217 + dev->queuelist[i] = NULL; 222 218 } 223 - drm_free(dev->queuelist, 224 - dev->queue_slots * sizeof(*dev->queuelist), 225 - DRM_MEM_QUEUES); 219 + kfree(dev->queuelist); 226 220 dev->queuelist = NULL; 227 221 } 228 222 dev->queue_count = 0; ··· 337 343 ret = -1; 338 344 goto err_p3; 339 345 } 340 - 341 - drm_mem_init(); 342 346 343 347 DRM_INFO("Initialized %s %d.%d.%d %s\n", 344 348 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+4 -4
drivers/gpu/drm/drm_fops.c
··· 240 240 241 241 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); 242 242 243 - priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES); 243 + priv = kmalloc(sizeof(*priv), GFP_KERNEL); 244 244 if (!priv) 245 245 return -ENOMEM; 246 246 ··· 328 328 329 329 return 0; 330 330 out_free: 331 - drm_free(priv, sizeof(*priv), DRM_MEM_FILES); 331 + kfree(priv); 332 332 filp->private_data = NULL; 333 333 return ret; 334 334 } ··· 471 471 drm_ctxbitmap_free(dev, pos->handle); 472 472 473 473 list_del(&pos->head); 474 - drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); 474 + kfree(pos); 475 475 --dev->ctx_count; 476 476 } 477 477 } ··· 516 516 517 517 if (dev->driver->postclose) 518 518 dev->driver->postclose(dev, file_priv); 519 - drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES); 519 + kfree(file_priv); 520 520 521 521 /* ======================================================== 522 522 * End inline drm_release
+4 -4
drivers/gpu/drm/drm_gem.c
··· 89 89 atomic_set(&dev->gtt_count, 0); 90 90 atomic_set(&dev->gtt_memory, 0); 91 91 92 - mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM); 92 + mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); 93 93 if (!mm) { 94 94 DRM_ERROR("out of memory\n"); 95 95 return -ENOMEM; ··· 98 98 dev->mm_private = mm; 99 99 100 100 if (drm_ht_create(&mm->offset_hash, 19)) { 101 - drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); 101 + kfree(mm); 102 102 return -ENOMEM; 103 103 } 104 104 105 105 if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, 106 106 DRM_FILE_PAGE_OFFSET_SIZE)) { 107 107 drm_ht_remove(&mm->offset_hash); 108 - drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); 108 + kfree(mm); 109 109 return -ENOMEM; 110 110 } 111 111 ··· 119 119 120 120 drm_mm_takedown(&mm->offset_manager); 121 121 drm_ht_remove(&mm->offset_hash); 122 - drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); 122 + kfree(mm); 123 123 dev->mm_private = NULL; 124 124 } 125 125
+2 -4
drivers/gpu/drm/drm_hashtab.c
··· 46 46 ht->table = NULL; 47 47 ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE); 48 48 if (!ht->use_vmalloc) { 49 - ht->table = drm_calloc(ht->size, sizeof(*ht->table), 50 - DRM_MEM_HASHTAB); 49 + ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL); 51 50 } 52 51 if (!ht->table) { 53 52 ht->use_vmalloc = 1; ··· 199 200 if (ht->use_vmalloc) 200 201 vfree(ht->table); 201 202 else 202 - drm_free(ht->table, ht->size * sizeof(*ht->table), 203 - DRM_MEM_HASHTAB); 203 + kfree(ht->table); 204 204 ht->table = NULL; 205 205 } 206 206 }
+6 -8
drivers/gpu/drm/drm_ioctl.c
··· 93 93 94 94 master->unique_len = u->unique_len; 95 95 master->unique_size = u->unique_len + 1; 96 - master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER); 96 + master->unique = kmalloc(master->unique_size, GFP_KERNEL); 97 97 if (!master->unique) 98 98 return -ENOMEM; 99 99 if (copy_from_user(master->unique, u->unique, master->unique_len)) ··· 101 101 102 102 master->unique[master->unique_len] = '\0'; 103 103 104 - dev->devname = 105 - drm_alloc(strlen(dev->driver->pci_driver.name) + 106 - strlen(master->unique) + 2, DRM_MEM_DRIVER); 104 + dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) + 105 + strlen(master->unique) + 2, GFP_KERNEL); 107 106 if (!dev->devname) 108 107 return -ENOMEM; 109 108 ··· 137 138 138 139 master->unique_len = 40; 139 140 master->unique_size = master->unique_len; 140 - master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER); 141 + master->unique = kmalloc(master->unique_size, GFP_KERNEL); 141 142 if (master->unique == NULL) 142 143 return -ENOMEM; 143 144 ··· 151 152 else 152 153 master->unique_len = len; 153 154 154 - dev->devname = 155 - drm_alloc(strlen(dev->driver->pci_driver.name) + master->unique_len + 156 - 2, DRM_MEM_DRIVER); 155 + dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) + 156 + master->unique_len + 2, GFP_KERNEL); 157 157 if (dev->devname == NULL) 158 158 return -ENOMEM; 159 159
+16 -28
drivers/gpu/drm/drm_irq.c
··· 104 104 105 105 vblank_disable_fn((unsigned long)dev); 106 106 107 - drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs, 108 - DRM_MEM_DRIVER); 109 - drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) * 110 - dev->num_crtcs, DRM_MEM_DRIVER); 111 - drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) * 112 - dev->num_crtcs, DRM_MEM_DRIVER); 113 - drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) * 114 - dev->num_crtcs, DRM_MEM_DRIVER); 115 - drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs, 116 - DRM_MEM_DRIVER); 117 - drm_free(dev->last_vblank_wait, 118 - sizeof(*dev->last_vblank_wait) * dev->num_crtcs, 119 - DRM_MEM_DRIVER); 120 - drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) * 121 - dev->num_crtcs, DRM_MEM_DRIVER); 107 + kfree(dev->vbl_queue); 108 + kfree(dev->_vblank_count); 109 + kfree(dev->vblank_refcount); 110 + kfree(dev->vblank_enabled); 111 + kfree(dev->last_vblank); 112 + kfree(dev->last_vblank_wait); 113 + kfree(dev->vblank_inmodeset); 122 114 123 115 dev->num_crtcs = 0; 124 116 } ··· 124 132 spin_lock_init(&dev->vbl_lock); 125 133 dev->num_crtcs = num_crtcs; 126 134 127 - dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs, 128 - DRM_MEM_DRIVER); 135 + dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs, 136 + GFP_KERNEL); 129 137 if (!dev->vbl_queue) 130 138 goto err; 131 139 132 - dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs, 133 - DRM_MEM_DRIVER); 140 + dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL); 134 141 if (!dev->_vblank_count) 135 142 goto err; 136 143 137 - dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs, 138 - DRM_MEM_DRIVER); 144 + dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs, 145 + GFP_KERNEL); 139 146 if (!dev->vblank_refcount) 140 147 goto err; 141 148 142 - dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int), 143 - DRM_MEM_DRIVER); 149 + dev->vblank_enabled = 
kcalloc(num_crtcs, sizeof(int), GFP_KERNEL); 144 150 if (!dev->vblank_enabled) 145 151 goto err; 146 152 147 - dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER); 153 + dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL); 148 154 if (!dev->last_vblank) 149 155 goto err; 150 156 151 - dev->last_vblank_wait = drm_calloc(num_crtcs, sizeof(u32), 152 - DRM_MEM_DRIVER); 157 + dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL); 153 158 if (!dev->last_vblank_wait) 154 159 goto err; 155 160 156 - dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int), 157 - DRM_MEM_DRIVER); 161 + dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL); 158 162 if (!dev->vblank_inmodeset) 159 163 goto err; 160 164
-25
drivers/gpu/drm/drm_memory.c
··· 36 36 #include <linux/highmem.h> 37 37 #include "drmP.h" 38 38 39 - #ifdef DEBUG_MEMORY 40 - #include "drm_memory_debug.h" 41 - #else 42 - 43 - /** No-op. */ 44 - void drm_mem_init(void) 45 - { 46 - } 47 - 48 39 /** 49 40 * Called when "/proc/dri/%dev%/mem" is read. 50 41 * ··· 53 62 int len, int *eof, void *data) 54 63 { 55 64 return 0; 56 - } 57 - 58 - /** Wrapper around kmalloc() and kfree() */ 59 - void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area) 60 - { 61 - void *pt; 62 - 63 - if (!(pt = kmalloc(size, GFP_KERNEL))) 64 - return NULL; 65 - if (oldpt && oldsize) { 66 - memcpy(pt, oldpt, oldsize); 67 - kfree(oldpt); 68 - } 69 - return pt; 70 65 } 71 66 72 67 #if __OS_HAS_AGP ··· 133 156 } 134 157 135 158 #endif /* agp */ 136 - 137 - #endif /* debug_memory */ 138 159 139 160 void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev) 140 161 {
+1 -52
drivers/gpu/drm/drm_pci.c
··· 55 55 unsigned long addr; 56 56 size_t sz; 57 57 #endif 58 - #ifdef DRM_DEBUG_MEMORY 59 - int area = DRM_MEM_DMA; 60 - 61 - spin_lock(&drm_mem_lock); 62 - if ((drm_ram_used >> PAGE_SHIFT) 63 - > (DRM_RAM_PERCENT * drm_ram_available) / 100) { 64 - spin_unlock(&drm_mem_lock); 65 - return 0; 66 - } 67 - spin_unlock(&drm_mem_lock); 68 - #endif 69 58 70 59 /* pci_alloc_consistent only guarantees alignment to the smallest 71 60 * PAGE_SIZE order which is greater than or equal to the requested size. ··· 75 86 dmah->size = size; 76 87 dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP); 77 88 78 - #ifdef DRM_DEBUG_MEMORY 79 - if (dmah->vaddr == NULL) { 80 - spin_lock(&drm_mem_lock); 81 - ++drm_mem_stats[area].fail_count; 82 - spin_unlock(&drm_mem_lock); 83 - kfree(dmah); 84 - return NULL; 85 - } 86 - 87 - spin_lock(&drm_mem_lock); 88 - ++drm_mem_stats[area].succeed_count; 89 - drm_mem_stats[area].bytes_allocated += size; 90 - drm_ram_used += size; 91 - spin_unlock(&drm_mem_lock); 92 - #else 93 89 if (dmah->vaddr == NULL) { 94 90 kfree(dmah); 95 91 return NULL; 96 92 } 97 - #endif 98 93 99 94 memset(dmah->vaddr, 0, size); 100 95 ··· 105 132 unsigned long addr; 106 133 size_t sz; 107 134 #endif 108 - #ifdef DRM_DEBUG_MEMORY 109 - int area = DRM_MEM_DMA; 110 - int alloc_count; 111 - int free_count; 112 - #endif 113 135 114 - if (!dmah->vaddr) { 115 - #ifdef DRM_DEBUG_MEMORY 116 - DRM_MEM_ERROR(area, "Attempt to free address 0\n"); 117 - #endif 118 - } else { 136 + if (dmah->vaddr) { 119 137 /* XXX - Is virt_to_page() legal for consistent mem? 
*/ 120 138 /* Unreserve */ 121 139 for (addr = (unsigned long)dmah->vaddr, sz = dmah->size; ··· 116 152 dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, 117 153 dmah->busaddr); 118 154 } 119 - 120 - #ifdef DRM_DEBUG_MEMORY 121 - spin_lock(&drm_mem_lock); 122 - free_count = ++drm_mem_stats[area].free_count; 123 - alloc_count = drm_mem_stats[area].succeed_count; 124 - drm_mem_stats[area].bytes_freed += size; 125 - drm_ram_used -= size; 126 - spin_unlock(&drm_mem_lock); 127 - if (free_count > alloc_count) { 128 - DRM_MEM_ERROR(area, 129 - "Excess frees: %d frees, %d allocs\n", 130 - free_count, alloc_count); 131 - } 132 - #endif 133 - 134 155 } 135 156 136 157 /**
+3 -5
drivers/gpu/drm/drm_proc.c
··· 105 105 (dev->driver->driver_features & features) != features) 106 106 continue; 107 107 108 - tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER); 108 + tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 109 109 ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root); 110 110 if (!ent) { 111 111 DRM_ERROR("Cannot create /proc/dri/%s/%s\n", 112 112 name, files[i].name); 113 - drm_free(tmp, sizeof(struct drm_info_node), 114 - _DRM_DRIVER); 113 + kfree(tmp); 115 114 ret = -1; 116 115 goto fail; 117 116 } ··· 191 192 remove_proc_entry(files[i].name, 192 193 minor->proc_root); 193 194 list_del(pos); 194 - drm_free(tmp, sizeof(struct drm_info_node), 195 - _DRM_DRIVER); 195 + kfree(tmp); 196 196 } 197 197 } 198 198 }
+12 -21
drivers/gpu/drm/drm_scatter.c
··· 58 58 59 59 vfree(entry->virtual); 60 60 61 - drm_free(entry->busaddr, 62 - entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES); 63 - drm_free(entry->pagelist, 64 - entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES); 65 - drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); 61 + kfree(entry->busaddr); 62 + kfree(entry->pagelist); 63 + kfree(entry); 66 64 } 67 65 68 66 #ifdef _LP64 ··· 82 84 if (dev->sg) 83 85 return -EINVAL; 84 86 85 - entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS); 87 + entry = kmalloc(sizeof(*entry), GFP_KERNEL); 86 88 if (!entry) 87 89 return -ENOMEM; 88 90 ··· 91 93 DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages); 92 94 93 95 entry->pages = pages; 94 - entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist), 95 - DRM_MEM_PAGES); 96 + entry->pagelist = kmalloc(pages * sizeof(*entry->pagelist), GFP_KERNEL); 96 97 if (!entry->pagelist) { 97 - drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); 98 + kfree(entry); 98 99 return -ENOMEM; 99 100 } 100 101 101 102 memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist)); 102 103 103 - entry->busaddr = drm_alloc(pages * sizeof(*entry->busaddr), 104 - DRM_MEM_PAGES); 104 + entry->busaddr = kmalloc(pages * sizeof(*entry->busaddr), GFP_KERNEL); 105 105 if (!entry->busaddr) { 106 - drm_free(entry->pagelist, 107 - entry->pages * sizeof(*entry->pagelist), 108 - DRM_MEM_PAGES); 109 - drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); 106 + kfree(entry->pagelist); 107 + kfree(entry); 110 108 return -ENOMEM; 111 109 } 112 110 memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr)); 113 111 114 112 entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT); 115 113 if (!entry->virtual) { 116 - drm_free(entry->busaddr, 117 - entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES); 118 - drm_free(entry->pagelist, 119 - entry->pages * sizeof(*entry->pagelist), 120 - DRM_MEM_PAGES); 121 - drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); 114 + kfree(entry->busaddr); 115 + 
kfree(entry->pagelist); 116 + kfree(entry); 122 117 return -ENOMEM; 123 118 } 124 119
+14 -15
drivers/gpu/drm/drm_sman.c
··· 48 48 { 49 49 drm_ht_remove(&sman->user_hash_tab); 50 50 drm_ht_remove(&sman->owner_hash_tab); 51 - if (sman->mm) 52 - drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm), 53 - DRM_MEM_MM); 51 + kfree(sman->mm); 54 52 } 55 53 56 54 EXPORT_SYMBOL(drm_sman_takedown); ··· 59 61 { 60 62 int ret = 0; 61 63 62 - sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm), 63 - DRM_MEM_MM); 64 + sman->mm = (struct drm_sman_mm *) kcalloc(num_managers, 65 + sizeof(*sman->mm), 66 + GFP_KERNEL); 64 67 if (!sman->mm) { 65 68 ret = -ENOMEM; 66 69 goto out; ··· 77 78 78 79 drm_ht_remove(&sman->owner_hash_tab); 79 80 out1: 80 - drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM); 81 + kfree(sman->mm); 81 82 out: 82 83 return ret; 83 84 } ··· 109 110 { 110 111 struct drm_mm *mm = (struct drm_mm *) private; 111 112 drm_mm_takedown(mm); 112 - drm_free(mm, sizeof(*mm), DRM_MEM_MM); 113 + kfree(mm); 113 114 } 114 115 115 116 static unsigned long drm_sman_mm_offset(void *private, void *ref) ··· 129 130 BUG_ON(manager >= sman->num_managers); 130 131 131 132 sman_mm = &sman->mm[manager]; 132 - mm = drm_calloc(1, sizeof(*mm), DRM_MEM_MM); 133 + mm = kzalloc(sizeof(*mm), GFP_KERNEL); 133 134 if (!mm) { 134 135 return -ENOMEM; 135 136 } ··· 137 138 ret = drm_mm_init(mm, start, size); 138 139 139 140 if (ret) { 140 - drm_free(mm, sizeof(*mm), DRM_MEM_MM); 141 + kfree(mm); 141 142 return ret; 142 143 } 143 144 ··· 175 176 owner_hash); 176 177 } 177 178 178 - owner_item = drm_calloc(1, sizeof(*owner_item), DRM_MEM_MM); 179 + owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL); 179 180 if (!owner_item) 180 181 goto out; 181 182 ··· 188 189 return owner_item; 189 190 190 191 out1: 191 - drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM); 192 + kfree(owner_item); 192 193 out: 193 194 return NULL; 194 195 } ··· 211 212 return NULL; 212 213 } 213 214 214 - memblock = drm_calloc(1, sizeof(*memblock), DRM_MEM_MM); 215 + memblock = 
kzalloc(sizeof(*memblock), GFP_KERNEL); 215 216 216 217 if (!memblock) 217 218 goto out; ··· 236 237 out2: 237 238 drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash); 238 239 out1: 239 - drm_free(memblock, sizeof(*memblock), DRM_MEM_MM); 240 + kfree(memblock); 240 241 out: 241 242 sman_mm->free(sman_mm->private, tmp); 242 243 ··· 252 253 list_del(&item->owner_list); 253 254 drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash); 254 255 item->mm->free(item->mm->private, item->mm_info); 255 - drm_free(item, sizeof(*item), DRM_MEM_MM); 256 + kfree(item); 256 257 } 257 258 258 259 int drm_sman_free_key(struct drm_sman *sman, unsigned int key) ··· 276 277 { 277 278 list_del(&owner_item->sman_list); 278 279 drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash); 279 - drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM); 280 + kfree(owner_item); 280 281 } 281 282 282 283 int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
+9 -10
drivers/gpu/drm/drm_stub.c
··· 107 107 { 108 108 struct drm_master *master; 109 109 110 - master = drm_calloc(1, sizeof(*master), DRM_MEM_DRIVER); 110 + master = kzalloc(sizeof(*master), GFP_KERNEL); 111 111 if (!master) 112 112 return NULL; 113 113 ··· 149 149 } 150 150 151 151 if (master->unique) { 152 - drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER); 152 + kfree(master->unique); 153 153 master->unique = NULL; 154 154 master->unique_len = 0; 155 155 } ··· 157 157 list_for_each_entry_safe(pt, next, &master->magicfree, head) { 158 158 list_del(&pt->head); 159 159 drm_ht_remove_item(&master->magiclist, &pt->hash_item); 160 - drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); 160 + kfree(pt); 161 161 } 162 162 163 163 drm_ht_remove(&master->magiclist); 164 164 165 - drm_free(master, sizeof(*master), DRM_MEM_DRIVER); 165 + kfree(master); 166 166 } 167 167 168 168 void drm_master_put(struct drm_master **master) ··· 390 390 391 391 DRM_DEBUG("\n"); 392 392 393 - dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB); 393 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 394 394 if (!dev) 395 395 return -ENOMEM; 396 396 ··· 443 443 err_g2: 444 444 pci_disable_device(pdev); 445 445 err_g1: 446 - drm_free(dev, sizeof(*dev), DRM_MEM_STUB); 446 + kfree(dev); 447 447 return ret; 448 448 } 449 449 EXPORT_SYMBOL(drm_get_dev); ··· 516 516 dev->driver->unload(dev); 517 517 518 518 if (drm_core_has_AGP(dev) && dev->agp) { 519 - drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); 519 + kfree(dev->agp); 520 520 dev->agp = NULL; 521 521 } 522 522 ··· 535 535 drm_put_minor(&dev->primary); 536 536 537 537 if (dev->devname) { 538 - drm_free(dev->devname, strlen(dev->devname) + 1, 539 - DRM_MEM_DRIVER); 538 + kfree(dev->devname); 540 539 dev->devname = NULL; 541 540 } 542 - drm_free(dev, sizeof(*dev), DRM_MEM_STUB); 541 + kfree(dev); 543 542 } 544 543 EXPORT_SYMBOL(drm_put_dev);
+4 -4
drivers/gpu/drm/drm_vm.c
··· 227 227 found_maps++; 228 228 if (pt->vma == vma) { 229 229 list_del(&pt->head); 230 - drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); 230 + kfree(pt); 231 231 } 232 232 } 233 233 ··· 273 273 DRM_ERROR("tried to rmmap GEM object\n"); 274 274 break; 275 275 } 276 - drm_free(map, sizeof(*map), DRM_MEM_MAPS); 276 + kfree(map); 277 277 } 278 278 } 279 279 mutex_unlock(&dev->struct_mutex); ··· 414 414 vma->vm_start, vma->vm_end - vma->vm_start); 415 415 atomic_inc(&dev->vma_count); 416 416 417 - vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); 417 + vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL); 418 418 if (vma_entry) { 419 419 vma_entry->vma = vma; 420 420 vma_entry->pid = current->pid; ··· 454 454 list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { 455 455 if (pt->vma == vma) { 456 456 list_del(&pt->head); 457 - drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); 457 + kfree(pt); 458 458 break; 459 459 } 460 460 }
+2 -4
drivers/gpu/drm/i810/i810_dma.c
··· 227 227 /* Need to rewrite hardware status page */ 228 228 I810_WRITE(0x02080, 0x1ffff000); 229 229 } 230 - drm_free(dev->dev_private, sizeof(drm_i810_private_t), 231 - DRM_MEM_DRIVER); 230 + kfree(dev->dev_private); 232 231 dev->dev_private = NULL; 233 232 234 233 for (i = 0; i < dma->buf_count; i++) { ··· 438 439 switch (init->func) { 439 440 case I810_INIT_DMA_1_4: 440 441 DRM_INFO("Using v1.4 init.\n"); 441 - dev_priv = drm_alloc(sizeof(drm_i810_private_t), 442 - DRM_MEM_DRIVER); 442 + dev_priv = kmalloc(sizeof(drm_i810_private_t), GFP_KERNEL); 443 443 if (dev_priv == NULL) 444 444 return -ENOMEM; 445 445 retcode = i810_dma_initialize(dev, dev_priv, init);
+2 -4
drivers/gpu/drm/i830/i830_dma.c
··· 232 232 I830_WRITE(0x02080, 0x1ffff000); 233 233 } 234 234 235 - drm_free(dev->dev_private, sizeof(drm_i830_private_t), 236 - DRM_MEM_DRIVER); 235 + kfree(dev->dev_private); 237 236 dev->dev_private = NULL; 238 237 239 238 for (i = 0; i < dma->buf_count; i++) { ··· 458 459 459 460 switch (init->func) { 460 461 case I830_INIT_DMA: 461 - dev_priv = drm_alloc(sizeof(drm_i830_private_t), 462 - DRM_MEM_DRIVER); 462 + dev_priv = kmalloc(sizeof(drm_i830_private_t), GFP_KERNEL); 463 463 if (dev_priv == NULL) 464 464 return -ENOMEM; 465 465 retcode = i830_dma_initialize(dev, dev_priv, init);
+16 -24
drivers/gpu/drm/i915/i915_dma.c
··· 643 643 return -EINVAL; 644 644 645 645 if (batch->num_cliprects) { 646 - cliprects = drm_calloc(batch->num_cliprects, 647 - sizeof(struct drm_clip_rect), 648 - DRM_MEM_DRIVER); 646 + cliprects = kcalloc(batch->num_cliprects, 647 + sizeof(struct drm_clip_rect), 648 + GFP_KERNEL); 649 649 if (cliprects == NULL) 650 650 return -ENOMEM; 651 651 ··· 664 664 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 665 665 666 666 fail_free: 667 - drm_free(cliprects, 668 - batch->num_cliprects * sizeof(struct drm_clip_rect), 669 - DRM_MEM_DRIVER); 667 + kfree(cliprects); 670 668 671 669 return ret; 672 670 } ··· 690 692 if (cmdbuf->num_cliprects < 0) 691 693 return -EINVAL; 692 694 693 - batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER); 695 + batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL); 694 696 if (batch_data == NULL) 695 697 return -ENOMEM; 696 698 ··· 699 701 goto fail_batch_free; 700 702 701 703 if (cmdbuf->num_cliprects) { 702 - cliprects = drm_calloc(cmdbuf->num_cliprects, 703 - sizeof(struct drm_clip_rect), 704 - DRM_MEM_DRIVER); 704 + cliprects = kcalloc(cmdbuf->num_cliprects, 705 + sizeof(struct drm_clip_rect), GFP_KERNEL); 705 706 if (cliprects == NULL) 706 707 goto fail_batch_free; 707 708 ··· 723 726 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 724 727 725 728 fail_clip_free: 726 - drm_free(cliprects, 727 - cmdbuf->num_cliprects * sizeof(struct drm_clip_rect), 728 - DRM_MEM_DRIVER); 729 + kfree(cliprects); 729 730 fail_batch_free: 730 - drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER); 731 + kfree(batch_data); 731 732 732 733 return ret; 733 734 } ··· 1062 1067 { 1063 1068 struct drm_i915_master_private *master_priv; 1064 1069 1065 - master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); 1070 + master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); 1066 1071 if (!master_priv) 1067 1072 return -ENOMEM; 1068 1073 ··· 1077 1082 if (!master_priv) 1078 1083 return; 1079 1084 1080 - drm_free(master_priv, sizeof(*master_priv), 
DRM_MEM_DRIVER); 1085 + kfree(master_priv); 1081 1086 1082 1087 master->driver_priv = NULL; 1083 1088 } ··· 1106 1111 dev->types[8] = _DRM_STAT_SECONDARY; 1107 1112 dev->types[9] = _DRM_STAT_DMA; 1108 1113 1109 - dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER); 1114 + dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); 1110 1115 if (dev_priv == NULL) 1111 1116 return -ENOMEM; 1112 - 1113 - memset(dev_priv, 0, sizeof(drm_i915_private_t)); 1114 1117 1115 1118 dev->dev_private = (void *)dev_priv; 1116 1119 dev_priv->dev = dev; ··· 1214 1221 out_rmmap: 1215 1222 iounmap(dev_priv->regs); 1216 1223 free_priv: 1217 - drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER); 1224 + kfree(dev_priv); 1218 1225 return ret; 1219 1226 } 1220 1227 ··· 1254 1261 i915_gem_lastclose(dev); 1255 1262 } 1256 1263 1257 - drm_free(dev->dev_private, sizeof(drm_i915_private_t), 1258 - DRM_MEM_DRIVER); 1264 + kfree(dev->dev_private); 1259 1265 1260 1266 return 0; 1261 1267 } ··· 1265 1273 1266 1274 DRM_DEBUG_DRIVER(I915_DRV, "\n"); 1267 1275 i915_file_priv = (struct drm_i915_file_private *) 1268 - drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES); 1276 + kmalloc(sizeof(*i915_file_priv), GFP_KERNEL); 1269 1277 1270 1278 if (!i915_file_priv) 1271 1279 return -ENOMEM; ··· 1318 1326 { 1319 1327 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; 1320 1328 1321 - drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES); 1329 + kfree(i915_file_priv); 1322 1330 } 1323 1331 1324 1332 struct drm_ioctl_desc i915_ioctls[] = {
+16 -19
drivers/gpu/drm/i915/i915_gem.c
··· 1207 1207 1208 1208 /* Set the object up for mmap'ing */ 1209 1209 list = &obj->map_list; 1210 - list->map = drm_calloc(1, sizeof(struct drm_map_list), 1211 - DRM_MEM_DRIVER); 1210 + list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); 1212 1211 if (!list->map) 1213 1212 return -ENOMEM; 1214 1213 ··· 1247 1248 out_free_mm: 1248 1249 drm_mm_put_block(list->file_offset_node); 1249 1250 out_free_list: 1250 - drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER); 1251 + kfree(list->map); 1251 1252 1252 1253 return ret; 1253 1254 } ··· 1269 1270 } 1270 1271 1271 1272 if (list->map) { 1272 - drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER); 1273 + kfree(list->map); 1273 1274 list->map = NULL; 1274 1275 } 1275 1276 ··· 1492 1493 if (file_priv != NULL) 1493 1494 i915_file_priv = file_priv->driver_priv; 1494 1495 1495 - request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); 1496 + request = kzalloc(sizeof(*request), GFP_KERNEL); 1496 1497 if (request == NULL) 1497 1498 return 0; 1498 1499 ··· 1674 1675 1675 1676 list_del(&request->list); 1676 1677 list_del(&request->client_list); 1677 - drm_free(request, sizeof(*request), DRM_MEM_DRIVER); 1678 + kfree(request); 1678 1679 } else 1679 1680 break; 1680 1681 } ··· 2832 2833 /* Free the page_cpu_valid mappings which are now stale, whether 2833 2834 * or not we've got I915_GEM_DOMAIN_CPU. 
2834 2835 */ 2835 - drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, 2836 - DRM_MEM_DRIVER); 2836 + kfree(obj_priv->page_cpu_valid); 2837 2837 obj_priv->page_cpu_valid = NULL; 2838 2838 } 2839 2839 ··· 2874 2876 * newly adding I915_GEM_DOMAIN_CPU 2875 2877 */ 2876 2878 if (obj_priv->page_cpu_valid == NULL) { 2877 - obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, 2878 - DRM_MEM_DRIVER); 2879 + obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE, 2880 + GFP_KERNEL); 2879 2881 if (obj_priv->page_cpu_valid == NULL) 2880 2882 return -ENOMEM; 2881 2883 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) ··· 3298 3300 } 3299 3301 3300 3302 if (args->num_cliprects != 0) { 3301 - cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects), 3302 - DRM_MEM_DRIVER); 3303 + cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), 3304 + GFP_KERNEL); 3303 3305 if (cliprects == NULL) 3304 3306 goto pre_mutex_err; 3305 3307 ··· 3552 3554 pre_mutex_err: 3553 3555 drm_free_large(object_list); 3554 3556 drm_free_large(exec_list); 3555 - drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects, 3556 - DRM_MEM_DRIVER); 3557 + kfree(cliprects); 3557 3558 3558 3559 return ret; 3559 3560 } ··· 3769 3772 { 3770 3773 struct drm_i915_gem_object *obj_priv; 3771 3774 3772 - obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER); 3775 + obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL); 3773 3776 if (obj_priv == NULL) 3774 3777 return -ENOMEM; 3775 3778 ··· 3807 3810 3808 3811 i915_gem_free_mmap_offset(obj); 3809 3812 3810 - drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); 3813 + kfree(obj_priv->page_cpu_valid); 3811 3814 kfree(obj_priv->bit_17); 3812 - drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 3815 + kfree(obj->driver_private); 3813 3816 } 3814 3817 3815 3818 /** Unbinds all objects that are on the given buffer list. 
*/ ··· 4263 4266 if (dev_priv->mm.phys_objs[id - 1] || !size) 4264 4267 return 0; 4265 4268 4266 - phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER); 4269 + phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL); 4267 4270 if (!phys_obj) 4268 4271 return -ENOMEM; 4269 4272 ··· 4282 4285 4283 4286 return 0; 4284 4287 kfree_obj: 4285 - drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER); 4288 + kfree(phys_obj); 4286 4289 return ret; 4287 4290 } 4288 4291
+12 -12
drivers/gpu/drm/i915/i915_mem.c
··· 94 94 { 95 95 /* Maybe cut off the start of an existing block */ 96 96 if (start > p->start) { 97 - struct mem_block *newblock = 98 - drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS); 97 + struct mem_block *newblock = kmalloc(sizeof(*newblock), 98 + GFP_KERNEL); 99 99 if (!newblock) 100 100 goto out; 101 101 newblock->start = start; ··· 111 111 112 112 /* Maybe cut off the end of an existing block */ 113 113 if (size < p->size) { 114 - struct mem_block *newblock = 115 - drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS); 114 + struct mem_block *newblock = kmalloc(sizeof(*newblock), 115 + GFP_KERNEL); 116 116 if (!newblock) 117 117 goto out; 118 118 newblock->start = start + size; ··· 169 169 p->size += q->size; 170 170 p->next = q->next; 171 171 p->next->prev = p; 172 - drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); 172 + kfree(q); 173 173 } 174 174 175 175 if (p->prev->file_priv == NULL) { ··· 177 177 q->size += p->size; 178 178 q->next = p->next; 179 179 q->next->prev = q; 180 - drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS); 180 + kfree(p); 181 181 } 182 182 } 183 183 ··· 185 185 */ 186 186 static int init_heap(struct mem_block **heap, int start, int size) 187 187 { 188 - struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS); 188 + struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); 189 189 190 190 if (!blocks) 191 191 return -ENOMEM; 192 192 193 - *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS); 193 + *heap = kmalloc(sizeof(**heap), GFP_KERNEL); 194 194 if (!*heap) { 195 - drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS); 195 + kfree(blocks); 196 196 return -ENOMEM; 197 197 } 198 198 ··· 233 233 p->size += q->size; 234 234 p->next = q->next; 235 235 p->next->prev = p; 236 - drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); 236 + kfree(q); 237 237 } 238 238 } 239 239 } ··· 250 250 for (p = (*heap)->next; p != *heap;) { 251 251 struct mem_block *q = p; 252 252 p = p->next; 253 - drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); 253 + kfree(q); 254 
254 } 255 255 256 - drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS); 256 + kfree(*heap); 257 257 *heap = NULL; 258 258 } 259 259
+2 -4
drivers/gpu/drm/i915/intel_bios.c
··· 124 124 entry = &lvds_lfp_data->data[lvds_options->panel_type]; 125 125 dvo_timing = &entry->dvo_timing; 126 126 127 - panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode), 128 - DRM_MEM_DRIVER); 127 + panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); 129 128 130 129 fill_detail_timing_data(panel_fixed_mode, dvo_timing); 131 130 ··· 155 156 if (!dvo_timing) 156 157 return; 157 158 158 - panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode), 159 - DRM_MEM_DRIVER); 159 + panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); 160 160 161 161 if (!panel_fixed_mode) 162 162 return;
+5 -6
drivers/gpu/drm/i915/intel_tv.c
··· 1561 1561 1562 1562 drm_sysfs_connector_remove(connector); 1563 1563 drm_connector_cleanup(connector); 1564 - drm_free(intel_output, sizeof(struct intel_output) + sizeof(struct intel_tv_priv), 1565 - DRM_MEM_DRIVER); 1564 + kfree(intel_output); 1566 1565 } 1567 1566 1568 1567 ··· 1694 1695 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) 1695 1696 return; 1696 1697 1697 - intel_output = drm_calloc(1, sizeof(struct intel_output) + 1698 - sizeof(struct intel_tv_priv), DRM_MEM_DRIVER); 1698 + intel_output = kzalloc(sizeof(struct intel_output) + 1699 + sizeof(struct intel_tv_priv), GFP_KERNEL); 1699 1700 if (!intel_output) { 1700 1701 return; 1701 1702 } ··· 1729 1730 connector->doublescan_allowed = false; 1730 1731 1731 1732 /* Create TV properties then attach current values */ 1732 - tv_format_names = drm_alloc(sizeof(char *) * NUM_TV_MODES, 1733 - DRM_MEM_DRIVER); 1733 + tv_format_names = kmalloc(sizeof(char *) * NUM_TV_MODES, 1734 + GFP_KERNEL); 1734 1735 if (!tv_format_names) 1735 1736 goto out; 1736 1737 for (i = 0; i < NUM_TV_MODES; i++)
+5 -9
drivers/gpu/drm/mga/mga_dma.c
··· 254 254 int i; 255 255 DRM_DEBUG("count=%d\n", dma->buf_count); 256 256 257 - dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 257 + dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL); 258 258 if (dev_priv->head == NULL) 259 259 return -ENOMEM; 260 260 261 - memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t)); 262 261 SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); 263 262 264 263 for (i = 0; i < dma->buf_count; i++) { 265 264 buf = dma->buflist[i]; 266 265 buf_priv = buf->dev_private; 267 266 268 - entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 267 + entry = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL); 269 268 if (entry == NULL) 270 269 return -ENOMEM; 271 - 272 - memset(entry, 0, sizeof(drm_mga_freelist_t)); 273 270 274 271 entry->next = dev_priv->head->next; 275 272 entry->prev = dev_priv->head; ··· 298 301 entry = dev_priv->head; 299 302 while (entry) { 300 303 next = entry->next; 301 - drm_free(entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 304 + kfree(entry); 302 305 entry = next; 303 306 } 304 307 ··· 396 399 drm_mga_private_t *dev_priv; 397 400 int ret; 398 401 399 - dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 402 + dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL); 400 403 if (!dev_priv) 401 404 return -ENOMEM; 402 405 403 406 dev->dev_private = (void *)dev_priv; 404 - memset(dev_priv, 0, sizeof(drm_mga_private_t)); 405 407 406 408 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; 407 409 dev_priv->chipset = flags; ··· 1146 1150 */ 1147 1151 int mga_driver_unload(struct drm_device * dev) 1148 1152 { 1149 - drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 1153 + kfree(dev->dev_private); 1150 1154 dev->dev_private = NULL; 1151 1155 1152 1156 return 0;
+4 -8
drivers/gpu/drm/r128/r128_cce.c
··· 353 353 354 354 DRM_DEBUG("\n"); 355 355 356 - dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); 356 + dev_priv = kzalloc(sizeof(drm_r128_private_t), GFP_KERNEL); 357 357 if (dev_priv == NULL) 358 358 return -ENOMEM; 359 - 360 - memset(dev_priv, 0, sizeof(drm_r128_private_t)); 361 359 362 360 dev_priv->is_pci = init->is_pci; 363 361 ··· 617 619 ("failed to cleanup PCI GART!\n"); 618 620 } 619 621 620 - drm_free(dev->dev_private, sizeof(drm_r128_private_t), 621 - DRM_MEM_DRIVER); 622 + kfree(dev->dev_private); 622 623 dev->dev_private = NULL; 623 624 } 624 625 ··· 765 768 drm_r128_freelist_t *entry; 766 769 int i; 767 770 768 - dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); 771 + dev_priv->head = kzalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL); 769 772 if (dev_priv->head == NULL) 770 773 return -ENOMEM; 771 774 772 - memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t)); 773 775 dev_priv->head->age = R128_BUFFER_USED; 774 776 775 777 for (i = 0; i < dma->buf_count; i++) { 776 778 buf = dma->buflist[i]; 777 779 buf_priv = buf->dev_private; 778 780 779 - entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); 781 + entry = kmalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL); 780 782 if (!entry) 781 783 return -ENOMEM; 782 784
+42 -42
drivers/gpu/drm/r128/r128_state.c
··· 910 910 } 911 911 912 912 buffer_size = depth->n * sizeof(u32); 913 - buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); 913 + buffer = kmalloc(buffer_size, GFP_KERNEL); 914 914 if (buffer == NULL) 915 915 return -ENOMEM; 916 916 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { 917 - drm_free(buffer, buffer_size, DRM_MEM_BUFS); 917 + kfree(buffer); 918 918 return -EFAULT; 919 919 } 920 920 921 921 mask_size = depth->n * sizeof(u8); 922 922 if (depth->mask) { 923 - mask = drm_alloc(mask_size, DRM_MEM_BUFS); 923 + mask = kmalloc(mask_size, GFP_KERNEL); 924 924 if (mask == NULL) { 925 - drm_free(buffer, buffer_size, DRM_MEM_BUFS); 925 + kfree(buffer); 926 926 return -ENOMEM; 927 927 } 928 928 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { 929 - drm_free(buffer, buffer_size, DRM_MEM_BUFS); 930 - drm_free(mask, mask_size, DRM_MEM_BUFS); 929 + kfree(buffer); 930 + kfree(mask); 931 931 return -EFAULT; 932 932 } 933 933 ··· 954 954 } 955 955 } 956 956 957 - drm_free(mask, mask_size, DRM_MEM_BUFS); 957 + kfree(mask); 958 958 } else { 959 959 for (i = 0; i < count; i++, x++) { 960 960 BEGIN_RING(6); ··· 978 978 } 979 979 } 980 980 981 - drm_free(buffer, buffer_size, DRM_MEM_BUFS); 981 + kfree(buffer); 982 982 983 983 return 0; 984 984 } ··· 1000 1000 1001 1001 xbuf_size = count * sizeof(*x); 1002 1002 ybuf_size = count * sizeof(*y); 1003 - x = drm_alloc(xbuf_size, DRM_MEM_BUFS); 1003 + x = kmalloc(xbuf_size, GFP_KERNEL); 1004 1004 if (x == NULL) { 1005 1005 return -ENOMEM; 1006 1006 } 1007 - y = drm_alloc(ybuf_size, DRM_MEM_BUFS); 1007 + y = kmalloc(ybuf_size, GFP_KERNEL); 1008 1008 if (y == NULL) { 1009 - drm_free(x, xbuf_size, DRM_MEM_BUFS); 1009 + kfree(x); 1010 1010 return -ENOMEM; 1011 1011 } 1012 1012 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { 1013 - drm_free(x, xbuf_size, DRM_MEM_BUFS); 1014 - drm_free(y, ybuf_size, DRM_MEM_BUFS); 1013 + kfree(x); 1014 + kfree(y); 1015 1015 return -EFAULT; 1016 1016 } 1017 1017 if (DRM_COPY_FROM_USER(y, 
depth->y, xbuf_size)) { 1018 - drm_free(x, xbuf_size, DRM_MEM_BUFS); 1019 - drm_free(y, ybuf_size, DRM_MEM_BUFS); 1018 + kfree(x); 1019 + kfree(y); 1020 1020 return -EFAULT; 1021 1021 } 1022 1022 1023 1023 buffer_size = depth->n * sizeof(u32); 1024 - buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); 1024 + buffer = kmalloc(buffer_size, GFP_KERNEL); 1025 1025 if (buffer == NULL) { 1026 - drm_free(x, xbuf_size, DRM_MEM_BUFS); 1027 - drm_free(y, ybuf_size, DRM_MEM_BUFS); 1026 + kfree(x); 1027 + kfree(y); 1028 1028 return -ENOMEM; 1029 1029 } 1030 1030 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { 1031 - drm_free(x, xbuf_size, DRM_MEM_BUFS); 1032 - drm_free(y, ybuf_size, DRM_MEM_BUFS); 1033 - drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1031 + kfree(x); 1032 + kfree(y); 1033 + kfree(buffer); 1034 1034 return -EFAULT; 1035 1035 } 1036 1036 1037 1037 if (depth->mask) { 1038 1038 mask_size = depth->n * sizeof(u8); 1039 - mask = drm_alloc(mask_size, DRM_MEM_BUFS); 1039 + mask = kmalloc(mask_size, GFP_KERNEL); 1040 1040 if (mask == NULL) { 1041 - drm_free(x, xbuf_size, DRM_MEM_BUFS); 1042 - drm_free(y, ybuf_size, DRM_MEM_BUFS); 1043 - drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1041 + kfree(x); 1042 + kfree(y); 1043 + kfree(buffer); 1044 1044 return -ENOMEM; 1045 1045 } 1046 1046 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { 1047 - drm_free(x, xbuf_size, DRM_MEM_BUFS); 1048 - drm_free(y, ybuf_size, DRM_MEM_BUFS); 1049 - drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1050 - drm_free(mask, mask_size, DRM_MEM_BUFS); 1047 + kfree(x); 1048 + kfree(y); 1049 + kfree(buffer); 1050 + kfree(mask); 1051 1051 return -EFAULT; 1052 1052 } 1053 1053 ··· 1074 1074 } 1075 1075 } 1076 1076 1077 - drm_free(mask, mask_size, DRM_MEM_BUFS); 1077 + kfree(mask); 1078 1078 } else { 1079 1079 for (i = 0; i < count; i++) { 1080 1080 BEGIN_RING(6); ··· 1098 1098 } 1099 1099 } 1100 1100 1101 - drm_free(x, xbuf_size, DRM_MEM_BUFS); 1102 - drm_free(y, ybuf_size, DRM_MEM_BUFS); 1103 
- drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1101 + kfree(x); 1102 + kfree(y); 1103 + kfree(buffer); 1104 1104 1105 1105 return 0; 1106 1106 } ··· 1167 1167 1168 1168 xbuf_size = count * sizeof(*x); 1169 1169 ybuf_size = count * sizeof(*y); 1170 - x = drm_alloc(xbuf_size, DRM_MEM_BUFS); 1170 + x = kmalloc(xbuf_size, GFP_KERNEL); 1171 1171 if (x == NULL) { 1172 1172 return -ENOMEM; 1173 1173 } 1174 - y = drm_alloc(ybuf_size, DRM_MEM_BUFS); 1174 + y = kmalloc(ybuf_size, GFP_KERNEL); 1175 1175 if (y == NULL) { 1176 - drm_free(x, xbuf_size, DRM_MEM_BUFS); 1176 + kfree(x); 1177 1177 return -ENOMEM; 1178 1178 } 1179 1179 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { 1180 - drm_free(x, xbuf_size, DRM_MEM_BUFS); 1181 - drm_free(y, ybuf_size, DRM_MEM_BUFS); 1180 + kfree(x); 1181 + kfree(y); 1182 1182 return -EFAULT; 1183 1183 } 1184 1184 if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { 1185 - drm_free(x, xbuf_size, DRM_MEM_BUFS); 1186 - drm_free(y, ybuf_size, DRM_MEM_BUFS); 1185 + kfree(x); 1186 + kfree(y); 1187 1187 return -EFAULT; 1188 1188 } 1189 1189 ··· 1210 1210 ADVANCE_RING(); 1211 1211 } 1212 1212 1213 - drm_free(x, xbuf_size, DRM_MEM_BUFS); 1214 - drm_free(y, ybuf_size, DRM_MEM_BUFS); 1213 + kfree(x); 1214 + kfree(y); 1215 1215 1216 1216 return 0; 1217 1217 }
+4 -5
drivers/gpu/drm/radeon/radeon_cp.c
··· 2045 2045 drm_radeon_private_t *dev_priv; 2046 2046 int ret = 0; 2047 2047 2048 - dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER); 2048 + dev_priv = kzalloc(sizeof(drm_radeon_private_t), GFP_KERNEL); 2049 2049 if (dev_priv == NULL) 2050 2050 return -ENOMEM; 2051 2051 2052 - memset(dev_priv, 0, sizeof(drm_radeon_private_t)); 2053 2052 dev->dev_private = (void *)dev_priv; 2054 2053 dev_priv->flags = flags; 2055 2054 ··· 2102 2103 unsigned long sareapage; 2103 2104 int ret; 2104 2105 2105 - master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); 2106 + master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); 2106 2107 if (!master_priv) 2107 2108 return -ENOMEM; 2108 2109 ··· 2136 2137 if (master_priv->sarea) 2137 2138 drm_rmmap_locked(dev, master_priv->sarea); 2138 2139 2139 - drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); 2140 + kfree(master_priv); 2140 2141 2141 2142 master->driver_priv = NULL; 2142 2143 } ··· 2170 2171 2171 2172 drm_rmmap(dev, dev_priv->mmio); 2172 2173 2173 - drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); 2174 + kfree(dev_priv); 2174 2175 2175 2176 dev->dev_private = NULL; 2176 2177 return 0;
+3 -3
drivers/gpu/drm/radeon/radeon_i2c.c
··· 162 162 struct radeon_i2c_chan *i2c; 163 163 int ret; 164 164 165 - i2c = drm_calloc(1, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); 165 + i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL); 166 166 if (i2c == NULL) 167 167 return NULL; 168 168 ··· 189 189 190 190 return i2c; 191 191 out_free: 192 - drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); 192 + kfree(i2c); 193 193 return NULL; 194 194 195 195 } ··· 200 200 return; 201 201 202 202 i2c_del_adapter(&i2c->adapter); 203 - drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); 203 + kfree(i2c); 204 204 } 205 205 206 206 struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
+2 -2
drivers/gpu/drm/radeon/radeon_kms.c
··· 169 169 unsigned long sareapage; 170 170 int ret; 171 171 172 - master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); 172 + master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); 173 173 if (master_priv == NULL) { 174 174 return -ENOMEM; 175 175 } ··· 199 199 if (master_priv->sarea) { 200 200 drm_rmmap_locked(dev, master_priv->sarea); 201 201 } 202 - drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); 202 + kfree(master_priv); 203 203 master->driver_priv = NULL; 204 204 } 205 205
+12 -12
drivers/gpu/drm/radeon/radeon_mem.c
··· 43 43 { 44 44 /* Maybe cut off the start of an existing block */ 45 45 if (start > p->start) { 46 - struct mem_block *newblock = 47 - drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); 46 + struct mem_block *newblock = kmalloc(sizeof(*newblock), 47 + GFP_KERNEL); 48 48 if (!newblock) 49 49 goto out; 50 50 newblock->start = start; ··· 60 60 61 61 /* Maybe cut off the end of an existing block */ 62 62 if (size < p->size) { 63 - struct mem_block *newblock = 64 - drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); 63 + struct mem_block *newblock = kmalloc(sizeof(*newblock), 64 + GFP_KERNEL); 65 65 if (!newblock) 66 66 goto out; 67 67 newblock->start = start + size; ··· 118 118 p->size += q->size; 119 119 p->next = q->next; 120 120 p->next->prev = p; 121 - drm_free(q, sizeof(*q), DRM_MEM_BUFS); 121 + kfree(q); 122 122 } 123 123 124 124 if (p->prev->file_priv == NULL) { ··· 126 126 q->size += p->size; 127 127 q->next = p->next; 128 128 q->next->prev = q; 129 - drm_free(p, sizeof(*q), DRM_MEM_BUFS); 129 + kfree(p); 130 130 } 131 131 } 132 132 ··· 134 134 */ 135 135 static int init_heap(struct mem_block **heap, int start, int size) 136 136 { 137 - struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); 137 + struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); 138 138 139 139 if (!blocks) 140 140 return -ENOMEM; 141 141 142 - *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); 142 + *heap = kmalloc(sizeof(**heap), GFP_KERNEL); 143 143 if (!*heap) { 144 - drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); 144 + kfree(blocks); 145 145 return -ENOMEM; 146 146 } 147 147 ··· 179 179 p->size += q->size; 180 180 p->next = q->next; 181 181 p->next->prev = p; 182 - drm_free(q, sizeof(*q), DRM_MEM_DRIVER); 182 + kfree(q); 183 183 } 184 184 } 185 185 } ··· 196 196 for (p = (*heap)->next; p != *heap;) { 197 197 struct mem_block *q = p; 198 198 p = p->next; 199 - drm_free(q, sizeof(*q), DRM_MEM_DRIVER); 199 + kfree(q); 200 200 } 201 201 202 - drm_free(*heap, sizeof(**heap), 
DRM_MEM_DRIVER); 202 + kfree(*heap); 203 203 *heap = NULL; 204 204 } 205 205
+7 -9
drivers/gpu/drm/radeon/radeon_state.c
··· 2866 2866 */ 2867 2867 orig_bufsz = cmdbuf->bufsz; 2868 2868 if (orig_bufsz != 0) { 2869 - kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER); 2869 + kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL); 2870 2870 if (kbuf == NULL) 2871 2871 return -ENOMEM; 2872 2872 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf, 2873 2873 cmdbuf->bufsz)) { 2874 - drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2874 + kfree(kbuf); 2875 2875 return -EFAULT; 2876 2876 } 2877 2877 cmdbuf->buf = kbuf; ··· 2884 2884 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); 2885 2885 2886 2886 if (orig_bufsz != 0) 2887 - drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2887 + kfree(kbuf); 2888 2888 2889 2889 return temp; 2890 2890 } ··· 2991 2991 } 2992 2992 2993 2993 if (orig_bufsz != 0) 2994 - drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2994 + kfree(kbuf); 2995 2995 2996 2996 DRM_DEBUG("DONE\n"); 2997 2997 COMMIT_RING(); ··· 2999 2999 3000 3000 err: 3001 3001 if (orig_bufsz != 0) 3002 - drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 3002 + kfree(kbuf); 3003 3003 return -EINVAL; 3004 3004 } 3005 3005 ··· 3175 3175 struct drm_radeon_driver_file_fields *radeon_priv; 3176 3176 3177 3177 DRM_DEBUG("\n"); 3178 - radeon_priv = 3179 - (struct drm_radeon_driver_file_fields *) 3180 - drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES); 3178 + radeon_priv = kmalloc(sizeof(*radeon_priv), GFP_KERNEL); 3181 3179 3182 3180 if (!radeon_priv) 3183 3181 return -ENOMEM; ··· 3194 3196 struct drm_radeon_driver_file_fields *radeon_priv = 3195 3197 file_priv->driver_priv; 3196 3198 3197 - drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES); 3199 + kfree(radeon_priv); 3198 3200 } 3199 3201 3200 3202 struct drm_ioctl_desc radeon_ioctls[] = {
+8 -13
drivers/gpu/drm/savage/savage_bci.c
··· 298 298 299 299 dev_priv->nr_dma_pages = dev_priv->cmd_dma->size / 300 300 (SAVAGE_DMA_PAGE_SIZE * 4); 301 - dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) * 302 - dev_priv->nr_dma_pages, DRM_MEM_DRIVER); 301 + dev_priv->dma_pages = kmalloc(sizeof(drm_savage_dma_page_t) * 302 + dev_priv->nr_dma_pages, GFP_KERNEL); 303 303 if (dev_priv->dma_pages == NULL) 304 304 return -ENOMEM; 305 305 ··· 539 539 { 540 540 drm_savage_private_t *dev_priv; 541 541 542 - dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER); 542 + dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL); 543 543 if (dev_priv == NULL) 544 544 return -ENOMEM; 545 545 ··· 671 671 { 672 672 drm_savage_private_t *dev_priv = dev->dev_private; 673 673 674 - drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER); 674 + kfree(dev_priv); 675 675 676 676 return 0; 677 677 } ··· 804 804 dev_priv->fake_dma.offset = 0; 805 805 dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE; 806 806 dev_priv->fake_dma.type = _DRM_SHM; 807 - dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE, 808 - DRM_MEM_DRIVER); 807 + dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE, 808 + GFP_KERNEL); 809 809 if (!dev_priv->fake_dma.handle) { 810 810 DRM_ERROR("could not allocate faked DMA buffer!\n"); 811 811 savage_do_cleanup_bci(dev); ··· 903 903 drm_savage_private_t *dev_priv = dev->dev_private; 904 904 905 905 if (dev_priv->cmd_dma == &dev_priv->fake_dma) { 906 - if (dev_priv->fake_dma.handle) 907 - drm_free(dev_priv->fake_dma.handle, 908 - SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER); 906 + kfree(dev_priv->fake_dma.handle); 909 907 } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle && 910 908 dev_priv->cmd_dma->type == _DRM_AGP && 911 909 dev_priv->dma_type == SAVAGE_DMA_AGP) ··· 918 920 dev->agp_buffer_map = NULL; 919 921 } 920 922 921 - if (dev_priv->dma_pages) 922 - drm_free(dev_priv->dma_pages, 923 - sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages, 924 - 
DRM_MEM_DRIVER); 923 + kfree(dev_priv->dma_pages); 925 924 926 925 return 0; 927 926 }
+8 -9
drivers/gpu/drm/savage/savage_state.c
··· 988 988 * for locking on FreeBSD. 989 989 */ 990 990 if (cmdbuf->size) { 991 - kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER); 991 + kcmd_addr = kmalloc(cmdbuf->size * 8, GFP_KERNEL); 992 992 if (kcmd_addr == NULL) 993 993 return -ENOMEM; 994 994 995 995 if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr, 996 996 cmdbuf->size * 8)) 997 997 { 998 - drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER); 998 + kfree(kcmd_addr); 999 999 return -EFAULT; 1000 1000 } 1001 1001 cmdbuf->cmd_addr = kcmd_addr; 1002 1002 } 1003 1003 if (cmdbuf->vb_size) { 1004 - kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER); 1004 + kvb_addr = kmalloc(cmdbuf->vb_size, GFP_KERNEL); 1005 1005 if (kvb_addr == NULL) { 1006 1006 ret = -ENOMEM; 1007 1007 goto done; ··· 1015 1015 cmdbuf->vb_addr = kvb_addr; 1016 1016 } 1017 1017 if (cmdbuf->nbox) { 1018 - kbox_addr = drm_alloc(cmdbuf->nbox * sizeof(struct drm_clip_rect), 1019 - DRM_MEM_DRIVER); 1018 + kbox_addr = kmalloc(cmdbuf->nbox * sizeof(struct drm_clip_rect), 1019 + GFP_KERNEL); 1020 1020 if (kbox_addr == NULL) { 1021 1021 ret = -ENOMEM; 1022 1022 goto done; ··· 1154 1154 1155 1155 done: 1156 1156 /* If we didn't need to allocate them, these'll be NULL */ 1157 - drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER); 1158 - drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER); 1159 - drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect), 1160 - DRM_MEM_DRIVER); 1157 + kfree(kcmd_addr); 1158 + kfree(kvb_addr); 1159 + kfree(kbox_addr); 1161 1160 1162 1161 return ret; 1163 1162 }
+3 -3
drivers/gpu/drm/sis/sis_drv.c
··· 40 40 drm_sis_private_t *dev_priv; 41 41 int ret; 42 42 43 - dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); 43 + dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL); 44 44 if (dev_priv == NULL) 45 45 return -ENOMEM; 46 46 ··· 48 48 dev_priv->chipset = chipset; 49 49 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); 50 50 if (ret) { 51 - drm_free(dev_priv, sizeof(dev_priv), DRM_MEM_DRIVER); 51 + kfree(dev_priv); 52 52 } 53 53 54 54 return ret; ··· 59 59 drm_sis_private_t *dev_priv = dev->dev_private; 60 60 61 61 drm_sman_takedown(&dev_priv->sman); 62 - drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); 62 + kfree(dev_priv); 63 63 64 64 return 0; 65 65 }
+4 -4
drivers/gpu/drm/via/via_map.c
··· 96 96 drm_via_private_t *dev_priv; 97 97 int ret = 0; 98 98 99 - dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 99 + dev_priv = kzalloc(sizeof(drm_via_private_t), GFP_KERNEL); 100 100 if (dev_priv == NULL) 101 101 return -ENOMEM; 102 102 ··· 106 106 107 107 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); 108 108 if (ret) { 109 - drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); 109 + kfree(dev_priv); 110 110 return ret; 111 111 } 112 112 113 113 ret = drm_vblank_init(dev, 1); 114 114 if (ret) { 115 115 drm_sman_takedown(&dev_priv->sman); 116 - drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 116 + kfree(dev_priv); 117 117 return ret; 118 118 } 119 119 ··· 126 126 127 127 drm_sman_takedown(&dev_priv->sman); 128 128 129 - drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 129 + kfree(dev_priv); 130 130 131 131 return 0; 132 132 }
-52
include/drm/drmP.h
··· 34 34 #ifndef _DRM_P_H_ 35 35 #define _DRM_P_H_ 36 36 37 - /* If you want the memory alloc debug functionality, change define below */ 38 - /* #define DEBUG_MEMORY */ 39 - 40 37 #ifdef __KERNEL__ 41 38 #ifdef __alpha__ 42 39 /* add include of current.h so that "current" is defined ··· 129 132 #define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */ 130 133 131 134 #define DRM_FLAG_DEBUG 0x01 132 - 133 - #define DRM_MEM_DMA 0 134 - #define DRM_MEM_SAREA 1 135 - #define DRM_MEM_DRIVER 2 136 - #define DRM_MEM_MAGIC 3 137 - #define DRM_MEM_IOCTLS 4 138 - #define DRM_MEM_MAPS 5 139 - #define DRM_MEM_VMAS 6 140 - #define DRM_MEM_BUFS 7 141 - #define DRM_MEM_SEGS 8 142 - #define DRM_MEM_PAGES 9 143 - #define DRM_MEM_FILES 10 144 - #define DRM_MEM_QUEUES 11 145 - #define DRM_MEM_CMDS 12 146 - #define DRM_MEM_MAPPINGS 13 147 - #define DRM_MEM_BUFLISTS 14 148 - #define DRM_MEM_AGPLISTS 15 149 - #define DRM_MEM_TOTALAGP 16 150 - #define DRM_MEM_BOUNDAGP 17 151 - #define DRM_MEM_CTXBITMAP 18 152 - #define DRM_MEM_STUB 19 153 - #define DRM_MEM_SGLISTS 20 154 - #define DRM_MEM_CTXLIST 21 155 - #define DRM_MEM_MM 22 156 - #define DRM_MEM_HASHTAB 23 157 135 158 136 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) 159 137 #define DRM_MAP_HASH_OFFSET 0x10000000 ··· 1489 1517 { 1490 1518 } 1491 1519 1492 - #ifndef DEBUG_MEMORY 1493 - /** Wrapper around kmalloc() */ 1494 - static __inline__ void *drm_alloc(size_t size, int area) 1495 - { 1496 - return kmalloc(size, GFP_KERNEL); 1497 - } 1498 - 1499 - /** Wrapper around kfree() */ 1500 - static __inline__ void drm_free(void *pt, size_t size, int area) 1501 - { 1502 - kfree(pt); 1503 - } 1504 - 1505 - /** Wrapper around kcalloc() */ 1506 - static __inline__ void *drm_calloc(size_t nmemb, size_t size, int area) 1507 - { 1508 - return kcalloc(nmemb, size, GFP_KERNEL); 1509 - } 1510 1520 1511 1521 static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) 1512 1522 { ··· 1509 1555 1510 1556 vfree(ptr); 1511 1557 } 1512 - 
#else 1513 - extern void *drm_alloc(size_t size, int area); 1514 - extern void drm_free(void *pt, size_t size, int area); 1515 - extern void *drm_calloc(size_t nmemb, size_t size, int area); 1516 - #endif 1517 - 1518 1558 /*@}*/ 1519 1559 1520 1560 #endif /* __KERNEL__ */
-309
include/drm/drm_memory_debug.h
··· 1 - /** 2 - * \file drm_memory_debug.h 3 - * Memory management wrappers for DRM. 4 - * 5 - * \author Rickard E. (Rik) Faith <faith@valinux.com> 6 - * \author Gareth Hughes <gareth@valinux.com> 7 - */ 8 - 9 - /* 10 - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 11 - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 12 - * All Rights Reserved. 13 - * 14 - * Permission is hereby granted, free of charge, to any person obtaining a 15 - * copy of this software and associated documentation files (the "Software"), 16 - * to deal in the Software without restriction, including without limitation 17 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 18 - * and/or sell copies of the Software, and to permit persons to whom the 19 - * Software is furnished to do so, subject to the following conditions: 20 - * 21 - * The above copyright notice and this permission notice (including the next 22 - * paragraph) shall be included in all copies or substantial portions of the 23 - * Software. 24 - * 25 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 26 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 27 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 28 - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 29 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 30 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 31 - * OTHER DEALINGS IN THE SOFTWARE. 
32 - */ 33 - 34 - #include "drmP.h" 35 - 36 - typedef struct drm_mem_stats { 37 - const char *name; 38 - int succeed_count; 39 - int free_count; 40 - int fail_count; 41 - unsigned long bytes_allocated; 42 - unsigned long bytes_freed; 43 - } drm_mem_stats_t; 44 - 45 - static DEFINE_SPINLOCK(drm_mem_lock); 46 - static unsigned long drm_ram_available = 0; /* In pages */ 47 - static unsigned long drm_ram_used = 0; 48 - static drm_mem_stats_t drm_mem_stats[] = 49 - { 50 - [DRM_MEM_DMA] = {"dmabufs"}, 51 - [DRM_MEM_SAREA] = {"sareas"}, 52 - [DRM_MEM_DRIVER] = {"driver"}, 53 - [DRM_MEM_MAGIC] = {"magic"}, 54 - [DRM_MEM_IOCTLS] = {"ioctltab"}, 55 - [DRM_MEM_MAPS] = {"maplist"}, 56 - [DRM_MEM_VMAS] = {"vmalist"}, 57 - [DRM_MEM_BUFS] = {"buflist"}, 58 - [DRM_MEM_SEGS] = {"seglist"}, 59 - [DRM_MEM_PAGES] = {"pagelist"}, 60 - [DRM_MEM_FILES] = {"files"}, 61 - [DRM_MEM_QUEUES] = {"queues"}, 62 - [DRM_MEM_CMDS] = {"commands"}, 63 - [DRM_MEM_MAPPINGS] = {"mappings"}, 64 - [DRM_MEM_BUFLISTS] = {"buflists"}, 65 - [DRM_MEM_AGPLISTS] = {"agplist"}, 66 - [DRM_MEM_SGLISTS] = {"sglist"}, 67 - [DRM_MEM_TOTALAGP] = {"totalagp"}, 68 - [DRM_MEM_BOUNDAGP] = {"boundagp"}, 69 - [DRM_MEM_CTXBITMAP] = {"ctxbitmap"}, 70 - [DRM_MEM_CTXLIST] = {"ctxlist"}, 71 - [DRM_MEM_STUB] = {"stub"}, 72 - {NULL, 0,} /* Last entry must be null */ 73 - }; 74 - 75 - void drm_mem_init (void) { 76 - drm_mem_stats_t *mem; 77 - struct sysinfo si; 78 - 79 - for (mem = drm_mem_stats; mem->name; ++mem) { 80 - mem->succeed_count = 0; 81 - mem->free_count = 0; 82 - mem->fail_count = 0; 83 - mem->bytes_allocated = 0; 84 - mem->bytes_freed = 0; 85 - } 86 - 87 - si_meminfo(&si); 88 - drm_ram_available = si.totalram; 89 - drm_ram_used = 0; 90 - } 91 - 92 - /* drm_mem_info is called whenever a process reads /dev/drm/mem. 
*/ 93 - 94 - static int drm__mem_info (char *buf, char **start, off_t offset, 95 - int request, int *eof, void *data) { 96 - drm_mem_stats_t *pt; 97 - int len = 0; 98 - 99 - if (offset > DRM_PROC_LIMIT) { 100 - *eof = 1; 101 - return 0; 102 - } 103 - 104 - *eof = 0; 105 - *start = &buf[offset]; 106 - 107 - DRM_PROC_PRINT(" total counts " 108 - " | outstanding \n"); 109 - DRM_PROC_PRINT("type alloc freed fail bytes freed" 110 - " | allocs bytes\n\n"); 111 - DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", 112 - "system", 0, 0, 0, 113 - drm_ram_available << (PAGE_SHIFT - 10)); 114 - DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", 115 - "locked", 0, 0, 0, drm_ram_used >> 10); 116 - DRM_PROC_PRINT("\n"); 117 - for (pt = drm_mem_stats; pt->name; pt++) { 118 - DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n", 119 - pt->name, 120 - pt->succeed_count, 121 - pt->free_count, 122 - pt->fail_count, 123 - pt->bytes_allocated, 124 - pt->bytes_freed, 125 - pt->succeed_count - pt->free_count, 126 - (long)pt->bytes_allocated 127 - - (long)pt->bytes_freed); 128 - } 129 - 130 - if (len > request + offset) 131 - return request; 132 - *eof = 1; 133 - return len - offset; 134 - } 135 - 136 - int drm_mem_info (char *buf, char **start, off_t offset, 137 - int len, int *eof, void *data) { 138 - int ret; 139 - 140 - spin_lock(&drm_mem_lock); 141 - ret = drm__mem_info (buf, start, offset, len, eof, data); 142 - spin_unlock(&drm_mem_lock); 143 - return ret; 144 - } 145 - 146 - void *drm_alloc (size_t size, int area) { 147 - void *pt; 148 - 149 - if (!size) { 150 - DRM_MEM_ERROR(area, "Allocating 0 bytes\n"); 151 - return NULL; 152 - } 153 - 154 - if (!(pt = kmalloc(size, GFP_KERNEL))) { 155 - spin_lock(&drm_mem_lock); 156 - ++drm_mem_stats[area].fail_count; 157 - spin_unlock(&drm_mem_lock); 158 - return NULL; 159 - } 160 - spin_lock(&drm_mem_lock); 161 - ++drm_mem_stats[area].succeed_count; 162 - drm_mem_stats[area].bytes_allocated += size; 163 - spin_unlock(&drm_mem_lock); 
164 - return pt; 165 - } 166 - 167 - void *drm_calloc (size_t nmemb, size_t size, int area) { 168 - void *addr; 169 - 170 - addr = drm_alloc (nmemb * size, area); 171 - if (addr != NULL) 172 - memset((void *)addr, 0, size * nmemb); 173 - 174 - return addr; 175 - } 176 - 177 - void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) { 178 - void *pt; 179 - 180 - if (!(pt = drm_alloc (size, area))) 181 - return NULL; 182 - if (oldpt && oldsize) { 183 - memcpy(pt, oldpt, oldsize); 184 - drm_free (oldpt, oldsize, area); 185 - } 186 - return pt; 187 - } 188 - 189 - void drm_free (void *pt, size_t size, int area) { 190 - int alloc_count; 191 - int free_count; 192 - 193 - if (!pt) 194 - DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n"); 195 - else 196 - kfree(pt); 197 - spin_lock(&drm_mem_lock); 198 - drm_mem_stats[area].bytes_freed += size; 199 - free_count = ++drm_mem_stats[area].free_count; 200 - alloc_count = drm_mem_stats[area].succeed_count; 201 - spin_unlock(&drm_mem_lock); 202 - if (free_count > alloc_count) { 203 - DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n", 204 - free_count, alloc_count); 205 - } 206 - } 207 - 208 - #if __OS_HAS_AGP 209 - 210 - DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) { 211 - DRM_AGP_MEM *handle; 212 - 213 - if (!pages) { 214 - DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n"); 215 - return NULL; 216 - } 217 - 218 - if ((handle = drm_agp_allocate_memory (pages, type))) { 219 - spin_lock(&drm_mem_lock); 220 - ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; 221 - drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated 222 - += pages << PAGE_SHIFT; 223 - spin_unlock(&drm_mem_lock); 224 - return handle; 225 - } 226 - spin_lock(&drm_mem_lock); 227 - ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count; 228 - spin_unlock(&drm_mem_lock); 229 - return NULL; 230 - } 231 - 232 - int drm_free_agp (DRM_AGP_MEM * handle, int pages) { 233 - int alloc_count; 234 - int free_count; 235 - int retval = -EINVAL; 
236 - 237 - if (!handle) { 238 - DRM_MEM_ERROR(DRM_MEM_TOTALAGP, 239 - "Attempt to free NULL AGP handle\n"); 240 - return retval; 241 - } 242 - 243 - if (drm_agp_free_memory (handle)) { 244 - spin_lock(&drm_mem_lock); 245 - free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count; 246 - alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; 247 - drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed 248 - += pages << PAGE_SHIFT; 249 - spin_unlock(&drm_mem_lock); 250 - if (free_count > alloc_count) { 251 - DRM_MEM_ERROR(DRM_MEM_TOTALAGP, 252 - "Excess frees: %d frees, %d allocs\n", 253 - free_count, alloc_count); 254 - } 255 - return 0; 256 - } 257 - return retval; 258 - } 259 - 260 - int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) { 261 - int retcode = -EINVAL; 262 - 263 - if (!handle) { 264 - DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, 265 - "Attempt to bind NULL AGP handle\n"); 266 - return retcode; 267 - } 268 - 269 - if (!(retcode = drm_agp_bind_memory (handle, start))) { 270 - spin_lock(&drm_mem_lock); 271 - ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; 272 - drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated 273 - += handle->page_count << PAGE_SHIFT; 274 - spin_unlock(&drm_mem_lock); 275 - return retcode; 276 - } 277 - spin_lock(&drm_mem_lock); 278 - ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count; 279 - spin_unlock(&drm_mem_lock); 280 - return retcode; 281 - } 282 - 283 - int drm_unbind_agp (DRM_AGP_MEM * handle) { 284 - int alloc_count; 285 - int free_count; 286 - int retcode = -EINVAL; 287 - 288 - if (!handle) { 289 - DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, 290 - "Attempt to unbind NULL AGP handle\n"); 291 - return retcode; 292 - } 293 - 294 - if ((retcode = drm_agp_unbind_memory (handle))) 295 - return retcode; 296 - spin_lock(&drm_mem_lock); 297 - free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count; 298 - alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; 299 - drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed 300 - += handle->page_count << PAGE_SHIFT; 301 - 
spin_unlock(&drm_mem_lock); 302 - if (free_count > alloc_count) { 303 - DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, 304 - "Excess frees: %d frees, %d allocs\n", 305 - free_count, alloc_count); 306 - } 307 - return retcode; 308 - } 309 - #endif