Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tree wide: use kvfree() rather than conditional kfree()/vfree()

There are many locations that do

	if (memory_was_allocated_by_vmalloc)
		vfree(ptr);
	else
		kfree(ptr);

but kvfree() can handle both kmalloc()ed memory and vmalloc()ed memory
using is_vmalloc_addr(). Unless callers have special reasons, we can
replace this branch with kvfree(). Please check and reply if you find
problems.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Jan Kara <jack@suse.com>
Acked-by: Russell King <rmk+kernel@arm.linux.org.uk>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Acked-by: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Acked-by: David Rientjes <rientjes@google.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: Boris Petkov <bp@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Tetsuo Handa and committed by Linus Torvalds
1d5cfdb0 eab95db6

+36 -103
+2 -9
arch/arm/mm/dma-mapping.c
··· 1200 1200 while (i--) 1201 1201 if (pages[i]) 1202 1202 __free_pages(pages[i], 0); 1203 - if (array_size <= PAGE_SIZE) 1204 - kfree(pages); 1205 - else 1206 - vfree(pages); 1203 + kvfree(pages); 1207 1204 return NULL; 1208 1205 } 1209 1206 ··· 1208 1211 size_t size, struct dma_attrs *attrs) 1209 1212 { 1210 1213 int count = size >> PAGE_SHIFT; 1211 - int array_size = count * sizeof(struct page *); 1212 1214 int i; 1213 1215 1214 1216 if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { ··· 1218 1222 __free_pages(pages[i], 0); 1219 1223 } 1220 1224 1221 - if (array_size <= PAGE_SIZE) 1222 - kfree(pages); 1223 - else 1224 - vfree(pages); 1225 + kvfree(pages); 1225 1226 return 0; 1226 1227 } 1227 1228
+2 -4
drivers/acpi/apei/erst.c
··· 32 32 #include <linux/hardirq.h> 33 33 #include <linux/pstore.h> 34 34 #include <linux/vmalloc.h> 35 + #include <linux/mm.h> /* kvfree() */ 35 36 #include <acpi/apei.h> 36 37 37 38 #include "apei-internal.h" ··· 533 532 return -ENOMEM; 534 533 memcpy(new_entries, entries, 535 534 erst_record_id_cache.len * sizeof(entries[0])); 536 - if (erst_record_id_cache.size < PAGE_SIZE) 537 - kfree(entries); 538 - else 539 - vfree(entries); 535 + kvfree(entries); 540 536 erst_record_id_cache.entries = entries = new_entries; 541 537 erst_record_id_cache.size = new_size; 542 538 }
+7 -19
drivers/block/drbd/drbd_bitmap.c
··· 364 364 } 365 365 } 366 366 367 - static void bm_vk_free(void *ptr, int v) 367 + static inline void bm_vk_free(void *ptr) 368 368 { 369 - if (v) 370 - vfree(ptr); 371 - else 372 - kfree(ptr); 369 + kvfree(ptr); 373 370 } 374 371 375 372 /* ··· 376 379 { 377 380 struct page **old_pages = b->bm_pages; 378 381 struct page **new_pages, *page; 379 - unsigned int i, bytes, vmalloced = 0; 382 + unsigned int i, bytes; 380 383 unsigned long have = b->bm_number_of_pages; 381 384 382 385 BUG_ON(have == 0 && old_pages != NULL); ··· 398 401 PAGE_KERNEL); 399 402 if (!new_pages) 400 403 return NULL; 401 - vmalloced = 1; 402 404 } 403 405 404 406 if (want >= have) { ··· 407 411 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); 408 412 if (!page) { 409 413 bm_free_pages(new_pages + have, i - have); 410 - bm_vk_free(new_pages, vmalloced); 414 + bm_vk_free(new_pages); 411 415 return NULL; 412 416 } 413 417 /* we want to know which page it is ··· 422 426 bm_free_pages(old_pages + want, have - want); 423 427 */ 424 428 } 425 - 426 - if (vmalloced) 427 - b->bm_flags |= BM_P_VMALLOCED; 428 - else 429 - b->bm_flags &= ~BM_P_VMALLOCED; 430 429 431 430 return new_pages; 432 431 } ··· 460 469 if (!expect(device->bitmap)) 461 470 return; 462 471 bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages); 463 - bm_vk_free(device->bitmap->bm_pages, (BM_P_VMALLOCED & device->bitmap->bm_flags)); 472 + bm_vk_free(device->bitmap->bm_pages); 464 473 kfree(device->bitmap); 465 474 device->bitmap = NULL; 466 475 } ··· 634 643 unsigned long want, have, onpages; /* number of pages */ 635 644 struct page **npages, **opages = NULL; 636 645 int err = 0, growing; 637 - int opages_vmalloced; 638 646 639 647 if (!expect(b)) 640 648 return -ENOMEM; ··· 645 655 646 656 if (capacity == b->bm_dev_capacity) 647 657 goto out; 648 - 649 - opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags); 650 658 651 659 if (capacity == 0) { 652 660 spin_lock_irq(&b->bm_lock); ··· 659 671 b->bm_dev_capacity = 0; 660 
672 spin_unlock_irq(&b->bm_lock); 661 673 bm_free_pages(opages, onpages); 662 - bm_vk_free(opages, opages_vmalloced); 674 + bm_vk_free(opages); 663 675 goto out; 664 676 } 665 677 bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT)); ··· 732 744 733 745 spin_unlock_irq(&b->bm_lock); 734 746 if (opages != npages) 735 - bm_vk_free(opages, opages_vmalloced); 747 + bm_vk_free(opages); 736 748 if (!growing) 737 749 b->bm_set = bm_count_bits(b); 738 750 drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
-3
drivers/block/drbd/drbd_int.h
··· 536 536 /* definition of bits in bm_flags to be used in drbd_bm_lock 537 537 * and drbd_bitmap_io and friends. */ 538 538 enum bm_flag { 539 - /* do we need to kfree, or vfree bm_pages? */ 540 - BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */ 541 - 542 539 /* currently locked for bulk operation */ 543 540 BM_LOCKED_MASK = 0xf, 544 541
+3 -12
drivers/char/mspec.c
··· 93 93 spinlock_t lock; /* Serialize access to this structure. */ 94 94 int count; /* Number of pages allocated. */ 95 95 enum mspec_page_type type; /* Type of pages allocated. */ 96 - int flags; /* See VMD_xxx below. */ 97 96 unsigned long vm_start; /* Original (unsplit) base. */ 98 97 unsigned long vm_end; /* Original (unsplit) end. */ 99 98 unsigned long maddr[0]; /* Array of MSPEC addresses. */ 100 99 }; 101 - 102 - #define VMD_VMALLOCED 0x1 /* vmalloc'd rather than kmalloc'd */ 103 100 104 101 /* used on shub2 to clear FOP cache in the HUB */ 105 102 static unsigned long scratch_page[MAX_NUMNODES]; ··· 182 185 "failed to zero page %ld\n", my_page); 183 186 } 184 187 185 - if (vdata->flags & VMD_VMALLOCED) 186 - vfree(vdata); 187 - else 188 - kfree(vdata); 188 + kvfree(vdata); 189 189 } 190 190 191 191 /* ··· 250 256 enum mspec_page_type type) 251 257 { 252 258 struct vma_data *vdata; 253 - int pages, vdata_size, flags = 0; 259 + int pages, vdata_size; 254 260 255 261 if (vma->vm_pgoff != 0) 256 262 return -EINVAL; ··· 265 271 vdata_size = sizeof(struct vma_data) + pages * sizeof(long); 266 272 if (vdata_size <= PAGE_SIZE) 267 273 vdata = kzalloc(vdata_size, GFP_KERNEL); 268 - else { 274 + else 269 275 vdata = vzalloc(vdata_size); 270 - flags = VMD_VMALLOCED; 271 - } 272 276 if (!vdata) 273 277 return -ENOMEM; 274 278 275 279 vdata->vm_start = vma->vm_start; 276 280 vdata->vm_end = vma->vm_end; 277 - vdata->flags = flags; 278 281 vdata->type = type; 279 282 spin_lock_init(&vdata->lock); 280 283 atomic_set(&vdata->refcnt, 1);
+1 -4
drivers/gpu/drm/drm_hashtab.c
··· 198 198 void drm_ht_remove(struct drm_open_hash *ht) 199 199 { 200 200 if (ht->table) { 201 - if ((PAGE_SIZE / sizeof(*ht->table)) >> ht->order) 202 - kfree(ht->table); 203 - else 204 - vfree(ht->table); 201 + kvfree(ht->table); 205 202 ht->table = NULL; 206 203 } 207 204 }
+2 -6
drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
··· 151 151 152 152 #define LIBCFS_FREE(ptr, size) \ 153 153 do { \ 154 - int s = (size); \ 155 154 if (unlikely((ptr) == NULL)) { \ 156 155 CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \ 157 - "%s:%d\n", s, __FILE__, __LINE__); \ 156 + "%s:%d\n", (int)(size), __FILE__, __LINE__); \ 158 157 break; \ 159 158 } \ 160 - if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \ 161 - vfree(ptr); \ 162 - else \ 163 - kfree(ptr); \ 159 + kvfree(ptr); \ 164 160 } while (0) 165 161 166 162 /******************************************************************************/
+1 -2
fs/coda/coda_linux.h
··· 72 72 } while (0) 73 73 74 74 75 - #define CODA_FREE(ptr,size) \ 76 - do { if (size < PAGE_SIZE) kfree((ptr)); else vfree((ptr)); } while (0) 75 + #define CODA_FREE(ptr, size) kvfree((ptr)) 77 76 78 77 /* inode to cnode access functions */ 79 78
+2 -6
fs/jffs2/build.c
··· 17 17 #include <linux/slab.h> 18 18 #include <linux/vmalloc.h> 19 19 #include <linux/mtd/mtd.h> 20 + #include <linux/mm.h> /* kvfree() */ 20 21 #include "nodelist.h" 21 22 22 23 static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, ··· 384 383 return 0; 385 384 386 385 out_free: 387 - #ifndef __ECOS 388 - if (jffs2_blocks_use_vmalloc(c)) 389 - vfree(c->blocks); 390 - else 391 - #endif 392 - kfree(c->blocks); 386 + kvfree(c->blocks); 393 387 394 388 return ret; 395 389 }
+1 -4
fs/jffs2/fs.c
··· 596 596 out_root: 597 597 jffs2_free_ino_caches(c); 598 598 jffs2_free_raw_node_refs(c); 599 - if (jffs2_blocks_use_vmalloc(c)) 600 - vfree(c->blocks); 601 - else 602 - kfree(c->blocks); 599 + kvfree(c->blocks); 603 600 out_inohash: 604 601 jffs2_clear_xattr_subsystem(c); 605 602 kfree(c->inocache_list);
+1 -4
fs/jffs2/super.c
··· 331 331 332 332 jffs2_free_ino_caches(c); 333 333 jffs2_free_raw_node_refs(c); 334 - if (jffs2_blocks_use_vmalloc(c)) 335 - vfree(c->blocks); 336 - else 337 - kfree(c->blocks); 334 + kvfree(c->blocks); 338 335 jffs2_flash_cleanup(c); 339 336 kfree(c->inocache_list); 340 337 jffs2_clear_xattr_subsystem(c);
+1 -6
fs/udf/super.c
··· 279 279 { 280 280 int i; 281 281 int nr_groups = bitmap->s_nr_groups; 282 - int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * 283 - nr_groups); 284 282 285 283 for (i = 0; i < nr_groups; i++) 286 284 if (bitmap->s_block_bitmap[i]) 287 285 brelse(bitmap->s_block_bitmap[i]); 288 286 289 - if (size <= PAGE_SIZE) 290 - kfree(bitmap); 291 - else 292 - vfree(bitmap); 287 + kvfree(bitmap); 293 288 } 294 289 295 290 static void udf_free_partition(struct udf_part_map *map)
+1 -1
ipc/sem.c
··· 1493 1493 wake_up_sem_queue_do(&tasks); 1494 1494 out_free: 1495 1495 if (sem_io != fast_sem_io) 1496 - ipc_free(sem_io, sizeof(ushort)*nsems); 1496 + ipc_free(sem_io); 1497 1497 return err; 1498 1498 } 1499 1499
+3 -8
ipc/util.c
··· 414 414 /** 415 415 * ipc_free - free ipc space 416 416 * @ptr: pointer returned by ipc_alloc 417 - * @size: size of block 418 417 * 419 - * Free a block created with ipc_alloc(). The caller must know the size 420 - * used in the allocation call. 418 + * Free a block created with ipc_alloc(). 421 419 */ 422 - void ipc_free(void *ptr, int size) 420 + void ipc_free(void *ptr) 423 421 { 424 - if (size > PAGE_SIZE) 425 - vfree(ptr); 426 - else 427 - kfree(ptr); 422 + kvfree(ptr); 428 423 } 429 424 430 425 /**
+1 -1
ipc/util.h
··· 118 118 * both function can sleep 119 119 */ 120 120 void *ipc_alloc(int size); 121 - void ipc_free(void *ptr, int size); 121 + void ipc_free(void *ptr); 122 122 123 123 /* 124 124 * For allocation that need to be freed by RCU.
+7 -11
mm/percpu.c
··· 305 305 /** 306 306 * pcpu_mem_free - free memory 307 307 * @ptr: memory to free 308 - * @size: size of the area 309 308 * 310 309 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc(). 311 310 */ 312 - static void pcpu_mem_free(void *ptr, size_t size) 311 + static void pcpu_mem_free(void *ptr) 313 312 { 314 - if (size <= PAGE_SIZE) 315 - kfree(ptr); 316 - else 317 - vfree(ptr); 313 + kvfree(ptr); 318 314 } 319 315 320 316 /** ··· 459 463 * pcpu_mem_free() might end up calling vfree() which uses 460 464 * IRQ-unsafe lock and thus can't be called under pcpu_lock. 461 465 */ 462 - pcpu_mem_free(old, old_size); 463 - pcpu_mem_free(new, new_size); 466 + pcpu_mem_free(old); 467 + pcpu_mem_free(new); 464 468 465 469 return 0; 466 470 } ··· 728 732 chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC * 729 733 sizeof(chunk->map[0])); 730 734 if (!chunk->map) { 731 - pcpu_mem_free(chunk, pcpu_chunk_struct_size); 735 + pcpu_mem_free(chunk); 732 736 return NULL; 733 737 } 734 738 ··· 749 753 { 750 754 if (!chunk) 751 755 return; 752 - pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0])); 753 - pcpu_mem_free(chunk, pcpu_chunk_struct_size); 756 + pcpu_mem_free(chunk->map); 757 + pcpu_mem_free(chunk); 754 758 } 755 759 756 760 /**
+1 -3
net/ipv4/fib_trie.c
··· 289 289 290 290 if (!n->tn_bits) 291 291 kmem_cache_free(trie_leaf_kmem, n); 292 - else if (n->tn_bits <= TNODE_KMALLOC_MAX) 293 - kfree(n); 294 292 else 295 - vfree(n); 293 + kvfree(n); 296 294 } 297 295 298 296 #define node_free(n) call_rcu(&tn_info(n)->rcu, __node_free_rcu)