Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
rcu: Fix wrong check in list_splice_init_rcu()
net,rcu: Convert call_rcu(xt_rateest_free_rcu) to kfree_rcu()
sysctl,rcu: Convert call_rcu(free_head) to kfree_rcu()
vmalloc,rcu: Convert call_rcu(rcu_free_vb) to kfree_rcu()
vmalloc,rcu: Convert call_rcu(rcu_free_va) to kfree_rcu()
ipc,rcu: Convert call_rcu(ipc_immediate_free) to kfree_rcu()
ipc,rcu: Convert call_rcu(free_un) to kfree_rcu()
security,rcu: Convert call_rcu(sel_netport_free) to kfree_rcu()
security,rcu: Convert call_rcu(sel_netnode_free) to kfree_rcu()
ia64,rcu: Convert call_rcu(sn_irq_info_free) to kfree_rcu()
block,rcu: Convert call_rcu(disk_free_ptbl_rcu_cb) to kfree_rcu()
scsi,rcu: Convert call_rcu(fc_rport_free_rcu) to kfree_rcu()
audit_tree,rcu: Convert call_rcu(__put_tree) to kfree_rcu()
security,rcu: Convert call_rcu(whitelist_item_free) to kfree_rcu()
md,rcu: Convert call_rcu(free_conf) to kfree_rcu()

+21 -148
+2 -12
arch/ia64/sn/kernel/irq.c
··· 112 112 irq_move_irq(data); 113 113 } 114 114 115 - static void sn_irq_info_free(struct rcu_head *head); 116 - 117 115 struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, 118 116 nasid_t nasid, int slice) 119 117 { ··· 175 177 spin_lock(&sn_irq_info_lock); 176 178 list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); 177 179 spin_unlock(&sn_irq_info_lock); 178 - call_rcu(&sn_irq_info->rcu, sn_irq_info_free); 180 + kfree_rcu(sn_irq_info, rcu); 179 181 180 182 181 183 finish_up: ··· 336 338 rcu_read_unlock(); 337 339 } 338 340 339 - static void sn_irq_info_free(struct rcu_head *head) 340 - { 341 - struct sn_irq_info *sn_irq_info; 342 - 343 - sn_irq_info = container_of(head, struct sn_irq_info, rcu); 344 - kfree(sn_irq_info); 345 - } 346 - 347 341 void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) 348 342 { 349 343 nasid_t nasid = sn_irq_info->irq_nasid; ··· 389 399 spin_unlock(&sn_irq_info_lock); 390 400 if (list_empty(sn_irq_lh[sn_irq_info->irq_irq])) 391 401 free_irq_vector(sn_irq_info->irq_irq); 392 - call_rcu(&sn_irq_info->rcu, sn_irq_info_free); 402 + kfree_rcu(sn_irq_info, rcu); 393 403 pci_dev_put(pci_dev); 394 404 395 405 }
+1 -9
block/genhd.c
··· 1018 1018 NULL 1019 1019 }; 1020 1020 1021 - static void disk_free_ptbl_rcu_cb(struct rcu_head *head) 1022 - { 1023 - struct disk_part_tbl *ptbl = 1024 - container_of(head, struct disk_part_tbl, rcu_head); 1025 - 1026 - kfree(ptbl); 1027 - } 1028 - 1029 1021 /** 1030 1022 * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way 1031 1023 * @disk: disk to replace part_tbl for ··· 1038 1046 1039 1047 if (old_ptbl) { 1040 1048 rcu_assign_pointer(old_ptbl->last_lookup, NULL); 1041 - call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb); 1049 + kfree_rcu(old_ptbl, rcu_head); 1042 1050 } 1043 1051 } 1044 1052
+1 -7
drivers/md/linear.c
··· 213 213 return md_integrity_register(mddev); 214 214 } 215 215 216 - static void free_conf(struct rcu_head *head) 217 - { 218 - linear_conf_t *conf = container_of(head, linear_conf_t, rcu); 219 - kfree(conf); 220 - } 221 - 222 216 static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev) 223 217 { 224 218 /* Adding a drive to a linear array allows the array to grow. ··· 241 247 md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); 242 248 set_capacity(mddev->gendisk, mddev->array_sectors); 243 249 revalidate_disk(mddev->gendisk); 244 - call_rcu(&oldconf->rcu, free_conf); 250 + kfree_rcu(oldconf, rcu); 245 251 return 0; 246 252 } 247 253
+1 -13
drivers/scsi/libfc/fc_rport.c
··· 153 153 } 154 154 155 155 /** 156 - * fc_rport_free_rcu() - Free a remote port 157 - * @rcu: The rcu_head structure inside the remote port 158 - */ 159 - static void fc_rport_free_rcu(struct rcu_head *rcu) 160 - { 161 - struct fc_rport_priv *rdata; 162 - 163 - rdata = container_of(rcu, struct fc_rport_priv, rcu); 164 - kfree(rdata); 165 - } 166 - 167 - /** 168 156 * fc_rport_destroy() - Free a remote port after last reference is released 169 157 * @kref: The remote port's kref 170 158 */ ··· 161 173 struct fc_rport_priv *rdata; 162 174 163 175 rdata = container_of(kref, struct fc_rport_priv, kref); 164 - call_rcu(&rdata->rcu, fc_rport_free_rcu); 176 + kfree_rcu(rdata, rcu); 165 177 } 166 178 167 179 /**
+1 -1
include/linux/rculist.h
··· 183 183 struct list_head *last = list->prev; 184 184 struct list_head *at = head->next; 185 185 186 - if (list_empty(head)) 186 + if (list_empty(list)) 187 187 return; 188 188 189 189 /* "first" and "last" tracking list, so initialize it. */
+2 -8
ipc/sem.c
··· 689 689 return semzcnt; 690 690 } 691 691 692 - static void free_un(struct rcu_head *head) 693 - { 694 - struct sem_undo *un = container_of(head, struct sem_undo, rcu); 695 - kfree(un); 696 - } 697 - 698 692 /* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked 699 693 * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex 700 694 * remains locked on exit. ··· 708 714 un->semid = -1; 709 715 list_del_rcu(&un->list_proc); 710 716 spin_unlock(&un->ulp->lock); 711 - call_rcu(&un->rcu, free_un); 717 + kfree_rcu(un, rcu); 712 718 } 713 719 714 720 /* Wake up all pending processes and let them fail with EIDRM. */ ··· 1606 1612 sem_unlock(sma); 1607 1613 wake_up_sem_queue_do(&tasks); 1608 1614 1609 - call_rcu(&un->rcu, free_un); 1615 + kfree_rcu(un, rcu); 1610 1616 } 1611 1617 kfree(ulp); 1612 1618 }
+1 -15
ipc/util.c
··· 579 579 schedule_work(&sched->work); 580 580 } 581 581 582 - /** 583 - * ipc_immediate_free - free ipc + rcu space 584 - * @head: RCU callback structure that contains pointer to be freed 585 - * 586 - * Free from the RCU callback context. 587 - */ 588 - static void ipc_immediate_free(struct rcu_head *head) 589 - { 590 - struct ipc_rcu_grace *free = 591 - container_of(head, struct ipc_rcu_grace, rcu); 592 - kfree(free); 593 - } 594 - 595 582 void ipc_rcu_putref(void *ptr) 596 583 { 597 584 if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0) ··· 588 601 call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu, 589 602 ipc_schedule_free); 590 603 } else { 591 - call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu, 592 - ipc_immediate_free); 604 + kfree_rcu(container_of(ptr, struct ipc_rcu_grace, data), rcu); 593 605 } 594 606 } 595 607
+1 -7
kernel/audit_tree.c
··· 93 93 atomic_inc(&tree->count); 94 94 } 95 95 96 - static void __put_tree(struct rcu_head *rcu) 97 - { 98 - struct audit_tree *tree = container_of(rcu, struct audit_tree, head); 99 - kfree(tree); 100 - } 101 - 102 96 static inline void put_tree(struct audit_tree *tree) 103 97 { 104 98 if (atomic_dec_and_test(&tree->count)) 105 - call_rcu(&tree->head, __put_tree); 99 + kfree_rcu(tree, head); 106 100 } 107 101 108 102 /* to avoid bringing the entire thing in audit.h */
+3 -8
kernel/sysctl.c
··· 1590 1590 spin_unlock(&sysctl_lock); 1591 1591 } 1592 1592 1593 - static void free_head(struct rcu_head *rcu) 1594 - { 1595 - kfree(container_of(rcu, struct ctl_table_header, rcu)); 1596 - } 1597 - 1598 1593 void sysctl_head_put(struct ctl_table_header *head) 1599 1594 { 1600 1595 spin_lock(&sysctl_lock); 1601 1596 if (!--head->count) 1602 - call_rcu(&head->rcu, free_head); 1597 + kfree_rcu(head, rcu); 1603 1598 spin_unlock(&sysctl_lock); 1604 1599 } 1605 1600 ··· 1966 1971 start_unregistering(header); 1967 1972 if (!--header->parent->count) { 1968 1973 WARN_ON(1); 1969 - call_rcu(&header->parent->rcu, free_head); 1974 + kfree_rcu(header->parent, rcu); 1970 1975 } 1971 1976 if (!--header->count) 1972 - call_rcu(&header->rcu, free_head); 1977 + kfree_rcu(header, rcu); 1973 1978 spin_unlock(&sysctl_lock); 1974 1979 } 1975 1980
+2 -16
mm/vmalloc.c
··· 452 452 return ERR_PTR(-EBUSY); 453 453 } 454 454 455 - static void rcu_free_va(struct rcu_head *head) 456 - { 457 - struct vmap_area *va = container_of(head, struct vmap_area, rcu_head); 458 - 459 - kfree(va); 460 - } 461 - 462 455 static void __free_vmap_area(struct vmap_area *va) 463 456 { 464 457 BUG_ON(RB_EMPTY_NODE(&va->rb_node)); ··· 484 491 if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END) 485 492 vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end); 486 493 487 - call_rcu(&va->rcu_head, rcu_free_va); 494 + kfree_rcu(va, rcu_head); 488 495 } 489 496 490 497 /* ··· 830 837 return vb; 831 838 } 832 839 833 - static void rcu_free_vb(struct rcu_head *head) 834 - { 835 - struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head); 836 - 837 - kfree(vb); 838 - } 839 - 840 840 static void free_vmap_block(struct vmap_block *vb) 841 841 { 842 842 struct vmap_block *tmp; ··· 842 856 BUG_ON(tmp != vb); 843 857 844 858 free_vmap_area_noflush(vb->va); 845 - call_rcu(&vb->rcu_head, rcu_free_vb); 859 + kfree_rcu(vb, rcu_head); 846 860 } 847 861 848 862 static void purge_fragmented_blocks(int cpu)
+1 -7
net/netfilter/xt_RATEEST.c
··· 60 60 } 61 61 EXPORT_SYMBOL_GPL(xt_rateest_lookup); 62 62 63 - static void xt_rateest_free_rcu(struct rcu_head *head) 64 - { 65 - kfree(container_of(head, struct xt_rateest, rcu)); 66 - } 67 - 68 63 void xt_rateest_put(struct xt_rateest *est) 69 64 { 70 65 mutex_lock(&xt_rateest_mutex); ··· 70 75 * gen_estimator est_timer() might access est->lock or bstats, 71 76 * wait a RCU grace period before freeing 'est' 72 77 */ 73 - call_rcu(&est->rcu, xt_rateest_free_rcu); 78 + kfree_rcu(est, rcu); 74 79 } 75 80 mutex_unlock(&xt_rateest_mutex); 76 81 } ··· 183 188 static void __exit xt_rateest_tg_fini(void) 184 189 { 185 190 xt_unregister_target(&xt_rateest_tg_reg); 186 - rcu_barrier(); /* Wait for completion of call_rcu()'s (xt_rateest_free_rcu) */ 187 191 } 188 192 189 193
+1 -9
security/device_cgroup.c
··· 125 125 return 0; 126 126 } 127 127 128 - static void whitelist_item_free(struct rcu_head *rcu) 129 - { 130 - struct dev_whitelist_item *item; 131 - 132 - item = container_of(rcu, struct dev_whitelist_item, rcu); 133 - kfree(item); 134 - } 135 - 136 128 /* 137 129 * called under devcgroup_mutex 138 130 */ ··· 147 155 walk->access &= ~wh->access; 148 156 if (!walk->access) { 149 157 list_del_rcu(&walk->list); 150 - call_rcu(&walk->rcu, whitelist_item_free); 158 + kfree_rcu(walk, rcu); 151 159 } 152 160 } 153 161 }
+2 -18
security/selinux/netnode.c
··· 69 69 static struct sel_netnode_bkt sel_netnode_hash[SEL_NETNODE_HASH_SIZE]; 70 70 71 71 /** 72 - * sel_netnode_free - Frees a node entry 73 - * @p: the entry's RCU field 74 - * 75 - * Description: 76 - * This function is designed to be used as a callback to the call_rcu() 77 - * function so that memory allocated to a hash table node entry can be 78 - * released safely. 79 - * 80 - */ 81 - static void sel_netnode_free(struct rcu_head *p) 82 - { 83 - struct sel_netnode *node = container_of(p, struct sel_netnode, rcu); 84 - kfree(node); 85 - } 86 - 87 - /** 88 72 * sel_netnode_hashfn_ipv4 - IPv4 hashing function for the node table 89 73 * @addr: IPv4 address 90 74 * ··· 177 193 rcu_dereference(sel_netnode_hash[idx].list.prev), 178 194 struct sel_netnode, list); 179 195 list_del_rcu(&tail->list); 180 - call_rcu(&tail->rcu, sel_netnode_free); 196 + kfree_rcu(tail, rcu); 181 197 } else 182 198 sel_netnode_hash[idx].size++; 183 199 } ··· 290 306 list_for_each_entry_safe(node, node_tmp, 291 307 &sel_netnode_hash[idx].list, list) { 292 308 list_del_rcu(&node->list); 293 - call_rcu(&node->rcu, sel_netnode_free); 309 + kfree_rcu(node, rcu); 294 310 } 295 311 sel_netnode_hash[idx].size = 0; 296 312 }
+2 -18
security/selinux/netport.c
··· 68 68 static struct sel_netport_bkt sel_netport_hash[SEL_NETPORT_HASH_SIZE]; 69 69 70 70 /** 71 - * sel_netport_free - Frees a port entry 72 - * @p: the entry's RCU field 73 - * 74 - * Description: 75 - * This function is designed to be used as a callback to the call_rcu() 76 - * function so that memory allocated to a hash table port entry can be 77 - * released safely. 78 - * 79 - */ 80 - static void sel_netport_free(struct rcu_head *p) 81 - { 82 - struct sel_netport *port = container_of(p, struct sel_netport, rcu); 83 - kfree(port); 84 - } 85 - 86 - /** 87 71 * sel_netport_hashfn - Hashing function for the port table 88 72 * @pnum: port number 89 73 * ··· 126 142 rcu_dereference(sel_netport_hash[idx].list.prev), 127 143 struct sel_netport, list); 128 144 list_del_rcu(&tail->list); 129 - call_rcu(&tail->rcu, sel_netport_free); 145 + kfree_rcu(tail, rcu); 130 146 } else 131 147 sel_netport_hash[idx].size++; 132 148 } ··· 225 241 list_for_each_entry_safe(port, port_tmp, 226 242 &sel_netport_hash[idx].list, list) { 227 243 list_del_rcu(&port->list); 228 - call_rcu(&port->rcu, sel_netport_free); 244 + kfree_rcu(port, rcu); 229 245 } 230 246 sel_netport_hash[idx].size = 0; 231 247 }