Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netprio_cgroup: reimplement priomap expansion

netprio kept track of the highest prioidx allocated and resized
priomaps accordingly when necessary. This makes it necessary to keep
track of prioidx allocation and may end up resizing on every new
prioidx.

Update extend_netdev_table() such that it takes @target_idx which the
priomap should be able to accommodate. If the priomap is large enough,
nothing happens; otherwise, the size is doubled until @target_idx can
be accommodated.

This makes max_prioidx and write_update_netdev_table() unnecessary.
write_priomap() now calls extend_netdev_table() directly.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Tested-and-Acked-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
Acked-by: David S. Miller <davem@davemloft.net>

Tejun Heo 4a6ee25c 52bca930

+33 -23
+33 -23
net/core/netprio_cgroup.c
··· 27 27 28 28 #include <linux/fdtable.h> 29 29 30 + #define PRIOMAP_MIN_SZ 128 30 31 #define PRIOIDX_SZ 128 31 32 32 33 static unsigned long prioidx_map[PRIOIDX_SZ]; 33 34 static DEFINE_SPINLOCK(prioidx_map_lock); 34 - static atomic_t max_prioidx = ATOMIC_INIT(0); 35 35 36 36 static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp) 37 37 { ··· 51 51 return -ENOSPC; 52 52 } 53 53 set_bit(prioidx, prioidx_map); 54 - if (atomic_read(&max_prioidx) < prioidx) 55 - atomic_set(&max_prioidx, prioidx); 56 54 spin_unlock_irqrestore(&prioidx_map_lock, flags); 57 55 *prio = prioidx; 58 56 return 0; ··· 65 67 spin_unlock_irqrestore(&prioidx_map_lock, flags); 66 68 } 67 69 68 - static int extend_netdev_table(struct net_device *dev, u32 new_len) 70 + /* 71 + * Extend @dev->priomap so that it's large enough to accomodate 72 + * @target_idx. @dev->priomap.priomap_len > @target_idx after successful 73 + * return. Must be called under rtnl lock. 74 + */ 75 + static int extend_netdev_table(struct net_device *dev, u32 target_idx) 69 76 { 70 - size_t new_size = sizeof(struct netprio_map) + 71 - ((sizeof(u32) * new_len)); 72 - struct netprio_map *new = kzalloc(new_size, GFP_KERNEL); 73 - struct netprio_map *old; 77 + struct netprio_map *old, *new; 78 + size_t new_sz, new_len; 74 79 80 + /* is the existing priomap large enough? */ 75 81 old = rtnl_dereference(dev->priomap); 82 + if (old && old->priomap_len > target_idx) 83 + return 0; 76 84 85 + /* 86 + * Determine the new size. Let's keep it power-of-two. We start 87 + * from PRIOMAP_MIN_SZ and double it until it's large enough to 88 + * accommodate @target_idx. 89 + */ 90 + new_sz = PRIOMAP_MIN_SZ; 91 + while (true) { 92 + new_len = (new_sz - offsetof(struct netprio_map, priomap)) / 93 + sizeof(new->priomap[0]); 94 + if (new_len > target_idx) 95 + break; 96 + new_sz *= 2; 97 + /* overflowed? */ 98 + if (WARN_ON(new_sz < PRIOMAP_MIN_SZ)) 99 + return -ENOSPC; 100 + } 101 + 102 + /* allocate & copy */ 103 + new = kzalloc(new_sz, GFP_KERNEL); 77 104 if (!new) { 78 105 pr_warn("Unable to alloc new priomap!\n"); 79 106 return -ENOMEM; ··· 110 87 111 88 new->priomap_len = new_len; 112 89 90 + /* install the new priomap */ 113 91 rcu_assign_pointer(dev->priomap, new); 114 92 if (old) 115 93 kfree_rcu(old, rcu); 116 94 return 0; 117 - } 118 - 119 - static int write_update_netdev_table(struct net_device *dev) 120 - { 121 - int ret = 0; 122 - u32 max_len; 123 - struct netprio_map *map; 124 - 125 - max_len = atomic_read(&max_prioidx) + 1; 126 - map = rtnl_dereference(dev->priomap); 127 - if (!map || map->priomap_len < max_len) 128 - ret = extend_netdev_table(dev, max_len); 129 - 130 - return ret; 131 95 } 132 96 133 97 static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp) ··· 201 191 202 192 rtnl_lock(); 203 193 204 - ret = write_update_netdev_table(dev); 194 + ret = extend_netdev_table(dev, prioidx); 205 195 if (ret) 206 196 goto out_unlock; 207 197