Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netprio_cgroup: use cgroup->id instead of cgroup_netprio_state->prioidx

With priomap expansion no longer depending on knowing max id
allocated, netprio_cgroup can use cgroup->id instead of cs->prioidx.
Drop prioidx alloc/free logic and convert all uses to cgroup->id.

* In cgrp_css_alloc(), parent->id test is moved above @cs allocation
to simplify error path.

* In cgrp_css_free(), @cs assignment is made initialization.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Tested-and-Acked-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
Acked-by: David S. Miller <davem@davemloft.net>

Tejun Heo 88d642fa 4a6ee25c

+15 -61
+4 -7
include/net/netprio_cgroup.h
··· 27 27 28 28 struct cgroup_netprio_state { 29 29 struct cgroup_subsys_state css; 30 - u32 prioidx; 31 30 }; 32 31 33 32 extern void sock_update_netprioidx(struct sock *sk, struct task_struct *task); ··· 35 36 36 37 static inline u32 task_netprioidx(struct task_struct *p) 37 38 { 38 - struct cgroup_netprio_state *state; 39 + struct cgroup_subsys_state *css; 39 40 u32 idx; 40 41 41 42 rcu_read_lock(); 42 - state = container_of(task_subsys_state(p, net_prio_subsys_id), 43 - struct cgroup_netprio_state, css); 44 - idx = state->prioidx; 43 + css = task_subsys_state(p, net_prio_subsys_id); 44 + idx = css->cgroup->id; 45 45 rcu_read_unlock(); 46 46 return idx; 47 47 } ··· 55 57 rcu_read_lock(); 56 58 css = task_subsys_state(p, net_prio_subsys_id); 57 59 if (css) 58 - idx = container_of(css, 59 - struct cgroup_netprio_state, css)->prioidx; 60 + idx = css->cgroup->id; 60 61 rcu_read_unlock(); 61 62 return idx; 62 63 }
+11 -54
net/core/netprio_cgroup.c
··· 28 28 #include <linux/fdtable.h> 29 29 30 30 #define PRIOMAP_MIN_SZ 128 31 - #define PRIOIDX_SZ 128 32 - 33 - static unsigned long prioidx_map[PRIOIDX_SZ]; 34 - static DEFINE_SPINLOCK(prioidx_map_lock); 35 31 36 32 static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp) 37 33 { 38 34 return container_of(cgroup_subsys_state(cgrp, net_prio_subsys_id), 39 35 struct cgroup_netprio_state, css); 40 - } 41 - 42 - static int get_prioidx(u32 *prio) 43 - { 44 - unsigned long flags; 45 - u32 prioidx; 46 - 47 - spin_lock_irqsave(&prioidx_map_lock, flags); 48 - prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ); 49 - if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) { 50 - spin_unlock_irqrestore(&prioidx_map_lock, flags); 51 - return -ENOSPC; 52 - } 53 - set_bit(prioidx, prioidx_map); 54 - spin_unlock_irqrestore(&prioidx_map_lock, flags); 55 - *prio = prioidx; 56 - return 0; 57 - } 58 - 59 - static void put_prioidx(u32 idx) 60 - { 61 - unsigned long flags; 62 - 63 - spin_lock_irqsave(&prioidx_map_lock, flags); 64 - clear_bit(idx, prioidx_map); 65 - spin_unlock_irqrestore(&prioidx_map_lock, flags); 66 36 } 67 37 68 38 /* ··· 90 120 static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp) 91 121 { 92 122 struct cgroup_netprio_state *cs; 93 - int ret = -EINVAL; 123 + 124 + if (cgrp->parent && cgrp->parent->id) 125 + return ERR_PTR(-EINVAL); 94 126 95 127 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 96 128 if (!cs) 97 129 return ERR_PTR(-ENOMEM); 98 130 99 - if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) 100 - goto out; 101 - 102 - ret = get_prioidx(&cs->prioidx); 103 - if (ret < 0) { 104 - pr_warn("No space in priority index array\n"); 105 - goto out; 106 - } 107 - 108 131 return &cs->css; 109 - out: 110 - kfree(cs); 111 - return ERR_PTR(ret); 112 132 } 113 133 114 134 static void cgrp_css_free(struct cgroup *cgrp) 115 135 { 116 - struct cgroup_netprio_state *cs; 136 + struct cgroup_netprio_state 
*cs; 136 + struct cgroup_netprio_state *cs = cgrp_netprio_state(cgrp); 117 137 struct net_device *dev; 118 138 struct netprio_map *map; 119 139 120 - cs = cgrp_netprio_state(cgrp); 121 140 rtnl_lock(); 122 141 for_each_netdev(&init_net, dev) { 123 142 map = rtnl_dereference(dev->priomap); 124 - if (map && cs->prioidx < map->priomap_len) 125 - map->priomap[cs->prioidx] = 0; 143 + if (map && cgrp->id < map->priomap_len) 144 + map->priomap[cgrp->id] = 0; 126 145 } 127 146 rtnl_unlock(); 128 - put_prioidx(cs->prioidx); 129 147 kfree(cs); 130 148 } 131 149 132 150 static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft) 133 151 { 134 - return (u64)cgrp_netprio_state(cgrp)->prioidx; 152 + return cgrp->id; 135 153 } 136 154 137 155 static int read_priomap(struct cgroup *cont, struct cftype *cft, 138 156 struct cgroup_map_cb *cb) 139 157 { 140 158 struct net_device *dev; 141 - u32 prioidx = cgrp_netprio_state(cont)->prioidx; 159 + u32 id = cont->id; 142 160 u32 priority; 143 161 struct netprio_map *map; 144 162 145 163 rcu_read_lock(); 146 164 for_each_netdev_rcu(&init_net, dev) { 147 165 map = rcu_dereference(dev->priomap); 148 - priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0; 166 + priority = (map && id < map->priomap_len) ? map->priomap[id] : 0; 149 167 cb->fill(cb, dev->name, priority); 150 168 } 151 169 rcu_read_unlock(); ··· 143 185 static int write_priomap(struct cgroup *cgrp, struct cftype *cft, 144 186 const char *buffer) 145 187 { 146 - u32 prioidx = cgrp_netprio_state(cgrp)->prioidx; 147 188 char devname[IFNAMSIZ + 1]; 148 189 struct net_device *dev; 149 190 struct netprio_map *map; ··· 158 201 159 202 rtnl_lock(); 160 203 161 - ret = extend_netdev_table(dev, prioidx); 204 + ret = extend_netdev_table(dev, cgrp->id); 162 205 if (ret) 163 206 goto out_unlock; 164 207 165 208 map = rtnl_dereference(dev->priomap); 166 209 if (map) 167 - map->priomap[prioidx] = prio; 210 + map->priomap[cgrp->id] = prio; 168 211 out_unlock: 169 212 rtnl_unlock(); 170 213 dev_put(dev);