Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfilter: nf_flow_table_offload: add flow_action_entry_next() and use it

This function retrieves a spare action entry from the array of actions.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>

+38 -38
net/netfilter/nf_flow_table_offload.c
··· 112 112 memcpy(&entry->mangle.val, value, sizeof(u32)); 113 113 } 114 114 115 + static inline struct flow_action_entry * 116 + flow_action_entry_next(struct nf_flow_rule *flow_rule) 117 + { 118 + int i = flow_rule->rule->action.num_entries++; 119 + 120 + return &flow_rule->rule->action.entries[i]; 121 + } 122 + 115 123 static int flow_offload_eth_src(struct net *net, 116 124 const struct flow_offload *flow, 117 125 enum flow_offload_tuple_dir dir, 118 - struct flow_action_entry *entry0, 119 - struct flow_action_entry *entry1) 126 + struct nf_flow_rule *flow_rule) 120 127 { 121 128 const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple; 129 + struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule); 130 + struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule); 122 131 struct net_device *dev; 123 132 u32 mask, val; 124 133 u16 val16; ··· 154 145 static int flow_offload_eth_dst(struct net *net, 155 146 const struct flow_offload *flow, 156 147 enum flow_offload_tuple_dir dir, 157 - struct flow_action_entry *entry0, 158 - struct flow_action_entry *entry1) 148 + struct nf_flow_rule *flow_rule) 159 149 { 160 150 const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple; 151 + struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule); 152 + struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule); 161 153 struct neighbour *n; 162 154 u32 mask, val; 163 155 u16 val16; ··· 185 175 static void flow_offload_ipv4_snat(struct net *net, 186 176 const struct flow_offload *flow, 187 177 enum flow_offload_tuple_dir dir, 188 - struct flow_action_entry *entry) 178 + struct nf_flow_rule *flow_rule) 189 179 { 180 + struct flow_action_entry *entry = flow_action_entry_next(flow_rule); 190 181 u32 mask = ~htonl(0xffffffff); 191 182 __be32 addr; 192 183 u32 offset; ··· 212 201 static void flow_offload_ipv4_dnat(struct net *net, 213 202 const struct flow_offload *flow, 214 203 enum flow_offload_tuple_dir dir, 215 
- struct flow_action_entry *entry) 204 + struct nf_flow_rule *flow_rule) 216 205 { 206 + struct flow_action_entry *entry = flow_action_entry_next(flow_rule); 217 207 u32 mask = ~htonl(0xffffffff); 218 208 __be32 addr; 219 209 u32 offset; ··· 258 246 static void flow_offload_port_snat(struct net *net, 259 247 const struct flow_offload *flow, 260 248 enum flow_offload_tuple_dir dir, 261 - struct flow_action_entry *entry) 249 + struct nf_flow_rule *flow_rule) 262 250 { 251 + struct flow_action_entry *entry = flow_action_entry_next(flow_rule); 263 252 u32 mask = ~htonl(0xffff0000); 264 253 __be16 port; 265 254 u32 offset; ··· 285 272 static void flow_offload_port_dnat(struct net *net, 286 273 const struct flow_offload *flow, 287 274 enum flow_offload_tuple_dir dir, 288 - struct flow_action_entry *entry) 275 + struct nf_flow_rule *flow_rule) 289 276 { 277 + struct flow_action_entry *entry = flow_action_entry_next(flow_rule); 290 278 u32 mask = ~htonl(0xffff); 291 279 __be16 port; 292 280 u32 offset; ··· 311 297 312 298 static void flow_offload_ipv4_checksum(struct net *net, 313 299 const struct flow_offload *flow, 314 - struct flow_action_entry *entry) 300 + struct nf_flow_rule *flow_rule) 315 301 { 316 302 u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto; 303 + struct flow_action_entry *entry = flow_action_entry_next(flow_rule); 317 304 318 305 entry->id = FLOW_ACTION_CSUM; 319 306 entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR; ··· 331 316 332 317 static void flow_offload_redirect(const struct flow_offload *flow, 333 318 enum flow_offload_tuple_dir dir, 334 - struct flow_action_entry *entry) 319 + struct nf_flow_rule *flow_rule) 335 320 { 321 + struct flow_action_entry *entry = flow_action_entry_next(flow_rule); 336 322 struct rtable *rt; 337 323 338 324 rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache; ··· 346 330 enum flow_offload_tuple_dir dir, 347 331 struct nf_flow_rule *flow_rule) 348 332 { 349 - int i; 350 - 351 - if 
(flow_offload_eth_src(net, flow, dir, 352 - &flow_rule->rule->action.entries[0], 353 - &flow_rule->rule->action.entries[1]) < 0) 333 + if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 || 334 + flow_offload_eth_dst(net, flow, dir, flow_rule) < 0) 354 335 return -1; 355 336 356 - if (flow_offload_eth_dst(net, flow, dir, 357 - &flow_rule->rule->action.entries[2], 358 - &flow_rule->rule->action.entries[3]) < 0) 359 - return -1; 360 - 361 - i = 4; 362 337 if (flow->flags & FLOW_OFFLOAD_SNAT) { 363 - flow_offload_ipv4_snat(net, flow, dir, 364 - &flow_rule->rule->action.entries[i++]); 365 - flow_offload_port_snat(net, flow, dir, 366 - &flow_rule->rule->action.entries[i++]); 338 + flow_offload_ipv4_snat(net, flow, dir, flow_rule); 339 + flow_offload_port_snat(net, flow, dir, flow_rule); 367 340 } 368 341 if (flow->flags & FLOW_OFFLOAD_DNAT) { 369 - flow_offload_ipv4_dnat(net, flow, dir, 370 - &flow_rule->rule->action.entries[i++]); 371 - flow_offload_port_dnat(net, flow, dir, 372 - &flow_rule->rule->action.entries[i++]); 342 + flow_offload_ipv4_dnat(net, flow, dir, flow_rule); 343 + flow_offload_port_dnat(net, flow, dir, flow_rule); 373 344 } 374 345 if (flow->flags & FLOW_OFFLOAD_SNAT || 375 346 flow->flags & FLOW_OFFLOAD_DNAT) 376 - flow_offload_ipv4_checksum(net, flow, 377 - &flow_rule->rule->action.entries[i++]); 347 + flow_offload_ipv4_checksum(net, flow, flow_rule); 378 348 379 - flow_offload_redirect(flow, dir, &flow_rule->rule->action.entries[i++]); 349 + flow_offload_redirect(flow, dir, flow_rule); 380 350 381 - return i; 351 + return 0; 382 352 } 383 353 EXPORT_SYMBOL_GPL(nf_flow_rule_route); 384 354 ··· 377 375 const struct flow_offload *flow = offload->flow; 378 376 const struct flow_offload_tuple *tuple; 379 377 struct nf_flow_rule *flow_rule; 380 - int err = -ENOMEM, num_actions; 378 + int err = -ENOMEM; 381 379 382 380 flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL); 383 381 if (!flow_rule) ··· 396 394 if (err < 0) 397 395 goto err_flow_match; 398 
396 399 - num_actions = flowtable->type->action(net, flow, dir, flow_rule); 400 - if (num_actions < 0) 397 + flow_rule->rule->action.num_entries = 0; 398 + if (flowtable->type->action(net, flow, dir, flow_rule) < 0) 401 399 goto err_flow_match; 402 - 403 - flow_rule->rule->action.num_entries = num_actions; 404 400 405 401 return flow_rule; 406 402