Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfilter: nf_flow_table_offload: add IPv6 support

Add nf_flow_rule_route_ipv6() and use it from the IPv6 and the inet
flowtable type definitions. Rename the nf_flow_rule_route() function to
nf_flow_rule_route_ipv4().

Adjust the maximum number of actions, raising it to 16 to leave
sufficient room for the IPv6 address mangling required by NAT.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>

+127 -11
+6 -3
include/net/netfilter/nf_flow_table.h
··· 163 163 int nf_flow_table_offload_setup(struct nf_flowtable *flowtable, 164 164 struct net_device *dev, 165 165 enum flow_block_command cmd); 166 - int nf_flow_rule_route(struct net *net, const struct flow_offload *flow, 167 - enum flow_offload_tuple_dir dir, 168 - struct nf_flow_rule *flow_rule); 166 + int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow, 167 + enum flow_offload_tuple_dir dir, 168 + struct nf_flow_rule *flow_rule); 169 + int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow, 170 + enum flow_offload_tuple_dir dir, 171 + struct nf_flow_rule *flow_rule); 169 172 170 173 int nf_flow_table_offload_init(void); 171 174 void nf_flow_table_offload_exit(void);
+1 -1
net/ipv4/netfilter/nf_flow_table_ipv4.c
··· 10 10 .family = NFPROTO_IPV4, 11 11 .init = nf_flow_table_init, 12 12 .setup = nf_flow_table_offload_setup, 13 - .action = nf_flow_rule_route, 13 + .action = nf_flow_rule_route_ipv4, 14 14 .free = nf_flow_table_free, 15 15 .hook = nf_flow_offload_ip_hook, 16 16 .owner = THIS_MODULE,
+1 -1
net/ipv6/netfilter/nf_flow_table_ipv6.c
··· 11 11 .family = NFPROTO_IPV6, 12 12 .init = nf_flow_table_init, 13 13 .setup = nf_flow_table_offload_setup, 14 - .action = nf_flow_rule_route, 14 + .action = nf_flow_rule_route_ipv6, 15 15 .free = nf_flow_table_free, 16 16 .hook = nf_flow_offload_ipv6_hook, 17 17 .owner = THIS_MODULE,
+24 -1
net/netfilter/nf_flow_table_inet.c
··· 21 21 return NF_ACCEPT; 22 22 } 23 23 24 + static int nf_flow_rule_route_inet(struct net *net, 25 + const struct flow_offload *flow, 26 + enum flow_offload_tuple_dir dir, 27 + struct nf_flow_rule *flow_rule) 28 + { 29 + const struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple; 30 + int err; 31 + 32 + switch (flow_tuple->l3proto) { 33 + case NFPROTO_IPV4: 34 + err = nf_flow_rule_route_ipv4(net, flow, dir, flow_rule); 35 + break; 36 + case NFPROTO_IPV6: 37 + err = nf_flow_rule_route_ipv6(net, flow, dir, flow_rule); 38 + break; 39 + default: 40 + err = -1; 41 + break; 42 + } 43 + 44 + return err; 45 + } 46 + 24 47 static struct nf_flowtable_type flowtable_inet = { 25 48 .family = NFPROTO_INET, 26 49 .init = nf_flow_table_init, 27 50 .setup = nf_flow_table_offload_setup, 28 - .action = nf_flow_rule_route, 51 + .action = nf_flow_rule_route_inet, 29 52 .free = nf_flow_table_free, 30 53 .hook = nf_flow_offload_inet_hook, 31 54 .owner = THIS_MODULE,
+95 -5
net/netfilter/nf_flow_table_offload.c
··· 236 236 (u8 *)&addr, (u8 *)&mask); 237 237 } 238 238 239 + static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule, 240 + unsigned int offset, 241 + u8 *addr, u8 *mask) 242 + { 243 + struct flow_action_entry *entry; 244 + int i; 245 + 246 + for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) { 247 + entry = flow_action_entry_next(flow_rule); 248 + flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6, 249 + offset + i, 250 + &addr[i], mask); 251 + } 252 + } 253 + 254 + static void flow_offload_ipv6_snat(struct net *net, 255 + const struct flow_offload *flow, 256 + enum flow_offload_tuple_dir dir, 257 + struct nf_flow_rule *flow_rule) 258 + { 259 + u32 mask = ~htonl(0xffffffff); 260 + const u8 *addr; 261 + u32 offset; 262 + 263 + switch (dir) { 264 + case FLOW_OFFLOAD_DIR_ORIGINAL: 265 + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr; 266 + offset = offsetof(struct ipv6hdr, saddr); 267 + break; 268 + case FLOW_OFFLOAD_DIR_REPLY: 269 + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr; 270 + offset = offsetof(struct ipv6hdr, daddr); 271 + break; 272 + default: 273 + return; 274 + } 275 + 276 + flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask); 277 + } 278 + 279 + static void flow_offload_ipv6_dnat(struct net *net, 280 + const struct flow_offload *flow, 281 + enum flow_offload_tuple_dir dir, 282 + struct nf_flow_rule *flow_rule) 283 + { 284 + u32 mask = ~htonl(0xffffffff); 285 + const u8 *addr; 286 + u32 offset; 287 + 288 + switch (dir) { 289 + case FLOW_OFFLOAD_DIR_ORIGINAL: 290 + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr; 291 + offset = offsetof(struct ipv6hdr, daddr); 292 + break; 293 + case FLOW_OFFLOAD_DIR_REPLY: 294 + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr; 295 + offset = offsetof(struct ipv6hdr, saddr); 296 + break; 297 + default: 298 + return; 299 + } 300 + 301 + flow_offload_ipv6_mangle(flow_rule, 
offset, (u8 *)addr, (u8 *)&mask); 302 + } 303 + 239 304 static int flow_offload_l4proto(const struct flow_offload *flow) 240 305 { 241 306 u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto; ··· 407 342 dev_hold(rt->dst.dev); 408 343 } 409 344 410 - int nf_flow_rule_route(struct net *net, const struct flow_offload *flow, 411 - enum flow_offload_tuple_dir dir, 412 - struct nf_flow_rule *flow_rule) 345 + int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow, 346 + enum flow_offload_tuple_dir dir, 347 + struct nf_flow_rule *flow_rule) 413 348 { 414 349 if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 || 415 350 flow_offload_eth_dst(net, flow, dir, flow_rule) < 0) ··· 431 366 432 367 return 0; 433 368 } 434 - EXPORT_SYMBOL_GPL(nf_flow_rule_route); 369 + EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4); 370 + 371 + int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow, 372 + enum flow_offload_tuple_dir dir, 373 + struct nf_flow_rule *flow_rule) 374 + { 375 + if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 || 376 + flow_offload_eth_dst(net, flow, dir, flow_rule) < 0) 377 + return -1; 378 + 379 + if (flow->flags & FLOW_OFFLOAD_SNAT) { 380 + flow_offload_ipv6_snat(net, flow, dir, flow_rule); 381 + flow_offload_port_snat(net, flow, dir, flow_rule); 382 + } 383 + if (flow->flags & FLOW_OFFLOAD_DNAT) { 384 + flow_offload_ipv6_dnat(net, flow, dir, flow_rule); 385 + flow_offload_port_dnat(net, flow, dir, flow_rule); 386 + } 387 + 388 + flow_offload_redirect(flow, dir, flow_rule); 389 + 390 + return 0; 391 + } 392 + EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6); 393 + 394 + #define NF_FLOW_RULE_ACTION_MAX 16 435 395 436 396 static struct nf_flow_rule * 437 397 nf_flow_offload_rule_alloc(struct net *net, ··· 473 383 if (!flow_rule) 474 384 goto err_flow; 475 385 476 - flow_rule->rule = flow_rule_alloc(10); 386 + flow_rule->rule = flow_rule_alloc(NF_FLOW_RULE_ACTION_MAX); 477 387 if (!flow_rule->rule) 478 388 
goto err_flow_rule; 479 389