Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IPVS: netns, use ip_vs_proto_data as param.

ip_vs_protocol *pp is replaced by ip_vs_proto_data *pd in
the function calls in the ip_vs_protocol struct, i.e.:
- timeout_change()
- state_transition()

ip_vs_protocol_timeout_change() got ipvs as a param, due to the above
and an upcoming patch - defence work

Most of these changes are triggered by Julian's comment:
"tcp_timeout_change should work with the new struct ip_vs_proto_data
so that tcp_state_table will go to pd->state_table
and set_tcp_state will get pd instead of pp"

*v3
Mostly comments from Julian
The pp -> pd conversion should start from functions like
ip_vs_out() that use pp = ip_vs_proto_get(iph.protocol),
now they should use ip_vs_proto_data_get(net, iph.protocol).
The unused param *pp in conn_in_get() and conn_out_get() was removed.

*v4
ip_vs_protocol_timeout_change() walks the proto_data path.

Signed-off-by: Hans Schillstrom <hans.schillstrom@ericsson.com>
Acked-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>

authored by

Hans Schillstrom and committed by
Simon Horman
9330419d 88fe2d37

+129 -110
+6 -12
include/net/ip_vs.h
··· 372 372 void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd); 373 373 374 374 int (*conn_schedule)(int af, struct sk_buff *skb, 375 - struct ip_vs_protocol *pp, 375 + struct ip_vs_proto_data *pd, 376 376 int *verdict, struct ip_vs_conn **cpp); 377 377 378 378 struct ip_vs_conn * 379 379 (*conn_in_get)(int af, 380 380 const struct sk_buff *skb, 381 - struct ip_vs_protocol *pp, 382 381 const struct ip_vs_iphdr *iph, 383 382 unsigned int proto_off, 384 383 int inverse); ··· 385 386 struct ip_vs_conn * 386 387 (*conn_out_get)(int af, 387 388 const struct sk_buff *skb, 388 - struct ip_vs_protocol *pp, 389 389 const struct ip_vs_iphdr *iph, 390 390 unsigned int proto_off, 391 391 int inverse); ··· 402 404 403 405 int (*state_transition)(struct ip_vs_conn *cp, int direction, 404 406 const struct sk_buff *skb, 405 - struct ip_vs_protocol *pp); 407 + struct ip_vs_proto_data *pd); 406 408 407 409 int (*register_app)(struct ip_vs_app *inc); 408 410 ··· 415 417 int offset, 416 418 const char *msg); 417 419 418 - void (*timeout_change)(struct ip_vs_protocol *pp, int flags); 419 - 420 - int (*set_state_timeout)(struct ip_vs_protocol *pp, char *sname, int to); 420 + void (*timeout_change)(struct ip_vs_proto_data *pd, int flags); 421 421 }; 422 422 423 423 /* ··· 774 778 struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p); 775 779 776 780 struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, 777 - struct ip_vs_protocol *pp, 778 781 const struct ip_vs_iphdr *iph, 779 782 unsigned int proto_off, 780 783 int inverse); ··· 781 786 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p); 782 787 783 788 struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, 784 - struct ip_vs_protocol *pp, 785 789 const struct ip_vs_iphdr *iph, 786 790 unsigned int proto_off, 787 791 int inverse); ··· 911 917 */ 912 918 extern int ip_vs_protocol_init(void); 913 919 extern void 
ip_vs_protocol_cleanup(void); 914 - extern void ip_vs_protocol_timeout_change(int flags); 920 + extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags); 915 921 extern int *ip_vs_create_timeout_table(int *table, int size); 916 922 extern int 917 923 ip_vs_set_state_timeout(int *table, int num, const char *const *names, ··· 941 947 extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler); 942 948 extern struct ip_vs_conn * 943 949 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, 944 - struct ip_vs_protocol *pp, int *ignored); 950 + struct ip_vs_proto_data *pd, int *ignored); 945 951 extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, 946 - struct ip_vs_protocol *pp); 952 + struct ip_vs_proto_data *pd); 947 953 948 954 949 955 /*
-2
net/netfilter/ipvs/ip_vs_conn.c
··· 329 329 330 330 struct ip_vs_conn * 331 331 ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, 332 - struct ip_vs_protocol *pp, 333 332 const struct ip_vs_iphdr *iph, 334 333 unsigned int proto_off, int inverse) 335 334 { ··· 427 428 428 429 struct ip_vs_conn * 429 430 ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, 430 - struct ip_vs_protocol *pp, 431 431 const struct ip_vs_iphdr *iph, 432 432 unsigned int proto_off, int inverse) 433 433 {
+47 -30
net/netfilter/ipvs/ip_vs_core.c
··· 177 177 static inline int 178 178 ip_vs_set_state(struct ip_vs_conn *cp, int direction, 179 179 const struct sk_buff *skb, 180 - struct ip_vs_protocol *pp) 180 + struct ip_vs_proto_data *pd) 181 181 { 182 - if (unlikely(!pp->state_transition)) 182 + if (unlikely(!pd->pp->state_transition)) 183 183 return 0; 184 - return pp->state_transition(cp, direction, skb, pp); 184 + return pd->pp->state_transition(cp, direction, skb, pd); 185 185 } 186 186 187 187 static inline int ··· 378 378 */ 379 379 struct ip_vs_conn * 380 380 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, 381 - struct ip_vs_protocol *pp, int *ignored) 381 + struct ip_vs_proto_data *pd, int *ignored) 382 382 { 383 + struct ip_vs_protocol *pp = pd->pp; 383 384 struct ip_vs_conn *cp = NULL; 384 385 struct ip_vs_iphdr iph; 385 386 struct ip_vs_dest *dest; ··· 409 408 * Do not schedule replies from local real server. 410 409 */ 411 410 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) && 412 - (cp = pp->conn_in_get(svc->af, skb, pp, &iph, iph.len, 1))) { 411 + (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) { 413 412 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0, 414 413 "Not scheduling reply for existing connection"); 415 414 __ip_vs_conn_put(cp); ··· 480 479 * no destination is available for a new connection. 
481 480 */ 482 481 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, 483 - struct ip_vs_protocol *pp) 482 + struct ip_vs_proto_data *pd) 484 483 { 485 484 __be16 _ports[2], *pptr; 486 485 struct ip_vs_iphdr iph; 487 486 int unicast; 487 + 488 488 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); 489 489 490 490 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); ··· 532 530 ip_vs_in_stats(cp, skb); 533 531 534 532 /* set state */ 535 - cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp); 533 + cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); 536 534 537 535 /* transmit the first SYN packet */ 538 - ret = cp->packet_xmit(skb, cp, pp); 536 + ret = cp->packet_xmit(skb, cp, pd->pp); 539 537 /* do not touch skb anymore */ 540 538 541 539 atomic_inc(&cp->in_pkts); ··· 842 840 843 841 ip_vs_fill_iphdr(AF_INET, cih, &ciph); 844 842 /* The embedded headers contain source and dest in reverse order */ 845 - cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1); 843 + cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1); 846 844 if (!cp) 847 845 return NF_ACCEPT; 848 846 ··· 919 917 920 918 ip_vs_fill_iphdr(AF_INET6, cih, &ciph); 921 919 /* The embedded headers contain source and dest in reverse order */ 922 - cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1); 920 + cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1); 923 921 if (!cp) 924 922 return NF_ACCEPT; 925 923 ··· 958 956 * Used for NAT and local client. 
959 957 */ 960 958 static unsigned int 961 - handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, 959 + handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 962 960 struct ip_vs_conn *cp, int ihl) 963 961 { 962 + struct ip_vs_protocol *pp = pd->pp; 963 + 964 964 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet"); 965 965 966 966 if (!skb_make_writable(skb, ihl)) ··· 1011 1007 IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT"); 1012 1008 1013 1009 ip_vs_out_stats(cp, skb); 1014 - ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); 1010 + ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd); 1015 1011 skb->ipvs_property = 1; 1016 1012 if (!(cp->flags & IP_VS_CONN_F_NFCT)) 1017 1013 ip_vs_notrack(skb); ··· 1038 1034 struct net *net = NULL; 1039 1035 struct ip_vs_iphdr iph; 1040 1036 struct ip_vs_protocol *pp; 1037 + struct ip_vs_proto_data *pd; 1041 1038 struct ip_vs_conn *cp; 1042 1039 1043 1040 EnterFunction(11); ··· 1084 1079 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 1085 1080 } 1086 1081 1087 - pp = ip_vs_proto_get(iph.protocol); 1088 - if (unlikely(!pp)) 1082 + pd = ip_vs_proto_data_get(net, iph.protocol); 1083 + if (unlikely(!pd)) 1089 1084 return NF_ACCEPT; 1085 + pp = pd->pp; 1090 1086 1091 1087 /* reassemble IP fragments */ 1092 1088 #ifdef CONFIG_IP_VS_IPV6 ··· 1113 1107 /* 1114 1108 * Check if the packet belongs to an existing entry 1115 1109 */ 1116 - cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0); 1110 + cp = pp->conn_out_get(af, skb, &iph, iph.len, 0); 1117 1111 1118 1112 if (likely(cp)) 1119 - return handle_response(af, skb, pp, cp, iph.len); 1113 + return handle_response(af, skb, pd, cp, iph.len); 1120 1114 if (sysctl_ip_vs_nat_icmp_send && 1121 1115 (pp->protocol == IPPROTO_TCP || 1122 1116 pp->protocol == IPPROTO_UDP || ··· 1242 1236 static int 1243 1237 ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) 1244 1238 { 1239 + struct net *net = NULL; 1245 1240 struct iphdr *iph; 1246 1241 
struct icmphdr _icmph, *ic; 1247 1242 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ 1248 1243 struct ip_vs_iphdr ciph; 1249 1244 struct ip_vs_conn *cp; 1250 1245 struct ip_vs_protocol *pp; 1246 + struct ip_vs_proto_data *pd; 1251 1247 unsigned int offset, ihl, verdict; 1252 1248 union nf_inet_addr snet; 1253 1249 ··· 1291 1283 if (cih == NULL) 1292 1284 return NF_ACCEPT; /* The packet looks wrong, ignore */ 1293 1285 1294 - pp = ip_vs_proto_get(cih->protocol); 1295 - if (!pp) 1286 + net = skb_net(skb); 1287 + pd = ip_vs_proto_data_get(net, cih->protocol); 1288 + if (!pd) 1296 1289 return NF_ACCEPT; 1290 + pp = pd->pp; 1297 1291 1298 1292 /* Is the embedded protocol header present? */ 1299 1293 if (unlikely(cih->frag_off & htons(IP_OFFSET) && ··· 1309 1299 1310 1300 ip_vs_fill_iphdr(AF_INET, cih, &ciph); 1311 1301 /* The embedded headers contain source and dest in reverse order */ 1312 - cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1); 1302 + cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1); 1313 1303 if (!cp) { 1314 1304 /* The packet could also belong to a local client */ 1315 - cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1); 1305 + cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1); 1316 1306 if (cp) { 1317 1307 snet.ip = iph->saddr; 1318 1308 return handle_response_icmp(AF_INET, skb, &snet, ··· 1356 1346 static int 1357 1347 ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) 1358 1348 { 1349 + struct net *net = NULL; 1359 1350 struct ipv6hdr *iph; 1360 1351 struct icmp6hdr _icmph, *ic; 1361 1352 struct ipv6hdr _ciph, *cih; /* The ip header contained ··· 1364 1353 struct ip_vs_iphdr ciph; 1365 1354 struct ip_vs_conn *cp; 1366 1355 struct ip_vs_protocol *pp; 1356 + struct ip_vs_proto_data *pd; 1367 1357 unsigned int offset, verdict; 1368 1358 union nf_inet_addr snet; 1369 1359 struct rt6_info *rt; ··· 1407 1395 if (cih == NULL) 1408 1396 return NF_ACCEPT; /* The packet looks wrong, ignore 
*/ 1409 1397 1410 - pp = ip_vs_proto_get(cih->nexthdr); 1411 - if (!pp) 1398 + net = skb_net(skb); 1399 + pd = ip_vs_proto_data_get(net, cih->nexthdr); 1400 + if (!pd) 1412 1401 return NF_ACCEPT; 1402 + pp = pd->pp; 1413 1403 1414 1404 /* Is the embedded protocol header present? */ 1415 1405 /* TODO: we don't support fragmentation at the moment anyways */ ··· 1425 1411 1426 1412 ip_vs_fill_iphdr(AF_INET6, cih, &ciph); 1427 1413 /* The embedded headers contain source and dest in reverse order */ 1428 - cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1); 1414 + cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1); 1429 1415 if (!cp) { 1430 1416 /* The packet could also belong to a local client */ 1431 - cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1); 1417 + cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1); 1432 1418 if (cp) { 1433 1419 ipv6_addr_copy(&snet.in6, &iph->saddr); 1434 1420 return handle_response_icmp(AF_INET6, skb, &snet, ··· 1471 1457 static unsigned int 1472 1458 ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) 1473 1459 { 1460 + struct net *net = NULL; 1474 1461 struct ip_vs_iphdr iph; 1475 1462 struct ip_vs_protocol *pp; 1463 + struct ip_vs_proto_data *pd; 1476 1464 struct ip_vs_conn *cp; 1477 1465 int ret, restart, pkts; 1478 1466 ··· 1530 1514 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 1531 1515 } 1532 1516 1517 + net = skb_net(skb); 1533 1518 /* Protocol supported? 
*/ 1534 - pp = ip_vs_proto_get(iph.protocol); 1535 - if (unlikely(!pp)) 1519 + pd = ip_vs_proto_data_get(net, iph.protocol); 1520 + if (unlikely(!pd)) 1536 1521 return NF_ACCEPT; 1537 - 1522 + pp = pd->pp; 1538 1523 /* 1539 1524 * Check if the packet belongs to an existing connection entry 1540 1525 */ 1541 - cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0); 1526 + cp = pp->conn_in_get(af, skb, &iph, iph.len, 0); 1542 1527 1543 1528 if (unlikely(!cp)) { 1544 1529 int v; 1545 1530 1546 - if (!pp->conn_schedule(af, skb, pp, &v, &cp)) 1531 + if (!pp->conn_schedule(af, skb, pd, &v, &cp)) 1547 1532 return v; 1548 1533 } 1549 1534 ··· 1572 1555 } 1573 1556 1574 1557 ip_vs_in_stats(cp, skb); 1575 - restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp); 1558 + restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); 1576 1559 if (cp->packet_xmit) 1577 1560 ret = cp->packet_xmit(skb, cp, pp); 1578 1561 /* do not touch skb anymore */
+34 -21
net/netfilter/ipvs/ip_vs_ctl.c
··· 38 38 #include <linux/mutex.h> 39 39 40 40 #include <net/net_namespace.h> 41 + #include <linux/nsproxy.h> 41 42 #include <net/ip.h> 42 43 #ifdef CONFIG_IP_VS_IPV6 43 44 #include <net/ipv6.h> ··· 126 125 * update_defense_level is called from keventd and from sysctl, 127 126 * so it needs to protect itself from softirqs 128 127 */ 129 - static void update_defense_level(void) 128 + static void update_defense_level(struct netns_ipvs *ipvs) 130 129 { 131 130 struct sysinfo i; 132 131 static int old_secure_tcp = 0; ··· 240 239 } 241 240 old_secure_tcp = sysctl_ip_vs_secure_tcp; 242 241 if (to_change >= 0) 243 - ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1); 242 + ip_vs_protocol_timeout_change(ipvs, 243 + sysctl_ip_vs_secure_tcp > 1); 244 244 spin_unlock(&ip_vs_securetcp_lock); 245 245 246 246 local_bh_enable(); ··· 257 255 258 256 static void defense_work_handler(struct work_struct *work) 259 257 { 260 - update_defense_level(); 258 + struct net *net = &init_net; 259 + struct netns_ipvs *ipvs = net_ipvs(net); 260 + 261 + update_defense_level(ipvs); 261 262 if (atomic_read(&ip_vs_dropentry)) 262 263 ip_vs_random_dropentry(); 263 264 ··· 1507 1502 proc_do_defense_mode(ctl_table *table, int write, 1508 1503 void __user *buffer, size_t *lenp, loff_t *ppos) 1509 1504 { 1505 + struct net *net = current->nsproxy->net_ns; 1510 1506 int *valp = table->data; 1511 1507 int val = *valp; 1512 1508 int rc; ··· 1518 1512 /* Restore the correct value */ 1519 1513 *valp = val; 1520 1514 } else { 1521 - update_defense_level(); 1515 + update_defense_level(net_ipvs(net)); 1522 1516 } 1523 1517 } 1524 1518 return rc; ··· 2039 2033 /* 2040 2034 * Set timeout values for tcp tcpfin udp in the timeout_table. 
2041 2035 */ 2042 - static int ip_vs_set_timeout(struct ip_vs_timeout_user *u) 2036 + static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u) 2043 2037 { 2038 + struct ip_vs_proto_data *pd; 2039 + 2044 2040 IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n", 2045 2041 u->tcp_timeout, 2046 2042 u->tcp_fin_timeout, ··· 2050 2042 2051 2043 #ifdef CONFIG_IP_VS_PROTO_TCP 2052 2044 if (u->tcp_timeout) { 2053 - ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED] 2045 + pd = ip_vs_proto_data_get(net, IPPROTO_TCP); 2046 + pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] 2054 2047 = u->tcp_timeout * HZ; 2055 2048 } 2056 2049 2057 2050 if (u->tcp_fin_timeout) { 2058 - ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT] 2051 + pd = ip_vs_proto_data_get(net, IPPROTO_TCP); 2052 + pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] 2059 2053 = u->tcp_fin_timeout * HZ; 2060 2054 } 2061 2055 #endif 2062 2056 2063 2057 #ifdef CONFIG_IP_VS_PROTO_UDP 2064 2058 if (u->udp_timeout) { 2065 - ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL] 2059 + pd = ip_vs_proto_data_get(net, IPPROTO_UDP); 2060 + pd->timeout_table[IP_VS_UDP_S_NORMAL] 2066 2061 = u->udp_timeout * HZ; 2067 2062 } 2068 2063 #endif ··· 2169 2158 goto out_unlock; 2170 2159 } else if (cmd == IP_VS_SO_SET_TIMEOUT) { 2171 2160 /* Set timeout values for (tcp tcpfin udp) */ 2172 - ret = ip_vs_set_timeout((struct ip_vs_timeout_user *)arg); 2161 + ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg); 2173 2162 goto out_unlock; 2174 2163 } else if (cmd == IP_VS_SO_SET_STARTDAEMON) { 2175 2164 struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; ··· 2381 2370 } 2382 2371 2383 2372 static inline void 2384 - __ip_vs_get_timeouts(struct ip_vs_timeout_user *u) 2373 + __ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u) 2385 2374 { 2375 + struct ip_vs_proto_data *pd; 2376 + 2386 2377 #ifdef CONFIG_IP_VS_PROTO_TCP 2387 - u->tcp_timeout = 2388 - 
ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ; 2389 - u->tcp_fin_timeout = 2390 - ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ; 2378 + pd = ip_vs_proto_data_get(net, IPPROTO_TCP); 2379 + u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ; 2380 + u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ; 2391 2381 #endif 2392 2382 #ifdef CONFIG_IP_VS_PROTO_UDP 2383 + pd = ip_vs_proto_data_get(net, IPPROTO_UDP); 2393 2384 u->udp_timeout = 2394 - ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL] / HZ; 2385 + pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ; 2395 2386 #endif 2396 2387 } 2397 2388 ··· 2534 2521 { 2535 2522 struct ip_vs_timeout_user t; 2536 2523 2537 - __ip_vs_get_timeouts(&t); 2524 + __ip_vs_get_timeouts(net, &t); 2538 2525 if (copy_to_user(user, &t, sizeof(t)) != 0) 2539 2526 ret = -EFAULT; 2540 2527 } ··· 3105 3092 return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); 3106 3093 } 3107 3094 3108 - static int ip_vs_genl_set_config(struct nlattr **attrs) 3095 + static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs) 3109 3096 { 3110 3097 struct ip_vs_timeout_user t; 3111 3098 3112 - __ip_vs_get_timeouts(&t); 3099 + __ip_vs_get_timeouts(net, &t); 3113 3100 3114 3101 if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]) 3115 3102 t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]); ··· 3121 3108 if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]) 3122 3109 t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]); 3123 3110 3124 - return ip_vs_set_timeout(&t); 3111 + return ip_vs_set_timeout(net, &t); 3125 3112 } 3126 3113 3127 3114 static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) ··· 3142 3129 ret = ip_vs_flush(net); 3143 3130 goto out; 3144 3131 } else if (cmd == IPVS_CMD_SET_CONFIG) { 3145 - ret = ip_vs_genl_set_config(info->attrs); 3132 + ret = ip_vs_genl_set_config(net, info->attrs); 3146 3133 goto out; 3147 3134 } else if (cmd == IPVS_CMD_NEW_DAEMON || 3148 
3135 cmd == IPVS_CMD_DEL_DAEMON) { ··· 3294 3281 { 3295 3282 struct ip_vs_timeout_user t; 3296 3283 3297 - __ip_vs_get_timeouts(&t); 3284 + __ip_vs_get_timeouts(net, &t); 3298 3285 #ifdef CONFIG_IP_VS_PROTO_TCP 3299 3286 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout); 3300 3287 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
+14 -7
net/netfilter/ipvs/ip_vs_proto.c
··· 152 152 * get ip_vs_protocol object data by netns and proto 153 153 */ 154 154 struct ip_vs_proto_data * 155 - ip_vs_proto_data_get(struct net *net, unsigned short proto) 155 + __ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto) 156 156 { 157 - struct netns_ipvs *ipvs = net_ipvs(net); 158 157 struct ip_vs_proto_data *pd; 159 158 unsigned hash = IP_VS_PROTO_HASH(proto); 160 159 ··· 164 165 165 166 return NULL; 166 167 } 168 + 169 + struct ip_vs_proto_data * 170 + ip_vs_proto_data_get(struct net *net, unsigned short proto) 171 + { 172 + struct netns_ipvs *ipvs = net_ipvs(net); 173 + 174 + return __ipvs_proto_data_get(ipvs, proto); 175 + } 167 176 EXPORT_SYMBOL(ip_vs_proto_data_get); 168 177 169 178 /* 170 179 * Propagate event for state change to all protocols 171 180 */ 172 - void ip_vs_protocol_timeout_change(int flags) 181 + void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags) 173 182 { 174 - struct ip_vs_protocol *pp; 183 + struct ip_vs_proto_data *pd; 175 184 int i; 176 185 177 186 for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { 178 - for (pp = ip_vs_proto_table[i]; pp; pp = pp->next) { 179 - if (pp->timeout_change) 180 - pp->timeout_change(pp, flags); 187 + for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next) { 188 + if (pd->pp->timeout_change) 189 + pd->pp->timeout_change(pd, flags); 181 190 } 182 191 } 183 192 }
+4 -6
net/netfilter/ipvs/ip_vs_proto_ah_esp.c
··· 55 55 } 56 56 57 57 static struct ip_vs_conn * 58 - ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, 58 + ah_esp_conn_in_get(int af, const struct sk_buff *skb, 59 59 const struct ip_vs_iphdr *iph, unsigned int proto_off, 60 60 int inverse) 61 61 { ··· 72 72 IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet " 73 73 "%s%s %s->%s\n", 74 74 inverse ? "ICMP+" : "", 75 - pp->name, 75 + ip_vs_proto_get(iph->protocol)->name, 76 76 IP_VS_DBG_ADDR(af, &iph->saddr), 77 77 IP_VS_DBG_ADDR(af, &iph->daddr)); 78 78 } ··· 83 83 84 84 static struct ip_vs_conn * 85 85 ah_esp_conn_out_get(int af, const struct sk_buff *skb, 86 - struct ip_vs_protocol *pp, 87 86 const struct ip_vs_iphdr *iph, 88 87 unsigned int proto_off, 89 88 int inverse) ··· 96 97 IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet " 97 98 "%s%s %s->%s\n", 98 99 inverse ? "ICMP+" : "", 99 - pp->name, 100 + ip_vs_proto_get(iph->protocol)->name, 100 101 IP_VS_DBG_ADDR(af, &iph->saddr), 101 102 IP_VS_DBG_ADDR(af, &iph->daddr)); 102 103 } ··· 106 107 107 108 108 109 static int 109 - ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, 110 + ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 110 111 int *verdict, struct ip_vs_conn **cpp) 111 112 { 112 113 /* ··· 136 137 .app_conn_bind = NULL, 137 138 .debug_packet = ip_vs_tcpudp_debug_packet, 138 139 .timeout_change = NULL, /* ISAKMP */ 139 - .set_state_timeout = NULL, 140 140 }; 141 141 #endif 142 142
+7 -9
net/netfilter/ipvs/ip_vs_proto_sctp.c
··· 9 9 #include <net/ip_vs.h> 10 10 11 11 static int 12 - sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, 12 + sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 13 13 int *verdict, struct ip_vs_conn **cpp) 14 14 { 15 15 struct net *net; ··· 47 47 * Let the virtual server select a real server for the 48 48 * incoming connection, and create a connection entry. 49 49 */ 50 - *cpp = ip_vs_schedule(svc, skb, pp, &ignored); 50 + *cpp = ip_vs_schedule(svc, skb, pd, &ignored); 51 51 if (!*cpp && ignored <= 0) { 52 52 if (!ignored) 53 - *verdict = ip_vs_leave(svc, skb, pp); 53 + *verdict = ip_vs_leave(svc, skb, pd); 54 54 else { 55 55 ip_vs_service_put(svc); 56 56 *verdict = NF_DROP; ··· 907 907 } 908 908 909 909 static inline int 910 - set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp, 910 + set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, 911 911 int direction, const struct sk_buff *skb) 912 912 { 913 913 sctp_chunkhdr_t _sctpch, *sch; 914 914 unsigned char chunk_type; 915 915 int event, next_state; 916 916 int ihl; 917 - struct ip_vs_proto_data *pd; 918 917 919 918 #ifdef CONFIG_IP_VS_IPV6 920 919 ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); ··· 965 966 966 967 IP_VS_DBG_BUF(8, "%s %s %s:%d->" 967 968 "%s:%d state: %s->%s conn->refcnt:%d\n", 968 - pp->name, 969 + pd->pp->name, 969 970 ((direction == IP_VS_DIR_OUTPUT) ? 970 971 "output " : "input "), 971 972 IP_VS_DBG_ADDR(cp->af, &cp->daddr), ··· 989 990 } 990 991 } 991 992 } 992 - pd = ip_vs_proto_data_get(&init_net, pp->protocol); /* tmp fix */ 993 993 if (likely(pd)) 994 994 cp->timeout = pd->timeout_table[cp->state = next_state]; 995 995 else /* What to do ? 
*/ ··· 999 1001 1000 1002 static int 1001 1003 sctp_state_transition(struct ip_vs_conn *cp, int direction, 1002 - const struct sk_buff *skb, struct ip_vs_protocol *pp) 1004 + const struct sk_buff *skb, struct ip_vs_proto_data *pd) 1003 1005 { 1004 1006 int ret = 0; 1005 1007 1006 1008 spin_lock(&cp->lock); 1007 - ret = set_sctp_state(pp, cp, direction, skb); 1009 + ret = set_sctp_state(pd, cp, direction, skb); 1008 1010 spin_unlock(&cp->lock); 1009 1011 1010 1012 return ret;
+12 -15
net/netfilter/ipvs/ip_vs_proto_tcp.c
··· 32 32 #include <net/ip_vs.h> 33 33 34 34 static int 35 - tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, 35 + tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 36 36 int *verdict, struct ip_vs_conn **cpp) 37 37 { 38 38 struct net *net; ··· 68 68 * Let the virtual server select a real server for the 69 69 * incoming connection, and create a connection entry. 70 70 */ 71 - *cpp = ip_vs_schedule(svc, skb, pp, &ignored); 71 + *cpp = ip_vs_schedule(svc, skb, pd, &ignored); 72 72 if (!*cpp && ignored <= 0) { 73 73 if (!ignored) 74 - *verdict = ip_vs_leave(svc, skb, pp); 74 + *verdict = ip_vs_leave(svc, skb, pd); 75 75 else { 76 76 ip_vs_service_put(svc); 77 77 *verdict = NF_DROP; ··· 448 448 /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }}, 449 449 }; 450 450 451 - static struct tcp_states_t *tcp_state_table = tcp_states; 452 - 453 - 454 - static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags) 451 + static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags) 455 452 { 456 453 int on = (flags & 1); /* secure_tcp */ 457 454 ··· 458 461 ** for most if not for all of the applications. Something 459 462 ** like "capabilities" (flags) for each object. 460 463 */ 461 - tcp_state_table = (on? tcp_states_dos : tcp_states); 464 + pd->tcp_state_table = (on ? 
tcp_states_dos : tcp_states); 462 465 } 463 466 464 467 static inline int tcp_state_idx(struct tcphdr *th) ··· 475 478 } 476 479 477 480 static inline void 478 - set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp, 481 + set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, 479 482 int direction, struct tcphdr *th) 480 483 { 481 484 int state_idx; 482 485 int new_state = IP_VS_TCP_S_CLOSE; 483 486 int state_off = tcp_state_off[direction]; 484 - struct ip_vs_proto_data *pd; /* Temp fix */ 485 487 486 488 /* 487 489 * Update state offset to INPUT_ONLY if necessary ··· 498 502 goto tcp_state_out; 499 503 } 500 504 501 - new_state = tcp_state_table[state_off+state_idx].next_state[cp->state]; 505 + new_state = 506 + pd->tcp_state_table[state_off+state_idx].next_state[cp->state]; 502 507 503 508 tcp_state_out: 504 509 if (new_state != cp->state) { ··· 507 510 508 511 IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->" 509 512 "%s:%d state: %s->%s conn->refcnt:%d\n", 510 - pp->name, 513 + pd->pp->name, 511 514 ((state_off == TCP_DIR_OUTPUT) ? 512 515 "output " : "input "), 513 516 th->syn ? 'S' : '.', ··· 537 540 } 538 541 } 539 542 540 - pd = ip_vs_proto_data_get(&init_net, pp->protocol); 541 543 if (likely(pd)) 542 544 cp->timeout = pd->timeout_table[cp->state = new_state]; 543 545 else /* What to do ? 
*/ ··· 549 553 static int 550 554 tcp_state_transition(struct ip_vs_conn *cp, int direction, 551 555 const struct sk_buff *skb, 552 - struct ip_vs_protocol *pp) 556 + struct ip_vs_proto_data *pd) 553 557 { 554 558 struct tcphdr _tcph, *th; 555 559 ··· 564 568 return 0; 565 569 566 570 spin_lock(&cp->lock); 567 - set_tcp_state(pp, cp, direction, th); 571 + set_tcp_state(pd, cp, direction, th); 568 572 spin_unlock(&cp->lock); 569 573 570 574 return 1; ··· 687 691 spin_lock_init(&ipvs->tcp_app_lock); 688 692 pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts, 689 693 sizeof(tcp_timeouts)); 694 + pd->tcp_state_table = tcp_states; 690 695 } 691 696 692 697 static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
+4 -7
net/netfilter/ipvs/ip_vs_proto_udp.c
··· 29 29 #include <net/ip6_checksum.h> 30 30 31 31 static int 32 - udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, 32 + udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 33 33 int *verdict, struct ip_vs_conn **cpp) 34 34 { 35 35 struct net *net; ··· 64 64 * Let the virtual server select a real server for the 65 65 * incoming connection, and create a connection entry. 66 66 */ 67 - *cpp = ip_vs_schedule(svc, skb, pp, &ignored); 67 + *cpp = ip_vs_schedule(svc, skb, pd, &ignored); 68 68 if (!*cpp && ignored <= 0) { 69 69 if (!ignored) 70 - *verdict = ip_vs_leave(svc, skb, pp); 70 + *verdict = ip_vs_leave(svc, skb, pd); 71 71 else { 72 72 ip_vs_service_put(svc); 73 73 *verdict = NF_DROP; ··· 457 457 static int 458 458 udp_state_transition(struct ip_vs_conn *cp, int direction, 459 459 const struct sk_buff *skb, 460 - struct ip_vs_protocol *pp) 460 + struct ip_vs_proto_data *pd) 461 461 { 462 - struct ip_vs_proto_data *pd; /* Temp fix, pp will be replaced by pd */ 463 - 464 - pd = ip_vs_proto_data_get(&init_net, IPPROTO_UDP); 465 462 if (unlikely(!pd)) { 466 463 pr_err("UDP no ns data\n"); 467 464 return 0;
+1 -1
net/netfilter/xt_ipvs.c
··· 85 85 /* 86 86 * Check if the packet belongs to an existing entry 87 87 */ 88 - cp = pp->conn_out_get(family, skb, pp, &iph, iph.len, 1 /* inverse */); 88 + cp = pp->conn_out_get(family, skb, &iph, iph.len, 1 /* inverse */); 89 89 if (unlikely(cp == NULL)) { 90 90 match = false; 91 91 goto out;