Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'ice-pfcp-filter'

Alexander Lobakin says:

====================
ice: add PFCP filter support

Add support for creating PFCP filters in switchdev mode. Add a pfcp module
that allows creating a PFCP-type netdev. The netdev can then be passed to
tc when creating a filter to indicate that a PFCP filter should be created.

To add a PFCP filter, a special netdev must be created and passed to the
tc command:

ip link add pfcp0 type pfcp
tc filter add dev eth0 ingress prio 1 flower pfcp_opts \
1:12ab/ff:fffffffffffffff0 skip_hw action mirred egress redirect \
dev pfcp0

Changes in iproute2 [1] are required to use pfcp_opts in tc.

ICE COMMS package is required as it contains PFCP profiles.

Part of this patchset modifies IP_TUNNEL_*_OPTs, which were previously
stored in a __be16. All possible values have already been used, making
it impossible to add new ones.

* 1-3: add new bitmap_{read,write}(), which is used later in the IP
tunnel flags code (from Alexander's ARM64 MTE series[2]);
* 4-14: some bitmap code preparations also used later in IP tunnels;
* 15-17: convert IP tunnel flags from __be16 to a bitmap;
* 18-21: add PFCP module and support for it in ice.

[1] https://lore.kernel.org/netdev/20230614091758.11180-1-marcin.szycik@linux.intel.com
[2] https://lore.kernel.org/linux-kernel/20231218124033.551770-1-glider@google.com
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+1984 -620
-5
drivers/md/dm-clone-metadata.c
··· 465 465 466 466 /*---------------------------------------------------------------------------*/ 467 467 468 - static size_t bitmap_size(unsigned long nr_bits) 469 - { 470 - return BITS_TO_LONGS(nr_bits) * sizeof(long); 471 - } 472 - 473 468 static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words, 474 469 unsigned long nr_regions) 475 470 {
+13
drivers/net/Kconfig
··· 290 290 To compile this drivers as a module, choose M here: the module 291 291 will be called gtp. 292 292 293 + config PFCP 294 + tristate "Packet Forwarding Control Protocol (PFCP)" 295 + depends on INET 296 + select NET_UDP_TUNNEL 297 + help 298 + This allows one to create PFCP virtual interfaces that allows to 299 + set up software and hardware offload of PFCP packets. 300 + Note that this module does not support PFCP protocol in the kernel space. 301 + There is no support for parsing any PFCP messages. 302 + 303 + To compile this drivers as a module, choose M here: the module 304 + will be called pfcp. 305 + 293 306 config AMT 294 307 tristate "Automatic Multicast Tunneling (AMT)" 295 308 depends on INET && IP_MULTICAST
+1
drivers/net/Makefile
··· 38 38 obj-$(CONFIG_BAREUDP) += bareudp.o 39 39 obj-$(CONFIG_GTP) += gtp.o 40 40 obj-$(CONFIG_NLMON) += nlmon.o 41 + obj-$(CONFIG_PFCP) += pfcp.o 41 42 obj-$(CONFIG_NET_VRF) += vrf.o 42 43 obj-$(CONFIG_VSOCKMON) += vsockmon.o 43 44 obj-$(CONFIG_MHI_NET) += mhi_net.o
+13 -6
drivers/net/bareudp.c
··· 61 61 static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) 62 62 { 63 63 struct metadata_dst *tun_dst = NULL; 64 + IP_TUNNEL_DECLARE_FLAGS(key) = { }; 64 65 struct bareudp_dev *bareudp; 65 66 unsigned short family; 66 67 unsigned int len; ··· 138 137 bareudp->dev->stats.rx_dropped++; 139 138 goto drop; 140 139 } 141 - tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); 140 + 141 + __set_bit(IP_TUNNEL_KEY_BIT, key); 142 + 143 + tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0); 142 144 if (!tun_dst) { 143 145 bareudp->dev->stats.rx_dropped++; 144 146 goto drop; ··· 289 285 struct bareudp_dev *bareudp, 290 286 const struct ip_tunnel_info *info) 291 287 { 288 + bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); 292 289 bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev)); 293 290 bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 294 291 struct socket *sock = rcu_dereference(bareudp->sock); 295 - bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); 296 292 const struct ip_tunnel_key *key = &info->key; 297 293 struct rtable *rt; 298 294 __be16 sport, df; ··· 320 316 321 317 tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); 322 318 ttl = key->ttl; 323 - df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; 319 + df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ? 
320 + htons(IP_DF) : 0; 324 321 skb_scrub_packet(skb, xnet); 325 322 326 323 err = -ENOSPC; ··· 343 338 udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst, 344 339 tos, ttl, df, sport, bareudp->port, 345 340 !net_eq(bareudp->net, dev_net(bareudp->dev)), 346 - !(info->key.tun_flags & TUNNEL_CSUM)); 341 + !test_bit(IP_TUNNEL_CSUM_BIT, 342 + info->key.tun_flags)); 347 343 return 0; 348 344 349 345 free_dst: ··· 356 350 struct bareudp_dev *bareudp, 357 351 const struct ip_tunnel_info *info) 358 352 { 353 + bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); 359 354 bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev)); 360 355 bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 361 356 struct socket *sock = rcu_dereference(bareudp->sock); 362 - bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); 363 357 const struct ip_tunnel_key *key = &info->key; 364 358 struct dst_entry *dst = NULL; 365 359 struct in6_addr saddr, daddr; ··· 408 402 udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev, 409 403 &saddr, &daddr, prio, ttl, 410 404 info->key.label, sport, bareudp->port, 411 - !(info->key.tun_flags & TUNNEL_CSUM)); 405 + !test_bit(IP_TUNNEL_CSUM_BIT, 406 + info->key.tun_flags)); 412 407 return 0; 413 408 414 409 free_dst:
+9
drivers/net/ethernet/intel/ice/ice_ddp.c
··· 721 721 } 722 722 } 723 723 724 + static bool ice_is_pfcp_profile(u16 prof_idx) 725 + { 726 + return prof_idx >= ICE_PROFID_IPV4_PFCP_NODE && 727 + prof_idx <= ICE_PROFID_IPV6_PFCP_SESSION; 728 + } 729 + 724 730 /** 725 731 * ice_get_sw_prof_type - determine switch profile type 726 732 * @hw: pointer to the HW structure ··· 743 737 744 738 if (ice_is_gtp_u_profile(prof_idx)) 745 739 return ICE_PROF_TUN_GTPU; 740 + 741 + if (ice_is_pfcp_profile(prof_idx)) 742 + return ICE_PROF_TUN_PFCP; 746 743 747 744 for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { 748 745 /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+3 -1
drivers/net/ethernet/intel/ice/ice_flex_type.h
··· 93 93 TNL_GRETAP, 94 94 TNL_GTPC, 95 95 TNL_GTPU, 96 + TNL_PFCP, 96 97 __TNL_TYPE_CNT, 97 98 TNL_LAST = 0xFF, 98 99 TNL_ALL = 0xFF, ··· 359 358 ICE_PROF_TUN_GRE = 0x4, 360 359 ICE_PROF_TUN_GTPU = 0x8, 361 360 ICE_PROF_TUN_GTPC = 0x10, 362 - ICE_PROF_TUN_ALL = 0x1E, 361 + ICE_PROF_TUN_PFCP = 0x20, 362 + ICE_PROF_TUN_ALL = 0x3E, 363 363 ICE_PROF_ALL = 0xFF, 364 364 }; 365 365
+12
drivers/net/ethernet/intel/ice/ice_protocol_type.h
··· 43 43 ICE_NVGRE, 44 44 ICE_GTP, 45 45 ICE_GTP_NO_PAY, 46 + ICE_PFCP, 46 47 ICE_PPPOE, 47 48 ICE_L2TPV3, 48 49 ICE_VLAN_EX, ··· 62 61 ICE_SW_TUN_NVGRE, 63 62 ICE_SW_TUN_GTPU, 64 63 ICE_SW_TUN_GTPC, 64 + ICE_SW_TUN_PFCP, 65 65 ICE_ALL_TUNNELS /* All tunnel types including NVGRE */ 66 66 }; 67 67 ··· 203 201 u8 qfi; 204 202 u8 rsvrd; 205 203 }; 204 + 205 + struct ice_pfcp_hdr { 206 + u8 flags; 207 + u8 msg_type; 208 + __be16 length; 209 + __be64 seid; 210 + __be32 seq; 211 + u8 spare; 212 + } __packed __aligned(__alignof__(u16)); 206 213 207 214 struct ice_pppoe_hdr { 208 215 u8 rsrvd_ver_type; ··· 429 418 struct ice_udp_tnl_hdr tnl_hdr; 430 419 struct ice_nvgre_hdr nvgre_hdr; 431 420 struct ice_udp_gtp_hdr gtp_hdr; 421 + struct ice_pfcp_hdr pfcp_hdr; 432 422 struct ice_pppoe_hdr pppoe_hdr; 433 423 struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr; 434 424 struct ice_hw_metadata metadata;
+85
drivers/net/ethernet/intel/ice/ice_switch.c
··· 42 42 ICE_PKT_KMALLOC = BIT(9), 43 43 ICE_PKT_PPPOE = BIT(10), 44 44 ICE_PKT_L2TPV3 = BIT(11), 45 + ICE_PKT_PFCP = BIT(12), 45 46 }; 46 47 47 48 struct ice_dummy_pkt_offsets { ··· 1111 1110 0x00, 0x00, 1112 1111 }; 1113 1112 1113 + ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv4) = { 1114 + { ICE_MAC_OFOS, 0 }, 1115 + { ICE_ETYPE_OL, 12 }, 1116 + { ICE_IPV4_OFOS, 14 }, 1117 + { ICE_UDP_ILOS, 34 }, 1118 + { ICE_PFCP, 42 }, 1119 + { ICE_PROTOCOL_LAST, 0 }, 1120 + }; 1121 + 1122 + ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv4) = { 1123 + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 1124 + 0x00, 0x00, 0x00, 0x00, 1125 + 0x00, 0x00, 0x00, 0x00, 1126 + 1127 + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ 1128 + 1129 + 0x45, 0x00, 0x00, 0x2c, /* ICE_IPV4_OFOS 14 */ 1130 + 0x00, 0x01, 0x00, 0x00, 1131 + 0x00, 0x11, 0x00, 0x00, 1132 + 0x00, 0x00, 0x00, 0x00, 1133 + 0x00, 0x00, 0x00, 0x00, 1134 + 1135 + 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 34 */ 1136 + 0x00, 0x18, 0x00, 0x00, 1137 + 1138 + 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 42 */ 1139 + 0x00, 0x00, 0x00, 0x00, 1140 + 0x00, 0x00, 0x00, 0x00, 1141 + 0x00, 0x00, 0x00, 0x00, 1142 + 1143 + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ 1144 + }; 1145 + 1146 + ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv6) = { 1147 + { ICE_MAC_OFOS, 0 }, 1148 + { ICE_ETYPE_OL, 12 }, 1149 + { ICE_IPV6_OFOS, 14 }, 1150 + { ICE_UDP_ILOS, 54 }, 1151 + { ICE_PFCP, 62 }, 1152 + { ICE_PROTOCOL_LAST, 0 }, 1153 + }; 1154 + 1155 + ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv6) = { 1156 + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 1157 + 0x00, 0x00, 0x00, 0x00, 1158 + 0x00, 0x00, 0x00, 0x00, 1159 + 1160 + 0x86, 0xdd, /* ICE_ETYPE_OL 12 */ 1161 + 1162 + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */ 1163 + 0x00, 0x10, 0x11, 0x00, /* Next header UDP */ 1164 + 0x00, 0x00, 0x00, 0x00, 1165 + 0x00, 0x00, 0x00, 0x00, 1166 + 0x00, 0x00, 0x00, 0x00, 1167 + 0x00, 0x00, 0x00, 0x00, 1168 + 0x00, 0x00, 0x00, 0x00, 1169 + 0x00, 0x00, 0x00, 0x00, 1170 + 0x00, 0x00, 0x00, 0x00, 1171 + 0x00, 
0x00, 0x00, 0x00, 1172 + 1173 + 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 54 */ 1174 + 0x00, 0x18, 0x00, 0x00, 1175 + 1176 + 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 62 */ 1177 + 0x00, 0x00, 0x00, 0x00, 1178 + 0x00, 0x00, 0x00, 0x00, 1179 + 0x00, 0x00, 0x00, 0x00, 1180 + 1181 + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ 1182 + }; 1183 + 1114 1184 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = { 1115 1185 { ICE_MAC_OFOS, 0 }, 1116 1186 { ICE_ETYPE_OL, 12 }, ··· 1415 1343 ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU), 1416 1344 ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6), 1417 1345 ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC), 1346 + ICE_PKT_PROFILE(pfcp_session_ipv6, ICE_PKT_PFCP | ICE_PKT_OUTER_IPV6), 1347 + ICE_PKT_PROFILE(pfcp_session_ipv4, ICE_PKT_PFCP), 1418 1348 ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 | 1419 1349 ICE_PKT_INNER_UDP), 1420 1350 ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6), ··· 4606 4532 ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6), 4607 4533 ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22), 4608 4534 ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14), 4535 + ICE_PROTOCOL_ENTRY(ICE_PFCP, 8, 10, 12, 14, 16, 18, 20, 22), 4609 4536 ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6), 4610 4537 ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10), 4611 4538 ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0), ··· 4640 4565 { ICE_NVGRE, ICE_GRE_OF_HW }, 4641 4566 { ICE_GTP, ICE_UDP_OF_HW }, 4642 4567 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW }, 4568 + { ICE_PFCP, ICE_UDP_ILOS_HW }, 4643 4569 { ICE_PPPOE, ICE_PPPOE_HW }, 4644 4570 { ICE_L2TPV3, ICE_L2TPV3_HW }, 4645 4571 { ICE_VLAN_EX, ICE_VLAN_OF_HW }, ··· 5348 5272 case ICE_SW_TUN_GTPC: 5349 5273 prof_type = ICE_PROF_TUN_GTPC; 5350 5274 break; 5275 + case ICE_SW_TUN_PFCP: 5276 + prof_type = ICE_PROF_TUN_PFCP; 5277 + break; 5351 5278 case ICE_SW_TUN_AND_NON_TUN: 5352 5279 default: 5353 5280 prof_type = ICE_PROF_ALL; ··· 5635 5556 case ICE_SW_TUN_VXLAN: 
5636 5557 match |= ICE_PKT_TUN_UDP; 5637 5558 break; 5559 + case ICE_SW_TUN_PFCP: 5560 + match |= ICE_PKT_PFCP; 5561 + break; 5638 5562 default: 5639 5563 break; 5640 5564 } ··· 5777 5695 case ICE_GTP_NO_PAY: 5778 5696 case ICE_GTP: 5779 5697 len = sizeof(struct ice_udp_gtp_hdr); 5698 + break; 5699 + case ICE_PFCP: 5700 + len = sizeof(struct ice_pfcp_hdr); 5780 5701 break; 5781 5702 case ICE_PPPOE: 5782 5703 len = sizeof(struct ice_pppoe_hdr);
+2
drivers/net/ethernet/intel/ice/ice_switch.h
··· 22 22 #define ICE_PROFID_IPV6_GTPC_NO_TEID 45 23 23 #define ICE_PROFID_IPV6_GTPU_TEID 46 24 24 #define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70 25 + #define ICE_PROFID_IPV4_PFCP_NODE 79 26 + #define ICE_PROFID_IPV6_PFCP_SESSION 82 25 27 26 28 #define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n)) 27 29 #define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
+57 -11
drivers/net/ethernet/intel/ice/ice_tc_lib.c
··· 35 35 if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) 36 36 lkups_cnt++; 37 37 38 - if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS) 38 + if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) 39 + lkups_cnt++; 40 + 41 + if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) 39 42 lkups_cnt++; 40 43 41 44 if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | ··· 141 138 return ICE_GTP; 142 139 case TNL_GTPC: 143 140 return ICE_GTP_NO_PAY; 141 + case TNL_PFCP: 142 + return ICE_PFCP; 144 143 default: 145 144 return 0; 146 145 } ··· 162 157 return ICE_SW_TUN_GTPU; 163 158 case TNL_GTPC: 164 159 return ICE_SW_TUN_GTPC; 160 + case TNL_PFCP: 161 + return ICE_SW_TUN_PFCP; 165 162 default: 166 163 return ICE_NON_TUN; 167 164 } ··· 226 219 i++; 227 220 } 228 221 229 - if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS && 230 - (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) { 222 + if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) { 231 223 list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type); 232 224 233 225 if (fltr->gtp_pdu_info_masks.pdu_type) { ··· 239 233 list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi; 240 234 memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1); 241 235 } 236 + 237 + i++; 238 + } 239 + 240 + if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) { 241 + struct ice_pfcp_hdr *hdr_h, *hdr_m; 242 + 243 + hdr_h = &list[i].h_u.pfcp_hdr; 244 + hdr_m = &list[i].m_u.pfcp_hdr; 245 + list[i].type = ICE_PFCP; 246 + 247 + hdr_h->flags = fltr->pfcp_meta_keys.type; 248 + hdr_m->flags = fltr->pfcp_meta_masks.type & 0x01; 249 + 250 + hdr_h->seid = fltr->pfcp_meta_keys.seid; 251 + hdr_m->seid = fltr->pfcp_meta_masks.seid; 242 252 243 253 i++; 244 254 } ··· 389 367 if (tc_fltr->tunnel_type != TNL_LAST) { 390 368 i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i); 391 369 392 - headers = &tc_fltr->inner_headers; 393 - inner = true; 370 + /* PFCP is considered non-tunneled - don't swap headers. 
*/ 371 + if (tc_fltr->tunnel_type != TNL_PFCP) { 372 + headers = &tc_fltr->inner_headers; 373 + inner = true; 374 + } 394 375 } 395 376 396 377 if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) { ··· 647 622 */ 648 623 if (netif_is_gtp(tunnel_dev)) 649 624 return TNL_GTPU; 625 + if (netif_is_pfcp(tunnel_dev)) 626 + return TNL_PFCP; 650 627 return TNL_LAST; 651 628 } 652 629 ··· 1428 1401 } 1429 1402 } 1430 1403 1431 - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) { 1404 + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) && 1405 + (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) { 1432 1406 struct flow_match_enc_opts match; 1433 1407 1434 1408 flow_rule_match_enc_opts(rule, &match); ··· 1440 1412 memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0], 1441 1413 sizeof(struct gtp_pdu_session_info)); 1442 1414 1443 - fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS; 1415 + fltr->flags |= ICE_TC_FLWR_FIELD_GTP_OPTS; 1416 + } 1417 + 1418 + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) && 1419 + fltr->tunnel_type == TNL_PFCP) { 1420 + struct flow_match_enc_opts match; 1421 + 1422 + flow_rule_match_enc_opts(rule, &match); 1423 + 1424 + memcpy(&fltr->pfcp_meta_keys, match.key->data, 1425 + sizeof(struct pfcp_metadata)); 1426 + memcpy(&fltr->pfcp_meta_masks, match.mask->data, 1427 + sizeof(struct pfcp_metadata)); 1428 + 1429 + fltr->flags |= ICE_TC_FLWR_FIELD_PFCP_OPTS; 1444 1430 } 1445 1431 1446 1432 return 0; ··· 1515 1473 return err; 1516 1474 } 1517 1475 1518 - /* header pointers should point to the inner headers, outer 1519 - * header were already set by ice_parse_tunnel_attr 1520 - */ 1521 - headers = &fltr->inner_headers; 1476 + /* PFCP is considered non-tunneled - don't swap headers. */ 1477 + if (fltr->tunnel_type != TNL_PFCP) { 1478 + /* Header pointers should point to the inner headers, 1479 + * outer header were already set by 1480 + * ice_parse_tunnel_attr(). 
1481 + */ 1482 + headers = &fltr->inner_headers; 1483 + } 1522 1484 } else if (dissector->used_keys & 1523 1485 (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | 1524 1486 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+7 -1
drivers/net/ethernet/intel/ice/ice_tc_lib.h
··· 4 4 #ifndef _ICE_TC_LIB_H_ 5 5 #define _ICE_TC_LIB_H_ 6 6 7 + #include <linux/bits.h> 8 + #include <net/pfcp.h> 9 + 7 10 #define ICE_TC_FLWR_FIELD_DST_MAC BIT(0) 8 11 #define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1) 9 12 #define ICE_TC_FLWR_FIELD_VLAN BIT(2) ··· 25 22 #define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15) 26 23 #define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16) 27 24 #define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17) 28 - #define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18) 25 + #define ICE_TC_FLWR_FIELD_GTP_OPTS BIT(18) 29 26 #define ICE_TC_FLWR_FIELD_CVLAN BIT(19) 30 27 #define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20) 31 28 #define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21) ··· 37 34 #define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27) 38 35 #define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28) 39 36 #define ICE_TC_FLWR_FIELD_VLAN_TPID BIT(29) 37 + #define ICE_TC_FLWR_FIELD_PFCP_OPTS BIT(30) 40 38 41 39 #define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF 42 40 ··· 165 161 __be32 tenant_id; 166 162 struct gtp_pdu_session_info gtp_pdu_info_keys; 167 163 struct gtp_pdu_session_info gtp_pdu_info_masks; 164 + struct pfcp_metadata pfcp_meta_keys; 165 + struct pfcp_metadata pfcp_meta_masks; 168 166 u32 flags; 169 167 u8 tunnel_type; 170 168 struct ice_tc_flower_action action;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
··· 117 117 118 118 bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a, 119 119 struct mlx5e_encap_key *b, 120 - __be16 tun_flags); 120 + u32 tun_type); 121 121 #endif /* CONFIG_MLX5_ESWITCH */ 122 122 123 123 #endif //__MLX5_EN_TC_TUNNEL_H__
+3 -3
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
··· 587 587 588 588 bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a, 589 589 struct mlx5e_encap_key *b, 590 - __be16 tun_flags) 590 + u32 tun_type) 591 591 { 592 592 struct ip_tunnel_info *a_info; 593 593 struct ip_tunnel_info *b_info; ··· 596 596 if (!mlx5e_tc_tun_encap_info_equal_generic(a, b)) 597 597 return false; 598 598 599 - a_has_opts = !!(a->ip_tun_key->tun_flags & tun_flags); 600 - b_has_opts = !!(b->ip_tun_key->tun_flags & tun_flags); 599 + a_has_opts = test_bit(tun_type, a->ip_tun_key->tun_flags); 600 + b_has_opts = test_bit(tun_type, b->ip_tun_key->tun_flags); 601 601 602 602 /* keys are equal when both don't have any options attached */ 603 603 if (!a_has_opts && !b_has_opts)
+7 -5
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
··· 106 106 memset(geneveh, 0, sizeof(*geneveh)); 107 107 geneveh->ver = MLX5E_GENEVE_VER; 108 108 geneveh->opt_len = tun_info->options_len / 4; 109 - geneveh->oam = !!(tun_info->key.tun_flags & TUNNEL_OAM); 110 - geneveh->critical = !!(tun_info->key.tun_flags & TUNNEL_CRIT_OPT); 109 + geneveh->oam = test_bit(IP_TUNNEL_OAM_BIT, tun_info->key.tun_flags); 110 + geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT, 111 + tun_info->key.tun_flags); 111 112 mlx5e_tunnel_id_to_vni(tun_info->key.tun_id, geneveh->vni); 112 113 geneveh->proto_type = htons(ETH_P_TEB); 113 114 114 - if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT) { 115 + if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_info->key.tun_flags)) { 115 116 if (!geneveh->opt_len) 116 117 return -EOPNOTSUPP; 117 118 ip_tunnel_info_opts_get(geneveh->options, tun_info); ··· 189 188 190 189 /* make sure that we're talking about GENEVE options */ 191 190 192 - if (enc_opts.key->dst_opt_type != TUNNEL_GENEVE_OPT) { 191 + if (enc_opts.key->dst_opt_type != IP_TUNNEL_GENEVE_OPT_BIT) { 193 192 NL_SET_ERR_MSG_MOD(extack, 194 193 "Matching on GENEVE options: option type is not GENEVE"); 195 194 netdev_warn(priv->netdev, ··· 338 337 static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a, 339 338 struct mlx5e_encap_key *b) 340 339 { 341 - return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_GENEVE_OPT); 340 + return mlx5e_tc_tun_encap_info_equal_options(a, b, 341 + IP_TUNNEL_GENEVE_OPT_BIT); 342 342 } 343 343 344 344 struct mlx5e_tc_tunnel geneve_tunnel = {
+6 -2
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
··· 31 31 const struct ip_tunnel_key *tun_key = &e->tun_info->key; 32 32 struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf); 33 33 __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id); 34 + IP_TUNNEL_DECLARE_FLAGS(unsupp) = { }; 34 35 int hdr_len; 35 36 36 37 *ip_proto = IPPROTO_GRE; 37 38 38 39 /* the HW does not calculate GRE csum or sequences */ 39 - if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ)) 40 + __set_bit(IP_TUNNEL_CSUM_BIT, unsupp); 41 + __set_bit(IP_TUNNEL_SEQ_BIT, unsupp); 42 + 43 + if (ip_tunnel_flags_intersect(tun_key->tun_flags, unsupp)) 40 44 return -EOPNOTSUPP; 41 45 42 46 greh->protocol = htons(ETH_P_TEB); ··· 48 44 /* GRE key */ 49 45 hdr_len = mlx5e_tc_tun_calc_hlen_gretap(e); 50 46 greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags); 51 - if (tun_key->tun_flags & TUNNEL_KEY) { 47 + if (test_bit(IP_TUNNEL_KEY_BIT, tun_key->tun_flags)) { 52 48 __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); 53 49 *ptr = tun_id; 54 50 }
+5 -4
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
··· 90 90 const struct vxlan_metadata *md; 91 91 struct vxlanhdr *vxh; 92 92 93 - if ((tun_key->tun_flags & TUNNEL_VXLAN_OPT) && 93 + if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags) && 94 94 e->tun_info->options_len != sizeof(*md)) 95 95 return -EOPNOTSUPP; 96 96 vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); ··· 99 99 udp->dest = tun_key->tp_dst; 100 100 vxh->vx_flags = VXLAN_HF_VNI; 101 101 vxh->vx_vni = vxlan_vni_field(tun_id); 102 - if (tun_key->tun_flags & TUNNEL_VXLAN_OPT) { 102 + if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags)) { 103 103 md = ip_tunnel_info_opts(e->tun_info); 104 104 vxlan_build_gbp_hdr(vxh, md); 105 105 } ··· 125 125 return -EOPNOTSUPP; 126 126 } 127 127 128 - if (enc_opts.key->dst_opt_type != TUNNEL_VXLAN_OPT) { 128 + if (enc_opts.key->dst_opt_type != IP_TUNNEL_VXLAN_OPT_BIT) { 129 129 NL_SET_ERR_MSG_MOD(extack, "Wrong VxLAN option type: not GBP"); 130 130 return -EOPNOTSUPP; 131 131 } ··· 208 208 static bool mlx5e_tc_tun_encap_info_equal_vxlan(struct mlx5e_encap_key *a, 209 209 struct mlx5e_encap_key *b) 210 210 { 211 - return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_VXLAN_OPT); 211 + return mlx5e_tc_tun_encap_info_equal_options(a, b, 212 + IP_TUNNEL_VXLAN_OPT_BIT); 212 213 } 213 214 214 215 static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev)
+12 -4
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 5464 5464 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 5465 5465 struct tunnel_match_enc_opts enc_opts = {}; 5466 5466 struct mlx5_rep_uplink_priv *uplink_priv; 5467 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 5467 5468 struct mlx5e_rep_priv *uplink_rpriv; 5468 5469 struct metadata_dst *tun_dst; 5469 5470 struct tunnel_match_key key; 5470 5471 u32 tun_id, enc_opts_id; 5471 5472 struct net_device *dev; 5472 5473 int err; 5474 + 5475 + __set_bit(IP_TUNNEL_KEY_BIT, flags); 5473 5476 5474 5477 enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK; 5475 5478 tun_id = tunnel_id >> ENC_OPTS_BITS; ··· 5506 5503 case FLOW_DISSECTOR_KEY_IPV4_ADDRS: 5507 5504 tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst, 5508 5505 key.enc_ip.tos, key.enc_ip.ttl, 5509 - key.enc_tp.dst, TUNNEL_KEY, 5506 + key.enc_tp.dst, flags, 5510 5507 key32_to_tunnel_id(key.enc_key_id.keyid), 5511 5508 enc_opts.key.len); 5512 5509 break; 5513 5510 case FLOW_DISSECTOR_KEY_IPV6_ADDRS: 5514 5511 tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst, 5515 5512 key.enc_ip.tos, key.enc_ip.ttl, 5516 - key.enc_tp.dst, 0, TUNNEL_KEY, 5513 + key.enc_tp.dst, 0, flags, 5517 5514 key32_to_tunnel_id(key.enc_key_id.keyid), 5518 5515 enc_opts.key.len); 5519 5516 break; ··· 5531 5528 5532 5529 tun_dst->u.tun_info.key.tp_src = key.enc_tp.src; 5533 5530 5534 - if (enc_opts.key.len) 5531 + if (enc_opts.key.len) { 5532 + ip_tunnel_flags_zero(flags); 5533 + if (enc_opts.key.dst_opt_type) 5534 + __set_bit(enc_opts.key.dst_opt_type, flags); 5535 + 5535 5536 ip_tunnel_info_opts_set(&tun_dst->u.tun_info, 5536 5537 enc_opts.key.data, 5537 5538 enc_opts.key.len, 5538 - enc_opts.key.dst_opt_type); 5539 + flags); 5540 + } 5539 5541 5540 5542 skb_dst_set(skb, (struct dst_entry *)tun_dst); 5541 5543 dev = dev_get_by_index(&init_net, key.filter_ifindex);
+33 -23
drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
··· 8 8 #include "spectrum_ipip.h" 9 9 #include "reg.h" 10 10 11 - struct ip_tunnel_parm 11 + struct ip_tunnel_parm_kern 12 12 mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev) 13 13 { 14 14 struct ip_tunnel *tun = netdev_priv(ol_dev); ··· 24 24 return tun->parms; 25 25 } 26 26 27 - static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms) 27 + static bool 28 + mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm_kern *parms) 28 29 { 29 - return !!(parms->i_flags & TUNNEL_KEY); 30 + return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags); 30 31 } 31 32 32 33 static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms) 33 34 { 34 - return !!(parms->i_flags & TUNNEL_KEY); 35 + return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags); 35 36 } 36 37 37 - static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms) 38 + static bool 39 + mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm_kern *parms) 38 40 { 39 - return !!(parms->o_flags & TUNNEL_KEY); 41 + return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags); 40 42 } 41 43 42 44 static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms) 43 45 { 44 - return !!(parms->o_flags & TUNNEL_KEY); 46 + return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags); 45 47 } 46 48 47 - static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms) 49 + static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm_kern *parms) 48 50 { 49 51 return mlxsw_sp_ipip_parms4_has_ikey(parms) ? 50 52 be32_to_cpu(parms->i_key) : 0; ··· 58 56 be32_to_cpu(parms->i_key) : 0; 59 57 } 60 58 61 - static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms) 59 + static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm_kern *parms) 62 60 { 63 61 return mlxsw_sp_ipip_parms4_has_okey(parms) ? 
64 62 be32_to_cpu(parms->o_key) : 0; ··· 71 69 } 72 70 73 71 static union mlxsw_sp_l3addr 74 - mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms) 72 + mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm_kern *parms) 75 73 { 76 74 return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr }; 77 75 } ··· 83 81 } 84 82 85 83 static union mlxsw_sp_l3addr 86 - mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms) 84 + mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm_kern *parms) 87 85 { 88 86 return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr }; 89 87 } ··· 98 96 mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, 99 97 const struct net_device *ol_dev) 100 98 { 101 - struct ip_tunnel_parm parms4; 99 + struct ip_tunnel_parm_kern parms4; 102 100 struct __ip6_tnl_parm parms6; 103 101 104 102 switch (proto) { ··· 117 115 static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) 118 116 { 119 117 120 - struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); 118 + struct ip_tunnel_parm_kern parms4; 119 + 120 + parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); 121 121 122 122 return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4; 123 123 } ··· 128 124 mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, 129 125 const struct net_device *ol_dev) 130 126 { 131 - struct ip_tunnel_parm parms4; 127 + struct ip_tunnel_parm_kern parms4; 132 128 struct __ip6_tnl_parm parms6; 133 129 134 130 switch (proto) { ··· 154 150 static struct mlxsw_sp_ipip_parms 155 151 mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev) 156 152 { 157 - struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); 153 + struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); 158 154 159 155 return (struct mlxsw_sp_ipip_parms) { 160 156 .proto = MLXSW_SP_L3_PROTO_IPV4, ··· 191 187 { 192 188 u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); 193 189 u16 ul_rif_id = 
mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb); 190 + struct ip_tunnel_parm_kern parms; 194 191 char rtdp_pl[MLXSW_REG_RTDP_LEN]; 195 - struct ip_tunnel_parm parms; 196 192 unsigned int type_check; 197 193 bool has_ikey; 198 194 u32 daddr4; ··· 242 238 const struct net_device *ol_dev) 243 239 { 244 240 struct ip_tunnel *tunnel = netdev_priv(ol_dev); 245 - __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */ 246 241 bool inherit_ttl = tunnel->parms.iph.ttl == 0; 247 242 bool inherit_tos = tunnel->parms.iph.tos & 0x1; 243 + IP_TUNNEL_DECLARE_FLAGS(okflags) = { }; 248 244 249 - return (tunnel->parms.i_flags & ~okflags) == 0 && 250 - (tunnel->parms.o_flags & ~okflags) == 0 && 245 + /* We can't offload any other features. */ 246 + __set_bit(IP_TUNNEL_KEY_BIT, okflags); 247 + 248 + return ip_tunnel_flags_subset(tunnel->parms.i_flags, okflags) && 249 + ip_tunnel_flags_subset(tunnel->parms.o_flags, okflags) && 251 250 inherit_ttl && inherit_tos && 252 251 mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV4, ol_dev); 253 252 } ··· 259 252 mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, 260 253 const struct net_device *ol_dev) 261 254 { 262 - struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); 255 + struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); 263 256 enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; 264 257 265 258 lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ? ··· 446 439 struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev); 447 440 bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS; 448 441 bool inherit_ttl = tparm.hop_limit == 0; 449 - __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */ 442 + IP_TUNNEL_DECLARE_FLAGS(okflags) = { }; 450 443 451 - return (tparm.i_flags & ~okflags) == 0 && 452 - (tparm.o_flags & ~okflags) == 0 && 444 + /* We can't offload any other features. 
*/ 445 + __set_bit(IP_TUNNEL_KEY_BIT, okflags); 446 + 447 + return ip_tunnel_flags_subset(tparm.i_flags, okflags) && 448 + ip_tunnel_flags_subset(tparm.o_flags, okflags) && 453 449 inherit_ttl && inherit_tos && 454 450 mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev); 455 451 }
+1 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
··· 9 9 #include <linux/if_tunnel.h> 10 10 #include <net/ip6_tunnel.h> 11 11 12 - struct ip_tunnel_parm 12 + struct ip_tunnel_parm_kern 13 13 mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev); 14 14 struct __ip6_tnl_parm 15 15 mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev);
+6 -4
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
··· 413 413 __be32 *saddrp, __be32 *daddrp) 414 414 { 415 415 struct ip_tunnel *tun = netdev_priv(to_dev); 416 + struct ip_tunnel_parm_kern parms; 416 417 struct net_device *dev = NULL; 417 - struct ip_tunnel_parm parms; 418 418 struct rtable *rt = NULL; 419 419 struct flowi4 fl4; 420 420 ··· 451 451 const struct net_device *to_dev, 452 452 struct mlxsw_sp_span_parms *sparmsp) 453 453 { 454 - struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev); 454 + struct ip_tunnel_parm_kern tparm = mlxsw_sp_ipip_netdev_parms4(to_dev); 455 455 union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr }; 456 456 union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr }; 457 457 bool inherit_tos = tparm.iph.tos & 0x1; ··· 461 461 462 462 if (!(to_dev->flags & IFF_UP) || 463 463 /* Reject tunnels with GRE keys, checksums, etc. */ 464 - tparm.i_flags || tparm.o_flags || 464 + !ip_tunnel_flags_empty(tparm.i_flags) || 465 + !ip_tunnel_flags_empty(tparm.o_flags) || 465 466 /* Require a fixed TTL and a TOS copied from the mirrored packet. */ 466 467 inherit_ttl || !inherit_tos || 467 468 /* A destination address may not be "any". */ ··· 566 565 567 566 if (!(to_dev->flags & IFF_UP) || 568 567 /* Reject tunnels with GRE keys, checksums, etc. */ 569 - tparm.i_flags || tparm.o_flags || 568 + !ip_tunnel_flags_empty(tparm.i_flags) || 569 + !ip_tunnel_flags_empty(tparm.o_flags) || 570 570 /* Require a fixed TTL and a TOS copied from the mirrored packet. */ 571 571 inherit_ttl || !inherit_tos || 572 572 /* A destination address may not be "any". */
+21 -6
drivers/net/ethernet/netronome/nfp/flower/action.c
··· 396 396 return 0; 397 397 } 398 398 399 + #define NFP_FL_CHECK(flag) ({ \ 400 + IP_TUNNEL_DECLARE_FLAGS(__check) = { }; \ 401 + __be16 __res; \ 402 + \ 403 + __set_bit(IP_TUNNEL_##flag##_BIT, __check); \ 404 + __res = ip_tunnel_flags_to_be16(__check); \ 405 + \ 406 + BUILD_BUG_ON(__builtin_constant_p(__res) && \ 407 + NFP_FL_TUNNEL_##flag != __res); \ 408 + }) 409 + 399 410 static int 400 411 nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun, 401 412 const struct flow_action_entry *act, ··· 421 410 u32 tmp_set_ip_tun_type_index = 0; 422 411 /* Currently support one pre-tunnel so index is always 0. */ 423 412 int pretun_idx = 0; 413 + __be16 tun_flags; 424 414 425 415 if (!IS_ENABLED(CONFIG_IPV6) && ipv6) 426 416 return -EOPNOTSUPP; ··· 429 417 if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) 430 418 return -EOPNOTSUPP; 431 419 432 - BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM || 433 - NFP_FL_TUNNEL_KEY != TUNNEL_KEY || 434 - NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT); 420 + NFP_FL_CHECK(CSUM); 421 + NFP_FL_CHECK(KEY); 422 + NFP_FL_CHECK(GENEVE_OPT); 423 + 435 424 if (ip_tun->options_len && 436 425 (tun_type != NFP_FL_TUNNEL_GENEVE || 437 426 !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) { ··· 440 427 return -EOPNOTSUPP; 441 428 } 442 429 443 - if (ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) { 430 + tun_flags = ip_tunnel_flags_to_be16(ip_tun->key.tun_flags); 431 + if (!ip_tunnel_flags_is_be16_compat(ip_tun->key.tun_flags) || 432 + (tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS)) { 444 433 NL_SET_ERR_MSG_MOD(extack, 445 434 "unsupported offload: loaded firmware does not support tunnel flag offload"); 446 435 return -EOPNOTSUPP; ··· 457 442 FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx); 458 443 459 444 set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); 460 - if (ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) 445 + if (tun_flags & NFP_FL_TUNNEL_KEY) 461 446 set_tun->tun_id = ip_tun->key.tun_id; 462 
447 463 448 if (ip_tun->key.ttl) { ··· 501 486 } 502 487 503 488 set_tun->tos = ip_tun->key.tos; 504 - set_tun->tun_flags = ip_tun->key.tun_flags; 489 + set_tun->tun_flags = tun_flags; 505 490 506 491 if (tun_type == NFP_FL_TUNNEL_GENEVE) { 507 492 set_tun->tun_proto = htons(ETH_P_TEB);
+27 -17
drivers/net/geneve.c
··· 225 225 void *oiph; 226 226 227 227 if (ip_tunnel_collect_metadata() || gs->collect_md) { 228 - __be16 flags; 228 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 229 229 230 - flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) | 231 - (gnvh->critical ? TUNNEL_CRIT_OPT : 0); 230 + __set_bit(IP_TUNNEL_KEY_BIT, flags); 231 + __assign_bit(IP_TUNNEL_OAM_BIT, flags, gnvh->oam); 232 + __assign_bit(IP_TUNNEL_CRIT_OPT_BIT, flags, gnvh->critical); 232 233 233 234 tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags, 234 235 vni_to_tunnel_id(gnvh->vni), ··· 239 238 goto drop; 240 239 } 241 240 /* Update tunnel dst according to Geneve options. */ 241 + ip_tunnel_flags_zero(flags); 242 + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, flags); 242 243 ip_tunnel_info_opts_set(&tun_dst->u.tun_info, 243 244 gnvh->options, gnvh->opt_len * 4, 244 - TUNNEL_GENEVE_OPT); 245 + flags); 245 246 } else { 246 247 /* Drop packets w/ critical options, 247 248 * since we don't support any... ··· 748 745 { 749 746 geneveh->ver = GENEVE_VER; 750 747 geneveh->opt_len = info->options_len / 4; 751 - geneveh->oam = !!(info->key.tun_flags & TUNNEL_OAM); 752 - geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT); 748 + geneveh->oam = test_bit(IP_TUNNEL_OAM_BIT, info->key.tun_flags); 749 + geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT, 750 + info->key.tun_flags); 753 751 geneveh->rsvd1 = 0; 754 752 tunnel_id_to_vni(info->key.tun_id, geneveh->vni); 755 753 geneveh->proto_type = inner_proto; 756 754 geneveh->rsvd2 = 0; 757 755 758 - if (info->key.tun_flags & TUNNEL_GENEVE_OPT) 756 + if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags)) 759 757 ip_tunnel_info_opts_get(geneveh->options, info); 760 758 } 761 759 ··· 765 761 bool xnet, int ip_hdr_len, 766 762 bool inner_proto_inherit) 767 763 { 768 - bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); 764 + bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); 769 765 struct genevehdr *gnvh; 770 766 __be16 inner_proto; 771 767 int 
min_headroom; ··· 882 878 if (geneve->cfg.collect_md) { 883 879 ttl = key->ttl; 884 880 885 - df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; 881 + df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ? 882 + htons(IP_DF) : 0; 886 883 } else { 887 884 if (geneve->cfg.ttl_inherit) 888 885 ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); ··· 915 910 udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst, 916 911 tos, ttl, df, sport, geneve->cfg.info.key.tp_dst, 917 912 !net_eq(geneve->net, dev_net(geneve->dev)), 918 - !(info->key.tun_flags & TUNNEL_CSUM)); 913 + !test_bit(IP_TUNNEL_CSUM_BIT, 914 + info->key.tun_flags)); 919 915 return 0; 920 916 } 921 917 ··· 1004 998 udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, 1005 999 &saddr, &key->u.ipv6.dst, prio, ttl, 1006 1000 info->key.label, sport, geneve->cfg.info.key.tp_dst, 1007 - !(info->key.tun_flags & TUNNEL_CSUM)); 1001 + !test_bit(IP_TUNNEL_CSUM_BIT, 1002 + info->key.tun_flags)); 1008 1003 return 0; 1009 1004 } 1010 1005 #endif ··· 1304 1297 1305 1298 static bool is_tnl_info_zero(const struct ip_tunnel_info *info) 1306 1299 { 1307 - return !(info->key.tun_id || info->key.tun_flags || info->key.tos || 1300 + return !(info->key.tun_id || info->key.tos || 1301 + !ip_tunnel_flags_empty(info->key.tun_flags) || 1308 1302 info->key.ttl || info->key.label || info->key.tp_src || 1309 1303 memchr_inv(&info->key.u, 0, sizeof(info->key.u))); 1310 1304 } ··· 1443 1435 "Remote IPv6 address cannot be Multicast"); 1444 1436 return -EINVAL; 1445 1437 } 1446 - info->key.tun_flags |= TUNNEL_CSUM; 1438 + __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); 1447 1439 cfg->use_udp6_rx_checksums = true; 1448 1440 #else 1449 1441 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], ··· 1518 1510 goto change_notsup; 1519 1511 } 1520 1512 if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) 1521 - info->key.tun_flags |= TUNNEL_CSUM; 1513 + __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); 1522 1514 } 1523 1515 
1524 1516 if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) { ··· 1528 1520 goto change_notsup; 1529 1521 } 1530 1522 if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) 1531 - info->key.tun_flags &= ~TUNNEL_CSUM; 1523 + __clear_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); 1532 1524 #else 1533 1525 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX], 1534 1526 "IPv6 support not enabled in the kernel"); ··· 1761 1753 info->key.u.ipv4.dst)) 1762 1754 goto nla_put_failure; 1763 1755 if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM, 1764 - !!(info->key.tun_flags & TUNNEL_CSUM))) 1756 + test_bit(IP_TUNNEL_CSUM_BIT, 1757 + info->key.tun_flags))) 1765 1758 goto nla_put_failure; 1766 1759 1767 1760 #if IS_ENABLED(CONFIG_IPV6) ··· 1771 1762 &info->key.u.ipv6.dst)) 1772 1763 goto nla_put_failure; 1773 1764 if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, 1774 - !(info->key.tun_flags & TUNNEL_CSUM))) 1765 + !test_bit(IP_TUNNEL_CSUM_BIT, 1766 + info->key.tun_flags))) 1775 1767 goto nla_put_failure; 1776 1768 #endif 1777 1769 }
+302
drivers/net/pfcp.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* 3 + * PFCP according to 3GPP TS 29.244 4 + * 5 + * Copyright (C) 2022, Intel Corporation. 6 + */ 7 + 8 + #include <linux/module.h> 9 + #include <linux/netdevice.h> 10 + #include <linux/rculist.h> 11 + #include <linux/skbuff.h> 12 + #include <linux/types.h> 13 + 14 + #include <net/udp.h> 15 + #include <net/udp_tunnel.h> 16 + #include <net/pfcp.h> 17 + 18 + struct pfcp_dev { 19 + struct list_head list; 20 + 21 + struct socket *sock; 22 + struct net_device *dev; 23 + struct net *net; 24 + 25 + struct gro_cells gro_cells; 26 + }; 27 + 28 + static unsigned int pfcp_net_id __read_mostly; 29 + 30 + struct pfcp_net { 31 + struct list_head pfcp_dev_list; 32 + }; 33 + 34 + static void 35 + pfcp_session_recv(struct pfcp_dev *pfcp, struct sk_buff *skb, 36 + struct pfcp_metadata *md) 37 + { 38 + struct pfcphdr_session *unparsed = pfcp_hdr_session(skb); 39 + 40 + md->seid = unparsed->seid; 41 + md->type = PFCP_TYPE_SESSION; 42 + } 43 + 44 + static void 45 + pfcp_node_recv(struct pfcp_dev *pfcp, struct sk_buff *skb, 46 + struct pfcp_metadata *md) 47 + { 48 + md->type = PFCP_TYPE_NODE; 49 + } 50 + 51 + static int pfcp_encap_recv(struct sock *sk, struct sk_buff *skb) 52 + { 53 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 54 + struct metadata_dst *tun_dst; 55 + struct pfcp_metadata *md; 56 + struct pfcphdr *unparsed; 57 + struct pfcp_dev *pfcp; 58 + 59 + if (unlikely(!pskb_may_pull(skb, PFCP_HLEN))) 60 + goto drop; 61 + 62 + pfcp = rcu_dereference_sk_user_data(sk); 63 + if (unlikely(!pfcp)) 64 + goto drop; 65 + 66 + unparsed = pfcp_hdr(skb); 67 + 68 + ip_tunnel_flags_zero(flags); 69 + tun_dst = udp_tun_rx_dst(skb, sk->sk_family, flags, 0, 70 + sizeof(*md)); 71 + if (unlikely(!tun_dst)) 72 + goto drop; 73 + 74 + md = ip_tunnel_info_opts(&tun_dst->u.tun_info); 75 + if (unlikely(!md)) 76 + goto drop; 77 + 78 + if (unparsed->flags & PFCP_SEID_FLAG) 79 + pfcp_session_recv(pfcp, skb, md); 80 + else 81 + pfcp_node_recv(pfcp, skb, md); 
82 + 83 + __set_bit(IP_TUNNEL_PFCP_OPT_BIT, flags); 84 + ip_tunnel_info_opts_set(&tun_dst->u.tun_info, md, sizeof(*md), 85 + flags); 86 + 87 + if (unlikely(iptunnel_pull_header(skb, PFCP_HLEN, skb->protocol, 88 + !net_eq(sock_net(sk), 89 + dev_net(pfcp->dev))))) 90 + goto drop; 91 + 92 + skb_dst_set(skb, (struct dst_entry *)tun_dst); 93 + 94 + skb_reset_network_header(skb); 95 + skb_reset_mac_header(skb); 96 + skb->dev = pfcp->dev; 97 + 98 + gro_cells_receive(&pfcp->gro_cells, skb); 99 + 100 + return 0; 101 + drop: 102 + kfree_skb(skb); 103 + return 0; 104 + } 105 + 106 + static void pfcp_del_sock(struct pfcp_dev *pfcp) 107 + { 108 + udp_tunnel_sock_release(pfcp->sock); 109 + pfcp->sock = NULL; 110 + } 111 + 112 + static void pfcp_dev_uninit(struct net_device *dev) 113 + { 114 + struct pfcp_dev *pfcp = netdev_priv(dev); 115 + 116 + gro_cells_destroy(&pfcp->gro_cells); 117 + pfcp_del_sock(pfcp); 118 + } 119 + 120 + static int pfcp_dev_init(struct net_device *dev) 121 + { 122 + struct pfcp_dev *pfcp = netdev_priv(dev); 123 + 124 + pfcp->dev = dev; 125 + 126 + return gro_cells_init(&pfcp->gro_cells, dev); 127 + } 128 + 129 + static const struct net_device_ops pfcp_netdev_ops = { 130 + .ndo_init = pfcp_dev_init, 131 + .ndo_uninit = pfcp_dev_uninit, 132 + .ndo_get_stats64 = dev_get_tstats64, 133 + }; 134 + 135 + static const struct device_type pfcp_type = { 136 + .name = "pfcp", 137 + }; 138 + 139 + static void pfcp_link_setup(struct net_device *dev) 140 + { 141 + dev->netdev_ops = &pfcp_netdev_ops; 142 + dev->needs_free_netdev = true; 143 + SET_NETDEV_DEVTYPE(dev, &pfcp_type); 144 + 145 + dev->hard_header_len = 0; 146 + dev->addr_len = 0; 147 + 148 + dev->type = ARPHRD_NONE; 149 + dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 150 + dev->priv_flags |= IFF_NO_QUEUE; 151 + 152 + netif_keep_dst(dev); 153 + } 154 + 155 + static struct socket *pfcp_create_sock(struct pfcp_dev *pfcp) 156 + { 157 + struct udp_tunnel_sock_cfg tuncfg = {}; 158 + struct udp_port_cfg 
udp_conf = { 159 + .local_ip.s_addr = htonl(INADDR_ANY), 160 + .family = AF_INET, 161 + }; 162 + struct net *net = pfcp->net; 163 + struct socket *sock; 164 + int err; 165 + 166 + udp_conf.local_udp_port = htons(PFCP_PORT); 167 + 168 + err = udp_sock_create(net, &udp_conf, &sock); 169 + if (err) 170 + return ERR_PTR(err); 171 + 172 + tuncfg.sk_user_data = pfcp; 173 + tuncfg.encap_rcv = pfcp_encap_recv; 174 + tuncfg.encap_type = 1; 175 + 176 + setup_udp_tunnel_sock(net, sock, &tuncfg); 177 + 178 + return sock; 179 + } 180 + 181 + static int pfcp_add_sock(struct pfcp_dev *pfcp) 182 + { 183 + pfcp->sock = pfcp_create_sock(pfcp); 184 + 185 + return PTR_ERR_OR_ZERO(pfcp->sock); 186 + } 187 + 188 + static int pfcp_newlink(struct net *net, struct net_device *dev, 189 + struct nlattr *tb[], struct nlattr *data[], 190 + struct netlink_ext_ack *extack) 191 + { 192 + struct pfcp_dev *pfcp = netdev_priv(dev); 193 + struct pfcp_net *pn; 194 + int err; 195 + 196 + pfcp->net = net; 197 + 198 + err = pfcp_add_sock(pfcp); 199 + if (err) { 200 + netdev_dbg(dev, "failed to add pfcp socket %d\n", err); 201 + goto exit_err; 202 + } 203 + 204 + err = register_netdevice(dev); 205 + if (err) { 206 + netdev_dbg(dev, "failed to register pfcp netdev %d\n", err); 207 + goto exit_del_pfcp_sock; 208 + } 209 + 210 + pn = net_generic(dev_net(dev), pfcp_net_id); 211 + list_add_rcu(&pfcp->list, &pn->pfcp_dev_list); 212 + 213 + netdev_dbg(dev, "registered new PFCP interface\n"); 214 + 215 + return 0; 216 + 217 + exit_del_pfcp_sock: 218 + pfcp_del_sock(pfcp); 219 + exit_err: 220 + pfcp->net = NULL; 221 + return err; 222 + } 223 + 224 + static void pfcp_dellink(struct net_device *dev, struct list_head *head) 225 + { 226 + struct pfcp_dev *pfcp = netdev_priv(dev); 227 + 228 + list_del_rcu(&pfcp->list); 229 + unregister_netdevice_queue(dev, head); 230 + } 231 + 232 + static struct rtnl_link_ops pfcp_link_ops __read_mostly = { 233 + .kind = "pfcp", 234 + .priv_size = sizeof(struct pfcp_dev), 235 + .setup 
= pfcp_link_setup, 236 + .newlink = pfcp_newlink, 237 + .dellink = pfcp_dellink, 238 + }; 239 + 240 + static int __net_init pfcp_net_init(struct net *net) 241 + { 242 + struct pfcp_net *pn = net_generic(net, pfcp_net_id); 243 + 244 + INIT_LIST_HEAD(&pn->pfcp_dev_list); 245 + return 0; 246 + } 247 + 248 + static void __net_exit pfcp_net_exit(struct net *net) 249 + { 250 + struct pfcp_net *pn = net_generic(net, pfcp_net_id); 251 + struct pfcp_dev *pfcp; 252 + LIST_HEAD(list); 253 + 254 + rtnl_lock(); 255 + list_for_each_entry(pfcp, &pn->pfcp_dev_list, list) 256 + pfcp_dellink(pfcp->dev, &list); 257 + 258 + unregister_netdevice_many(&list); 259 + rtnl_unlock(); 260 + } 261 + 262 + static struct pernet_operations pfcp_net_ops = { 263 + .init = pfcp_net_init, 264 + .exit = pfcp_net_exit, 265 + .id = &pfcp_net_id, 266 + .size = sizeof(struct pfcp_net), 267 + }; 268 + 269 + static int __init pfcp_init(void) 270 + { 271 + int err; 272 + 273 + err = register_pernet_subsys(&pfcp_net_ops); 274 + if (err) 275 + goto exit_err; 276 + 277 + err = rtnl_link_register(&pfcp_link_ops); 278 + if (err) 279 + goto exit_unregister_subsys; 280 + return 0; 281 + 282 + exit_unregister_subsys: 283 + unregister_pernet_subsys(&pfcp_net_ops); 284 + exit_err: 285 + pr_err("loading PFCP module failed: err %d\n", err); 286 + return err; 287 + } 288 + late_initcall(pfcp_init); 289 + 290 + static void __exit pfcp_exit(void) 291 + { 292 + rtnl_link_unregister(&pfcp_link_ops); 293 + unregister_pernet_subsys(&pfcp_net_ops); 294 + 295 + pr_info("PFCP module unloaded\n"); 296 + } 297 + module_exit(pfcp_exit); 298 + 299 + MODULE_LICENSE("GPL"); 300 + MODULE_AUTHOR("Wojciech Drewek <wojciech.drewek@intel.com>"); 301 + MODULE_DESCRIPTION("Interface driver for PFCP encapsulated traffic"); 302 + MODULE_ALIAS_RTNL_LINK("pfcp");
+9 -5
drivers/net/vxlan/vxlan_core.c
··· 1584 1584 1585 1585 tun_dst = (struct metadata_dst *)skb_dst(skb); 1586 1586 if (tun_dst) { 1587 - tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; 1587 + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, 1588 + tun_dst->u.tun_info.key.tun_flags); 1588 1589 tun_dst->u.tun_info.options_len = sizeof(*md); 1589 1590 } 1590 1591 if (gbp->dont_learn) ··· 1717 1716 goto drop; 1718 1717 1719 1718 if (vxlan_collect_metadata(vs)) { 1719 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 1720 1720 struct metadata_dst *tun_dst; 1721 1721 1722 - tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, 1722 + __set_bit(IP_TUNNEL_KEY_BIT, flags); 1723 + tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), flags, 1723 1724 key32_to_tunnel_id(vni), sizeof(*md)); 1724 1725 1725 1726 if (!tun_dst) ··· 2406 2403 vni = tunnel_id_to_key32(info->key.tun_id); 2407 2404 ifindex = 0; 2408 2405 dst_cache = &info->dst_cache; 2409 - if (info->key.tun_flags & TUNNEL_VXLAN_OPT) { 2406 + if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) { 2410 2407 if (info->options_len < sizeof(*md)) 2411 2408 goto drop; 2412 2409 md = ip_tunnel_info_opts(info); 2413 2410 } 2414 2411 ttl = info->key.ttl; 2415 2412 tos = info->key.tos; 2416 - udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); 2413 + udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); 2417 2414 } 2418 2415 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, 2419 2416 vxlan->cfg.port_max, true); ··· 2454 2451 old_iph->frag_off & htons(IP_DF))) 2455 2452 df = htons(IP_DF); 2456 2453 } 2457 - } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) { 2454 + } else if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, 2455 + info->key.tun_flags)) { 2458 2456 df = htons(IP_DF); 2459 2457 } 2460 2458
+7 -5
drivers/s390/cio/idset.c
··· 16 16 unsigned long bitmap[]; 17 17 }; 18 18 19 - static inline unsigned long bitmap_size(int num_ssid, int num_id) 19 + static inline unsigned long idset_bitmap_size(int num_ssid, int num_id) 20 20 { 21 - return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long); 21 + return bitmap_size(size_mul(num_ssid, num_id)); 22 22 } 23 23 24 24 static struct idset *idset_new(int num_ssid, int num_id) 25 25 { 26 26 struct idset *set; 27 27 28 - set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id)); 28 + set = vmalloc(sizeof(struct idset) + 29 + idset_bitmap_size(num_ssid, num_id)); 29 30 if (set) { 30 31 set->num_ssid = num_ssid; 31 32 set->num_id = num_id; 32 - memset(set->bitmap, 0, bitmap_size(num_ssid, num_id)); 33 + memset(set->bitmap, 0, idset_bitmap_size(num_ssid, num_id)); 33 34 } 34 35 return set; 35 36 } ··· 42 41 43 42 void idset_fill(struct idset *set) 44 43 { 45 - memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id)); 44 + memset(set->bitmap, 0xff, 45 + idset_bitmap_size(set->num_ssid, set->num_id)); 46 46 } 47 47 48 48 static inline void idset_add(struct idset *set, int ssid, int id)
+4 -4
fs/btrfs/free-space-cache.c
··· 1911 1911 ctl->free_space -= bytes; 1912 1912 } 1913 1913 1914 - static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, 1915 - struct btrfs_free_space *info, u64 offset, 1916 - u64 bytes) 1914 + static void btrfs_bitmap_set_bits(struct btrfs_free_space_ctl *ctl, 1915 + struct btrfs_free_space *info, u64 offset, 1916 + u64 bytes) 1917 1917 { 1918 1918 unsigned long start, count, end; 1919 1919 int extent_delta = 1; ··· 2249 2249 2250 2250 bytes_to_set = min(end - offset, bytes); 2251 2251 2252 - bitmap_set_bits(ctl, info, offset, bytes_to_set); 2252 + btrfs_bitmap_set_bits(ctl, info, offset, bytes_to_set); 2253 2253 2254 2254 return bytes_to_set; 2255 2255
+2 -2
fs/ntfs3/bitmap.c
··· 654 654 wnd->total_zeroes = nbits; 655 655 wnd->extent_max = MINUS_ONE_T; 656 656 wnd->zone_bit = wnd->zone_end = 0; 657 - wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits)); 657 + wnd->nwnd = bytes_to_block(sb, ntfs3_bitmap_size(nbits)); 658 658 wnd->bits_last = nbits & (wbits - 1); 659 659 if (!wnd->bits_last) 660 660 wnd->bits_last = wbits; ··· 1347 1347 return -EINVAL; 1348 1348 1349 1349 /* Align to 8 byte boundary. */ 1350 - new_wnd = bytes_to_block(sb, bitmap_size(new_bits)); 1350 + new_wnd = bytes_to_block(sb, ntfs3_bitmap_size(new_bits)); 1351 1351 new_last = new_bits & (wbits - 1); 1352 1352 if (!new_last) 1353 1353 new_last = wbits;
+1 -1
fs/ntfs3/fsntfs.c
··· 522 522 ni->mi.dirty = true; 523 523 524 524 /* Step 2: Resize $MFT::BITMAP. */ 525 - new_bitmap_bytes = bitmap_size(new_mft_total); 525 + new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total); 526 526 527 527 err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run, 528 528 new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
+6 -5
fs/ntfs3/index.c
··· 1456 1456 1457 1457 alloc->nres.valid_size = alloc->nres.data_size = cpu_to_le64(data_size); 1458 1458 1459 - err = ni_insert_resident(ni, bitmap_size(1), ATTR_BITMAP, in->name, 1460 - in->name_len, &bitmap, NULL, NULL); 1459 + err = ni_insert_resident(ni, ntfs3_bitmap_size(1), ATTR_BITMAP, 1460 + in->name, in->name_len, &bitmap, NULL, NULL); 1461 1461 if (err) 1462 1462 goto out2; 1463 1463 ··· 1518 1518 if (bmp) { 1519 1519 /* Increase bitmap. */ 1520 1520 err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len, 1521 - &indx->bitmap_run, bitmap_size(bit + 1), 1522 - NULL, true, NULL); 1521 + &indx->bitmap_run, 1522 + ntfs3_bitmap_size(bit + 1), NULL, true, 1523 + NULL); 1523 1524 if (err) 1524 1525 goto out1; 1525 1526 } ··· 2093 2092 if (in->name == I30_NAME) 2094 2093 i_size_write(&ni->vfs_inode, new_data); 2095 2094 2096 - bpb = bitmap_size(bit); 2095 + bpb = ntfs3_bitmap_size(bit); 2097 2096 if (bpb * 8 == nbits) 2098 2097 return 0; 2099 2098
+2 -2
fs/ntfs3/ntfs_fs.h
··· 966 966 } 967 967 968 968 /* NTFS uses quad aligned bitmaps. */ 969 - static inline size_t bitmap_size(size_t bits) 969 + static inline size_t ntfs3_bitmap_size(size_t bits) 970 970 { 971 - return ALIGN((bits + 7) >> 3, 8); 971 + return BITS_TO_U64(bits) * sizeof(u64); 972 972 } 973 973 974 974 #define _100ns2seconds 10000000
+1 -1
fs/ntfs3/super.c
··· 1341 1341 1342 1342 /* Check bitmap boundary. */ 1343 1343 tt = sbi->used.bitmap.nbits; 1344 - if (inode->i_size < bitmap_size(tt)) { 1344 + if (inode->i_size < ntfs3_bitmap_size(tt)) { 1345 1345 ntfs_err(sb, "$Bitmap is corrupted."); 1346 1346 err = -EINVAL; 1347 1347 goto put_inode_out;
+71 -20
include/linux/bitmap.h
··· 83 83 * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst 84 84 * bitmap_get_value8(map, start) Get 8bit value from map at start 85 85 * bitmap_set_value8(map, value, start) Set 8bit value to map at start 86 + * bitmap_read(map, start, nbits) Read an nbits-sized value from 87 + * map at start 88 + * bitmap_write(map, value, start, nbits) Write an nbits-sized value to 89 + * map at start 86 90 * 87 91 * Note, bitmap_zero() and bitmap_fill() operate over the region of 88 92 * unsigned longs, that is, bits behind bitmap till the unsigned long ··· 226 222 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) 227 223 #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) 228 224 225 + #define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE) 226 + 229 227 static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) 230 228 { 231 - unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); 229 + unsigned int len = bitmap_size(nbits); 232 230 233 231 if (small_const_nbits(nbits)) 234 232 *dst = 0; ··· 240 234 241 235 static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) 242 236 { 243 - unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); 237 + unsigned int len = bitmap_size(nbits); 244 238 245 239 if (small_const_nbits(nbits)) 246 240 *dst = ~0UL; ··· 251 245 static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, 252 246 unsigned int nbits) 253 247 { 254 - unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); 248 + unsigned int len = bitmap_size(nbits); 255 249 256 250 if (small_const_nbits(nbits)) 257 251 *dst = *src; ··· 728 722 } 729 723 730 724 /** 731 - * bitmap_get_value8 - get an 8-bit value within a memory region 725 + * bitmap_read - read a value of n-bits from the memory region 732 726 * @map: address to the bitmap memory region 733 - * @start: bit offset of the 8-bit value; must be a multiple of 8 
727 + * @start: bit offset of the n-bit value 728 + * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG 734 729 * 735 - * Returns the 8-bit value located at the @start bit offset within the @src 736 - * memory region. 730 + * Returns: value of @nbits bits located at the @start bit offset within the 731 + * @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return 732 + * value is undefined. 737 733 */ 738 - static inline unsigned long bitmap_get_value8(const unsigned long *map, 739 - unsigned long start) 734 + static inline unsigned long bitmap_read(const unsigned long *map, 735 + unsigned long start, 736 + unsigned long nbits) 740 737 { 741 - const size_t index = BIT_WORD(start); 742 - const unsigned long offset = start % BITS_PER_LONG; 738 + size_t index = BIT_WORD(start); 739 + unsigned long offset = start % BITS_PER_LONG; 740 + unsigned long space = BITS_PER_LONG - offset; 741 + unsigned long value_low, value_high; 743 742 744 - return (map[index] >> offset) & 0xFF; 743 + if (unlikely(!nbits || nbits > BITS_PER_LONG)) 744 + return 0; 745 + 746 + if (space >= nbits) 747 + return (map[index] >> offset) & BITMAP_LAST_WORD_MASK(nbits); 748 + 749 + value_low = map[index] & BITMAP_FIRST_WORD_MASK(start); 750 + value_high = map[index + 1] & BITMAP_LAST_WORD_MASK(start + nbits); 751 + return (value_low >> offset) | (value_high << space); 745 752 } 746 753 747 754 /** 748 - * bitmap_set_value8 - set an 8-bit value within a memory region 755 + * bitmap_write - write n-bit value within a memory region 749 756 * @map: address to the bitmap memory region 750 - * @value: the 8-bit value; values wider than 8 bits may clobber bitmap 751 - * @start: bit offset of the 8-bit value; must be a multiple of 8 757 + * @value: value to write, clamped to nbits 758 + * @start: bit offset of the n-bit value 759 + * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG. 
760 + * 761 + * bitmap_write() behaves as-if implemented as @nbits calls of __assign_bit(), 762 + * i.e. bits beyond @nbits are ignored: 763 + * 764 + * for (bit = 0; bit < nbits; bit++) 765 + * __assign_bit(start + bit, bitmap, val & BIT(bit)); 766 + * 767 + * For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed. 752 768 */ 753 - static inline void bitmap_set_value8(unsigned long *map, unsigned long value, 754 - unsigned long start) 769 + static inline void bitmap_write(unsigned long *map, unsigned long value, 770 + unsigned long start, unsigned long nbits) 755 771 { 756 - const size_t index = BIT_WORD(start); 757 - const unsigned long offset = start % BITS_PER_LONG; 772 + size_t index; 773 + unsigned long offset; 774 + unsigned long space; 775 + unsigned long mask; 776 + bool fit; 758 777 759 - map[index] &= ~(0xFFUL << offset); 778 + if (unlikely(!nbits || nbits > BITS_PER_LONG)) 779 + return; 780 + 781 + mask = BITMAP_LAST_WORD_MASK(nbits); 782 + value &= mask; 783 + offset = start % BITS_PER_LONG; 784 + space = BITS_PER_LONG - offset; 785 + fit = space >= nbits; 786 + index = BIT_WORD(start); 787 + 788 + map[index] &= (fit ? (~(mask << offset)) : ~BITMAP_FIRST_WORD_MASK(start)); 760 789 map[index] |= value << offset; 790 + if (fit) 791 + return; 792 + 793 + map[index + 1] &= BITMAP_FIRST_WORD_MASK(start + nbits); 794 + map[index + 1] |= (value >> space); 761 795 } 796 + 797 + #define bitmap_get_value8(map, start) \ 798 + bitmap_read(map, start, BITS_PER_BYTE) 799 + #define bitmap_set_value8(map, value, start) \ 800 + bitmap_write(map, value, start, BITS_PER_BYTE) 762 801 763 802 #endif /* __ASSEMBLY__ */ 764 803
+7 -16
include/linux/bitops.h
··· 21 21 #define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32)) 22 22 #define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char)) 23 23 24 + #define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE) 25 + 24 26 extern unsigned int __sw_hweight8(unsigned int w); 25 27 extern unsigned int __sw_hweight16(unsigned int w); 26 28 extern unsigned int __sw_hweight32(unsigned int w); ··· 82 80 __check_bitop_pr(__test_and_clear_bit); 83 81 __check_bitop_pr(__test_and_change_bit); 84 82 __check_bitop_pr(test_bit); 83 + __check_bitop_pr(test_bit_acquire); 85 84 86 85 #undef __check_bitop_pr 87 86 ··· 275 272 * @addr: the address to start counting from 276 273 * @value: the value to assign 277 274 */ 278 - static __always_inline void assign_bit(long nr, volatile unsigned long *addr, 279 - bool value) 280 - { 281 - if (value) 282 - set_bit(nr, addr); 283 - else 284 - clear_bit(nr, addr); 285 - } 275 + #define assign_bit(nr, addr, value) \ 276 + ((value) ? set_bit((nr), (addr)) : clear_bit((nr), (addr))) 286 277 287 - static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, 288 - bool value) 289 - { 290 - if (value) 291 - __set_bit(nr, addr); 292 - else 293 - __clear_bit(nr, addr); 294 - } 278 + #define __assign_bit(nr, addr, value) \ 279 + ((value) ? __set_bit((nr), (addr)) : __clear_bit((nr), (addr))) 295 280 296 281 /** 297 282 * __ptr_set_bit - Set bit in a pointer's value
+1 -1
include/linux/cpumask.h
··· 853 853 */ 854 854 static inline unsigned int cpumask_size(void) 855 855 { 856 - return BITS_TO_LONGS(large_cpumask_bits) * sizeof(long); 856 + return bitmap_size(large_cpumask_bits); 857 857 } 858 858 859 859 /*
+4 -23
include/linux/linkmode.h
··· 43 43 return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); 44 44 } 45 45 46 - static inline void linkmode_set_bit(int nr, volatile unsigned long *addr) 47 - { 48 - __set_bit(nr, addr); 49 - } 50 - 51 - static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr) 52 - { 53 - __clear_bit(nr, addr); 54 - } 55 - 56 - static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr, 57 - int set) 58 - { 59 - if (set) 60 - linkmode_set_bit(nr, addr); 61 - else 62 - linkmode_clear_bit(nr, addr); 63 - } 64 - 65 - static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr) 66 - { 67 - return test_bit(nr, addr); 68 - } 46 + #define linkmode_test_bit test_bit 47 + #define linkmode_set_bit __set_bit 48 + #define linkmode_clear_bit __clear_bit 49 + #define linkmode_mod_bit __assign_bit 69 50 70 51 static inline void linkmode_set_bit_array(const int *array, int array_size, 71 52 unsigned long *addr)
+4 -3
include/linux/netdevice.h
··· 59 59 struct kernel_hwtstamp_config; 60 60 struct phy_device; 61 61 struct dsa_port; 62 - struct ip_tunnel_parm; 62 + struct ip_tunnel_parm_kern; 63 63 struct macsec_context; 64 64 struct macsec_ops; 65 65 struct netdev_name_node; ··· 1327 1327 * queue id bound to an AF_XDP socket. The flags field specifies if 1328 1328 * only RX, only Tx, or both should be woken up using the flags 1329 1329 * XDP_WAKEUP_RX and XDP_WAKEUP_TX. 1330 - * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p, 1330 + * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p, 1331 1331 * int cmd); 1332 1332 * Add, change, delete or get information on an IPv4 tunnel. 1333 1333 * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); ··· 1583 1583 int (*ndo_xsk_wakeup)(struct net_device *dev, 1584 1584 u32 queue_id, u32 flags); 1585 1585 int (*ndo_tunnel_ctl)(struct net_device *dev, 1586 - struct ip_tunnel_parm *p, int cmd); 1586 + struct ip_tunnel_parm_kern *p, 1587 + int cmd); 1587 1588 struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); 1588 1589 int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, 1589 1590 struct net_device_path *path);
+5 -5
include/net/dst_metadata.h
··· 198 198 __be32 daddr, 199 199 __u8 tos, __u8 ttl, 200 200 __be16 tp_dst, 201 - __be16 flags, 201 + const unsigned long *flags, 202 202 __be64 tunnel_id, 203 203 int md_size) 204 204 { ··· 215 215 } 216 216 217 217 static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb, 218 - __be16 flags, 218 + const unsigned long *flags, 219 219 __be64 tunnel_id, 220 220 int md_size) 221 221 { ··· 230 230 __u8 tos, __u8 ttl, 231 231 __be16 tp_dst, 232 232 __be32 label, 233 - __be16 flags, 233 + const unsigned long *flags, 234 234 __be64 tunnel_id, 235 235 int md_size) 236 236 { ··· 243 243 244 244 info = &tun_dst->u.tun_info; 245 245 info->mode = IP_TUNNEL_INFO_IPV6; 246 - info->key.tun_flags = flags; 246 + ip_tunnel_flags_copy(info->key.tun_flags, flags); 247 247 info->key.tun_id = tunnel_id; 248 248 info->key.tp_src = 0; 249 249 info->key.tp_dst = tp_dst; ··· 259 259 } 260 260 261 261 static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb, 262 - __be16 flags, 262 + const unsigned long *flags, 263 263 __be64 tunnel_id, 264 264 int md_size) 265 265 {
+1 -1
include/net/flow_dissector.h
··· 97 97 * here but seems difficult to #include 98 98 */ 99 99 u8 len; 100 - __be16 dst_opt_type; 100 + u32 dst_opt_type; 101 101 }; 102 102 103 103 struct flow_dissector_key_keyid {
+32 -34
include/net/gre.h
··· 49 49 !strcmp(dev->rtnl_link_ops->kind, "ip6gretap"); 50 50 } 51 51 52 - static inline int gre_calc_hlen(__be16 o_flags) 52 + static inline int gre_calc_hlen(const unsigned long *o_flags) 53 53 { 54 54 int addend = 4; 55 55 56 - if (o_flags & TUNNEL_CSUM) 56 + if (test_bit(IP_TUNNEL_CSUM_BIT, o_flags)) 57 57 addend += 4; 58 - if (o_flags & TUNNEL_KEY) 58 + if (test_bit(IP_TUNNEL_KEY_BIT, o_flags)) 59 59 addend += 4; 60 - if (o_flags & TUNNEL_SEQ) 60 + if (test_bit(IP_TUNNEL_SEQ_BIT, o_flags)) 61 61 addend += 4; 62 62 return addend; 63 63 } 64 64 65 - static inline __be16 gre_flags_to_tnl_flags(__be16 flags) 65 + static inline void gre_flags_to_tnl_flags(unsigned long *dst, __be16 flags) 66 66 { 67 - __be16 tflags = 0; 67 + IP_TUNNEL_DECLARE_FLAGS(res) = { }; 68 68 69 - if (flags & GRE_CSUM) 70 - tflags |= TUNNEL_CSUM; 71 - if (flags & GRE_ROUTING) 72 - tflags |= TUNNEL_ROUTING; 73 - if (flags & GRE_KEY) 74 - tflags |= TUNNEL_KEY; 75 - if (flags & GRE_SEQ) 76 - tflags |= TUNNEL_SEQ; 77 - if (flags & GRE_STRICT) 78 - tflags |= TUNNEL_STRICT; 79 - if (flags & GRE_REC) 80 - tflags |= TUNNEL_REC; 81 - if (flags & GRE_VERSION) 82 - tflags |= TUNNEL_VERSION; 69 + __assign_bit(IP_TUNNEL_CSUM_BIT, res, flags & GRE_CSUM); 70 + __assign_bit(IP_TUNNEL_ROUTING_BIT, res, flags & GRE_ROUTING); 71 + __assign_bit(IP_TUNNEL_KEY_BIT, res, flags & GRE_KEY); 72 + __assign_bit(IP_TUNNEL_SEQ_BIT, res, flags & GRE_SEQ); 73 + __assign_bit(IP_TUNNEL_STRICT_BIT, res, flags & GRE_STRICT); 74 + __assign_bit(IP_TUNNEL_REC_BIT, res, flags & GRE_REC); 75 + __assign_bit(IP_TUNNEL_VERSION_BIT, res, flags & GRE_VERSION); 83 76 84 - return tflags; 77 + ip_tunnel_flags_copy(dst, res); 85 78 } 86 79 87 - static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags) 80 + static inline __be16 gre_tnl_flags_to_gre_flags(const unsigned long *tflags) 88 81 { 89 82 __be16 flags = 0; 90 83 91 - if (tflags & TUNNEL_CSUM) 84 + if (test_bit(IP_TUNNEL_CSUM_BIT, tflags)) 92 85 flags |= GRE_CSUM; 93 - if 
(tflags & TUNNEL_ROUTING) 86 + if (test_bit(IP_TUNNEL_ROUTING_BIT, tflags)) 94 87 flags |= GRE_ROUTING; 95 - if (tflags & TUNNEL_KEY) 88 + if (test_bit(IP_TUNNEL_KEY_BIT, tflags)) 96 89 flags |= GRE_KEY; 97 - if (tflags & TUNNEL_SEQ) 90 + if (test_bit(IP_TUNNEL_SEQ_BIT, tflags)) 98 91 flags |= GRE_SEQ; 99 - if (tflags & TUNNEL_STRICT) 92 + if (test_bit(IP_TUNNEL_STRICT_BIT, tflags)) 100 93 flags |= GRE_STRICT; 101 - if (tflags & TUNNEL_REC) 94 + if (test_bit(IP_TUNNEL_REC_BIT, tflags)) 102 95 flags |= GRE_REC; 103 - if (tflags & TUNNEL_VERSION) 96 + if (test_bit(IP_TUNNEL_VERSION_BIT, tflags)) 104 97 flags |= GRE_VERSION; 105 98 106 99 return flags; 107 100 } 108 101 109 102 static inline void gre_build_header(struct sk_buff *skb, int hdr_len, 110 - __be16 flags, __be16 proto, 103 + const unsigned long *flags, __be16 proto, 111 104 __be32 key, __be32 seq) 112 105 { 106 + IP_TUNNEL_DECLARE_FLAGS(cond) = { }; 113 107 struct gre_base_hdr *greh; 114 108 115 109 skb_push(skb, hdr_len); ··· 114 120 greh->flags = gre_tnl_flags_to_gre_flags(flags); 115 121 greh->protocol = proto; 116 122 117 - if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) { 123 + __set_bit(IP_TUNNEL_KEY_BIT, cond); 124 + __set_bit(IP_TUNNEL_CSUM_BIT, cond); 125 + __set_bit(IP_TUNNEL_SEQ_BIT, cond); 126 + 127 + if (ip_tunnel_flags_intersect(flags, cond)) { 118 128 __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); 119 129 120 - if (flags & TUNNEL_SEQ) { 130 + if (test_bit(IP_TUNNEL_SEQ_BIT, flags)) { 121 131 *ptr = seq; 122 132 ptr--; 123 133 } 124 - if (flags & TUNNEL_KEY) { 134 + if (test_bit(IP_TUNNEL_KEY_BIT, flags)) { 125 135 *ptr = key; 126 136 ptr--; 127 137 } 128 - if (flags & TUNNEL_CSUM && 138 + if (test_bit(IP_TUNNEL_CSUM_BIT, flags) && 129 139 !(skb_shinfo(skb)->gso_type & 130 140 (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) { 131 141 *ptr = 0;
+2 -2
include/net/ip6_tunnel.h
··· 30 30 struct in6_addr laddr; /* local tunnel end-point address */ 31 31 struct in6_addr raddr; /* remote tunnel end-point address */ 32 32 33 - __be16 i_flags; 34 - __be16 o_flags; 33 + IP_TUNNEL_DECLARE_FLAGS(i_flags); 34 + IP_TUNNEL_DECLARE_FLAGS(o_flags); 35 35 __be32 i_key; 36 36 __be32 o_key; 37 37
+119 -20
include/net/ip_tunnels.h
··· 36 36 (sizeof_field(struct ip_tunnel_key, u) - \ 37 37 sizeof_field(struct ip_tunnel_key, u.ipv4)) 38 38 39 + #define __ipt_flag_op(op, ...) \ 40 + op(__VA_ARGS__, __IP_TUNNEL_FLAG_NUM) 41 + 42 + #define IP_TUNNEL_DECLARE_FLAGS(...) \ 43 + __ipt_flag_op(DECLARE_BITMAP, __VA_ARGS__) 44 + 45 + #define ip_tunnel_flags_zero(...) __ipt_flag_op(bitmap_zero, __VA_ARGS__) 46 + #define ip_tunnel_flags_copy(...) __ipt_flag_op(bitmap_copy, __VA_ARGS__) 47 + #define ip_tunnel_flags_and(...) __ipt_flag_op(bitmap_and, __VA_ARGS__) 48 + #define ip_tunnel_flags_or(...) __ipt_flag_op(bitmap_or, __VA_ARGS__) 49 + 50 + #define ip_tunnel_flags_empty(...) \ 51 + __ipt_flag_op(bitmap_empty, __VA_ARGS__) 52 + #define ip_tunnel_flags_intersect(...) \ 53 + __ipt_flag_op(bitmap_intersects, __VA_ARGS__) 54 + #define ip_tunnel_flags_subset(...) \ 55 + __ipt_flag_op(bitmap_subset, __VA_ARGS__) 56 + 39 57 struct ip_tunnel_key { 40 58 __be64 tun_id; 41 59 union { ··· 66 48 struct in6_addr dst; 67 49 } ipv6; 68 50 } u; 69 - __be16 tun_flags; 70 - u8 tos; /* TOS for IPv4, TC for IPv6 */ 71 - u8 ttl; /* TTL for IPv4, HL for IPv6 */ 51 + IP_TUNNEL_DECLARE_FLAGS(tun_flags); 72 52 __be32 label; /* Flow Label for IPv6 */ 73 53 u32 nhid; 54 + u8 tos; /* TOS for IPv4, TC for IPv6 */ 55 + u8 ttl; /* TTL for IPv4, HL for IPv6 */ 74 56 __be16 tp_src; 75 57 __be16 tp_dst; 76 58 __u8 flow_flags; ··· 128 110 129 111 struct metadata_dst; 130 112 113 + /* Kernel-side variant of ip_tunnel_parm */ 114 + struct ip_tunnel_parm_kern { 115 + char name[IFNAMSIZ]; 116 + IP_TUNNEL_DECLARE_FLAGS(i_flags); 117 + IP_TUNNEL_DECLARE_FLAGS(o_flags); 118 + __be32 i_key; 119 + __be32 o_key; 120 + int link; 121 + struct iphdr iph; 122 + }; 123 + 131 124 struct ip_tunnel { 132 125 struct ip_tunnel __rcu *next; 133 126 struct hlist_node hash_node; ··· 165 136 166 137 struct dst_cache dst_cache; 167 138 168 - struct ip_tunnel_parm parms; 139 + struct ip_tunnel_parm_kern parms; 169 140 170 141 int mlink; 171 142 int encap_hlen; 
/* Encap header length (FOU,GUE) */ ··· 186 157 }; 187 158 188 159 struct tnl_ptk_info { 189 - __be16 flags; 160 + IP_TUNNEL_DECLARE_FLAGS(flags); 190 161 __be16 proto; 191 162 __be32 key; 192 163 __be32 seq; ··· 208 179 int type; 209 180 }; 210 181 182 + static inline void ip_tunnel_set_options_present(unsigned long *flags) 183 + { 184 + IP_TUNNEL_DECLARE_FLAGS(present) = { }; 185 + 186 + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present); 187 + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present); 188 + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present); 189 + __set_bit(IP_TUNNEL_GTP_OPT_BIT, present); 190 + __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present); 191 + 192 + ip_tunnel_flags_or(flags, flags, present); 193 + } 194 + 195 + static inline void ip_tunnel_clear_options_present(unsigned long *flags) 196 + { 197 + IP_TUNNEL_DECLARE_FLAGS(present) = { }; 198 + 199 + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present); 200 + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present); 201 + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present); 202 + __set_bit(IP_TUNNEL_GTP_OPT_BIT, present); 203 + __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present); 204 + 205 + __ipt_flag_op(bitmap_andnot, flags, flags, present); 206 + } 207 + 208 + static inline bool ip_tunnel_is_options_present(const unsigned long *flags) 209 + { 210 + IP_TUNNEL_DECLARE_FLAGS(present) = { }; 211 + 212 + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present); 213 + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present); 214 + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present); 215 + __set_bit(IP_TUNNEL_GTP_OPT_BIT, present); 216 + __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present); 217 + 218 + return ip_tunnel_flags_intersect(flags, present); 219 + } 220 + 221 + static inline bool ip_tunnel_flags_is_be16_compat(const unsigned long *flags) 222 + { 223 + IP_TUNNEL_DECLARE_FLAGS(supp) = { }; 224 + 225 + bitmap_set(supp, 0, BITS_PER_TYPE(__be16)); 226 + __set_bit(IP_TUNNEL_VTI_BIT, supp); 227 + 228 + return ip_tunnel_flags_subset(flags, supp); 229 + } 230 + 231 + static inline void 
ip_tunnel_flags_from_be16(unsigned long *dst, __be16 flags) 232 + { 233 + ip_tunnel_flags_zero(dst); 234 + 235 + bitmap_write(dst, be16_to_cpu(flags), 0, BITS_PER_TYPE(__be16)); 236 + __assign_bit(IP_TUNNEL_VTI_BIT, dst, flags & VTI_ISVTI); 237 + } 238 + 239 + static inline __be16 ip_tunnel_flags_to_be16(const unsigned long *flags) 240 + { 241 + __be16 ret; 242 + 243 + ret = cpu_to_be16(bitmap_read(flags, 0, BITS_PER_TYPE(__be16))); 244 + if (test_bit(IP_TUNNEL_VTI_BIT, flags)) 245 + ret |= VTI_ISVTI; 246 + 247 + return ret; 248 + } 249 + 211 250 static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, 212 251 __be32 saddr, __be32 daddr, 213 252 u8 tos, u8 ttl, __be32 label, 214 253 __be16 tp_src, __be16 tp_dst, 215 - __be64 tun_id, __be16 tun_flags) 254 + __be64 tun_id, 255 + const unsigned long *tun_flags) 216 256 { 217 257 key->tun_id = tun_id; 218 258 key->u.ipv4.src = saddr; ··· 291 193 key->tos = tos; 292 194 key->ttl = ttl; 293 195 key->label = label; 294 - key->tun_flags = tun_flags; 196 + ip_tunnel_flags_copy(key->tun_flags, tun_flags); 295 197 296 198 /* For the tunnel types on the top of IPsec, the tp_src and tp_dst of 297 199 * the upper tunnel are used. 
··· 312 214 { 313 215 if (skb->mark) 314 216 return false; 315 - if (!info) 316 - return true; 317 - if (info->key.tun_flags & TUNNEL_NOCACHE) 318 - return false; 319 217 320 - return true; 218 + return !info || !test_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags); 321 219 } 322 220 323 221 static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info ··· 385 291 const struct iphdr *tnl_params, const u8 protocol); 386 292 void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, 387 293 const u8 proto, int tunnel_hlen); 388 - int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); 294 + int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, 295 + int cmd); 296 + bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp, 297 + const void __user *data); 298 + bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp); 389 299 int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr, 390 300 void __user *data, int cmd); 391 301 int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); 392 302 int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); 393 303 394 304 struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, 395 - int link, __be16 flags, 305 + int link, const unsigned long *flags, 396 306 __be32 remote, __be32 local, 397 307 __be32 key); 398 308 ··· 405 307 const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst, 406 308 bool log_ecn_error); 407 309 int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], 408 - struct ip_tunnel_parm *p, __u32 fwmark); 310 + struct ip_tunnel_parm_kern *p, __u32 fwmark); 409 311 int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], 410 - struct ip_tunnel_parm *p, __u32 fwmark); 312 + struct ip_tunnel_parm_kern *p, __u32 fwmark); 411 313 void ip_tunnel_setup(struct net_device *dev, unsigned int net_id); 412 314 413 315 bool ip_tunnel_netlink_encap_parms(struct 
nlattr *data[], 414 316 struct ip_tunnel_encap *encap); 415 317 416 318 void ip_tunnel_netlink_parms(struct nlattr *data[], 417 - struct ip_tunnel_parm *parms); 319 + struct ip_tunnel_parm_kern *parms); 418 320 419 321 extern const struct header_ops ip_tunnel_header_ops; 420 322 __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb); ··· 612 514 613 515 static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, 614 516 const void *from, int len, 615 - __be16 flags) 517 + const unsigned long *flags) 616 518 { 617 519 info->options_len = len; 618 520 if (len > 0) { 619 521 memcpy(ip_tunnel_info_opts(info), from, len); 620 - info->key.tun_flags |= flags; 522 + ip_tunnel_flags_or(info->key.tun_flags, info->key.tun_flags, 523 + flags); 621 524 } 622 525 } 623 526 ··· 662 563 663 564 static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, 664 565 const void *from, int len, 665 - __be16 flags) 566 + const unsigned long *flags) 666 567 { 667 568 info->options_len = 0; 668 569 }
+90
include/net/pfcp.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _PFCP_H_ 3 + #define _PFCP_H_ 4 + 5 + #include <uapi/linux/if_ether.h> 6 + #include <net/dst_metadata.h> 7 + #include <linux/netdevice.h> 8 + #include <uapi/linux/ipv6.h> 9 + #include <net/udp_tunnel.h> 10 + #include <uapi/linux/udp.h> 11 + #include <uapi/linux/ip.h> 12 + #include <linux/string.h> 13 + #include <linux/types.h> 14 + #include <linux/bits.h> 15 + 16 + #define PFCP_PORT 8805 17 + 18 + /* PFCP protocol header */ 19 + struct pfcphdr { 20 + u8 flags; 21 + u8 message_type; 22 + __be16 message_length; 23 + }; 24 + 25 + /* PFCP header flags */ 26 + #define PFCP_SEID_FLAG BIT(0) 27 + #define PFCP_MP_FLAG BIT(1) 28 + 29 + #define PFCP_VERSION_MASK GENMASK(4, 0) 30 + 31 + #define PFCP_HLEN (sizeof(struct udphdr) + sizeof(struct pfcphdr)) 32 + 33 + /* PFCP node related messages */ 34 + struct pfcphdr_node { 35 + u8 seq_number[3]; 36 + u8 reserved; 37 + }; 38 + 39 + /* PFCP session related messages */ 40 + struct pfcphdr_session { 41 + __be64 seid; 42 + u8 seq_number[3]; 43 + #ifdef __LITTLE_ENDIAN_BITFIELD 44 + u8 message_priority:4, 45 + reserved:4; 46 + #elif defined(__BIG_ENDIAN_BITFIELD) 47 + u8 reserved:4, 48 + message_priority:4; 49 + #else 50 + #error "Please fix <asm/byteorder.h>" 51 + #endif 52 + }; 53 + 54 + struct pfcp_metadata { 55 + u8 type; 56 + __be64 seid; 57 + } __packed; 58 + 59 + enum { 60 + PFCP_TYPE_NODE = 0, 61 + PFCP_TYPE_SESSION = 1, 62 + }; 63 + 64 + #define PFCP_HEADROOM (sizeof(struct iphdr) + sizeof(struct udphdr) + \ 65 + sizeof(struct pfcphdr) + sizeof(struct ethhdr)) 66 + #define PFCP6_HEADROOM (sizeof(struct ipv6hdr) + sizeof(struct udphdr) + \ 67 + sizeof(struct pfcphdr) + sizeof(struct ethhdr)) 68 + 69 + static inline struct pfcphdr *pfcp_hdr(struct sk_buff *skb) 70 + { 71 + return (struct pfcphdr *)(udp_hdr(skb) + 1); 72 + } 73 + 74 + static inline struct pfcphdr_node *pfcp_hdr_node(struct sk_buff *skb) 75 + { 76 + return (struct pfcphdr_node *)(pfcp_hdr(skb) + 1); 77 + } 
78 + 79 + static inline struct pfcphdr_session *pfcp_hdr_session(struct sk_buff *skb) 80 + { 81 + return (struct pfcphdr_session *)(pfcp_hdr(skb) + 1); 82 + } 83 + 84 + static inline bool netif_is_pfcp(const struct net_device *dev) 85 + { 86 + return dev->rtnl_link_ops && 87 + !strcmp(dev->rtnl_link_ops->kind, "pfcp"); 88 + } 89 + 90 + #endif
+2 -2
include/net/udp_tunnel.h
··· 179 179 struct dst_cache *dst_cache); 180 180 181 181 struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, 182 - __be16 flags, __be64 tunnel_id, 183 - int md_size); 182 + const unsigned long *flags, 183 + __be64 tunnel_id, int md_size); 184 184 185 185 #ifdef CONFIG_INET 186 186 static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
+36
include/uapi/linux/if_tunnel.h
··· 161 161 162 162 #define IFLA_VTI_MAX (__IFLA_VTI_MAX - 1) 163 163 164 + #ifndef __KERNEL__ 165 + /* Historically, tunnel flags have been defined as __be16 and now there are 166 + * no free bits left. It is strongly advised to switch the already existing 167 + * userspace code to the new *_BIT definitions from down below, as __be16 168 + * can't be simply cast to a wider type on LE systems. All new flags and 169 + * code must use *_BIT only. 170 + */ 171 + 164 172 #define TUNNEL_CSUM __cpu_to_be16(0x01) 165 173 #define TUNNEL_ROUTING __cpu_to_be16(0x02) 166 174 #define TUNNEL_KEY __cpu_to_be16(0x04) ··· 189 181 #define TUNNEL_OPTIONS_PRESENT \ 190 182 (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT | \ 191 183 TUNNEL_GTP_OPT) 184 + #endif 185 + 186 + enum { 187 + IP_TUNNEL_CSUM_BIT = 0U, 188 + IP_TUNNEL_ROUTING_BIT, 189 + IP_TUNNEL_KEY_BIT, 190 + IP_TUNNEL_SEQ_BIT, 191 + IP_TUNNEL_STRICT_BIT, 192 + IP_TUNNEL_REC_BIT, 193 + IP_TUNNEL_VERSION_BIT, 194 + IP_TUNNEL_NO_KEY_BIT, 195 + IP_TUNNEL_DONT_FRAGMENT_BIT, 196 + IP_TUNNEL_OAM_BIT, 197 + IP_TUNNEL_CRIT_OPT_BIT, 198 + IP_TUNNEL_GENEVE_OPT_BIT, /* OPTIONS_PRESENT */ 199 + IP_TUNNEL_VXLAN_OPT_BIT, /* OPTIONS_PRESENT */ 200 + IP_TUNNEL_NOCACHE_BIT, 201 + IP_TUNNEL_ERSPAN_OPT_BIT, /* OPTIONS_PRESENT */ 202 + IP_TUNNEL_GTP_OPT_BIT, /* OPTIONS_PRESENT */ 203 + 204 + IP_TUNNEL_VTI_BIT, 205 + IP_TUNNEL_SIT_ISATAP_BIT = IP_TUNNEL_VTI_BIT, 206 + 207 + /* Flags starting from here are not available via the old UAPI */ 208 + IP_TUNNEL_PFCP_OPT_BIT, /* OPTIONS_PRESENT */ 209 + 210 + __IP_TUNNEL_FLAG_NUM, 211 + }; 192 212 193 213 #endif /* _UAPI_IF_TUNNEL_H_ */
+14
include/uapi/linux/pkt_cls.h
··· 587 587 * TCA_FLOWER_KEY_ENC_OPT_GTP_ 588 588 * attributes 589 589 */ 590 + TCA_FLOWER_KEY_ENC_OPTS_PFCP, /* Nested 591 + * TCA_FLOWER_KEY_ENC_OPT_PFCP_ 592 + * attributes 593 + */ 590 594 __TCA_FLOWER_KEY_ENC_OPTS_MAX, 591 595 }; 592 596 ··· 639 635 640 636 #define TCA_FLOWER_KEY_ENC_OPT_GTP_MAX \ 641 637 (__TCA_FLOWER_KEY_ENC_OPT_GTP_MAX - 1) 638 + 639 + enum { 640 + TCA_FLOWER_KEY_ENC_OPT_PFCP_UNSPEC, 641 + TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE, /* u8 */ 642 + TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID, /* be64 */ 643 + __TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX, 644 + }; 645 + 646 + #define TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX \ 647 + (__TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX - 1) 642 648 643 649 enum { 644 650 TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC,
-2
kernel/trace/trace_probe.c
··· 1180 1180 return ret; 1181 1181 } 1182 1182 1183 - #define BYTES_TO_BITS(nb) ((BITS_PER_LONG * (nb)) / sizeof(long)) 1184 - 1185 1183 /* Bitfield type needs to be parsed into a fetch function */ 1186 1184 static int __parse_bitfield_probe_arg(const char *bf, 1187 1185 const struct fetch_type *t,
-2
lib/math/prime_numbers.c
··· 6 6 #include <linux/prime_numbers.h> 7 7 #include <linux/slab.h> 8 8 9 - #define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long)) 10 - 11 9 struct primes { 12 10 struct rcu_head rcu; 13 11 unsigned long last, sz;
+185 -18
lib/test_bitmap.c
··· 60 60 }; 61 61 62 62 static bool __init 63 - __check_eq_uint(const char *srcfile, unsigned int line, 64 - const unsigned int exp_uint, unsigned int x) 63 + __check_eq_ulong(const char *srcfile, unsigned int line, 64 + const unsigned long exp_ulong, unsigned long x) 65 65 { 66 - if (exp_uint != x) { 67 - pr_err("[%s:%u] expected %u, got %u\n", 68 - srcfile, line, exp_uint, x); 66 + if (exp_ulong != x) { 67 + pr_err("[%s:%u] expected %lu, got %lu\n", 68 + srcfile, line, exp_ulong, x); 69 69 return false; 70 70 } 71 71 return true; 72 72 } 73 - 74 73 75 74 static bool __init 76 75 __check_eq_bitmap(const char *srcfile, unsigned int line, ··· 184 185 result; \ 185 186 }) 186 187 187 - #define expect_eq_uint(...) __expect_eq(uint, ##__VA_ARGS__) 188 + #define expect_eq_ulong(...) __expect_eq(ulong, ##__VA_ARGS__) 189 + #define expect_eq_uint(x, y) expect_eq_ulong((unsigned int)(x), (unsigned int)(y)) 188 190 #define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__) 189 191 #define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__) 190 192 #define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__) ··· 548 548 } 549 549 550 550 if (ptest.flags & PARSE_TIME) 551 - pr_err("parselist: %d: input is '%s' OK, Time: %llu\n", 551 + pr_info("parselist: %d: input is '%s' OK, Time: %llu\n", 552 552 i, ptest.in, time); 553 553 554 554 #undef ptest ··· 587 587 goto out; 588 588 } 589 589 590 - pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time); 590 + pr_info("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time); 591 591 out: 592 592 kfree(buf); 593 593 kfree(bmap); ··· 665 665 } 666 666 667 667 if (test.flags & PARSE_TIME) 668 - pr_err("parse: %d: input is '%s' OK, Time: %llu\n", 668 + pr_info("parse: %d: input is '%s' OK, Time: %llu\n", 669 669 i, test.in, time); 670 670 } 671 671 } ··· 1245 1245 * in runtime. 1246 1246 */ 1247 1247 1248 - /* 1249 - * Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }`. 
1250 - * Clang on s390 optimizes bitops at compile-time as intended, but at 1251 - * the same time stops treating @bitmap and @bitopvar as compile-time 1252 - * constants after regular test_bit() is executed, thus triggering the 1253 - * build bugs below. So, call const_test_bit() there directly until 1254 - * the compiler is fixed. 1255 - */ 1248 + /* Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }` */ 1256 1249 bitmap_clear(bitmap, 0, BITS_PER_LONG); 1257 1250 if (!test_bit(7, bitmap)) 1258 1251 bitmap_set(bitmap, 5, 2); ··· 1277 1284 /* ~BIT(25) */ 1278 1285 BUILD_BUG_ON(!__builtin_constant_p(~var)); 1279 1286 BUILD_BUG_ON(~var != ~BIT(25)); 1287 + 1288 + /* ~BIT(25) | BIT(25) == ~0UL */ 1289 + bitmap_complement(&var, &var, BITS_PER_LONG); 1290 + __assign_bit(25, &var, true); 1291 + 1292 + /* !(~(~0UL)) == 1 */ 1293 + res = bitmap_full(&var, BITS_PER_LONG); 1294 + BUILD_BUG_ON(!__builtin_constant_p(res)); 1295 + BUILD_BUG_ON(!res); 1280 1296 } 1297 + 1298 + /* 1299 + * Test bitmap should be big enough to include the cases when start is not in 1300 + * the first word, and start+nbits lands in the following word. 1301 + */ 1302 + #define TEST_BIT_LEN (1000) 1303 + 1304 + /* 1305 + * Helper function to test bitmap_write() overwriting the chosen byte pattern. 1306 + */ 1307 + static void __init test_bitmap_write_helper(const char *pattern) 1308 + { 1309 + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); 1310 + DECLARE_BITMAP(exp_bitmap, TEST_BIT_LEN); 1311 + DECLARE_BITMAP(pat_bitmap, TEST_BIT_LEN); 1312 + unsigned long w, r, bit; 1313 + int i, n, nbits; 1314 + 1315 + /* 1316 + * Only parse the pattern once and store the result in the intermediate 1317 + * bitmap. 1318 + */ 1319 + bitmap_parselist(pattern, pat_bitmap, TEST_BIT_LEN); 1320 + 1321 + /* 1322 + * Check that writing a single bit does not accidentally touch the 1323 + * adjacent bits. 
1324 + */ 1325 + for (i = 0; i < TEST_BIT_LEN; i++) { 1326 + bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN); 1327 + bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN); 1328 + for (bit = 0; bit <= 1; bit++) { 1329 + bitmap_write(bitmap, bit, i, 1); 1330 + __assign_bit(i, exp_bitmap, bit); 1331 + expect_eq_bitmap(exp_bitmap, bitmap, 1332 + TEST_BIT_LEN); 1333 + } 1334 + } 1335 + 1336 + /* Ensure writing 0 bits does not change anything. */ 1337 + bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN); 1338 + bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN); 1339 + for (i = 0; i < TEST_BIT_LEN; i++) { 1340 + bitmap_write(bitmap, ~0UL, i, 0); 1341 + expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN); 1342 + } 1343 + 1344 + for (nbits = BITS_PER_LONG; nbits >= 1; nbits--) { 1345 + w = IS_ENABLED(CONFIG_64BIT) ? 0xdeadbeefdeadbeefUL 1346 + : 0xdeadbeefUL; 1347 + w >>= (BITS_PER_LONG - nbits); 1348 + for (i = 0; i <= TEST_BIT_LEN - nbits; i++) { 1349 + bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN); 1350 + bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN); 1351 + for (n = 0; n < nbits; n++) 1352 + __assign_bit(i + n, exp_bitmap, w & BIT(n)); 1353 + bitmap_write(bitmap, w, i, nbits); 1354 + expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN); 1355 + r = bitmap_read(bitmap, i, nbits); 1356 + expect_eq_ulong(r, w); 1357 + } 1358 + } 1359 + } 1360 + 1361 + static void __init test_bitmap_read_write(void) 1362 + { 1363 + unsigned char *pattern[3] = {"", "all:1/2", "all"}; 1364 + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); 1365 + unsigned long zero_bits = 0, bits_per_long = BITS_PER_LONG; 1366 + unsigned long val; 1367 + int i, pi; 1368 + 1369 + /* 1370 + * Reading/writing zero bits should not crash the kernel. 1371 + * READ_ONCE() prevents constant folding. 1372 + */ 1373 + bitmap_write(NULL, 0, 0, READ_ONCE(zero_bits)); 1374 + /* Return value of bitmap_read() is undefined here. 
*/ 1375 + bitmap_read(NULL, 0, READ_ONCE(zero_bits)); 1376 + 1377 + /* 1378 + * Reading/writing more than BITS_PER_LONG bits should not crash the 1379 + * kernel. READ_ONCE() prevents constant folding. 1380 + */ 1381 + bitmap_write(NULL, 0, 0, READ_ONCE(bits_per_long) + 1); 1382 + /* Return value of bitmap_read() is undefined here. */ 1383 + bitmap_read(NULL, 0, READ_ONCE(bits_per_long) + 1); 1384 + 1385 + /* 1386 + * Ensure that bitmap_read() reads the same value that was previously 1387 + * written, and two consequent values are correctly merged. 1388 + * The resulting bit pattern is asymmetric to rule out possible issues 1389 + * with bit numeration order. 1390 + */ 1391 + for (i = 0; i < TEST_BIT_LEN - 7; i++) { 1392 + bitmap_zero(bitmap, TEST_BIT_LEN); 1393 + 1394 + bitmap_write(bitmap, 0b10101UL, i, 5); 1395 + val = bitmap_read(bitmap, i, 5); 1396 + expect_eq_ulong(0b10101UL, val); 1397 + 1398 + bitmap_write(bitmap, 0b101UL, i + 5, 3); 1399 + val = bitmap_read(bitmap, i + 5, 3); 1400 + expect_eq_ulong(0b101UL, val); 1401 + 1402 + val = bitmap_read(bitmap, i, 8); 1403 + expect_eq_ulong(0b10110101UL, val); 1404 + } 1405 + 1406 + for (pi = 0; pi < ARRAY_SIZE(pattern); pi++) 1407 + test_bitmap_write_helper(pattern[pi]); 1408 + } 1409 + 1410 + static void __init test_bitmap_read_perf(void) 1411 + { 1412 + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); 1413 + unsigned int cnt, nbits, i; 1414 + unsigned long val; 1415 + ktime_t time; 1416 + 1417 + bitmap_fill(bitmap, TEST_BIT_LEN); 1418 + time = ktime_get(); 1419 + for (cnt = 0; cnt < 5; cnt++) { 1420 + for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) { 1421 + for (i = 0; i < TEST_BIT_LEN; i++) { 1422 + if (i + nbits > TEST_BIT_LEN) 1423 + break; 1424 + /* 1425 + * Prevent the compiler from optimizing away the 1426 + * bitmap_read() by using its value. 
1427 + */ 1428 + WRITE_ONCE(val, bitmap_read(bitmap, i, nbits)); 1429 + } 1430 + } 1431 + } 1432 + time = ktime_get() - time; 1433 + pr_info("Time spent in %s:\t%llu\n", __func__, time); 1434 + } 1435 + 1436 + static void __init test_bitmap_write_perf(void) 1437 + { 1438 + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); 1439 + unsigned int cnt, nbits, i; 1440 + unsigned long val = 0xfeedface; 1441 + ktime_t time; 1442 + 1443 + bitmap_zero(bitmap, TEST_BIT_LEN); 1444 + time = ktime_get(); 1445 + for (cnt = 0; cnt < 5; cnt++) { 1446 + for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) { 1447 + for (i = 0; i < TEST_BIT_LEN; i++) { 1448 + if (i + nbits > TEST_BIT_LEN) 1449 + break; 1450 + bitmap_write(bitmap, val, i, nbits); 1451 + } 1452 + } 1453 + } 1454 + time = ktime_get() - time; 1455 + pr_info("Time spent in %s:\t%llu\n", __func__, time); 1456 + } 1457 + 1458 + #undef TEST_BIT_LEN 1281 1459 1282 1460 static void __init selftest(void) 1283 1461 { ··· 1467 1303 test_bitmap_cut(); 1468 1304 test_bitmap_print_buf(); 1469 1305 test_bitmap_const_eval(); 1306 + test_bitmap_read_write(); 1307 + test_bitmap_read_perf(); 1308 + test_bitmap_write_perf(); 1470 1309 1471 1310 test_find_nth_bit(); 1472 1311 test_for_each_set_bit();
+6 -3
net/bridge/br_vlan_tunnel.c
··· 65 65 { 66 66 struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst); 67 67 __be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id)); 68 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 68 69 int err; 69 70 70 71 if (metadata) 71 72 return -EEXIST; 72 73 73 - metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY, 74 - key, 0); 74 + __set_bit(IP_TUNNEL_KEY_BIT, flags); 75 + metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, flags, key, 0); 75 76 if (!metadata) 76 77 return -EINVAL; 77 78 ··· 186 185 int br_handle_egress_vlan_tunnel(struct sk_buff *skb, 187 186 struct net_bridge_vlan *vlan) 188 187 { 188 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 189 189 struct metadata_dst *tunnel_dst; 190 190 __be64 tunnel_id; 191 191 int err; ··· 204 202 return err; 205 203 206 204 if (BR_INPUT_SKB_CB(skb)->backup_nhid) { 207 - tunnel_dst = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY, 205 + __set_bit(IP_TUNNEL_KEY_BIT, flags); 206 + tunnel_dst = __ip_tun_set_dst(0, 0, 0, 0, 0, flags, 208 207 tunnel_id, 0); 209 208 if (!tunnel_dst) 210 209 return -ENOMEM;
+1 -1
net/core/Makefile
··· 41 41 obj-$(CONFIG_BPF_SYSCALL) += sock_map.o 42 42 obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o 43 43 obj-$(CONFIG_OF) += of_net.o 44 - obj-$(CONFIG_NET_TEST) += gso_test.o 44 + obj-$(CONFIG_NET_TEST) += net_test.o
+14 -12
net/core/filter.c
··· 4662 4662 to->tunnel_tos = info->key.tos; 4663 4663 to->tunnel_ttl = info->key.ttl; 4664 4664 if (flags & BPF_F_TUNINFO_FLAGS) 4665 - to->tunnel_flags = info->key.tun_flags; 4665 + to->tunnel_flags = ip_tunnel_flags_to_be16(info->key.tun_flags); 4666 4666 else 4667 4667 to->tunnel_ext = 0; 4668 4668 ··· 4705 4705 int err; 4706 4706 4707 4707 if (unlikely(!info || 4708 - !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) { 4708 + !ip_tunnel_is_options_present(info->key.tun_flags))) { 4709 4709 err = -ENOENT; 4710 4710 goto err_clear; 4711 4711 } ··· 4775 4775 memset(info, 0, sizeof(*info)); 4776 4776 info->mode = IP_TUNNEL_INFO_TX; 4777 4777 4778 - info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; 4779 - if (flags & BPF_F_DONT_FRAGMENT) 4780 - info->key.tun_flags |= TUNNEL_DONT_FRAGMENT; 4781 - if (flags & BPF_F_ZERO_CSUM_TX) 4782 - info->key.tun_flags &= ~TUNNEL_CSUM; 4783 - if (flags & BPF_F_SEQ_NUMBER) 4784 - info->key.tun_flags |= TUNNEL_SEQ; 4785 - if (flags & BPF_F_NO_TUNNEL_KEY) 4786 - info->key.tun_flags &= ~TUNNEL_KEY; 4778 + __set_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags); 4779 + __assign_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, info->key.tun_flags, 4780 + flags & BPF_F_DONT_FRAGMENT); 4781 + __assign_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags, 4782 + !(flags & BPF_F_ZERO_CSUM_TX)); 4783 + __assign_bit(IP_TUNNEL_SEQ_BIT, info->key.tun_flags, 4784 + flags & BPF_F_SEQ_NUMBER); 4785 + __assign_bit(IP_TUNNEL_KEY_BIT, info->key.tun_flags, 4786 + !(flags & BPF_F_NO_TUNNEL_KEY)); 4787 4787 4788 4788 info->key.tun_id = cpu_to_be64(from->tunnel_id); 4789 4789 info->key.tos = from->tunnel_tos; ··· 4821 4821 { 4822 4822 struct ip_tunnel_info *info = skb_tunnel_info(skb); 4823 4823 const struct metadata_dst *md = this_cpu_ptr(md_dst); 4824 + IP_TUNNEL_DECLARE_FLAGS(present) = { }; 4824 4825 4825 4826 if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1)))) 4826 4827 return -EINVAL; 4827 4828 if (unlikely(size > IP_TUNNEL_OPTS_MAX)) 4828 
4829 return -ENOMEM; 4829 4830 4830 - ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT); 4831 + ip_tunnel_set_options_present(present); 4832 + ip_tunnel_info_opts_set(info, from, size, present); 4831 4833 4832 4834 return 0; 4833 4835 }
+14 -6
net/core/flow_dissector.c
··· 455 455 456 456 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) { 457 457 struct flow_dissector_key_enc_opts *enc_opt; 458 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 459 + u32 val; 458 460 459 461 enc_opt = skb_flow_dissector_target(flow_dissector, 460 462 FLOW_DISSECTOR_KEY_ENC_OPTS, 461 463 target_container); 462 464 463 - if (info->options_len) { 464 - enc_opt->len = info->options_len; 465 - ip_tunnel_info_opts_get(enc_opt->data, info); 466 - enc_opt->dst_opt_type = info->key.tun_flags & 467 - TUNNEL_OPTIONS_PRESENT; 468 - } 465 + if (!info->options_len) 466 + return; 467 + 468 + enc_opt->len = info->options_len; 469 + ip_tunnel_info_opts_get(enc_opt->data, info); 470 + 471 + ip_tunnel_set_options_present(flags); 472 + ip_tunnel_flags_and(flags, info->key.tun_flags, flags); 473 + 474 + val = find_next_bit(flags, __IP_TUNNEL_FLAG_NUM, 475 + IP_TUNNEL_GENEVE_OPT_BIT); 476 + enc_opt->dst_opt_type = val < __IP_TUNNEL_FLAG_NUM ? val : 0; 469 477 } 470 478 } 471 479 EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
+121 -8
net/core/gso_test.c net/core/net_test.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 3 3 #include <kunit/test.h> 4 + 5 + /* GSO */ 6 + 4 7 #include <linux/skbuff.h> 5 8 6 9 static const char hdr[] = "abcdefgh"; ··· 261 258 consume_skb(skb); 262 259 } 263 260 264 - static struct kunit_case gso_test_cases[] = { 261 + /* IP tunnel flags */ 262 + 263 + #include <net/ip_tunnels.h> 264 + 265 + struct ip_tunnel_flags_test { 266 + const char *name; 267 + 268 + const u16 *src_bits; 269 + const u16 *exp_bits; 270 + u8 src_num; 271 + u8 exp_num; 272 + 273 + __be16 exp_val; 274 + bool exp_comp; 275 + }; 276 + 277 + #define IP_TUNNEL_FLAGS_TEST(n, src, comp, eval, exp) { \ 278 + .name = (n), \ 279 + .src_bits = (src), \ 280 + .src_num = ARRAY_SIZE(src), \ 281 + .exp_comp = (comp), \ 282 + .exp_val = (eval), \ 283 + .exp_bits = (exp), \ 284 + .exp_num = ARRAY_SIZE(exp), \ 285 + } 286 + 287 + /* These are __be16-compatible and can be compared as is */ 288 + static const u16 ip_tunnel_flags_1[] = { 289 + IP_TUNNEL_KEY_BIT, 290 + IP_TUNNEL_STRICT_BIT, 291 + IP_TUNNEL_ERSPAN_OPT_BIT, 292 + }; 293 + 294 + /* Due to the previous flags design limitation, setting either 295 + * ``IP_TUNNEL_CSUM_BIT`` (on Big Endian) or ``IP_TUNNEL_DONT_FRAGMENT_BIT`` 296 + * (on Little) also sets VTI/ISATAP bit. In the bitmap implementation, they 297 + * correspond to ``BIT(16)``, which is bigger than ``U16_MAX``, but still is 298 + * backward-compatible. 
299 + */ 300 + #ifdef __LITTLE_ENDIAN 301 + #define IP_TUNNEL_CONFLICT_BIT IP_TUNNEL_DONT_FRAGMENT_BIT 302 + #else 303 + #define IP_TUNNEL_CONFLICT_BIT IP_TUNNEL_CSUM_BIT 304 + #endif 305 + 306 + static const u16 ip_tunnel_flags_2_src[] = { 307 + IP_TUNNEL_CONFLICT_BIT, 308 + }; 309 + 310 + static const u16 ip_tunnel_flags_2_exp[] = { 311 + IP_TUNNEL_CONFLICT_BIT, 312 + IP_TUNNEL_SIT_ISATAP_BIT, 313 + }; 314 + 315 + /* Bits 17 and higher are not compatible with __be16 flags */ 316 + static const u16 ip_tunnel_flags_3_src[] = { 317 + IP_TUNNEL_VXLAN_OPT_BIT, 318 + 17, 319 + 18, 320 + 20, 321 + }; 322 + 323 + static const u16 ip_tunnel_flags_3_exp[] = { 324 + IP_TUNNEL_VXLAN_OPT_BIT, 325 + }; 326 + 327 + static const struct ip_tunnel_flags_test ip_tunnel_flags_test[] = { 328 + IP_TUNNEL_FLAGS_TEST("compat", ip_tunnel_flags_1, true, 329 + cpu_to_be16(BIT(IP_TUNNEL_KEY_BIT) | 330 + BIT(IP_TUNNEL_STRICT_BIT) | 331 + BIT(IP_TUNNEL_ERSPAN_OPT_BIT)), 332 + ip_tunnel_flags_1), 333 + IP_TUNNEL_FLAGS_TEST("conflict", ip_tunnel_flags_2_src, true, 334 + VTI_ISVTI, ip_tunnel_flags_2_exp), 335 + IP_TUNNEL_FLAGS_TEST("new", ip_tunnel_flags_3_src, false, 336 + cpu_to_be16(BIT(IP_TUNNEL_VXLAN_OPT_BIT)), 337 + ip_tunnel_flags_3_exp), 338 + }; 339 + 340 + static void 341 + ip_tunnel_flags_test_case_to_desc(const struct ip_tunnel_flags_test *t, 342 + char *desc) 343 + { 344 + strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE); 345 + } 346 + KUNIT_ARRAY_PARAM(ip_tunnel_flags_test, ip_tunnel_flags_test, 347 + ip_tunnel_flags_test_case_to_desc); 348 + 349 + static void ip_tunnel_flags_test_run(struct kunit *test) 350 + { 351 + const struct ip_tunnel_flags_test *t = test->param_value; 352 + IP_TUNNEL_DECLARE_FLAGS(src) = { }; 353 + IP_TUNNEL_DECLARE_FLAGS(exp) = { }; 354 + IP_TUNNEL_DECLARE_FLAGS(out); 355 + 356 + for (u32 j = 0; j < t->src_num; j++) 357 + __set_bit(t->src_bits[j], src); 358 + for (u32 j = 0; j < t->exp_num; j++) 359 + __set_bit(t->exp_bits[j], exp); 360 + 361 + 
KUNIT_ASSERT_EQ(test, t->exp_comp, 362 + ip_tunnel_flags_is_be16_compat(src)); 363 + KUNIT_ASSERT_EQ(test, (__force u16)t->exp_val, 364 + (__force u16)ip_tunnel_flags_to_be16(src)); 365 + 366 + ip_tunnel_flags_from_be16(out, t->exp_val); 367 + KUNIT_ASSERT_TRUE(test, __ipt_flag_op(bitmap_equal, exp, out)); 368 + } 369 + 370 + static struct kunit_case net_test_cases[] = { 265 371 KUNIT_CASE_PARAM(gso_test_func, gso_test_gen_params), 266 - {} 372 + KUNIT_CASE_PARAM(ip_tunnel_flags_test_run, 373 + ip_tunnel_flags_test_gen_params), 374 + { }, 267 375 }; 268 376 269 - static struct kunit_suite gso_test_suite = { 270 - .name = "net_core_gso", 271 - .test_cases = gso_test_cases, 377 + static struct kunit_suite net_test_suite = { 378 + .name = "net_core", 379 + .test_cases = net_test_cases, 272 380 }; 381 + kunit_test_suite(net_test_suite); 273 382 274 - kunit_test_suite(gso_test_suite); 275 - 383 + MODULE_DESCRIPTION("KUnit tests for networking core"); 276 384 MODULE_LICENSE("GPL"); 277 - MODULE_DESCRIPTION("KUnit tests for segmentation offload");
+1 -1
net/ipv4/fou_bpf.c
··· 64 64 info->encap.type = TUNNEL_ENCAP_NONE; 65 65 } 66 66 67 - if (info->key.tun_flags & TUNNEL_CSUM) 67 + if (test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags)) 68 68 info->encap.flags |= TUNNEL_ENCAP_FLAG_CSUM; 69 69 70 70 info->encap.sport = encap->sport;
+1 -1
net/ipv4/gre_demux.c
··· 73 73 if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) 74 74 return -EINVAL; 75 75 76 - tpi->flags = gre_flags_to_tnl_flags(greh->flags); 76 + gre_flags_to_tnl_flags(tpi->flags, greh->flags); 77 77 hdr_len = gre_calc_hlen(tpi->flags); 78 78 79 79 if (!pskb_may_pull(skb, nhs + hdr_len))
+87 -57
net/ipv4/ip_gre.c
··· 265 265 struct net *net = dev_net(skb->dev); 266 266 struct metadata_dst *tun_dst = NULL; 267 267 struct erspan_base_hdr *ershdr; 268 + IP_TUNNEL_DECLARE_FLAGS(flags); 268 269 struct ip_tunnel_net *itn; 269 270 struct ip_tunnel *tunnel; 270 271 const struct iphdr *iph; ··· 273 272 int ver; 274 273 int len; 275 274 275 + ip_tunnel_flags_copy(flags, tpi->flags); 276 + 276 277 itn = net_generic(net, erspan_net_id); 277 278 iph = ip_hdr(skb); 278 279 if (is_erspan_type1(gre_hdr_len)) { 279 280 ver = 0; 280 - tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, 281 - tpi->flags | TUNNEL_NO_KEY, 281 + __set_bit(IP_TUNNEL_NO_KEY_BIT, flags); 282 + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags, 282 283 iph->saddr, iph->daddr, 0); 283 284 } else { 284 285 ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len); 285 286 ver = ershdr->ver; 286 - tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, 287 - tpi->flags | TUNNEL_KEY, 287 + __set_bit(IP_TUNNEL_KEY_BIT, flags); 288 + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags, 288 289 iph->saddr, iph->daddr, tpi->key); 289 290 } 290 291 ··· 310 307 struct ip_tunnel_info *info; 311 308 unsigned char *gh; 312 309 __be64 tun_id; 313 - __be16 flags; 314 310 315 - tpi->flags |= TUNNEL_KEY; 316 - flags = tpi->flags; 311 + __set_bit(IP_TUNNEL_KEY_BIT, tpi->flags); 312 + ip_tunnel_flags_copy(flags, tpi->flags); 317 313 tun_id = key32_to_tunnel_id(tpi->key); 318 314 319 315 tun_dst = ip_tun_rx_dst(skb, flags, ··· 335 333 ERSPAN_V2_MDSIZE); 336 334 337 335 info = &tun_dst->u.tun_info; 338 - info->key.tun_flags |= TUNNEL_ERSPAN_OPT; 336 + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, 337 + info->key.tun_flags); 339 338 info->options_len = sizeof(*md); 340 339 } 341 340 ··· 379 376 380 377 tnl_params = &tunnel->parms.iph; 381 378 if (tunnel->collect_md || tnl_params->daddr == 0) { 382 - __be16 flags; 379 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 383 380 __be64 tun_id; 384 381 385 - flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY); 
382 + __set_bit(IP_TUNNEL_CSUM_BIT, flags); 383 + __set_bit(IP_TUNNEL_KEY_BIT, flags); 384 + ip_tunnel_flags_and(flags, tpi->flags, flags); 385 + 386 386 tun_id = key32_to_tunnel_id(tpi->key); 387 387 tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0); 388 388 if (!tun_dst) ··· 465 459 __be16 proto) 466 460 { 467 461 struct ip_tunnel *tunnel = netdev_priv(dev); 468 - __be16 flags = tunnel->parms.o_flags; 462 + IP_TUNNEL_DECLARE_FLAGS(flags); 463 + 464 + ip_tunnel_flags_copy(flags, tunnel->parms.o_flags); 469 465 470 466 /* Push GRE header. */ 471 467 gre_build_header(skb, tunnel->tun_hlen, 472 468 flags, proto, tunnel->parms.o_key, 473 - (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0); 469 + test_bit(IP_TUNNEL_SEQ_BIT, flags) ? 470 + htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0); 474 471 475 472 ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); 476 473 } ··· 487 478 __be16 proto) 488 479 { 489 480 struct ip_tunnel *tunnel = netdev_priv(dev); 481 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 490 482 struct ip_tunnel_info *tun_info; 491 483 const struct ip_tunnel_key *key; 492 484 int tunnel_hlen; 493 - __be16 flags; 494 485 495 486 tun_info = skb_tunnel_info(skb); 496 487 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) || ··· 504 495 goto err_free_skb; 505 496 506 497 /* Push Tunnel header. */ 507 - if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM))) 498 + if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT, 499 + tunnel->parms.o_flags))) 508 500 goto err_free_skb; 509 501 510 - flags = tun_info->key.tun_flags & 511 - (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ); 502 + __set_bit(IP_TUNNEL_CSUM_BIT, flags); 503 + __set_bit(IP_TUNNEL_KEY_BIT, flags); 504 + __set_bit(IP_TUNNEL_SEQ_BIT, flags); 505 + ip_tunnel_flags_and(flags, tun_info->key.tun_flags, flags); 506 + 512 507 gre_build_header(skb, tunnel_hlen, flags, proto, 513 508 tunnel_id_to_key32(tun_info->key.tun_id), 514 - (flags & TUNNEL_SEQ) ? 
htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0); 509 + test_bit(IP_TUNNEL_SEQ_BIT, flags) ? 510 + htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0); 515 511 516 512 ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen); 517 513 ··· 530 516 static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) 531 517 { 532 518 struct ip_tunnel *tunnel = netdev_priv(dev); 519 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 533 520 struct ip_tunnel_info *tun_info; 534 521 const struct ip_tunnel_key *key; 535 522 struct erspan_metadata *md; ··· 546 531 goto err_free_skb; 547 532 548 533 key = &tun_info->key; 549 - if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)) 534 + if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags)) 550 535 goto err_free_skb; 551 536 if (tun_info->options_len < sizeof(*md)) 552 537 goto err_free_skb; ··· 599 584 goto err_free_skb; 600 585 } 601 586 602 - gre_build_header(skb, 8, TUNNEL_SEQ, 603 - proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno))); 587 + __set_bit(IP_TUNNEL_SEQ_BIT, flags); 588 + gre_build_header(skb, 8, flags, proto, 0, 589 + htonl(atomic_fetch_inc(&tunnel->o_seqno))); 604 590 605 591 ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen); 606 592 ··· 675 659 tnl_params = &tunnel->parms.iph; 676 660 } 677 661 678 - if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM))) 662 + if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT, 663 + tunnel->parms.o_flags))) 679 664 goto free_skb; 680 665 681 666 __gre_xmit(skb, dev, tnl_params, skb->protocol); ··· 718 701 /* Push ERSPAN header */ 719 702 if (tunnel->erspan_ver == 0) { 720 703 proto = htons(ETH_P_ERSPAN); 721 - tunnel->parms.o_flags &= ~TUNNEL_SEQ; 704 + __clear_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags); 722 705 } else if (tunnel->erspan_ver == 1) { 723 706 erspan_build_header(skb, ntohl(tunnel->parms.o_key), 724 707 tunnel->index, ··· 733 716 goto free_skb; 734 717 } 735 718 736 - tunnel->parms.o_flags &= ~TUNNEL_KEY; 719 + 
__clear_bit(IP_TUNNEL_KEY_BIT, tunnel->parms.o_flags); 737 720 __gre_xmit(skb, dev, &tunnel->parms.iph, proto); 738 721 return NETDEV_TX_OK; 739 722 ··· 756 739 return NETDEV_TX_OK; 757 740 } 758 741 759 - if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM))) 742 + if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT, 743 + tunnel->parms.o_flags))) 760 744 goto free_skb; 761 745 762 746 if (skb_cow_head(skb, dev->needed_headroom)) ··· 775 757 static void ipgre_link_update(struct net_device *dev, bool set_mtu) 776 758 { 777 759 struct ip_tunnel *tunnel = netdev_priv(dev); 778 - __be16 flags; 779 760 int len; 780 761 781 762 len = tunnel->tun_hlen; ··· 790 773 if (set_mtu) 791 774 dev->mtu = max_t(int, dev->mtu - len, 68); 792 775 793 - flags = tunnel->parms.o_flags; 794 - 795 - if (flags & TUNNEL_SEQ || 796 - (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)) { 776 + if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags) || 777 + (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) && 778 + tunnel->encap.type != TUNNEL_ENCAP_NONE)) { 797 779 dev->features &= ~NETIF_F_GSO_SOFTWARE; 798 780 dev->hw_features &= ~NETIF_F_GSO_SOFTWARE; 799 781 } else { ··· 801 785 } 802 786 } 803 787 804 - static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, 788 + static int ipgre_tunnel_ctl(struct net_device *dev, 789 + struct ip_tunnel_parm_kern *p, 805 790 int cmd) 806 791 { 792 + __be16 i_flags, o_flags; 807 793 int err; 794 + 795 + if (!ip_tunnel_flags_is_be16_compat(p->i_flags) || 796 + !ip_tunnel_flags_is_be16_compat(p->o_flags)) 797 + return -EOVERFLOW; 798 + 799 + i_flags = ip_tunnel_flags_to_be16(p->i_flags); 800 + o_flags = ip_tunnel_flags_to_be16(p->o_flags); 808 801 809 802 if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) { 810 803 if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE || 811 804 p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) || 812 - ((p->i_flags | p->o_flags) & (GRE_VERSION | 
GRE_ROUTING))) 805 + ((i_flags | o_flags) & (GRE_VERSION | GRE_ROUTING))) 813 806 return -EINVAL; 814 807 } 815 808 816 - p->i_flags = gre_flags_to_tnl_flags(p->i_flags); 817 - p->o_flags = gre_flags_to_tnl_flags(p->o_flags); 809 + gre_flags_to_tnl_flags(p->i_flags, i_flags); 810 + gre_flags_to_tnl_flags(p->o_flags, o_flags); 818 811 819 812 err = ip_tunnel_ctl(dev, p, cmd); 820 813 if (err) ··· 832 807 if (cmd == SIOCCHGTUNNEL) { 833 808 struct ip_tunnel *t = netdev_priv(dev); 834 809 835 - t->parms.i_flags = p->i_flags; 836 - t->parms.o_flags = p->o_flags; 810 + ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags); 811 + ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags); 837 812 838 813 if (strcmp(dev->rtnl_link_ops->kind, "erspan")) 839 814 ipgre_link_update(dev, true); 840 815 } 841 816 842 - p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags); 843 - p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags); 817 + i_flags = gre_tnl_flags_to_gre_flags(p->i_flags); 818 + ip_tunnel_flags_from_be16(p->i_flags, i_flags); 819 + o_flags = gre_tnl_flags_to_gre_flags(p->o_flags); 820 + ip_tunnel_flags_from_be16(p->o_flags, o_flags); 821 + 844 822 return 0; 845 823 } 846 824 ··· 983 955 static void __gre_tunnel_init(struct net_device *dev) 984 956 { 985 957 struct ip_tunnel *tunnel; 986 - __be16 flags; 987 958 988 959 tunnel = netdev_priv(dev); 989 960 tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags); ··· 994 967 dev->features |= GRE_FEATURES | NETIF_F_LLTX; 995 968 dev->hw_features |= GRE_FEATURES; 996 969 997 - flags = tunnel->parms.o_flags; 998 - 999 970 /* TCP offload with GRE SEQ is not supported, nor can we support 2 1000 971 * levels of outer headers requiring an update. 
1001 972 */ 1002 - if (flags & TUNNEL_SEQ) 973 + if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags)) 1003 974 return; 1004 - if (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE) 975 + if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) && 976 + tunnel->encap.type != TUNNEL_ENCAP_NONE) 1005 977 return; 1006 978 1007 979 dev->features |= NETIF_F_GSO_SOFTWARE; ··· 1157 1131 static int ipgre_netlink_parms(struct net_device *dev, 1158 1132 struct nlattr *data[], 1159 1133 struct nlattr *tb[], 1160 - struct ip_tunnel_parm *parms, 1134 + struct ip_tunnel_parm_kern *parms, 1161 1135 __u32 *fwmark) 1162 1136 { 1163 1137 struct ip_tunnel *t = netdev_priv(dev); ··· 1173 1147 parms->link = nla_get_u32(data[IFLA_GRE_LINK]); 1174 1148 1175 1149 if (data[IFLA_GRE_IFLAGS]) 1176 - parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS])); 1150 + gre_flags_to_tnl_flags(parms->i_flags, 1151 + nla_get_be16(data[IFLA_GRE_IFLAGS])); 1177 1152 1178 1153 if (data[IFLA_GRE_OFLAGS]) 1179 - parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS])); 1154 + gre_flags_to_tnl_flags(parms->o_flags, 1155 + nla_get_be16(data[IFLA_GRE_OFLAGS])); 1180 1156 1181 1157 if (data[IFLA_GRE_IKEY]) 1182 1158 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]); ··· 1226 1198 static int erspan_netlink_parms(struct net_device *dev, 1227 1199 struct nlattr *data[], 1228 1200 struct nlattr *tb[], 1229 - struct ip_tunnel_parm *parms, 1201 + struct ip_tunnel_parm_kern *parms, 1230 1202 __u32 *fwmark) 1231 1203 { 1232 1204 struct ip_tunnel *t = netdev_priv(dev); ··· 1385 1357 struct nlattr *tb[], struct nlattr *data[], 1386 1358 struct netlink_ext_ack *extack) 1387 1359 { 1388 - struct ip_tunnel_parm p; 1360 + struct ip_tunnel_parm_kern p; 1389 1361 __u32 fwmark = 0; 1390 1362 int err; 1391 1363 ··· 1403 1375 struct nlattr *tb[], struct nlattr *data[], 1404 1376 struct netlink_ext_ack *extack) 1405 1377 { 1406 - struct ip_tunnel_parm p; 1378 + struct 
ip_tunnel_parm_kern p; 1407 1379 __u32 fwmark = 0; 1408 1380 int err; 1409 1381 ··· 1422 1394 struct netlink_ext_ack *extack) 1423 1395 { 1424 1396 struct ip_tunnel *t = netdev_priv(dev); 1397 + struct ip_tunnel_parm_kern p; 1425 1398 __u32 fwmark = t->fwmark; 1426 - struct ip_tunnel_parm p; 1427 1399 int err; 1428 1400 1429 1401 err = ipgre_newlink_encap_setup(dev, data); ··· 1438 1410 if (err < 0) 1439 1411 return err; 1440 1412 1441 - t->parms.i_flags = p.i_flags; 1442 - t->parms.o_flags = p.o_flags; 1413 + ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags); 1414 + ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags); 1443 1415 1444 1416 ipgre_link_update(dev, !tb[IFLA_MTU]); 1445 1417 ··· 1451 1423 struct netlink_ext_ack *extack) 1452 1424 { 1453 1425 struct ip_tunnel *t = netdev_priv(dev); 1426 + struct ip_tunnel_parm_kern p; 1454 1427 __u32 fwmark = t->fwmark; 1455 - struct ip_tunnel_parm p; 1456 1428 int err; 1457 1429 1458 1430 err = ipgre_newlink_encap_setup(dev, data); ··· 1467 1439 if (err < 0) 1468 1440 return err; 1469 1441 1470 - t->parms.i_flags = p.i_flags; 1471 - t->parms.o_flags = p.o_flags; 1442 + ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags); 1443 + ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags); 1472 1444 1473 1445 return 0; 1474 1446 } ··· 1524 1496 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) 1525 1497 { 1526 1498 struct ip_tunnel *t = netdev_priv(dev); 1527 - struct ip_tunnel_parm *p = &t->parms; 1528 - __be16 o_flags = p->o_flags; 1499 + struct ip_tunnel_parm_kern *p = &t->parms; 1500 + IP_TUNNEL_DECLARE_FLAGS(o_flags); 1501 + 1502 + ip_tunnel_flags_copy(o_flags, p->o_flags); 1529 1503 1530 1504 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 1531 1505 nla_put_be16(skb, IFLA_GRE_IFLAGS, ··· 1575 1545 1576 1546 if (t->erspan_ver <= 2) { 1577 1547 if (t->erspan_ver != 0 && !t->collect_md) 1578 - t->parms.o_flags |= TUNNEL_KEY; 1548 + __set_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags); 1579 1549 1580 1550 if 
(nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver)) 1581 1551 goto nla_put_failure;
+75 -34
net/ipv4/ip_tunnel.c
··· 56 56 IP_TNL_HASH_BITS); 57 57 } 58 58 59 - static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p, 60 - __be16 flags, __be32 key) 59 + static bool ip_tunnel_key_match(const struct ip_tunnel_parm_kern *p, 60 + const unsigned long *flags, __be32 key) 61 61 { 62 - if (p->i_flags & TUNNEL_KEY) { 63 - if (flags & TUNNEL_KEY) 64 - return key == p->i_key; 65 - else 66 - /* key expected, none present */ 67 - return false; 68 - } else 69 - return !(flags & TUNNEL_KEY); 62 + if (!test_bit(IP_TUNNEL_KEY_BIT, flags)) 63 + return !test_bit(IP_TUNNEL_KEY_BIT, p->i_flags); 64 + 65 + return test_bit(IP_TUNNEL_KEY_BIT, p->i_flags) && p->i_key == key; 70 66 } 71 67 72 68 /* Fallback tunnel: no source, no destination, no key, no options ··· 77 81 Given src, dst and key, find appropriate for input tunnel. 78 82 */ 79 83 struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, 80 - int link, __be16 flags, 84 + int link, const unsigned long *flags, 81 85 __be32 remote, __be32 local, 82 86 __be32 key) 83 87 { ··· 139 143 } 140 144 141 145 hlist_for_each_entry_rcu(t, head, hash_node) { 142 - if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) || 146 + if ((!test_bit(IP_TUNNEL_NO_KEY_BIT, flags) && 147 + t->parms.i_key != key) || 143 148 t->parms.iph.saddr != 0 || 144 149 t->parms.iph.daddr != 0 || 145 150 !(t->dev->flags & IFF_UP)) ··· 168 171 EXPORT_SYMBOL_GPL(ip_tunnel_lookup); 169 172 170 173 static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn, 171 - struct ip_tunnel_parm *parms) 174 + struct ip_tunnel_parm_kern *parms) 172 175 { 173 176 unsigned int h; 174 177 __be32 remote; ··· 179 182 else 180 183 remote = 0; 181 184 182 - if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI)) 185 + if (!test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags) && 186 + test_bit(IP_TUNNEL_VTI_BIT, parms->i_flags)) 183 187 i_key = 0; 184 188 185 189 h = ip_tunnel_hash(i_key, remote); ··· 204 206 } 205 207 206 208 static struct ip_tunnel *ip_tunnel_find(struct 
ip_tunnel_net *itn, 207 - struct ip_tunnel_parm *parms, 209 + struct ip_tunnel_parm_kern *parms, 208 210 int type) 209 211 { 210 212 __be32 remote = parms->iph.daddr; 211 213 __be32 local = parms->iph.saddr; 214 + IP_TUNNEL_DECLARE_FLAGS(flags); 212 215 __be32 key = parms->i_key; 213 - __be16 flags = parms->i_flags; 214 216 int link = parms->link; 215 217 struct ip_tunnel *t = NULL; 216 218 struct hlist_head *head = ip_bucket(itn, parms); 219 + 220 + ip_tunnel_flags_copy(flags, parms->i_flags); 217 221 218 222 hlist_for_each_entry_rcu(t, head, hash_node) { 219 223 if (local == t->parms.iph.saddr && ··· 230 230 231 231 static struct net_device *__ip_tunnel_create(struct net *net, 232 232 const struct rtnl_link_ops *ops, 233 - struct ip_tunnel_parm *parms) 233 + struct ip_tunnel_parm_kern *parms) 234 234 { 235 235 int err; 236 236 struct ip_tunnel *tunnel; ··· 326 326 327 327 static struct ip_tunnel *ip_tunnel_create(struct net *net, 328 328 struct ip_tunnel_net *itn, 329 - struct ip_tunnel_parm *parms) 329 + struct ip_tunnel_parm_kern *parms) 330 330 { 331 331 struct ip_tunnel *nt; 332 332 struct net_device *dev; ··· 386 386 } 387 387 #endif 388 388 389 - if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) || 390 - ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) { 389 + if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.i_flags) != 390 + test_bit(IP_TUNNEL_CSUM_BIT, tpi->flags)) { 391 391 DEV_STATS_INC(tunnel->dev, rx_crc_errors); 392 392 DEV_STATS_INC(tunnel->dev, rx_errors); 393 393 goto drop; 394 394 } 395 395 396 - if (tunnel->parms.i_flags&TUNNEL_SEQ) { 397 - if (!(tpi->flags&TUNNEL_SEQ) || 396 + if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.i_flags)) { 397 + if (!test_bit(IP_TUNNEL_SEQ_BIT, tpi->flags) || 398 398 (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) { 399 399 DEV_STATS_INC(tunnel->dev, rx_fifo_errors); 400 400 DEV_STATS_INC(tunnel->dev, rx_errors); ··· 638 638 goto tx_error; 639 639 } 640 640 641 
- if (key->tun_flags & TUNNEL_DONT_FRAGMENT) 641 + if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags)) 642 642 df = htons(IP_DF); 643 643 if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen, 644 644 key->u.ipv4.dst, true)) { ··· 871 871 static void ip_tunnel_update(struct ip_tunnel_net *itn, 872 872 struct ip_tunnel *t, 873 873 struct net_device *dev, 874 - struct ip_tunnel_parm *p, 874 + struct ip_tunnel_parm_kern *p, 875 875 bool set_mtu, 876 876 __u32 fwmark) 877 877 { ··· 903 903 netdev_state_change(dev); 904 904 } 905 905 906 - int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) 906 + int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, 907 + int cmd) 907 908 { 908 909 int err = 0; 909 910 struct ip_tunnel *t = netdev_priv(dev); ··· 928 927 goto done; 929 928 if (p->iph.ttl) 930 929 p->iph.frag_off |= htons(IP_DF); 931 - if (!(p->i_flags & VTI_ISVTI)) { 932 - if (!(p->i_flags & TUNNEL_KEY)) 930 + if (!test_bit(IP_TUNNEL_VTI_BIT, p->i_flags)) { 931 + if (!test_bit(IP_TUNNEL_KEY_BIT, p->i_flags)) 933 932 p->i_key = 0; 934 - if (!(p->o_flags & TUNNEL_KEY)) 933 + if (!test_bit(IP_TUNNEL_KEY_BIT, p->o_flags)) 935 934 p->o_key = 0; 936 935 } 937 936 ··· 1006 1005 } 1007 1006 EXPORT_SYMBOL_GPL(ip_tunnel_ctl); 1008 1007 1008 + bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp, 1009 + const void __user *data) 1010 + { 1011 + struct ip_tunnel_parm p; 1012 + 1013 + if (copy_from_user(&p, data, sizeof(p))) 1014 + return false; 1015 + 1016 + strscpy(kp->name, p.name); 1017 + kp->link = p.link; 1018 + ip_tunnel_flags_from_be16(kp->i_flags, p.i_flags); 1019 + ip_tunnel_flags_from_be16(kp->o_flags, p.o_flags); 1020 + kp->i_key = p.i_key; 1021 + kp->o_key = p.o_key; 1022 + memcpy(&kp->iph, &p.iph, min(sizeof(kp->iph), sizeof(p.iph))); 1023 + 1024 + return true; 1025 + } 1026 + EXPORT_SYMBOL_GPL(ip_tunnel_parm_from_user); 1027 + 1028 + bool ip_tunnel_parm_to_user(void __user *data, struct 
ip_tunnel_parm_kern *kp) 1029 + { 1030 + struct ip_tunnel_parm p; 1031 + 1032 + if (!ip_tunnel_flags_is_be16_compat(kp->i_flags) || 1033 + !ip_tunnel_flags_is_be16_compat(kp->o_flags)) 1034 + return false; 1035 + 1036 + strscpy(p.name, kp->name); 1037 + p.link = kp->link; 1038 + p.i_flags = ip_tunnel_flags_to_be16(kp->i_flags); 1039 + p.o_flags = ip_tunnel_flags_to_be16(kp->o_flags); 1040 + p.i_key = kp->i_key; 1041 + p.o_key = kp->o_key; 1042 + memcpy(&p.iph, &kp->iph, min(sizeof(p.iph), sizeof(kp->iph))); 1043 + 1044 + return !copy_to_user(data, &p, sizeof(p)); 1045 + } 1046 + EXPORT_SYMBOL_GPL(ip_tunnel_parm_to_user); 1047 + 1009 1048 int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr, 1010 1049 void __user *data, int cmd) 1011 1050 { 1012 - struct ip_tunnel_parm p; 1051 + struct ip_tunnel_parm_kern p; 1013 1052 int err; 1014 1053 1015 - if (copy_from_user(&p, data, sizeof(p))) 1054 + if (!ip_tunnel_parm_from_user(&p, data)) 1016 1055 return -EFAULT; 1017 1056 err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, cmd); 1018 - if (!err && copy_to_user(data, &p, sizeof(p))) 1057 + if (!err && !ip_tunnel_parm_to_user(data, &p)) 1019 1058 return -EFAULT; 1020 1059 return err; 1021 1060 } ··· 1134 1093 struct rtnl_link_ops *ops, char *devname) 1135 1094 { 1136 1095 struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id); 1137 - struct ip_tunnel_parm parms; 1096 + struct ip_tunnel_parm_kern parms; 1138 1097 unsigned int i; 1139 1098 1140 1099 itn->rtnl_link_ops = ops; ··· 1212 1171 EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets); 1213 1172 1214 1173 int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], 1215 - struct ip_tunnel_parm *p, __u32 fwmark) 1174 + struct ip_tunnel_parm_kern *p, __u32 fwmark) 1216 1175 { 1217 1176 struct ip_tunnel *nt; 1218 1177 struct net *net = dev_net(dev); ··· 1266 1225 EXPORT_SYMBOL_GPL(ip_tunnel_newlink); 1267 1226 1268 1227 int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], 1269 - struct 
ip_tunnel_parm *p, __u32 fwmark) 1228 + struct ip_tunnel_parm_kern *p, __u32 fwmark) 1270 1229 { 1271 1230 struct ip_tunnel *t; 1272 1231 struct ip_tunnel *tunnel = netdev_priv(dev);
+51 -31
net/ipv4/ip_tunnel_core.c
··· 125 125 struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, 126 126 gfp_t flags) 127 127 { 128 + IP_TUNNEL_DECLARE_FLAGS(tun_flags) = { }; 128 129 struct metadata_dst *res; 129 130 struct ip_tunnel_info *dst, *src; 130 131 ··· 145 144 sizeof(struct in6_addr)); 146 145 else 147 146 dst->key.u.ipv4.dst = src->key.u.ipv4.src; 148 - dst->key.tun_flags = src->key.tun_flags; 147 + ip_tunnel_flags_copy(dst->key.tun_flags, src->key.tun_flags); 149 148 dst->mode = src->mode | IP_TUNNEL_INFO_TX; 150 149 ip_tunnel_info_opts_set(dst, ip_tunnel_info_opts(src), 151 - src->options_len, 0); 150 + src->options_len, tun_flags); 152 151 153 152 return res; 154 153 } ··· 498 497 opt->opt_class = nla_get_be16(attr); 499 498 attr = tb[LWTUNNEL_IP_OPT_GENEVE_TYPE]; 500 499 opt->type = nla_get_u8(attr); 501 - info->key.tun_flags |= TUNNEL_GENEVE_OPT; 500 + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags); 502 501 } 503 502 504 503 return sizeof(struct geneve_opt) + data_len; ··· 526 525 attr = tb[LWTUNNEL_IP_OPT_VXLAN_GBP]; 527 526 md->gbp = nla_get_u32(attr); 528 527 md->gbp &= VXLAN_GBP_MASK; 529 - info->key.tun_flags |= TUNNEL_VXLAN_OPT; 528 + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags); 530 529 } 531 530 532 531 return sizeof(struct vxlan_metadata); ··· 575 574 set_hwid(&md->u.md2, nla_get_u8(attr)); 576 575 } 577 576 578 - info->key.tun_flags |= TUNNEL_ERSPAN_OPT; 577 + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags); 579 578 } 580 579 581 580 return sizeof(struct erspan_metadata); ··· 586 585 { 587 586 int err, rem, opt_len, opts_len = 0; 588 587 struct nlattr *nla; 589 - __be16 type = 0; 588 + u32 type = 0; 590 589 591 590 if (!attr) 592 591 return 0; ··· 599 598 nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) { 600 599 switch (nla_type(nla)) { 601 600 case LWTUNNEL_IP_OPTS_GENEVE: 602 - if (type && type != TUNNEL_GENEVE_OPT) 601 + if (type && type != IP_TUNNEL_GENEVE_OPT_BIT) 603 602 return -EINVAL; 604 603 opt_len = 
ip_tun_parse_opts_geneve(nla, info, opts_len, 605 604 extack); ··· 608 607 opts_len += opt_len; 609 608 if (opts_len > IP_TUNNEL_OPTS_MAX) 610 609 return -EINVAL; 611 - type = TUNNEL_GENEVE_OPT; 610 + type = IP_TUNNEL_GENEVE_OPT_BIT; 612 611 break; 613 612 case LWTUNNEL_IP_OPTS_VXLAN: 614 613 if (type) ··· 618 617 if (opt_len < 0) 619 618 return opt_len; 620 619 opts_len += opt_len; 621 - type = TUNNEL_VXLAN_OPT; 620 + type = IP_TUNNEL_VXLAN_OPT_BIT; 622 621 break; 623 622 case LWTUNNEL_IP_OPTS_ERSPAN: 624 623 if (type) ··· 628 627 if (opt_len < 0) 629 628 return opt_len; 630 629 opts_len += opt_len; 631 - type = TUNNEL_ERSPAN_OPT; 630 + type = IP_TUNNEL_ERSPAN_OPT_BIT; 632 631 break; 633 632 default: 634 633 return -EINVAL; ··· 706 705 if (tb[LWTUNNEL_IP_TOS]) 707 706 tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]); 708 707 709 - if (tb[LWTUNNEL_IP_FLAGS]) 710 - tun_info->key.tun_flags |= 711 - (nla_get_be16(tb[LWTUNNEL_IP_FLAGS]) & 712 - ~TUNNEL_OPTIONS_PRESENT); 708 + if (tb[LWTUNNEL_IP_FLAGS]) { 709 + IP_TUNNEL_DECLARE_FLAGS(flags); 710 + 711 + ip_tunnel_flags_from_be16(flags, 712 + nla_get_be16(tb[LWTUNNEL_IP_FLAGS])); 713 + ip_tunnel_clear_options_present(flags); 714 + 715 + ip_tunnel_flags_or(tun_info->key.tun_flags, 716 + tun_info->key.tun_flags, flags); 717 + } 713 718 714 719 tun_info->mode = IP_TUNNEL_INFO_TX; 715 720 tun_info->options_len = opt_len; ··· 819 812 struct nlattr *nest; 820 813 int err = 0; 821 814 822 - if (!(tun_info->key.tun_flags & TUNNEL_OPTIONS_PRESENT)) 815 + if (!ip_tunnel_is_options_present(tun_info->key.tun_flags)) 823 816 return 0; 824 817 825 818 nest = nla_nest_start_noflag(skb, type); 826 819 if (!nest) 827 820 return -ENOMEM; 828 821 829 - if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT) 822 + if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_info->key.tun_flags)) 830 823 err = ip_tun_fill_encap_opts_geneve(skb, tun_info); 831 - else if (tun_info->key.tun_flags & TUNNEL_VXLAN_OPT) 824 + else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, 
tun_info->key.tun_flags)) 832 825 err = ip_tun_fill_encap_opts_vxlan(skb, tun_info); 833 - else if (tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT) 826 + else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags)) 834 827 err = ip_tun_fill_encap_opts_erspan(skb, tun_info); 835 828 836 829 if (err) { ··· 853 846 nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) || 854 847 nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) || 855 848 nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) || 856 - nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags) || 849 + nla_put_be16(skb, LWTUNNEL_IP_FLAGS, 850 + ip_tunnel_flags_to_be16(tun_info->key.tun_flags)) || 857 851 ip_tun_fill_encap_opts(skb, LWTUNNEL_IP_OPTS, tun_info)) 858 852 return -ENOMEM; 859 853 ··· 865 857 { 866 858 int opt_len; 867 859 868 - if (!(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT)) 860 + if (!ip_tunnel_is_options_present(info->key.tun_flags)) 869 861 return 0; 870 862 871 863 opt_len = nla_total_size(0); /* LWTUNNEL_IP_OPTS */ 872 - if (info->key.tun_flags & TUNNEL_GENEVE_OPT) { 864 + if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags)) { 873 865 struct geneve_opt *opt; 874 866 int offset = 0; 875 867 ··· 882 874 /* OPT_GENEVE_DATA */ 883 875 offset += sizeof(*opt) + opt->length * 4; 884 876 } 885 - } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) { 877 + } else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) { 886 878 opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_VXLAN */ 887 879 + nla_total_size(4); /* OPT_VXLAN_GBP */ 888 - } else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) { 880 + } else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags)) { 889 881 struct erspan_metadata *md = ip_tunnel_info_opts(info); 890 882 891 883 opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_ERSPAN */ ··· 992 984 if (tb[LWTUNNEL_IP6_TC]) 993 985 tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]); 994 986 995 - if (tb[LWTUNNEL_IP6_FLAGS]) 996 - 
tun_info->key.tun_flags |= 997 - (nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]) & 998 - ~TUNNEL_OPTIONS_PRESENT); 987 + if (tb[LWTUNNEL_IP6_FLAGS]) { 988 + IP_TUNNEL_DECLARE_FLAGS(flags); 989 + __be16 data; 990 + 991 + data = nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]); 992 + ip_tunnel_flags_from_be16(flags, data); 993 + ip_tunnel_clear_options_present(flags); 994 + 995 + ip_tunnel_flags_or(tun_info->key.tun_flags, 996 + tun_info->key.tun_flags, flags); 997 + } 999 998 1000 999 tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6; 1001 1000 tun_info->options_len = opt_len; ··· 1023 1008 nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) || 1024 1009 nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) || 1025 1010 nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) || 1026 - nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags) || 1011 + nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, 1012 + ip_tunnel_flags_to_be16(tun_info->key.tun_flags)) || 1027 1013 ip_tun_fill_encap_opts(skb, LWTUNNEL_IP6_OPTS, tun_info)) 1028 1014 return -ENOMEM; 1029 1015 ··· 1132 1116 EXPORT_SYMBOL_GPL(ip_tunnel_netlink_encap_parms); 1133 1117 1134 1118 void ip_tunnel_netlink_parms(struct nlattr *data[], 1135 - struct ip_tunnel_parm *parms) 1119 + struct ip_tunnel_parm_kern *parms) 1136 1120 { 1137 1121 if (data[IFLA_IPTUN_LINK]) 1138 1122 parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]); ··· 1155 1139 if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC])) 1156 1140 parms->iph.frag_off = htons(IP_DF); 1157 1141 1158 - if (data[IFLA_IPTUN_FLAGS]) 1159 - parms->i_flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]); 1142 + if (data[IFLA_IPTUN_FLAGS]) { 1143 + __be16 flags; 1144 + 1145 + flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]); 1146 + ip_tunnel_flags_from_be16(parms->i_flags, flags); 1147 + } 1160 1148 1161 1149 if (data[IFLA_IPTUN_PROTO]) 1162 1150 parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);
+27 -14
net/ipv4/ip_vti.c
··· 51 51 const struct iphdr *iph = ip_hdr(skb); 52 52 struct net *net = dev_net(skb->dev); 53 53 struct ip_tunnel_net *itn = net_generic(net, vti_net_id); 54 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 54 55 55 - tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 56 + __set_bit(IP_TUNNEL_NO_KEY_BIT, flags); 57 + 58 + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags, 56 59 iph->saddr, iph->daddr, 0); 57 60 if (tunnel) { 58 61 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) ··· 170 167 struct flowi *fl) 171 168 { 172 169 struct ip_tunnel *tunnel = netdev_priv(dev); 173 - struct ip_tunnel_parm *parms = &tunnel->parms; 170 + struct ip_tunnel_parm_kern *parms = &tunnel->parms; 174 171 struct dst_entry *dst = skb_dst(skb); 175 172 struct net_device *tdev; /* Device to other host */ 176 173 int pkt_len = skb->len; ··· 325 322 const struct iphdr *iph = (const struct iphdr *)skb->data; 326 323 int protocol = iph->protocol; 327 324 struct ip_tunnel_net *itn = net_generic(net, vti_net_id); 325 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 328 326 329 - tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 327 + __set_bit(IP_TUNNEL_NO_KEY_BIT, flags); 328 + 329 + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags, 330 330 iph->daddr, iph->saddr, 0); 331 331 if (!tunnel) 332 332 return -1; ··· 379 373 } 380 374 381 375 static int 382 - vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) 376 + vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, int cmd) 383 377 { 378 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 384 379 int err = 0; 385 380 386 381 if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) { ··· 390 383 return -EINVAL; 391 384 } 392 385 393 - if (!(p->i_flags & GRE_KEY)) 386 + if (!ip_tunnel_flags_is_be16_compat(p->i_flags) || 387 + !ip_tunnel_flags_is_be16_compat(p->o_flags)) 388 + return -EOVERFLOW; 389 + 390 + if (!(ip_tunnel_flags_to_be16(p->i_flags) & GRE_KEY)) 394 391 p->i_key = 0; 395 - if 
(!(p->o_flags & GRE_KEY)) 392 + if (!(ip_tunnel_flags_to_be16(p->o_flags) & GRE_KEY)) 396 393 p->o_key = 0; 397 394 398 - p->i_flags = VTI_ISVTI; 395 + __set_bit(IP_TUNNEL_VTI_BIT, flags); 396 + ip_tunnel_flags_copy(p->i_flags, flags); 399 397 400 398 err = ip_tunnel_ctl(dev, p, cmd); 401 399 if (err) 402 400 return err; 403 401 404 402 if (cmd != SIOCDELTUNNEL) { 405 - p->i_flags |= GRE_KEY; 406 - p->o_flags |= GRE_KEY; 403 + ip_tunnel_flags_from_be16(flags, GRE_KEY); 404 + ip_tunnel_flags_or(p->i_flags, p->i_flags, flags); 405 + ip_tunnel_flags_or(p->o_flags, p->o_flags, flags); 407 406 } 408 407 return 0; 409 408 } ··· 544 531 } 545 532 546 533 static void vti_netlink_parms(struct nlattr *data[], 547 - struct ip_tunnel_parm *parms, 534 + struct ip_tunnel_parm_kern *parms, 548 535 __u32 *fwmark) 549 536 { 550 537 memset(parms, 0, sizeof(*parms)); ··· 554 541 if (!data) 555 542 return; 556 543 557 - parms->i_flags = VTI_ISVTI; 544 + __set_bit(IP_TUNNEL_VTI_BIT, parms->i_flags); 558 545 559 546 if (data[IFLA_VTI_LINK]) 560 547 parms->link = nla_get_u32(data[IFLA_VTI_LINK]); ··· 579 566 struct nlattr *tb[], struct nlattr *data[], 580 567 struct netlink_ext_ack *extack) 581 568 { 582 - struct ip_tunnel_parm parms; 569 + struct ip_tunnel_parm_kern parms; 583 570 __u32 fwmark = 0; 584 571 585 572 vti_netlink_parms(data, &parms, &fwmark); ··· 591 578 struct netlink_ext_ack *extack) 592 579 { 593 580 struct ip_tunnel *t = netdev_priv(dev); 581 + struct ip_tunnel_parm_kern p; 594 582 __u32 fwmark = t->fwmark; 595 - struct ip_tunnel_parm p; 596 583 597 584 vti_netlink_parms(data, &p, &fwmark); 598 585 return ip_tunnel_changelink(dev, tb, &p, fwmark); ··· 619 606 static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev) 620 607 { 621 608 struct ip_tunnel *t = netdev_priv(dev); 622 - struct ip_tunnel_parm *p = &t->parms; 609 + struct ip_tunnel_parm_kern *p = &t->parms; 623 610 624 611 if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) || 625 612 nla_put_be32(skb, 
IFLA_VTI_IKEY, p->i_key) ||
+21 -12
net/ipv4/ipip.c
··· 130 130 struct net *net = dev_net(skb->dev); 131 131 struct ip_tunnel_net *itn = net_generic(net, ipip_net_id); 132 132 const struct iphdr *iph = (const struct iphdr *)skb->data; 133 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 133 134 const int type = icmp_hdr(skb)->type; 134 135 const int code = icmp_hdr(skb)->code; 135 136 struct ip_tunnel *t; 136 137 int err = 0; 137 138 138 - t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 139 - iph->daddr, iph->saddr, 0); 139 + __set_bit(IP_TUNNEL_NO_KEY_BIT, flags); 140 + 141 + t = ip_tunnel_lookup(itn, skb->dev->ifindex, flags, iph->daddr, 142 + iph->saddr, 0); 140 143 if (!t) { 141 144 err = -ENOENT; 142 145 goto out; ··· 216 213 { 217 214 struct net *net = dev_net(skb->dev); 218 215 struct ip_tunnel_net *itn = net_generic(net, ipip_net_id); 216 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 219 217 struct metadata_dst *tun_dst = NULL; 220 218 struct ip_tunnel *tunnel; 221 219 const struct iphdr *iph; 222 220 221 + __set_bit(IP_TUNNEL_NO_KEY_BIT, flags); 222 + 223 223 iph = ip_hdr(skb); 224 - tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 225 - iph->saddr, iph->daddr, 0); 224 + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags, iph->saddr, 225 + iph->daddr, 0); 226 226 if (tunnel) { 227 227 const struct tnl_ptk_info *tpi; 228 228 ··· 244 238 if (iptunnel_pull_header(skb, 0, tpi->proto, false)) 245 239 goto drop; 246 240 if (tunnel->collect_md) { 247 - tun_dst = ip_tun_rx_dst(skb, 0, 0, 0); 241 + ip_tunnel_flags_zero(flags); 242 + 243 + tun_dst = ip_tun_rx_dst(skb, flags, 0, 0); 248 244 if (!tun_dst) 249 245 return 0; 250 246 ip_tunnel_md_udp_encap(skb, &tun_dst->u.tun_info); ··· 338 330 } 339 331 340 332 static int 341 - ipip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) 333 + ipip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, int cmd) 342 334 { 343 335 if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) { 344 336 if (p->iph.version != 4 || ··· 348 340 } 
349 341 350 342 p->i_key = p->o_key = 0; 351 - p->i_flags = p->o_flags = 0; 343 + ip_tunnel_flags_zero(p->i_flags); 344 + ip_tunnel_flags_zero(p->o_flags); 352 345 return ip_tunnel_ctl(dev, p, cmd); 353 346 } 354 347 ··· 414 405 } 415 406 416 407 static void ipip_netlink_parms(struct nlattr *data[], 417 - struct ip_tunnel_parm *parms, bool *collect_md, 418 - __u32 *fwmark) 408 + struct ip_tunnel_parm_kern *parms, 409 + bool *collect_md, __u32 *fwmark) 419 410 { 420 411 memset(parms, 0, sizeof(*parms)); 421 412 ··· 441 432 struct netlink_ext_ack *extack) 442 433 { 443 434 struct ip_tunnel *t = netdev_priv(dev); 444 - struct ip_tunnel_parm p; 445 435 struct ip_tunnel_encap ipencap; 436 + struct ip_tunnel_parm_kern p; 446 437 __u32 fwmark = 0; 447 438 448 439 if (ip_tunnel_netlink_encap_parms(data, &ipencap)) { ··· 461 452 struct netlink_ext_ack *extack) 462 453 { 463 454 struct ip_tunnel *t = netdev_priv(dev); 464 - struct ip_tunnel_parm p; 465 455 struct ip_tunnel_encap ipencap; 456 + struct ip_tunnel_parm_kern p; 466 457 bool collect_md; 467 458 __u32 fwmark = t->fwmark; 468 459 ··· 519 510 static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev) 520 511 { 521 512 struct ip_tunnel *tunnel = netdev_priv(dev); 522 - struct ip_tunnel_parm *parm = &tunnel->parms; 513 + struct ip_tunnel_parm_kern *parm = &tunnel->parms; 523 514 524 515 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || 525 516 nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
+1 -1
net/ipv4/ipmr.c
··· 441 441 static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) 442 442 { 443 443 struct net_device *tunnel_dev, *new_dev; 444 - struct ip_tunnel_parm p = { }; 444 + struct ip_tunnel_parm_kern p = { }; 445 445 int err; 446 446 447 447 tunnel_dev = __dev_get_by_name(net, "tunl0");
+3 -2
net/ipv4/udp_tunnel_core.c
··· 183 183 EXPORT_SYMBOL_GPL(udp_tunnel_sock_release); 184 184 185 185 struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, 186 - __be16 flags, __be64 tunnel_id, int md_size) 186 + const unsigned long *flags, 187 + __be64 tunnel_id, int md_size) 187 188 { 188 189 struct metadata_dst *tun_dst; 189 190 struct ip_tunnel_info *info; ··· 200 199 info->key.tp_src = udp_hdr(skb)->source; 201 200 info->key.tp_dst = udp_hdr(skb)->dest; 202 201 if (udp_hdr(skb)->check) 203 - info->key.tun_flags |= TUNNEL_CSUM; 202 + __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); 204 203 return tun_dst; 205 204 } 206 205 EXPORT_SYMBOL_GPL(udp_tun_rx_dst);
+2 -1
net/ipv6/addrconf.c
··· 63 63 #include <linux/string.h> 64 64 #include <linux/hash.h> 65 65 66 + #include <net/ip_tunnels.h> 66 67 #include <net/net_namespace.h> 67 68 #include <net/sock.h> 68 69 #include <net/snmp.h> ··· 2918 2917 static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev, 2919 2918 struct in6_ifreq *ireq) 2920 2919 { 2921 - struct ip_tunnel_parm p = { }; 2920 + struct ip_tunnel_parm_kern p = { }; 2922 2921 int err; 2923 2922 2924 2923 if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4))
+49 -36
net/ipv6/ip6_gre.c
··· 496 496 tpi->proto); 497 497 if (tunnel) { 498 498 if (tunnel->parms.collect_md) { 499 + IP_TUNNEL_DECLARE_FLAGS(flags); 499 500 struct metadata_dst *tun_dst; 500 501 __be64 tun_id; 501 - __be16 flags; 502 502 503 - flags = tpi->flags; 503 + ip_tunnel_flags_copy(flags, tpi->flags); 504 504 tun_id = key32_to_tunnel_id(tpi->key); 505 505 506 506 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0); ··· 548 548 549 549 if (tunnel->parms.collect_md) { 550 550 struct erspan_metadata *pkt_md, *md; 551 + IP_TUNNEL_DECLARE_FLAGS(flags); 551 552 struct metadata_dst *tun_dst; 552 553 struct ip_tunnel_info *info; 553 554 unsigned char *gh; 554 555 __be64 tun_id; 555 - __be16 flags; 556 556 557 - tpi->flags |= TUNNEL_KEY; 558 - flags = tpi->flags; 557 + __set_bit(IP_TUNNEL_KEY_BIT, tpi->flags); 558 + ip_tunnel_flags_copy(flags, tpi->flags); 559 559 tun_id = key32_to_tunnel_id(tpi->key); 560 560 561 561 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, ··· 577 577 md2 = &md->u.md2; 578 578 memcpy(md2, pkt_md, ver == 1 ? 
ERSPAN_V1_MDSIZE : 579 579 ERSPAN_V2_MDSIZE); 580 - info->key.tun_flags |= TUNNEL_ERSPAN_OPT; 580 + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, 581 + info->key.tun_flags); 581 582 info->options_len = sizeof(*md); 582 583 583 584 ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); ··· 746 745 __u32 *pmtu, __be16 proto) 747 746 { 748 747 struct ip6_tnl *tunnel = netdev_priv(dev); 748 + IP_TUNNEL_DECLARE_FLAGS(flags); 749 749 __be16 protocol; 750 - __be16 flags; 751 750 752 751 if (dev->type == ARPHRD_ETHER) 753 752 IPCB(skb)->flags = 0; ··· 779 778 fl6->fl6_gre_key = tunnel_id_to_key32(key->tun_id); 780 779 781 780 dsfield = key->tos; 782 - flags = key->tun_flags & 783 - (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ); 781 + ip_tunnel_flags_zero(flags); 782 + __set_bit(IP_TUNNEL_CSUM_BIT, flags); 783 + __set_bit(IP_TUNNEL_KEY_BIT, flags); 784 + __set_bit(IP_TUNNEL_SEQ_BIT, flags); 785 + ip_tunnel_flags_and(flags, flags, key->tun_flags); 784 786 tun_hlen = gre_calc_hlen(flags); 785 787 786 788 if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen)) ··· 792 788 gre_build_header(skb, tun_hlen, 793 789 flags, protocol, 794 790 tunnel_id_to_key32(tun_info->key.tun_id), 795 - (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) 796 - : 0); 791 + test_bit(IP_TUNNEL_SEQ_BIT, flags) ? 792 + htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 793 + 0); 797 794 798 795 } else { 799 796 if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen)) 800 797 return -ENOMEM; 801 798 802 - flags = tunnel->parms.o_flags; 799 + ip_tunnel_flags_copy(flags, tunnel->parms.o_flags); 803 800 804 801 gre_build_header(skb, tunnel->tun_hlen, flags, 805 802 protocol, tunnel->parms.o_key, 806 - (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) 807 - : 0); 803 + test_bit(IP_TUNNEL_SEQ_BIT, flags) ? 
804 + htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 805 + 0); 808 806 } 809 807 810 808 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, ··· 828 822 prepare_ip6gre_xmit_ipv4(skb, dev, &fl6, 829 823 &dsfield, &encap_limit); 830 824 831 - err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)); 825 + err = gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT, 826 + t->parms.o_flags)); 832 827 if (err) 833 828 return -1; 834 829 ··· 863 856 prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit)) 864 857 return -1; 865 858 866 - if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM))) 859 + if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT, 860 + t->parms.o_flags))) 867 861 return -1; 868 862 869 863 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, ··· 891 883 prepare_ip6gre_xmit_other(skb, dev, &fl6, &dsfield, &encap_limit)) 892 884 return -1; 893 885 894 - err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)); 886 + err = gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT, 887 + t->parms.o_flags)); 895 888 if (err) 896 889 return err; 897 890 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, skb->protocol); ··· 945 936 struct ip_tunnel_info *tun_info = NULL; 946 937 struct ip6_tnl *t = netdev_priv(dev); 947 938 struct dst_entry *dst = skb_dst(skb); 939 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 948 940 bool truncate = false; 949 941 int encap_limit = -1; 950 942 __u8 dsfield = false; ··· 989 979 if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen)) 990 980 goto tx_err; 991 981 992 - t->parms.o_flags &= ~TUNNEL_KEY; 982 + __clear_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags); 993 983 IPCB(skb)->flags = 0; 994 984 995 985 /* For collect_md mode, derive fl6 from the tunnel key, ··· 1014 1004 fl6.fl6_gre_key = tunnel_id_to_key32(key->tun_id); 1015 1005 1016 1006 dsfield = key->tos; 1017 - if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)) 1007 + if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, 1008 + 
tun_info->key.tun_flags)) 1018 1009 goto tx_err; 1019 1010 if (tun_info->options_len < sizeof(*md)) 1020 1011 goto tx_err; ··· 1076 1065 } 1077 1066 1078 1067 /* Push GRE header. */ 1079 - gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno))); 1068 + __set_bit(IP_TUNNEL_SEQ_BIT, flags); 1069 + gre_build_header(skb, 8, flags, proto, 0, 1070 + htonl(atomic_fetch_inc(&t->o_seqno))); 1080 1071 1081 1072 /* TooBig packet may have updated dst->dev's mtu */ 1082 1073 if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) ··· 1221 1208 t->parms.proto = p->proto; 1222 1209 t->parms.i_key = p->i_key; 1223 1210 t->parms.o_key = p->o_key; 1224 - t->parms.i_flags = p->i_flags; 1225 - t->parms.o_flags = p->o_flags; 1211 + ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags); 1212 + ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags); 1226 1213 t->parms.fwmark = p->fwmark; 1227 1214 t->parms.erspan_ver = p->erspan_ver; 1228 1215 t->parms.index = p->index; ··· 1251 1238 p->link = u->link; 1252 1239 p->i_key = u->i_key; 1253 1240 p->o_key = u->o_key; 1254 - p->i_flags = gre_flags_to_tnl_flags(u->i_flags); 1255 - p->o_flags = gre_flags_to_tnl_flags(u->o_flags); 1241 + gre_flags_to_tnl_flags(p->i_flags, u->i_flags); 1242 + gre_flags_to_tnl_flags(p->o_flags, u->o_flags); 1256 1243 memcpy(p->name, u->name, sizeof(u->name)); 1257 1244 } 1258 1245 ··· 1404 1391 ipv6h->daddr = t->parms.raddr; 1405 1392 1406 1393 p = (__be16 *)(ipv6h + 1); 1407 - p[0] = t->parms.o_flags; 1394 + p[0] = ip_tunnel_flags_to_be16(t->parms.o_flags); 1408 1395 p[1] = htons(type); 1409 1396 1410 1397 /* ··· 1468 1455 static void ip6gre_tnl_init_features(struct net_device *dev) 1469 1456 { 1470 1457 struct ip6_tnl *nt = netdev_priv(dev); 1471 - __be16 flags; 1472 1458 1473 1459 dev->features |= GRE6_FEATURES | NETIF_F_LLTX; 1474 1460 dev->hw_features |= GRE6_FEATURES; 1475 1461 1476 - flags = nt->parms.o_flags; 1477 - 1478 1462 /* TCP offload with GRE SEQ is not supported, nor 
can we support 2 1479 1463 * levels of outer headers requiring an update. 1480 1464 */ 1481 - if (flags & TUNNEL_SEQ) 1465 + if (test_bit(IP_TUNNEL_SEQ_BIT, nt->parms.o_flags)) 1482 1466 return; 1483 - if (flags & TUNNEL_CSUM && nt->encap.type != TUNNEL_ENCAP_NONE) 1467 + if (test_bit(IP_TUNNEL_CSUM_BIT, nt->parms.o_flags) && 1468 + nt->encap.type != TUNNEL_ENCAP_NONE) 1484 1469 return; 1485 1470 1486 1471 dev->features |= NETIF_F_GSO_SOFTWARE; ··· 1803 1792 parms->link = nla_get_u32(data[IFLA_GRE_LINK]); 1804 1793 1805 1794 if (data[IFLA_GRE_IFLAGS]) 1806 - parms->i_flags = gre_flags_to_tnl_flags( 1807 - nla_get_be16(data[IFLA_GRE_IFLAGS])); 1795 + gre_flags_to_tnl_flags(parms->i_flags, 1796 + nla_get_be16(data[IFLA_GRE_IFLAGS])); 1808 1797 1809 1798 if (data[IFLA_GRE_OFLAGS]) 1810 - parms->o_flags = gre_flags_to_tnl_flags( 1811 - nla_get_be16(data[IFLA_GRE_OFLAGS])); 1799 + gre_flags_to_tnl_flags(parms->o_flags, 1800 + nla_get_be16(data[IFLA_GRE_OFLAGS])); 1812 1801 1813 1802 if (data[IFLA_GRE_IKEY]) 1814 1803 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]); ··· 2155 2144 { 2156 2145 struct ip6_tnl *t = netdev_priv(dev); 2157 2146 struct __ip6_tnl_parm *p = &t->parms; 2158 - __be16 o_flags = p->o_flags; 2147 + IP_TUNNEL_DECLARE_FLAGS(o_flags); 2148 + 2149 + ip_tunnel_flags_copy(o_flags, p->o_flags); 2159 2150 2160 2151 if (p->erspan_ver == 1 || p->erspan_ver == 2) { 2161 2152 if (!p->collect_md) 2162 - o_flags |= TUNNEL_KEY; 2153 + __set_bit(IP_TUNNEL_KEY_BIT, o_flags); 2163 2154 2164 2155 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver)) 2165 2156 goto nla_put_failure;
+7 -7
net/ipv6/ip6_tunnel.c
··· 798 798 const struct ipv6hdr *ipv6h; 799 799 int nh, err; 800 800 801 - if ((!(tpi->flags & TUNNEL_CSUM) && 802 - (tunnel->parms.i_flags & TUNNEL_CSUM)) || 803 - ((tpi->flags & TUNNEL_CSUM) && 804 - !(tunnel->parms.i_flags & TUNNEL_CSUM))) { 801 + if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.i_flags) != 802 + test_bit(IP_TUNNEL_CSUM_BIT, tpi->flags)) { 805 803 DEV_STATS_INC(tunnel->dev, rx_crc_errors); 806 804 DEV_STATS_INC(tunnel->dev, rx_errors); 807 805 goto drop; 808 806 } 809 807 810 - if (tunnel->parms.i_flags & TUNNEL_SEQ) { 811 - if (!(tpi->flags & TUNNEL_SEQ) || 808 + if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.i_flags)) { 809 + if (!test_bit(IP_TUNNEL_SEQ_BIT, tpi->flags) || 812 810 (tunnel->i_seqno && 813 811 (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) { 814 812 DEV_STATS_INC(tunnel->dev, rx_fifo_errors); ··· 944 946 if (iptunnel_pull_header(skb, 0, tpi->proto, false)) 945 947 goto drop; 946 948 if (t->parms.collect_md) { 947 - tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0); 949 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 950 + 951 + tun_dst = ipv6_tun_rx_dst(skb, flags, 0, 0); 948 952 if (!tun_dst) 949 953 goto drop; 950 954 }
+21 -17
net/ipv6/sit.c
··· 132 132 return NULL; 133 133 } 134 134 135 - static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn, 136 - struct ip_tunnel_parm *parms) 135 + static struct ip_tunnel __rcu ** 136 + __ipip6_bucket(struct sit_net *sitn, struct ip_tunnel_parm_kern *parms) 137 137 { 138 138 __be32 remote = parms->iph.daddr; 139 139 __be32 local = parms->iph.saddr; ··· 207 207 __dev_addr_set(dev, &t->parms.iph.saddr, 4); 208 208 memcpy(dev->broadcast, &t->parms.iph.daddr, 4); 209 209 210 - if ((__force u16)t->parms.i_flags & SIT_ISATAP) 210 + if (test_bit(IP_TUNNEL_SIT_ISATAP_BIT, t->parms.i_flags)) 211 211 dev->priv_flags |= IFF_ISATAP; 212 212 213 213 dev->rtnl_link_ops = &sit_link_ops; ··· 226 226 } 227 227 228 228 static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, 229 - struct ip_tunnel_parm *parms, int create) 229 + struct ip_tunnel_parm_kern *parms, 230 + int create) 230 231 { 231 232 __be32 remote = parms->iph.daddr; 232 233 __be32 local = parms->iph.saddr; ··· 1136 1135 dev->needed_headroom = t_hlen + hlen; 1137 1136 } 1138 1137 1139 - static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p, 1138 + static void ipip6_tunnel_update(struct ip_tunnel *t, 1139 + struct ip_tunnel_parm_kern *p, 1140 1140 __u32 fwmark) 1141 1141 { 1142 1142 struct net *net = t->net; ··· 1198 1196 ipip6_tunnel_get6rd(struct net_device *dev, struct ip_tunnel_parm __user *data) 1199 1197 { 1200 1198 struct ip_tunnel *t = netdev_priv(dev); 1199 + struct ip_tunnel_parm_kern p; 1201 1200 struct ip_tunnel_6rd ip6rd; 1202 - struct ip_tunnel_parm p; 1203 1201 1204 1202 if (dev == dev_to_sit_net(dev)->fb_tunnel_dev) { 1205 - if (copy_from_user(&p, data, sizeof(p))) 1203 + if (!ip_tunnel_parm_from_user(&p, data)) 1206 1204 return -EFAULT; 1207 1205 t = ipip6_tunnel_locate(t->net, &p, 0); 1208 1206 } ··· 1253 1251 } 1254 1252 1255 1253 static int 1256 - __ipip6_tunnel_ioctl_validate(struct net *net, struct ip_tunnel_parm *p) 1254 + 
__ipip6_tunnel_ioctl_validate(struct net *net, struct ip_tunnel_parm_kern *p) 1257 1255 { 1258 1256 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 1259 1257 return -EPERM; ··· 1270 1268 } 1271 1269 1272 1270 static int 1273 - ipip6_tunnel_get(struct net_device *dev, struct ip_tunnel_parm *p) 1271 + ipip6_tunnel_get(struct net_device *dev, struct ip_tunnel_parm_kern *p) 1274 1272 { 1275 1273 struct ip_tunnel *t = netdev_priv(dev); 1276 1274 ··· 1283 1281 } 1284 1282 1285 1283 static int 1286 - ipip6_tunnel_add(struct net_device *dev, struct ip_tunnel_parm *p) 1284 + ipip6_tunnel_add(struct net_device *dev, struct ip_tunnel_parm_kern *p) 1287 1285 { 1288 1286 struct ip_tunnel *t = netdev_priv(dev); 1289 1287 int err; ··· 1299 1297 } 1300 1298 1301 1299 static int 1302 - ipip6_tunnel_change(struct net_device *dev, struct ip_tunnel_parm *p) 1300 + ipip6_tunnel_change(struct net_device *dev, struct ip_tunnel_parm_kern *p) 1303 1301 { 1304 1302 struct ip_tunnel *t = netdev_priv(dev); 1305 1303 int err; ··· 1330 1328 } 1331 1329 1332 1330 static int 1333 - ipip6_tunnel_del(struct net_device *dev, struct ip_tunnel_parm *p) 1331 + ipip6_tunnel_del(struct net_device *dev, struct ip_tunnel_parm_kern *p) 1334 1332 { 1335 1333 struct ip_tunnel *t = netdev_priv(dev); 1336 1334 ··· 1350 1348 } 1351 1349 1352 1350 static int 1353 - ipip6_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) 1351 + ipip6_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, 1352 + int cmd) 1354 1353 { 1355 1354 switch (cmd) { 1356 1355 case SIOCGETTUNNEL: ··· 1493 1490 } 1494 1491 1495 1492 static void ipip6_netlink_parms(struct nlattr *data[], 1496 - struct ip_tunnel_parm *parms, 1493 + struct ip_tunnel_parm_kern *parms, 1497 1494 __u32 *fwmark) 1498 1495 { 1499 1496 memset(parms, 0, sizeof(*parms)); ··· 1602 1599 struct netlink_ext_ack *extack) 1603 1600 { 1604 1601 struct ip_tunnel *t = netdev_priv(dev); 1605 - struct ip_tunnel_parm p; 1606 1602 struct ip_tunnel_encap 
ipencap; 1603 + struct ip_tunnel_parm_kern p; 1607 1604 struct net *net = t->net; 1608 1605 struct sit_net *sitn = net_generic(net, sit_net_id); 1609 1606 #ifdef CONFIG_IPV6_SIT_6RD ··· 1690 1687 static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev) 1691 1688 { 1692 1689 struct ip_tunnel *tunnel = netdev_priv(dev); 1693 - struct ip_tunnel_parm *parm = &tunnel->parms; 1690 + struct ip_tunnel_parm_kern *parm = &tunnel->parms; 1694 1691 1695 1692 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || 1696 1693 nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) || ··· 1700 1697 nla_put_u8(skb, IFLA_IPTUN_PMTUDISC, 1701 1698 !!(parm->iph.frag_off & htons(IP_DF))) || 1702 1699 nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) || 1703 - nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags) || 1700 + nla_put_be16(skb, IFLA_IPTUN_FLAGS, 1701 + ip_tunnel_flags_to_be16(parm->i_flags)) || 1704 1702 nla_put_u32(skb, IFLA_IPTUN_FWMARK, tunnel->fwmark)) 1705 1703 goto nla_put_failure; 1706 1704
+5 -1
net/netfilter/ipvs/ip_vs_core.c
··· 1550 1550 if (!dest) 1551 1551 goto unk; 1552 1552 if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) { 1553 + IP_TUNNEL_DECLARE_FLAGS(flags); 1553 1554 __be16 type; 1554 1555 1555 1556 /* Only support version 0 and C (csum) */ ··· 1561 1560 if (type != htons(ETH_P_IP)) 1562 1561 goto unk; 1563 1562 *proto = IPPROTO_IPIP; 1564 - return gre_calc_hlen(gre_flags_to_tnl_flags(greh->flags)); 1563 + 1564 + gre_flags_to_tnl_flags(flags, greh->flags); 1565 + 1566 + return gre_calc_hlen(flags); 1565 1567 } 1566 1568 1567 1569 unk:
+10 -10
net/netfilter/ipvs/ip_vs_xmit.c
··· 390 390 skb->ip_summed == CHECKSUM_PARTIAL) 391 391 mtu -= GUE_PLEN_REMCSUM + GUE_LEN_PRIV; 392 392 } else if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) { 393 - __be16 tflags = 0; 393 + IP_TUNNEL_DECLARE_FLAGS(tflags) = { }; 394 394 395 395 if (dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM) 396 - tflags |= TUNNEL_CSUM; 396 + __set_bit(IP_TUNNEL_CSUM_BIT, tflags); 397 397 mtu -= gre_calc_hlen(tflags); 398 398 } 399 399 if (mtu < 68) { ··· 553 553 skb->ip_summed == CHECKSUM_PARTIAL) 554 554 mtu -= GUE_PLEN_REMCSUM + GUE_LEN_PRIV; 555 555 } else if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) { 556 - __be16 tflags = 0; 556 + IP_TUNNEL_DECLARE_FLAGS(tflags) = { }; 557 557 558 558 if (dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM) 559 - tflags |= TUNNEL_CSUM; 559 + __set_bit(IP_TUNNEL_CSUM_BIT, tflags); 560 560 mtu -= gre_calc_hlen(tflags); 561 561 } 562 562 if (mtu < IPV6_MIN_MTU) { ··· 1082 1082 { 1083 1083 __be16 proto = *next_protocol == IPPROTO_IPIP ? 1084 1084 htons(ETH_P_IP) : htons(ETH_P_IPV6); 1085 - __be16 tflags = 0; 1085 + IP_TUNNEL_DECLARE_FLAGS(tflags) = { }; 1086 1086 size_t hdrlen; 1087 1087 1088 1088 if (cp->dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM) 1089 - tflags |= TUNNEL_CSUM; 1089 + __set_bit(IP_TUNNEL_CSUM_BIT, tflags); 1090 1090 1091 1091 hdrlen = gre_calc_hlen(tflags); 1092 1092 gre_build_header(skb, hdrlen, tflags, proto, 0, 0); ··· 1165 1165 1166 1166 max_headroom += sizeof(struct udphdr) + gue_hdrlen; 1167 1167 } else if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) { 1168 + IP_TUNNEL_DECLARE_FLAGS(tflags) = { }; 1168 1169 size_t gre_hdrlen; 1169 - __be16 tflags = 0; 1170 1170 1171 1171 if (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM) 1172 - tflags |= TUNNEL_CSUM; 1172 + __set_bit(IP_TUNNEL_CSUM_BIT, tflags); 1173 1173 gre_hdrlen = gre_calc_hlen(tflags); 1174 1174 1175 1175 max_headroom += gre_hdrlen; ··· 1310 1310 1311 1311 max_headroom += sizeof(struct udphdr) + gue_hdrlen; 1312 1312 } else if (tun_type == 
IP_VS_CONN_F_TUNNEL_TYPE_GRE) { 1313 + IP_TUNNEL_DECLARE_FLAGS(tflags) = { }; 1313 1314 size_t gre_hdrlen; 1314 - __be16 tflags = 0; 1315 1315 1316 1316 if (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM) 1317 - tflags |= TUNNEL_CSUM; 1317 + __set_bit(IP_TUNNEL_CSUM_BIT, tflags); 1318 1318 gre_hdrlen = gre_calc_hlen(tflags); 1319 1319 1320 1320 max_headroom += gre_hdrlen;
+25 -19
net/netfilter/nft_tunnel.c
··· 174 174 struct erspan_metadata erspan; 175 175 u8 data[IP_TUNNEL_OPTS_MAX]; 176 176 } u; 177 + IP_TUNNEL_DECLARE_FLAGS(flags); 177 178 u32 len; 178 - __be16 flags; 179 179 }; 180 180 181 181 struct nft_tunnel_obj { ··· 271 271 opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP])); 272 272 273 273 opts->len = sizeof(struct vxlan_metadata); 274 - opts->flags = TUNNEL_VXLAN_OPT; 274 + ip_tunnel_flags_zero(opts->flags); 275 + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, opts->flags); 275 276 276 277 return 0; 277 278 } ··· 326 325 opts->u.erspan.version = version; 327 326 328 327 opts->len = sizeof(struct erspan_metadata); 329 - opts->flags = TUNNEL_ERSPAN_OPT; 328 + ip_tunnel_flags_zero(opts->flags); 329 + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, opts->flags); 330 330 331 331 return 0; 332 332 } ··· 368 366 opt->length = data_len / 4; 369 367 opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]); 370 368 opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]); 371 - opts->flags = TUNNEL_GENEVE_OPT; 369 + ip_tunnel_flags_zero(opts->flags); 370 + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, opts->flags); 372 371 373 372 return 0; 374 373 } ··· 388 385 struct nft_tunnel_opts *opts) 389 386 { 390 387 struct nlattr *nla; 391 - __be16 type = 0; 392 388 int err, rem; 389 + u32 type = 0; 393 390 394 391 err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX, 395 392 nft_tunnel_opts_policy, NULL); ··· 404 401 err = nft_tunnel_obj_vxlan_init(nla, opts); 405 402 if (err) 406 403 return err; 407 - type = TUNNEL_VXLAN_OPT; 404 + type = IP_TUNNEL_VXLAN_OPT_BIT; 408 405 break; 409 406 case NFTA_TUNNEL_KEY_OPTS_ERSPAN: 410 407 if (type) ··· 412 409 err = nft_tunnel_obj_erspan_init(nla, opts); 413 410 if (err) 414 411 return err; 415 - type = TUNNEL_ERSPAN_OPT; 412 + type = IP_TUNNEL_ERSPAN_OPT_BIT; 416 413 break; 417 414 case NFTA_TUNNEL_KEY_OPTS_GENEVE: 418 - if (type && type != TUNNEL_GENEVE_OPT) 415 + if (type && type != IP_TUNNEL_GENEVE_OPT_BIT) 419 416 return 
-EINVAL; 420 417 err = nft_tunnel_obj_geneve_init(nla, opts); 421 418 if (err) 422 419 return err; 423 - type = TUNNEL_GENEVE_OPT; 420 + type = IP_TUNNEL_GENEVE_OPT_BIT; 424 421 break; 425 422 default: 426 423 return -EOPNOTSUPP; ··· 457 454 memset(&info, 0, sizeof(info)); 458 455 info.mode = IP_TUNNEL_INFO_TX; 459 456 info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID])); 460 - info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; 457 + __set_bit(IP_TUNNEL_KEY_BIT, info.key.tun_flags); 458 + __set_bit(IP_TUNNEL_CSUM_BIT, info.key.tun_flags); 459 + __set_bit(IP_TUNNEL_NOCACHE_BIT, info.key.tun_flags); 461 460 462 461 if (tb[NFTA_TUNNEL_KEY_IP]) { 463 462 err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info); ··· 488 483 return -EOPNOTSUPP; 489 484 490 485 if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX) 491 - info.key.tun_flags &= ~TUNNEL_CSUM; 486 + __clear_bit(IP_TUNNEL_CSUM_BIT, info.key.tun_flags); 492 487 if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT) 493 - info.key.tun_flags |= TUNNEL_DONT_FRAGMENT; 488 + __set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, 489 + info.key.tun_flags); 494 490 if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER) 495 - info.key.tun_flags |= TUNNEL_SEQ; 491 + __set_bit(IP_TUNNEL_SEQ_BIT, info.key.tun_flags); 496 492 } 497 493 if (tb[NFTA_TUNNEL_KEY_TOS]) 498 494 info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]); ··· 589 583 if (!nest) 590 584 return -1; 591 585 592 - if (opts->flags & TUNNEL_VXLAN_OPT) { 586 + if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, opts->flags)) { 593 587 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN); 594 588 if (!inner) 595 589 goto failure; ··· 597 591 htonl(opts->u.vxlan.gbp))) 598 592 goto inner_failure; 599 593 nla_nest_end(skb, inner); 600 - } else if (opts->flags & TUNNEL_ERSPAN_OPT) { 594 + } else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, opts->flags)) { 601 595 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN); 602 596 if (!inner) 603 597 goto failure; ··· 619 613 
break; 620 614 } 621 615 nla_nest_end(skb, inner); 622 - } else if (opts->flags & TUNNEL_GENEVE_OPT) { 616 + } else if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, opts->flags)) { 623 617 struct geneve_opt *opt; 624 618 int offset = 0; 625 619 ··· 664 658 { 665 659 u32 flags = 0; 666 660 667 - if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) 661 + if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, info->key.tun_flags)) 668 662 flags |= NFT_TUNNEL_F_DONT_FRAGMENT; 669 - if (!(info->key.tun_flags & TUNNEL_CSUM)) 663 + if (!test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags)) 670 664 flags |= NFT_TUNNEL_F_ZERO_CSUM_TX; 671 - if (info->key.tun_flags & TUNNEL_SEQ) 665 + if (test_bit(IP_TUNNEL_SEQ_BIT, info->key.tun_flags)) 672 666 flags |= NFT_TUNNEL_F_SEQ_NUMBER; 673 667 674 668 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
+37 -24
net/openvswitch/flow_netlink.c
··· 152 152 sizeof((match)->key->field)); \ 153 153 } while (0) 154 154 155 + #define SW_FLOW_KEY_BITMAP_COPY(match, field, value_p, nbits, is_mask) ({ \ 156 + update_range(match, offsetof(struct sw_flow_key, field), \ 157 + bitmap_size(nbits), is_mask); \ 158 + bitmap_copy(is_mask ? (match)->mask->key.field : (match)->key->field, \ 159 + value_p, nbits); \ 160 + }) 161 + 155 162 static bool match_validate(const struct sw_flow_match *match, 156 163 u64 key_attrs, u64 mask_attrs, bool log) 157 164 { ··· 677 670 bool log) 678 671 { 679 672 bool ttl = false, ipv4 = false, ipv6 = false; 673 + IP_TUNNEL_DECLARE_FLAGS(tun_flags) = { }; 680 674 bool info_bridge_mode = false; 681 - __be16 tun_flags = 0; 682 675 int opts_type = 0; 683 676 struct nlattr *a; 684 677 int rem; ··· 704 697 case OVS_TUNNEL_KEY_ATTR_ID: 705 698 SW_FLOW_KEY_PUT(match, tun_key.tun_id, 706 699 nla_get_be64(a), is_mask); 707 - tun_flags |= TUNNEL_KEY; 700 + __set_bit(IP_TUNNEL_KEY_BIT, tun_flags); 708 701 break; 709 702 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: 710 703 SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src, ··· 736 729 ttl = true; 737 730 break; 738 731 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT: 739 - tun_flags |= TUNNEL_DONT_FRAGMENT; 732 + __set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, tun_flags); 740 733 break; 741 734 case OVS_TUNNEL_KEY_ATTR_CSUM: 742 - tun_flags |= TUNNEL_CSUM; 735 + __set_bit(IP_TUNNEL_CSUM_BIT, tun_flags); 743 736 break; 744 737 case OVS_TUNNEL_KEY_ATTR_TP_SRC: 745 738 SW_FLOW_KEY_PUT(match, tun_key.tp_src, ··· 750 743 nla_get_be16(a), is_mask); 751 744 break; 752 745 case OVS_TUNNEL_KEY_ATTR_OAM: 753 - tun_flags |= TUNNEL_OAM; 746 + __set_bit(IP_TUNNEL_OAM_BIT, tun_flags); 754 747 break; 755 748 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS: 756 749 if (opts_type) { ··· 762 755 if (err) 763 756 return err; 764 757 765 - tun_flags |= TUNNEL_GENEVE_OPT; 758 + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_flags); 766 759 opts_type = type; 767 760 break; 768 761 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: ··· 775 
768 if (err) 776 769 return err; 777 770 778 - tun_flags |= TUNNEL_VXLAN_OPT; 771 + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_flags); 779 772 opts_type = type; 780 773 break; 781 774 case OVS_TUNNEL_KEY_ATTR_PAD: ··· 791 784 if (err) 792 785 return err; 793 786 794 - tun_flags |= TUNNEL_ERSPAN_OPT; 787 + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_flags); 795 788 opts_type = type; 796 789 break; 797 790 case OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE: ··· 805 798 } 806 799 } 807 800 808 - SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask); 801 + SW_FLOW_KEY_BITMAP_COPY(match, tun_key.tun_flags, tun_flags, 802 + __IP_TUNNEL_FLAG_NUM, is_mask); 809 803 if (is_mask) 810 804 SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true); 811 805 else ··· 831 823 } 832 824 if (ipv4) { 833 825 if (info_bridge_mode) { 826 + __clear_bit(IP_TUNNEL_KEY_BIT, tun_flags); 827 + 834 828 if (match->key->tun_key.u.ipv4.src || 835 829 match->key->tun_key.u.ipv4.dst || 836 830 match->key->tun_key.tp_src || 837 831 match->key->tun_key.tp_dst || 838 832 match->key->tun_key.ttl || 839 833 match->key->tun_key.tos || 840 - tun_flags & ~TUNNEL_KEY) { 834 + !ip_tunnel_flags_empty(tun_flags)) { 841 835 OVS_NLERR(log, "IPv4 tun info is not correct"); 842 836 return -EINVAL; 843 837 } ··· 884 874 const void *tun_opts, int swkey_tun_opts_len, 885 875 unsigned short tun_proto, u8 mode) 886 876 { 887 - if (output->tun_flags & TUNNEL_KEY && 877 + if (test_bit(IP_TUNNEL_KEY_BIT, output->tun_flags) && 888 878 nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id, 889 879 OVS_TUNNEL_KEY_ATTR_PAD)) 890 880 return -EMSGSIZE; ··· 920 910 return -EMSGSIZE; 921 911 if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl)) 922 912 return -EMSGSIZE; 923 - if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) && 913 + if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, output->tun_flags) && 924 914 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT)) 925 915 return -EMSGSIZE; 926 - if ((output->tun_flags & TUNNEL_CSUM) && 916 + 
if (test_bit(IP_TUNNEL_CSUM_BIT, output->tun_flags) && 927 917 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)) 928 918 return -EMSGSIZE; 929 919 if (output->tp_src && ··· 932 922 if (output->tp_dst && 933 923 nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst)) 934 924 return -EMSGSIZE; 935 - if ((output->tun_flags & TUNNEL_OAM) && 925 + if (test_bit(IP_TUNNEL_OAM_BIT, output->tun_flags) && 936 926 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM)) 937 927 return -EMSGSIZE; 938 928 if (swkey_tun_opts_len) { 939 - if (output->tun_flags & TUNNEL_GENEVE_OPT && 929 + if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, output->tun_flags) && 940 930 nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, 941 931 swkey_tun_opts_len, tun_opts)) 942 932 return -EMSGSIZE; 943 - else if (output->tun_flags & TUNNEL_VXLAN_OPT && 933 + else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, 934 + output->tun_flags) && 944 935 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len)) 945 936 return -EMSGSIZE; 946 - else if (output->tun_flags & TUNNEL_ERSPAN_OPT && 937 + else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, 938 + output->tun_flags) && 947 939 nla_put(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, 948 940 swkey_tun_opts_len, tun_opts)) 949 941 return -EMSGSIZE; ··· 2041 2029 if ((swkey->tun_proto || is_mask)) { 2042 2030 const void *opts = NULL; 2043 2031 2044 - if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT) 2032 + if (ip_tunnel_is_options_present(output->tun_key.tun_flags)) 2045 2033 opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len); 2046 2034 2047 2035 if (ip_tun_to_nlattr(skb, &output->tun_key, opts, ··· 2764 2752 opts_len -= len; 2765 2753 } 2766 2754 2767 - key->tun_key.tun_flags |= crit_opt ? 
TUNNEL_CRIT_OPT : 0; 2755 + if (crit_opt) 2756 + __set_bit(IP_TUNNEL_CRIT_OPT_BIT, key->tun_key.tun_flags); 2768 2757 2769 2758 return 0; 2770 2759 } ··· 2773 2760 static int validate_and_copy_set_tun(const struct nlattr *attr, 2774 2761 struct sw_flow_actions **sfa, bool log) 2775 2762 { 2763 + IP_TUNNEL_DECLARE_FLAGS(dst_opt_type) = { }; 2776 2764 struct sw_flow_match match; 2777 2765 struct sw_flow_key key; 2778 2766 struct metadata_dst *tun_dst; ··· 2781 2767 struct ovs_tunnel_info *ovs_tun; 2782 2768 struct nlattr *a; 2783 2769 int err = 0, start, opts_type; 2784 - __be16 dst_opt_type; 2785 2770 2786 - dst_opt_type = 0; 2787 2771 ovs_match_init(&match, &key, true, NULL); 2788 2772 opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log); 2789 2773 if (opts_type < 0) ··· 2793 2781 err = validate_geneve_opts(&key); 2794 2782 if (err < 0) 2795 2783 return err; 2796 - dst_opt_type = TUNNEL_GENEVE_OPT; 2784 + 2785 + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, dst_opt_type); 2797 2786 break; 2798 2787 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: 2799 - dst_opt_type = TUNNEL_VXLAN_OPT; 2788 + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, dst_opt_type); 2800 2789 break; 2801 2790 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS: 2802 - dst_opt_type = TUNNEL_ERSPAN_OPT; 2791 + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, dst_opt_type); 2803 2792 break; 2804 2793 } 2805 2794 }
+14 -12
net/psample/psample.c
··· 221 221 const struct ip_tunnel_key *tun_key = &tun_info->key; 222 222 int tun_opts_len = tun_info->options_len; 223 223 224 - if (tun_key->tun_flags & TUNNEL_KEY && 224 + if (test_bit(IP_TUNNEL_KEY_BIT, tun_key->tun_flags) && 225 225 nla_put_be64(skb, PSAMPLE_TUNNEL_KEY_ATTR_ID, tun_key->tun_id, 226 226 PSAMPLE_TUNNEL_KEY_ATTR_PAD)) 227 227 return -EMSGSIZE; ··· 257 257 return -EMSGSIZE; 258 258 if (nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TTL, tun_key->ttl)) 259 259 return -EMSGSIZE; 260 - if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) && 260 + if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, tun_key->tun_flags) && 261 261 nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_DONT_FRAGMENT)) 262 262 return -EMSGSIZE; 263 - if ((tun_key->tun_flags & TUNNEL_CSUM) && 263 + if (test_bit(IP_TUNNEL_CSUM_BIT, tun_key->tun_flags) && 264 264 nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_CSUM)) 265 265 return -EMSGSIZE; 266 266 if (tun_key->tp_src && ··· 269 269 if (tun_key->tp_dst && 270 270 nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst)) 271 271 return -EMSGSIZE; 272 - if ((tun_key->tun_flags & TUNNEL_OAM) && 272 + if (test_bit(IP_TUNNEL_OAM_BIT, tun_key->tun_flags) && 273 273 nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_OAM)) 274 274 return -EMSGSIZE; 275 275 if (tun_opts_len) { 276 - if (tun_key->tun_flags & TUNNEL_GENEVE_OPT && 276 + if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_key->tun_flags) && 277 277 nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_GENEVE_OPTS, 278 278 tun_opts_len, tun_opts)) 279 279 return -EMSGSIZE; 280 - else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT && 280 + else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, 281 + tun_key->tun_flags) && 281 282 nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_ERSPAN_OPTS, 282 283 tun_opts_len, tun_opts)) 283 284 return -EMSGSIZE; ··· 315 314 int tun_opts_len = tun_info->options_len; 316 315 int sum = nla_total_size(0); /* PSAMPLE_ATTR_TUNNEL */ 317 316 318 - if (tun_key->tun_flags & TUNNEL_KEY) 317 + if (test_bit(IP_TUNNEL_KEY_BIT, 
tun_key->tun_flags)) 319 318 sum += nla_total_size_64bit(sizeof(u64)); 320 319 321 320 if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE) ··· 338 337 if (tun_key->tos) 339 338 sum += nla_total_size(sizeof(u8)); 340 339 sum += nla_total_size(sizeof(u8)); /* TTL */ 341 - if (tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) 340 + if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, tun_key->tun_flags)) 342 341 sum += nla_total_size(0); 343 - if (tun_key->tun_flags & TUNNEL_CSUM) 342 + if (test_bit(IP_TUNNEL_CSUM_BIT, tun_key->tun_flags)) 344 343 sum += nla_total_size(0); 345 344 if (tun_key->tp_src) 346 345 sum += nla_total_size(sizeof(u16)); 347 346 if (tun_key->tp_dst) 348 347 sum += nla_total_size(sizeof(u16)); 349 - if (tun_key->tun_flags & TUNNEL_OAM) 348 + if (test_bit(IP_TUNNEL_OAM_BIT, tun_key->tun_flags)) 350 349 sum += nla_total_size(0); 351 350 if (tun_opts_len) { 352 - if (tun_key->tun_flags & TUNNEL_GENEVE_OPT) 351 + if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_key->tun_flags)) 353 352 sum += nla_total_size(tun_opts_len); 354 - else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT) 353 + else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, 354 + tun_key->tun_flags)) 355 355 sum += nla_total_size(tun_opts_len); 356 356 } 357 357
+18 -18
net/sched/act_tunnel_key.c
··· 230 230 nla_for_each_attr(attr, head, len, rem) { 231 231 switch (nla_type(attr)) { 232 232 case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE: 233 - if (type && type != TUNNEL_GENEVE_OPT) { 233 + if (type && type != IP_TUNNEL_GENEVE_OPT_BIT) { 234 234 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options"); 235 235 return -EINVAL; 236 236 } ··· 247 247 dst_len -= opt_len; 248 248 dst += opt_len; 249 249 } 250 - type = TUNNEL_GENEVE_OPT; 250 + type = IP_TUNNEL_GENEVE_OPT_BIT; 251 251 break; 252 252 case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN: 253 253 if (type) { ··· 259 259 if (opt_len < 0) 260 260 return opt_len; 261 261 opts_len += opt_len; 262 - type = TUNNEL_VXLAN_OPT; 262 + type = IP_TUNNEL_VXLAN_OPT_BIT; 263 263 break; 264 264 case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN: 265 265 if (type) { ··· 271 271 if (opt_len < 0) 272 272 return opt_len; 273 273 opts_len += opt_len; 274 - type = TUNNEL_ERSPAN_OPT; 274 + type = IP_TUNNEL_ERSPAN_OPT_BIT; 275 275 break; 276 276 } 277 277 } ··· 302 302 switch (nla_type(nla_data(nla))) { 303 303 case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE: 304 304 #if IS_ENABLED(CONFIG_INET) 305 - info->key.tun_flags |= TUNNEL_GENEVE_OPT; 305 + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags); 306 306 return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info), 307 307 opts_len, extack); 308 308 #else ··· 310 310 #endif 311 311 case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN: 312 312 #if IS_ENABLED(CONFIG_INET) 313 - info->key.tun_flags |= TUNNEL_VXLAN_OPT; 313 + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags); 314 314 return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info), 315 315 opts_len, extack); 316 316 #else ··· 318 318 #endif 319 319 case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN: 320 320 #if IS_ENABLED(CONFIG_INET) 321 - info->key.tun_flags |= TUNNEL_ERSPAN_OPT; 321 + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags); 322 322 return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info), 323 323 opts_len, extack); 324 324 #else ··· 363 363 bool bind = act_flags & 
TCA_ACT_FLAGS_BIND; 364 364 struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1]; 365 365 struct tcf_tunnel_key_params *params_new; 366 + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 366 367 struct metadata_dst *metadata = NULL; 367 368 struct tcf_chain *goto_ch = NULL; 368 369 struct tc_tunnel_key *parm; ··· 372 371 __be16 dst_port = 0; 373 372 __be64 key_id = 0; 374 373 int opts_len = 0; 375 - __be16 flags = 0; 376 374 u8 tos, ttl; 377 375 int ret = 0; 378 376 u32 index; ··· 412 412 413 413 key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]); 414 414 key_id = key32_to_tunnel_id(key32); 415 - flags = TUNNEL_KEY; 415 + __set_bit(IP_TUNNEL_KEY_BIT, flags); 416 416 } 417 417 418 - flags |= TUNNEL_CSUM; 418 + __set_bit(IP_TUNNEL_CSUM_BIT, flags); 419 419 if (tb[TCA_TUNNEL_KEY_NO_CSUM] && 420 420 nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM])) 421 - flags &= ~TUNNEL_CSUM; 421 + __clear_bit(IP_TUNNEL_CSUM_BIT, flags); 422 422 423 423 if (nla_get_flag(tb[TCA_TUNNEL_KEY_NO_FRAG])) 424 - flags |= TUNNEL_DONT_FRAGMENT; 424 + __set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, flags); 425 425 426 426 if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT]) 427 427 dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]); ··· 663 663 if (!start) 664 664 return -EMSGSIZE; 665 665 666 - if (info->key.tun_flags & TUNNEL_GENEVE_OPT) { 666 + if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags)) { 667 667 err = tunnel_key_geneve_opts_dump(skb, info); 668 668 if (err) 669 669 goto err_out; 670 - } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) { 670 + } else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) { 671 671 err = tunnel_key_vxlan_opts_dump(skb, info); 672 672 if (err) 673 673 goto err_out; 674 - } else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) { 674 + } else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags)) { 675 675 err = tunnel_key_erspan_opts_dump(skb, info); 676 676 if (err) 677 677 goto err_out; ··· 741 741 struct ip_tunnel_key *key = &info->key; 742 742 __be32 key_id = 
tunnel_id_to_key32(key->tun_id); 743 743 744 - if (((key->tun_flags & TUNNEL_KEY) && 744 + if ((test_bit(IP_TUNNEL_KEY_BIT, key->tun_flags) && 745 745 nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) || 746 746 tunnel_key_dump_addresses(skb, 747 747 &params->tcft_enc_metadata->u.tun_info) || ··· 749 749 nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT, 750 750 key->tp_dst)) || 751 751 nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM, 752 - !(key->tun_flags & TUNNEL_CSUM)) || 753 - ((key->tun_flags & TUNNEL_DONT_FRAGMENT) && 752 + !test_bit(IP_TUNNEL_CSUM_BIT, key->tun_flags)) || 753 + (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) && 754 754 nla_put_flag(skb, TCA_TUNNEL_KEY_NO_FRAG)) || 755 755 tunnel_key_opts_dump(skb, info)) 756 756 goto nla_put_failure;
+121 -13
net/sched/cls_flower.c
··· 28 28 #include <net/vxlan.h> 29 29 #include <net/erspan.h> 30 30 #include <net/gtp.h> 31 + #include <net/pfcp.h> 31 32 #include <net/tc_wrapper.h> 32 33 33 34 #include <net/dst.h> ··· 742 741 [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED }, 743 742 [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED }, 744 743 [TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED }, 744 + [TCA_FLOWER_KEY_ENC_OPTS_PFCP] = { .type = NLA_NESTED }, 745 745 }; 746 746 747 747 static const struct nla_policy ··· 770 768 gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = { 771 769 [TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] = { .type = NLA_U8 }, 772 770 [TCA_FLOWER_KEY_ENC_OPT_GTP_QFI] = { .type = NLA_U8 }, 771 + }; 772 + 773 + static const struct nla_policy 774 + pfcp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1] = { 775 + [TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE] = { .type = NLA_U8 }, 776 + [TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID] = { .type = NLA_U64 }, 773 777 }; 774 778 775 779 static const struct nla_policy ··· 1427 1419 return sizeof(*sinfo); 1428 1420 } 1429 1421 1422 + static int fl_set_pfcp_opt(const struct nlattr *nla, struct fl_flow_key *key, 1423 + int depth, int option_len, 1424 + struct netlink_ext_ack *extack) 1425 + { 1426 + struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1]; 1427 + struct pfcp_metadata *md; 1428 + int err; 1429 + 1430 + md = (struct pfcp_metadata *)&key->enc_opts.data[key->enc_opts.len]; 1431 + memset(md, 0xff, sizeof(*md)); 1432 + 1433 + if (!depth) 1434 + return sizeof(*md); 1435 + 1436 + if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_PFCP) { 1437 + NL_SET_ERR_MSG_MOD(extack, "Non-pfcp option type for mask"); 1438 + return -EINVAL; 1439 + } 1440 + 1441 + err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX, nla, 1442 + pfcp_opt_policy, extack); 1443 + if (err < 0) 1444 + return err; 1445 + 1446 + if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]) { 1447 + NL_SET_ERR_MSG_MOD(extack, "Missing tunnel key pfcp option type"); 1448 + return 
-EINVAL; 1449 + } 1450 + 1451 + if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]) 1452 + md->type = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]); 1453 + 1454 + if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]) 1455 + md->seid = nla_get_be64(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]); 1456 + 1457 + return sizeof(*md); 1458 + } 1459 + 1430 1460 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, 1431 1461 struct fl_flow_key *mask, 1432 1462 struct netlink_ext_ack *extack) ··· 1500 1454 switch (nla_type(nla_opt_key)) { 1501 1455 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE: 1502 1456 if (key->enc_opts.dst_opt_type && 1503 - key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) { 1457 + key->enc_opts.dst_opt_type != 1458 + IP_TUNNEL_GENEVE_OPT_BIT) { 1504 1459 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options"); 1505 1460 return -EINVAL; 1506 1461 } 1507 1462 option_len = 0; 1508 - key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT; 1463 + key->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT; 1509 1464 option_len = fl_set_geneve_opt(nla_opt_key, key, 1510 1465 key_depth, option_len, 1511 1466 extack); ··· 1517 1470 /* At the same time we need to parse through the mask 1518 1471 * in order to verify exact and mask attribute lengths. 1519 1472 */ 1520 - mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT; 1473 + mask->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT; 1521 1474 option_len = fl_set_geneve_opt(nla_opt_msk, mask, 1522 1475 msk_depth, option_len, 1523 1476 extack); ··· 1536 1489 return -EINVAL; 1537 1490 } 1538 1491 option_len = 0; 1539 - key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT; 1492 + key->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT; 1540 1493 option_len = fl_set_vxlan_opt(nla_opt_key, key, 1541 1494 key_depth, option_len, 1542 1495 extack); ··· 1547 1500 /* At the same time we need to parse through the mask 1548 1501 * in order to verify exact and mask attribute lengths. 
1549 1502 */ 1550 - mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT; 1503 + mask->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT; 1551 1504 option_len = fl_set_vxlan_opt(nla_opt_msk, mask, 1552 1505 msk_depth, option_len, 1553 1506 extack); ··· 1566 1519 return -EINVAL; 1567 1520 } 1568 1521 option_len = 0; 1569 - key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT; 1522 + key->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT; 1570 1523 option_len = fl_set_erspan_opt(nla_opt_key, key, 1571 1524 key_depth, option_len, 1572 1525 extack); ··· 1577 1530 /* At the same time we need to parse through the mask 1578 1531 * in order to verify exact and mask attribute lengths. 1579 1532 */ 1580 - mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT; 1533 + mask->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT; 1581 1534 option_len = fl_set_erspan_opt(nla_opt_msk, mask, 1582 1535 msk_depth, option_len, 1583 1536 extack); ··· 1597 1550 return -EINVAL; 1598 1551 } 1599 1552 option_len = 0; 1600 - key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT; 1553 + key->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT; 1601 1554 option_len = fl_set_gtp_opt(nla_opt_key, key, 1602 1555 key_depth, option_len, 1603 1556 extack); ··· 1608 1561 /* At the same time we need to parse through the mask 1609 1562 * in order to verify exact and mask attribute lengths. 
1610 1563 */ 1611 - mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT; 1564 + mask->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT; 1612 1565 option_len = fl_set_gtp_opt(nla_opt_msk, mask, 1613 1566 msk_depth, option_len, 1614 1567 extack); ··· 1619 1572 if (key->enc_opts.len != mask->enc_opts.len) { 1620 1573 NL_SET_ERR_MSG_MOD(extack, 1621 1574 "Key and mask miss aligned"); 1575 + return -EINVAL; 1576 + } 1577 + break; 1578 + case TCA_FLOWER_KEY_ENC_OPTS_PFCP: 1579 + if (key->enc_opts.dst_opt_type) { 1580 + NL_SET_ERR_MSG_MOD(extack, "Duplicate type for pfcp options"); 1581 + return -EINVAL; 1582 + } 1583 + option_len = 0; 1584 + key->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT; 1585 + option_len = fl_set_pfcp_opt(nla_opt_key, key, 1586 + key_depth, option_len, 1587 + extack); 1588 + if (option_len < 0) 1589 + return option_len; 1590 + 1591 + key->enc_opts.len += option_len; 1592 + /* At the same time we need to parse through the mask 1593 + * in order to verify exact and mask attribute lengths. 
1594 + */ 1595 + mask->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT; 1596 + option_len = fl_set_pfcp_opt(nla_opt_msk, mask, 1597 + msk_depth, option_len, 1598 + extack); 1599 + if (option_len < 0) 1600 + return option_len; 1601 + 1602 + mask->enc_opts.len += option_len; 1603 + if (key->enc_opts.len != mask->enc_opts.len) { 1604 + NL_SET_ERR_MSG_MOD(extack, "Key and mask miss aligned"); 1622 1605 return -EINVAL; 1623 1606 } 1624 1607 break; ··· 3194 3117 return -EMSGSIZE; 3195 3118 } 3196 3119 3120 + static int fl_dump_key_pfcp_opt(struct sk_buff *skb, 3121 + struct flow_dissector_key_enc_opts *enc_opts) 3122 + { 3123 + struct pfcp_metadata *md; 3124 + struct nlattr *nest; 3125 + 3126 + nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_PFCP); 3127 + if (!nest) 3128 + goto nla_put_failure; 3129 + 3130 + md = (struct pfcp_metadata *)&enc_opts->data[0]; 3131 + if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE, md->type)) 3132 + goto nla_put_failure; 3133 + 3134 + if (nla_put_be64(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID, 3135 + md->seid, 0)) 3136 + goto nla_put_failure; 3137 + 3138 + nla_nest_end(skb, nest); 3139 + return 0; 3140 + 3141 + nla_put_failure: 3142 + nla_nest_cancel(skb, nest); 3143 + return -EMSGSIZE; 3144 + } 3145 + 3197 3146 static int fl_dump_key_ct(struct sk_buff *skb, 3198 3147 struct flow_dissector_key_ct *key, 3199 3148 struct flow_dissector_key_ct *mask) ··· 3305 3202 goto nla_put_failure; 3306 3203 3307 3204 switch (enc_opts->dst_opt_type) { 3308 - case TUNNEL_GENEVE_OPT: 3205 + case IP_TUNNEL_GENEVE_OPT_BIT: 3309 3206 err = fl_dump_key_geneve_opt(skb, enc_opts); 3310 3207 if (err) 3311 3208 goto nla_put_failure; 3312 3209 break; 3313 - case TUNNEL_VXLAN_OPT: 3210 + case IP_TUNNEL_VXLAN_OPT_BIT: 3314 3211 err = fl_dump_key_vxlan_opt(skb, enc_opts); 3315 3212 if (err) 3316 3213 goto nla_put_failure; 3317 3214 break; 3318 - case TUNNEL_ERSPAN_OPT: 3215 + case IP_TUNNEL_ERSPAN_OPT_BIT: 3319 3216 err = fl_dump_key_erspan_opt(skb, enc_opts); 
3320 3217 if (err) 3321 3218 goto nla_put_failure; 3322 3219 break; 3323 - case TUNNEL_GTP_OPT: 3220 + case IP_TUNNEL_GTP_OPT_BIT: 3324 3221 err = fl_dump_key_gtp_opt(skb, enc_opts); 3222 + if (err) 3223 + goto nla_put_failure; 3224 + break; 3225 + case IP_TUNNEL_PFCP_OPT_BIT: 3226 + err = fl_dump_key_pfcp_opt(skb, enc_opts); 3325 3227 if (err) 3326 3228 goto nla_put_failure; 3327 3229 break;
+12
tools/include/linux/align.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + 3 + #ifndef _TOOLS_LINUX_ALIGN_H 4 + #define _TOOLS_LINUX_ALIGN_H 5 + 6 + #include <uapi/linux/const.h> 7 + 8 + #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) 9 + #define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) 10 + #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) 11 + 12 + #endif /* _TOOLS_LINUX_ALIGN_H */
+5 -4
tools/include/linux/bitmap.h
··· 3 3 #define _TOOLS_LINUX_BITMAP_H 4 4 5 5 #include <string.h> 6 + #include <linux/align.h> 6 7 #include <linux/bitops.h> 7 8 #include <linux/find.h> 8 9 #include <stdlib.h> ··· 26 25 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) 27 26 #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) 28 27 28 + #define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE) 29 + 29 30 static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) 30 31 { 31 32 if (small_const_nbits(nbits)) 32 33 *dst = 0UL; 33 34 else { 34 - int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); 35 - memset(dst, 0, len); 35 + memset(dst, 0, bitmap_size(nbits)); 36 36 } 37 37 } 38 38 ··· 85 83 */ 86 84 static inline unsigned long *bitmap_zalloc(int nbits) 87 85 { 88 - return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long)); 86 + return calloc(1, bitmap_size(nbits)); 89 87 } 90 88 91 89 /* ··· 128 126 #define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long)) 129 127 #endif 130 128 #define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1) 131 - #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) 132 129 133 130 static inline bool bitmap_equal(const unsigned long *src1, 134 131 const unsigned long *src2, unsigned int nbits)
+2
tools/include/linux/bitops.h
··· 20 20 #define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u32)) 21 21 #define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(char)) 22 22 23 + #define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE) 24 + 23 25 extern unsigned int __sw_hweight8(unsigned int w); 24 26 extern unsigned int __sw_hweight16(unsigned int w); 25 27 extern unsigned int __sw_hweight32(unsigned int w);
+1 -4
tools/include/linux/mm.h
··· 2 2 #ifndef _TOOLS_LINUX_MM_H 3 3 #define _TOOLS_LINUX_MM_H 4 4 5 + #include <linux/align.h> 5 6 #include <linux/mmzone.h> 6 - #include <uapi/linux/const.h> 7 7 8 8 #define PAGE_SHIFT 12 9 9 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) 10 10 #define PAGE_MASK (~(PAGE_SIZE - 1)) 11 11 12 12 #define PHYS_ADDR_MAX (~(phys_addr_t)0) 13 - 14 - #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) 15 - #define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) 16 13 17 14 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) 18 15
+1 -3
tools/perf/util/probe-finder.c
··· 186 186 return ret2; 187 187 } 188 188 189 - #define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long)) 190 - 191 189 static int convert_variable_type(Dwarf_Die *vr_die, 192 190 struct probe_trace_arg *tvar, 193 191 const char *cast, bool user_access) ··· 215 217 total = dwarf_bytesize(vr_die); 216 218 if (boffs < 0 || total < 0) 217 219 return -ENOENT; 218 - ret = snprintf(buf, 16, "b%d@%d/%zd", bsize, boffs, 220 + ret = snprintf(buf, 16, "b%d@%d/%d", bsize, boffs, 219 221 BYTES_TO_BITS(total)); 220 222 goto formatted; 221 223 }