Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux
Snapshot at tag v4.9-rc6 — net/netfilter/nft_payload.c, 339 lines, 9.1 kB (view raw)
/*
 * nf_tables "payload" expression: load raw packet bytes into a register
 * (match side) or store register bytes back into the packet, optionally
 * fixing up an Internet checksum (mangle side).
 *
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>

/* add vlan header into the user buffer for if tag was removed by offloads
 *
 * When hardware acceleration has stripped the 802.1Q tag into skb metadata
 * (skb_vlan_tag_present()), the bytes in the skb no longer match the
 * on-wire frame: the 4-byte VLAN header between the Ethernet addresses and
 * the encapsulated protocol field is missing.  Userspace offsets, however,
 * refer to the on-wire (tagged) layout.  Rebuild a struct vlan_ethhdr on
 * the stack from the skb metadata, serve reads below VLAN_ETH_HLEN from
 * it, and fall back to skb_copy_bits() (with the offset shifted back by
 * VLAN_HLEN) for anything beyond the tagged Ethernet header.
 *
 * @d:      destination register buffer
 * @skb:    packet being inspected
 * @offset: byte offset relative to the on-wire (tagged) mac header
 * @len:    number of bytes to copy
 *
 * Returns true on success, false if a copy ran past the packet data.
 */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;

	vlanh = (u8 *) &veth;
	if (offset < ETH_HLEN) {
		/* Read starts inside the dst/src MAC + proto area: fetch the
		 * real Ethernet header from the skb, then overwrite the proto
		 * field with the stripped tag's TPID so veth looks tagged.
		 */
		u8 ethlen = min_t(u8, len, ETH_HLEN - offset);

		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
			return false;

		veth.h_vlan_proto = skb->vlan_proto;

		memcpy(dst_u8, vlanh + offset, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		/* Continue at the (reconstructed) VLAN header. */
		dst_u8 += ethlen;
		offset = ETH_HLEN;
	} else if (offset >= VLAN_ETH_HLEN) {
		/* Entirely past the tagged header: only shift the offset to
		 * account for the 4 bytes missing from the skb data.
		 */
		offset -= VLAN_HLEN;
		goto skip;
	}

	/* Fill in the TCI / encapsulated-proto half of the synthetic
	 * vlan_ethhdr from the skb metadata.
	 */
	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth.h_vlan_encapsulated_proto = skb->protocol;

	vlanh += offset;

	vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
	memcpy(dst_u8, vlanh, vlan_len);

	len -= vlan_len;
	if (!len)
		return true;

	/* Remaining bytes lie beyond VLAN_ETH_HLEN in on-wire terms;
	 * at this point offset == VLAN_ETH_HLEN, and offset + mac_off
	 * lands just past the untagged Ethernet header in the skb.
	 */
	dst_u8 += vlan_len;
 skip:
	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

/* Match-side evaluation: copy priv->len bytes from the selected header base
 * (+ priv->offset) into the destination register.  On any failure the
 * expression breaks the rule (NFT_BREAK) rather than erroring.
 */
static void nft_payload_eval(const struct nft_expr *expr,
			     struct nft_regs *regs,
			     const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	/* Pre-clear the register word at the end of the copy region so the
	 * trailing bytes of a length that is not a multiple of
	 * NFT_REG32_SIZE are deterministically zero.
	 */
	dest[priv->len / NFT_REG32_SIZE] = 0;
	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		/* Tag was stripped by hw offload: take the slow path that
		 * reconstructs the VLAN header (handles the whole copy).
		 */
		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		/* base was validated in nft_payload_select_ops() */
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

/* Netlink attribute policy shared by the get and set variants. */
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
};

/* Parse the get-variant attributes.  Presence of the mandatory attributes
 * (BASE/OFFSET/LEN/DREG) was already checked in nft_payload_select_ops().
 */
static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->dreg   = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);

	/* Also validates that priv->len bytes fit into dreg. */
	return nft_validate_register_store(ctx, priv->dreg, NULL,
					   NFT_DATA_VALUE, priv->len);
}

/* Dump the get-variant configuration back to userspace. */
static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static struct nft_expr_type nft_payload_type;
static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};

/* Same callbacks as nft_payload_ops, but deliberately a distinct,
 * non-static object.  NOTE(review): presumably the core evaluation loop
 * recognizes this ops struct by address and substitutes an inlined
 * fast-path load for small aligned reads, with .eval here as the fallback
 * — confirm against nf_tables_core.
 */
const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};

/* Set-side evaluation: write priv->len bytes from the source register into
 * the packet at the selected base + offset, updating an inline Internet
 * checksum at base + csum_offset first when configured.
 */
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;
	__sum16 sum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		/* base was validated in nft_payload_select_ops() */
		BUG();
	}

	/* csum_offset is relative to the header base, not to priv->offset,
	 * so compute it before offset is advanced.
	 */
	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	/* Incrementally fix the inline checksum: fold out the sum of the
	 * bytes being replaced (fsum) and fold in the sum of the new bytes
	 * (tsum).  Skipped for a transport-header store on a
	 * CHECKSUM_PARTIAL skb, where the checksum field is completed later
	 * anyway.
	 */
	if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
			goto err;

		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);
		sum = csum_fold(csum_add(csum_sub(~csum_unfold(sum), fsum),
					 tsum));
		/* 0 is "no checksum" on the wire; use the equivalent
		 * all-ones representation instead.
		 */
		if (sum == 0)
			sum = CSUM_MANGLED_0;

		if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
		    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
			goto err;
	}

	if (!skb_make_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

/* Parse the set-variant attributes.  The checksum attributes are optional;
 * only NONE and INET checksum types are supported.
 */
static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->sreg        = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_validate_register_load(priv->sreg, priv->len);
}

/* Dump the set-variant configuration back to userspace. */
static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};

/* Pick the ops variant from the attributes: SREG selects the set (mangle)
 * variant, DREG the get variant; small, power-of-two, aligned loads from
 * the network/transport headers get the fast ops.
 */
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* SREG and DREG are mutually exclusive. */
	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	/* LL header is excluded from the fast path (it needs the VLAN
	 * reconstruction handling in nft_payload_eval()).
	 */
	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

static struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};

/* Registration entry points, called by the nf_tables core (this expression
 * is built into nf_tables rather than being a standalone module).
 */
int __init nft_payload_module_init(void)
{
	return nft_register_expr(&nft_payload_type);
}

void nft_payload_module_exit(void)
{
	nft_unregister_expr(&nft_payload_type);
}