/*
 * VLAN		An implementation of 802.1Q VLAN tagging.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */
#ifndef _LINUX_IF_VLAN_H_
#define _LINUX_IF_VLAN_H_

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bug.h>
#include <uapi/linux/if_vlan.h>

#define VLAN_HLEN	4		/* The additional bytes required by VLAN
					 * (in addition to the Ethernet header)
					 */
#define VLAN_ETH_HLEN	18		/* Total octets in header.	 */
#define VLAN_ETH_ZLEN	64		/* Min. octets in frame sans FCS */

/*
 * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
 */
#define VLAN_ETH_DATA_LEN	1500	/* Max. octets in payload	 */
#define VLAN_ETH_FRAME_LEN	1518	/* Max. octets in frame sans FCS */

/*
 * struct vlan_hdr - vlan header
 * @h_vlan_TCI: priority and VLAN ID
 * @h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_hdr {
	__be16	h_vlan_TCI;
	__be16	h_vlan_encapsulated_proto;
};

/**
 * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
 * @h_dest: destination ethernet address
 * @h_source: source ethernet address
 * @h_vlan_proto: ethernet protocol
 * @h_vlan_TCI: priority and VLAN ID
 * @h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_ethhdr {
	unsigned char	h_dest[ETH_ALEN];
	unsigned char	h_source[ETH_ALEN];
	__be16		h_vlan_proto;
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};

#include <linux/skbuff.h>

static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
	return (struct vlan_ethhdr *)skb_mac_header(skb);
}

#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT		13
#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator */
#define VLAN_TAG_PRESENT	VLAN_CFI_MASK
#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
#define VLAN_N_VID		4096

/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));

static inline bool is_vlan_dev(const struct net_device *dev)
{
	return dev->priv_flags & IFF_802_1Q_VLAN;
}

#define skb_vlan_tag_present(__skb)	((__skb)->vlan_tci & VLAN_TAG_PRESENT)
#define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
#define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_prio(__skb)	((__skb)->vlan_tci & VLAN_PRIO_MASK)

/**
 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
 * @rx_packets: number of received packets
 * @rx_bytes: number of received bytes
 * @rx_multicast: number of received multicast packets
 * @tx_packets: number of transmitted packets
 * @tx_bytes: number of transmitted bytes
 * @syncp: synchronization point for 64bit counters
 * @rx_errors: number of rx errors
 * @tx_dropped: number of tx drops
 */
struct vlan_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_errors;
	u32			tx_dropped;
};

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)

extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
						   __be16 vlan_proto, u16 vlan_id);
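
/*
 * Example (illustrative sketch, not part of the original header): given a
 * tagged skb seen on @real_dev, look up the matching VLAN net_device for its
 * tag protocol and VLAN ID.  The helper name is hypothetical, and the caller
 * is assumed to hold rcu_read_lock(), as the _rcu lookup requires.
 */
static inline struct net_device *
vlan_example_find_upper_rcu(struct net_device *real_dev,
			    const struct sk_buff *skb)
{
	if (!skb_vlan_tag_present(skb))
		return NULL;
	return __vlan_find_dev_deep_rcu(real_dev, skb->vlan_proto,
					skb_vlan_tag_get_id(skb));
}
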
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);

/**
 * struct vlan_priority_tci_mapping - vlan egress priority mappings
 * @priority: skb priority
 * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
 * @next: pointer to next struct
 */
struct vlan_priority_tci_mapping {
	u32					priority;
	u16					vlan_qos;
	struct vlan_priority_tci_mapping	*next;
};

struct proc_dir_entry;
struct netpoll;

/**
 * struct vlan_dev_priv - VLAN private device data
 * @nr_ingress_mappings: number of ingress priority mappings
 * @ingress_priority_map: ingress priority mappings
 * @nr_egress_mappings: number of egress priority mappings
 * @egress_priority_map: hash of egress priority mappings
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_id: VLAN identifier
 * @flags: device flags
 * @real_dev: underlying netdevice
 * @real_dev_addr: address of underlying netdevice
 * @dent: proc dir entry
 * @vlan_pcpu_stats: ptr to percpu rx stats
 */
struct vlan_dev_priv {
	unsigned int				nr_ingress_mappings;
	u32					ingress_priority_map[8];
	unsigned int				nr_egress_mappings;
	struct vlan_priority_tci_mapping	*egress_priority_map[16];

	__be16					vlan_proto;
	u16					vlan_id;
	u16					flags;

	struct net_device			*real_dev;
	unsigned char				real_dev_addr[ETH_ALEN];

	struct proc_dir_entry			*dent;
	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll				*netpoll;
#endif
	unsigned int				nest_level;
};

static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u16
vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
{
	struct vlan_priority_tci_mapping *mp;

	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */

	mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
	while (mp) {
		if (mp->priority == skprio) {
			return mp->vlan_qos; /* This should already be shifted
					      * to mask correctly with the
					      * VLAN's TCI */
		}
		mp = mp->next;
	}
	return 0;
}
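
/*
 * Example (illustrative, not part of the original header): loosely modeled on
 * the 8021q egress path, which builds the on-wire TCI from the device's VLAN
 * ID plus the (already shifted) egress priority for this skb.  The helper
 * name is hypothetical.
 */
static inline u16 vlan_example_build_tci(struct net_device *dev,
					 const struct sk_buff *skb)
{
	return (vlan_dev_priv(dev)->vlan_id & VLAN_VID_MASK) |
	       vlan_dev_get_egress_qos_mask(dev, skb->priority);
}
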
extern bool vlan_do_receive(struct sk_buff **skb);

extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);

extern int vlan_vids_add_by_dev(struct net_device *dev,
				const struct net_device *by_dev);
extern void vlan_vids_del_by_dev(struct net_device *dev,
				 const struct net_device *by_dev);

extern bool vlan_uses_dev(const struct net_device *dev);

static inline int vlan_get_encap_level(struct net_device *dev)
{
	BUG_ON(!is_vlan_dev(dev));
	return vlan_dev_priv(dev)->nest_level;
}
#else
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
			 __be16 vlan_proto, u16 vlan_id)
{
	return NULL;
}

static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	BUG();
	return NULL;
}

static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
					       u32 skprio)
{
	return 0;
}

static inline bool vlan_do_receive(struct sk_buff **skb)
{
	return false;
}

static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	return 0;
}

static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
}

static inline int vlan_vids_add_by_dev(struct net_device *dev,
				       const struct net_device *by_dev)
{
	return 0;
}

static inline void vlan_vids_del_by_dev(struct net_device *dev,
					const struct net_device *by_dev)
{
}

static inline bool vlan_uses_dev(const struct net_device *dev)
{
	return false;
}

static inline int vlan_get_encap_level(struct net_device *dev)
{
	BUG();
	return 0;
}
#endif

/**
 * eth_type_vlan - check for valid vlan ether type.
 * @ethertype: ether type to check
 *
 * Returns true if the ether type is a vlan ether type.
 */
static inline bool eth_type_vlan(__be16 ethertype)
{
	switch (ethertype) {
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static inline bool vlan_hw_offload_capable(netdev_features_t features,
					   __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
		return true;
	if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
		return true;
	return false;
}
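
/*
 * Example (illustrative, not part of the original header): an upper device
 * such as a bridge or bonding master typically mirrors an 802.1Q VLAN filter
 * entry down to its lower device with vlan_vid_add()/vlan_vid_del(); the two
 * calls must be balanced.  The helper name is hypothetical.
 */
static inline int vlan_example_mirror_vid(struct net_device *lower_dev,
					  u16 vid, bool add)
{
	if (add)
		return vlan_vid_add(lower_dev, htons(ETH_P_8021Q), vid);

	vlan_vid_del(lower_dev, htons(ETH_P_8021Q), vid);
	return 0;
}
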
/**
 * __vlan_insert_inner_tag - inner VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 * @mac_len: MAC header length including outer vlan headers
 *
 * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci,
					  unsigned int mac_len)
{
	struct vlan_ethhdr *veth;

	if (skb_cow_head(skb, VLAN_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, VLAN_HLEN);

	/* Move the mac header sans proto to the beginning of the new header. */
	if (likely(mac_len > ETH_TLEN))
		memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
	skb->mac_header -= VLAN_HLEN;

	veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);

	/* first, the ethernet type */
	if (likely(mac_len >= ETH_TLEN)) {
		/* h_vlan_encapsulated_proto should already be populated, and
		 * skb->data has space for h_vlan_proto
		 */
		veth->h_vlan_proto = vlan_proto;
	} else {
		/* h_vlan_encapsulated_proto should not be populated, and
		 * skb->data has no space for h_vlan_proto
		 */
		veth->h_vlan_encapsulated_proto = skb->protocol;
	}

	/* now, the TCI */
	veth->h_vlan_TCI = htons(vlan_tci);

	return 0;
}

/**
 * __vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_tag(struct sk_buff *skb,
				    __be16 vlan_proto, u16 vlan_tci)
{
	return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}

/**
 * vlan_insert_inner_tag - inner VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 * @mac_len: MAC header length including outer vlan headers
 *
 * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
						    __be16 vlan_proto,
						    u16 vlan_tci,
						    unsigned int mac_len)
{
	int err;

	err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	return skb;
}

/**
 * vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
					      __be16 vlan_proto, u16 vlan_tci)
{
	return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}

/**
 * vlan_insert_tag_set_proto - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
							__be16 vlan_proto,
							u16 vlan_tci)
{
	skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
	if (skb)
		skb->protocol = vlan_proto;
	return skb;
}

/*
 * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
 * @skb: skbuff to tag
 *
 * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
	skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
	if (likely(skb))
		skb->vlan_tci = 0;
	return skb;
}
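
/*
 * Example (illustrative, not part of the original header): the usual transmit
 * fallback - if the device cannot insert this tag type in hardware, push the
 * accelerated tag into the packet data before handing the skb over.  Returns
 * NULL (with the skb already freed) if the insertion fails.  The helper name
 * is hypothetical; the core xmit path performs an equivalent check.
 */
static inline struct sk_buff *
vlan_example_sw_tag_fallback(struct sk_buff *skb, netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}
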
/**
 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
 */
static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci)
{
	skb->vlan_proto = vlan_proto;
	skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
}

/**
 * __vlan_get_tag - get the VLAN ID that is part of the payload
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not of VLAN type
 */
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;

	if (!eth_type_vlan(veth->h_vlan_proto))
		return -EINVAL;

	*vlan_tci = ntohs(veth->h_vlan_TCI);
	return 0;
}

/**
 * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->vlan_tci
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if @skb->vlan_tci is not set correctly
 */
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
					 u16 *vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		*vlan_tci = skb_vlan_tag_get(skb);
		return 0;
	} else {
		*vlan_tci = 0;
		return -EINVAL;
	}
}

#define HAVE_VLAN_GET_TAG

/**
 * vlan_get_tag - get the VLAN ID from the skb
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not VLAN tagged
 */
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
		return __vlan_hwaccel_get_tag(skb, vlan_tci);
	} else {
		return __vlan_get_tag(skb, vlan_tci);
	}
}

/**
 * __vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 * @type: first vlan protocol
 * @depth: buffer to store length of eth and vlan tags in bytes
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
					 int *depth)
{
	unsigned int vlan_depth = skb->mac_len;

	/* if type is 802.1Q/AD then the header should already be
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (eth_type_vlan(type)) {
		if (vlan_depth) {
			if (WARN_ON(vlan_depth < VLAN_HLEN))
				return 0;
			vlan_depth -= VLAN_HLEN;
		} else {
			vlan_depth = ETH_HLEN;
		}
		do {
			struct vlan_hdr *vh;

			if (unlikely(!pskb_may_pull(skb,
						    vlan_depth + VLAN_HLEN)))
				return 0;

			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (eth_type_vlan(type));
	}

	if (depth)
		*depth = vlan_depth;

	return type;
}
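
/*
 * Example (illustrative, not part of the original header): dissect a possibly
 * VLAN-tagged skb - fetch the TCI whether it sits in skb->vlan_tci or still
 * in the packet, split it into VID and priority, and report the encapsulated
 * protocol plus its offset.  The helper name is hypothetical.
 */
static inline void vlan_example_dissect(struct sk_buff *skb)
{
	u16 tci = 0;
	int depth = 0;
	__be16 encap_proto = __vlan_get_protocol(skb, skb->protocol, &depth);

	if (vlan_get_tag(skb, &tci) == 0)
		pr_debug("vid %u prio %u encap 0x%04x at offset %d\n",
			 tci & VLAN_VID_MASK,
			 (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT,
			 ntohs(encap_proto), depth);
}
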
/**
 * vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 vlan_get_protocol(struct sk_buff *skb)
{
	return __vlan_get_protocol(skb, skb->protocol, NULL);
}

static inline void vlan_set_encap_proto(struct sk_buff *skb,
					struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned short *rawp;

	/*
	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */

	proto = vhdr->h_vlan_encapsulated_proto;
	if (eth_proto_is_802_3(proto)) {
		skb->protocol = proto;
		return;
	}

	rawp = (unsigned short *)(vhdr + 1);
	if (*rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets. Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer. We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}
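
/*
 * Example (illustrative sketch, not part of the original header): a software
 * receive path that finds a VLAN header at skb->data could lift the tag into
 * skb->vlan_tci and restore skb->protocol roughly like this.  It assumes the
 * header is already linear and skips checksum/mac_len fixups; the real,
 * complete version of this logic is skb_vlan_untag().
 */
static inline void vlan_example_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)skb->data;

	__vlan_hwaccel_put_tag(skb, skb->protocol, ntohs(vhdr->h_vlan_TCI));
	vlan_set_encap_proto(skb, vhdr);
	skb_pull(skb, VLAN_HLEN);
	skb_reset_network_header(skb);
}
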
/**
 * skb_vlan_tagged - check if skb is vlan tagged.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged, regardless of whether it is hardware
 * accelerated or not.
 */
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
{
	if (!skb_vlan_tag_present(skb) &&
	    likely(!eth_type_vlan(skb->protocol)))
		return false;

	return true;
}

/**
 * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged with multiple vlan headers, regardless
 * of whether it is hardware accelerated or not.
 */
static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	if (!skb_vlan_tag_present(skb)) {
		struct vlan_ethhdr *veh;

		if (likely(!eth_type_vlan(protocol)))
			return false;

		veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (!eth_type_vlan(protocol))
		return false;

	return true;
}

/**
 * vlan_features_check - drop unsafe features for skb with multiple tags.
 * @skb: skbuff to query
 * @features: features to be checked
 *
 * Returns features without unsafe ones if the skb has multiple tags.
 */
static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
						    netdev_features_t features)
{
	if (skb_vlan_tagged_multi(skb)) {
		/* In the case of multi-tagged packets, use a direct mask
		 * instead of using netdev_intersect_features(), to make
		 * sure that only devices supporting NETIF_F_HW_CSUM will
		 * have checksum offloading support.
		 */
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			    NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_TX;
	}

	return features;
}

/**
 * compare_vlan_header - Compare two vlan headers
 * @h1: Pointer to vlan header
 * @h2: Pointer to vlan header
 *
 * Compare two vlan headers, returns 0 if equal.
 *
 * Please note that the alignment of h1 & h2 is only guaranteed to be 16 bits.
 */
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
						const struct vlan_hdr *h2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return *(u32 *)h1 ^ *(u32 *)h2;
#else
	return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
	       ((__force u32)h1->h_vlan_encapsulated_proto ^
		(__force u32)h2->h_vlan_encapsulated_proto);
#endif
}
#endif /* !(_LINUX_IF_VLAN_H_) */