Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

6lowpan: change naming for lowpan private data

This patch changes the naming for interface private data for lowpan
interfaces. The current private data scheme is:

-------------------------------------------------
| 6LoWPAN Generic | LinkLayer 6LoWPAN |
-------------------------------------------------

the current naming schemes are:

- 6LoWPAN Generic:
- lowpan_priv
- LinkLayer 6LoWPAN:
- BTLE
- lowpan_dev
- 802.15.4:
- lowpan_dev_info

the new naming scheme with this patch will be:

- 6LoWPAN Generic:
- lowpan_dev
- LinkLayer 6LoWPAN:
- BTLE
- lowpan_btle_dev
- 802.15.4:
- lowpan_802154_dev

Signed-off-by: Alexander Aring <aar@pengutronix.de>
Reviewed-by: Stefan Schmidt <stefan@osg.samsung.com>
Acked-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>

authored by

Alexander Aring and committed by
Marcel Holtmann
2e4d60cb 5a7f97e5

+94 -90
+3 -3
include/net/6lowpan.h
··· 93 93 } 94 94 95 95 #define LOWPAN_PRIV_SIZE(llpriv_size) \ 96 - (sizeof(struct lowpan_priv) + llpriv_size) 96 + (sizeof(struct lowpan_dev) + llpriv_size) 97 97 98 98 enum lowpan_lltypes { 99 99 LOWPAN_LLTYPE_BTLE, ··· 129 129 return test_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags); 130 130 } 131 131 132 - struct lowpan_priv { 132 + struct lowpan_dev { 133 133 enum lowpan_lltypes lltype; 134 134 struct dentry *iface_debugfs; 135 135 struct lowpan_iphc_ctx_table ctx; ··· 139 139 }; 140 140 141 141 static inline 142 - struct lowpan_priv *lowpan_priv(const struct net_device *dev) 142 + struct lowpan_dev *lowpan_dev(const struct net_device *dev) 143 143 { 144 144 return netdev_priv(dev); 145 145 }
+4 -4
net/6lowpan/core.c
··· 27 27 dev->mtu = IPV6_MIN_MTU; 28 28 dev->priv_flags |= IFF_NO_QUEUE; 29 29 30 - lowpan_priv(dev)->lltype = lltype; 30 + lowpan_dev(dev)->lltype = lltype; 31 31 32 - spin_lock_init(&lowpan_priv(dev)->ctx.lock); 32 + spin_lock_init(&lowpan_dev(dev)->ctx.lock); 33 33 for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++) 34 - lowpan_priv(dev)->ctx.table[i].id = i; 34 + lowpan_dev(dev)->ctx.table[i].id = i; 35 35 36 36 ret = register_netdevice(dev); 37 37 if (ret < 0) ··· 85 85 case NETDEV_DOWN: 86 86 for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++) 87 87 clear_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, 88 - &lowpan_priv(dev)->ctx.table[i].flags); 88 + &lowpan_dev(dev)->ctx.table[i].flags); 89 89 break; 90 90 default: 91 91 return NOTIFY_DONE;
+11 -11
net/6lowpan/debugfs.c
··· 172 172 static int lowpan_dev_debugfs_ctx_init(struct net_device *dev, 173 173 struct dentry *ctx, u8 id) 174 174 { 175 - struct lowpan_priv *lpriv = lowpan_priv(dev); 175 + struct lowpan_dev *ldev = lowpan_dev(dev); 176 176 struct dentry *dentry, *root; 177 177 char buf[32]; 178 178 ··· 185 185 return -EINVAL; 186 186 187 187 dentry = debugfs_create_file("active", 0644, root, 188 - &lpriv->ctx.table[id], 188 + &ldev->ctx.table[id], 189 189 &lowpan_ctx_flag_active_fops); 190 190 if (!dentry) 191 191 return -EINVAL; 192 192 193 193 dentry = debugfs_create_file("compression", 0644, root, 194 - &lpriv->ctx.table[id], 194 + &ldev->ctx.table[id], 195 195 &lowpan_ctx_flag_c_fops); 196 196 if (!dentry) 197 197 return -EINVAL; 198 198 199 199 dentry = debugfs_create_file("prefix", 0644, root, 200 - &lpriv->ctx.table[id], 200 + &ldev->ctx.table[id], 201 201 &lowpan_ctx_pfx_fops); 202 202 if (!dentry) 203 203 return -EINVAL; 204 204 205 205 dentry = debugfs_create_file("prefix_len", 0644, root, 206 - &lpriv->ctx.table[id], 206 + &ldev->ctx.table[id], 207 207 &lowpan_ctx_plen_fops); 208 208 if (!dentry) 209 209 return -EINVAL; ··· 247 247 248 248 int lowpan_dev_debugfs_init(struct net_device *dev) 249 249 { 250 - struct lowpan_priv *lpriv = lowpan_priv(dev); 250 + struct lowpan_dev *ldev = lowpan_dev(dev); 251 251 struct dentry *contexts, *dentry; 252 252 int ret, i; 253 253 254 254 /* creating the root */ 255 - lpriv->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs); 256 - if (!lpriv->iface_debugfs) 255 + ldev->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs); 256 + if (!ldev->iface_debugfs) 257 257 goto fail; 258 258 259 - contexts = debugfs_create_dir("contexts", lpriv->iface_debugfs); 259 + contexts = debugfs_create_dir("contexts", ldev->iface_debugfs); 260 260 if (!contexts) 261 261 goto remove_root; 262 262 263 263 dentry = debugfs_create_file("show", 0644, contexts, 264 - &lowpan_priv(dev)->ctx, 264 + &lowpan_dev(dev)->ctx, 265 265 
&lowpan_context_fops); 266 266 if (!dentry) 267 267 goto remove_root; ··· 282 282 283 283 void lowpan_dev_debugfs_exit(struct net_device *dev) 284 284 { 285 - debugfs_remove_recursive(lowpan_priv(dev)->iface_debugfs); 285 + debugfs_remove_recursive(lowpan_dev(dev)->iface_debugfs); 286 286 } 287 287 288 288 int __init lowpan_debugfs_init(void)
+19 -19
net/6lowpan/iphc.c
··· 207 207 static struct lowpan_iphc_ctx * 208 208 lowpan_iphc_ctx_get_by_id(const struct net_device *dev, u8 id) 209 209 { 210 - struct lowpan_iphc_ctx *ret = &lowpan_priv(dev)->ctx.table[id]; 210 + struct lowpan_iphc_ctx *ret = &lowpan_dev(dev)->ctx.table[id]; 211 211 212 212 if (!lowpan_iphc_ctx_is_active(ret)) 213 213 return NULL; ··· 219 219 lowpan_iphc_ctx_get_by_addr(const struct net_device *dev, 220 220 const struct in6_addr *addr) 221 221 { 222 - struct lowpan_iphc_ctx *table = lowpan_priv(dev)->ctx.table; 222 + struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table; 223 223 struct lowpan_iphc_ctx *ret = NULL; 224 224 struct in6_addr addr_pfx; 225 225 u8 addr_plen; ··· 263 263 lowpan_iphc_ctx_get_by_mcast_addr(const struct net_device *dev, 264 264 const struct in6_addr *addr) 265 265 { 266 - struct lowpan_iphc_ctx *table = lowpan_priv(dev)->ctx.table; 266 + struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table; 267 267 struct lowpan_iphc_ctx *ret = NULL; 268 268 struct in6_addr addr_mcast, network_pfx = {}; 269 269 int i; ··· 332 332 case LOWPAN_IPHC_SAM_11: 333 333 case LOWPAN_IPHC_DAM_11: 334 334 fail = false; 335 - switch (lowpan_priv(dev)->lltype) { 335 + switch (lowpan_dev(dev)->lltype) { 336 336 case LOWPAN_LLTYPE_IEEE802154: 337 337 iphc_uncompress_802154_lladdr(ipaddr, lladdr); 338 338 break; ··· 393 393 case LOWPAN_IPHC_SAM_11: 394 394 case LOWPAN_IPHC_DAM_11: 395 395 fail = false; 396 - switch (lowpan_priv(dev)->lltype) { 396 + switch (lowpan_dev(dev)->lltype) { 397 397 case LOWPAN_LLTYPE_IEEE802154: 398 398 iphc_uncompress_802154_lladdr(ipaddr, lladdr); 399 399 break; ··· 657 657 } 658 658 659 659 if (iphc1 & LOWPAN_IPHC_SAC) { 660 - spin_lock_bh(&lowpan_priv(dev)->ctx.lock); 660 + spin_lock_bh(&lowpan_dev(dev)->ctx.lock); 661 661 ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_SCI(cid)); 662 662 if (!ci) { 663 - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); 663 + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); 664 664 return -EINVAL; 
665 665 } 666 666 667 667 pr_debug("SAC bit is set. Handle context based source address.\n"); 668 668 err = uncompress_ctx_addr(skb, dev, ci, &hdr.saddr, 669 669 iphc1 & LOWPAN_IPHC_SAM_MASK, saddr); 670 - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); 670 + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); 671 671 } else { 672 672 /* Source address uncompression */ 673 673 pr_debug("source address stateless compression\n"); ··· 681 681 682 682 switch (iphc1 & (LOWPAN_IPHC_M | LOWPAN_IPHC_DAC)) { 683 683 case LOWPAN_IPHC_M | LOWPAN_IPHC_DAC: 684 - spin_lock_bh(&lowpan_priv(dev)->ctx.lock); 684 + spin_lock_bh(&lowpan_dev(dev)->ctx.lock); 685 685 ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid)); 686 686 if (!ci) { 687 - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); 687 + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); 688 688 return -EINVAL; 689 689 } 690 690 ··· 693 693 err = lowpan_uncompress_multicast_ctx_daddr(skb, ci, 694 694 &hdr.daddr, 695 695 iphc1 & LOWPAN_IPHC_DAM_MASK); 696 - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); 696 + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); 697 697 break; 698 698 case LOWPAN_IPHC_M: 699 699 /* multicast */ ··· 701 701 iphc1 & LOWPAN_IPHC_DAM_MASK); 702 702 break; 703 703 case LOWPAN_IPHC_DAC: 704 - spin_lock_bh(&lowpan_priv(dev)->ctx.lock); 704 + spin_lock_bh(&lowpan_dev(dev)->ctx.lock); 705 705 ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid)); 706 706 if (!ci) { 707 - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); 707 + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); 708 708 return -EINVAL; 709 709 } 710 710 ··· 712 712 pr_debug("DAC bit is set. 
Handle context based destination address.\n"); 713 713 err = uncompress_ctx_addr(skb, dev, ci, &hdr.daddr, 714 714 iphc1 & LOWPAN_IPHC_DAM_MASK, daddr); 715 - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); 715 + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); 716 716 break; 717 717 default: 718 718 err = uncompress_addr(skb, dev, &hdr.daddr, ··· 736 736 return err; 737 737 } 738 738 739 - switch (lowpan_priv(dev)->lltype) { 739 + switch (lowpan_dev(dev)->lltype) { 740 740 case LOWPAN_LLTYPE_IEEE802154: 741 741 if (lowpan_802154_cb(skb)->d_size) 742 742 hdr.payload_len = htons(lowpan_802154_cb(skb)->d_size - ··· 1033 1033 skb->data, skb->len); 1034 1034 1035 1035 ipv6_daddr_type = ipv6_addr_type(&hdr->daddr); 1036 - spin_lock_bh(&lowpan_priv(dev)->ctx.lock); 1036 + spin_lock_bh(&lowpan_dev(dev)->ctx.lock); 1037 1037 if (ipv6_daddr_type & IPV6_ADDR_MULTICAST) 1038 1038 dci = lowpan_iphc_ctx_get_by_mcast_addr(dev, &hdr->daddr); 1039 1039 else ··· 1042 1042 memcpy(&dci_entry, dci, sizeof(*dci)); 1043 1043 cid |= dci->id; 1044 1044 } 1045 - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); 1045 + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); 1046 1046 1047 - spin_lock_bh(&lowpan_priv(dev)->ctx.lock); 1047 + spin_lock_bh(&lowpan_dev(dev)->ctx.lock); 1048 1048 sci = lowpan_iphc_ctx_get_by_addr(dev, &hdr->saddr); 1049 1049 if (sci) { 1050 1050 memcpy(&sci_entry, sci, sizeof(*sci)); 1051 1051 cid |= (sci->id << 4); 1052 1052 } 1053 - spin_unlock_bh(&lowpan_priv(dev)->ctx.lock); 1053 + spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); 1054 1054 1055 1055 /* if cid is zero it will be compressed */ 1056 1056 if (cid) {
+1 -1
net/6lowpan/nhc_udp.c
··· 91 91 * here, we obtain the hint from the remaining size of the 92 92 * frame 93 93 */ 94 - switch (lowpan_priv(skb->dev)->lltype) { 94 + switch (lowpan_dev(skb->dev)->lltype) { 95 95 case LOWPAN_LLTYPE_IEEE802154: 96 96 if (lowpan_802154_cb(skb)->d_size) 97 97 uh.len = htons(lowpan_802154_cb(skb)->d_size -
+43 -39
net/bluetooth/6lowpan.c
··· 68 68 struct in6_addr peer_addr; 69 69 }; 70 70 71 - struct lowpan_dev { 71 + struct lowpan_btle_dev { 72 72 struct list_head list; 73 73 74 74 struct hci_dev *hdev; ··· 80 80 struct delayed_work notify_peers; 81 81 }; 82 82 83 - static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev) 83 + static inline struct lowpan_btle_dev * 84 + lowpan_btle_dev(const struct net_device *netdev) 84 85 { 85 - return (struct lowpan_dev *)lowpan_priv(netdev)->priv; 86 + return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv; 86 87 } 87 88 88 - static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer) 89 + static inline void peer_add(struct lowpan_btle_dev *dev, 90 + struct lowpan_peer *peer) 89 91 { 90 92 list_add_rcu(&peer->list, &dev->peers); 91 93 atomic_inc(&dev->peer_count); 92 94 } 93 95 94 - static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer) 96 + static inline bool peer_del(struct lowpan_btle_dev *dev, 97 + struct lowpan_peer *peer) 95 98 { 96 99 list_del_rcu(&peer->list); 97 100 kfree_rcu(peer, rcu); ··· 109 106 return false; 110 107 } 111 108 112 - static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev, 109 + static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_btle_dev *dev, 113 110 bdaddr_t *ba, __u8 type) 114 111 { 115 112 struct lowpan_peer *peer; ··· 137 134 return NULL; 138 135 } 139 136 140 - static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev, 141 - struct l2cap_chan *chan) 137 + static inline struct lowpan_peer * 138 + __peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan) 142 139 { 143 140 struct lowpan_peer *peer; 144 141 ··· 150 147 return NULL; 151 148 } 152 149 153 - static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev, 154 - struct l2cap_conn *conn) 150 + static inline struct lowpan_peer * 151 + __peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn) 155 152 { 156 153 struct 
lowpan_peer *peer; 157 154 ··· 163 160 return NULL; 164 161 } 165 162 166 - static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev, 163 + static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev, 167 164 struct in6_addr *daddr, 168 165 struct sk_buff *skb) 169 166 { ··· 223 220 224 221 static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn) 225 222 { 226 - struct lowpan_dev *entry; 223 + struct lowpan_btle_dev *entry; 227 224 struct lowpan_peer *peer = NULL; 228 225 229 226 rcu_read_lock(); ··· 239 236 return peer; 240 237 } 241 238 242 - static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn) 239 + static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn) 243 240 { 244 - struct lowpan_dev *entry; 245 - struct lowpan_dev *dev = NULL; 241 + struct lowpan_btle_dev *entry; 242 + struct lowpan_btle_dev *dev = NULL; 246 243 247 244 rcu_read_lock(); 248 245 ··· 273 270 struct l2cap_chan *chan) 274 271 { 275 272 const u8 *saddr, *daddr; 276 - struct lowpan_dev *dev; 273 + struct lowpan_btle_dev *dev; 277 274 struct lowpan_peer *peer; 278 275 279 - dev = lowpan_dev(netdev); 276 + dev = lowpan_btle_dev(netdev); 280 277 281 278 rcu_read_lock(); 282 279 peer = __peer_lookup_chan(dev, chan); ··· 378 375 /* Packet from BT LE device */ 379 376 static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) 380 377 { 381 - struct lowpan_dev *dev; 378 + struct lowpan_btle_dev *dev; 382 379 struct lowpan_peer *peer; 383 380 int err; 384 381 ··· 434 431 bdaddr_t *peer_addr, u8 *peer_addr_type) 435 432 { 436 433 struct in6_addr ipv6_daddr; 437 - struct lowpan_dev *dev; 434 + struct lowpan_btle_dev *dev; 438 435 struct lowpan_peer *peer; 439 436 bdaddr_t addr, *any = BDADDR_ANY; 440 437 u8 *daddr = any->b; 441 438 int err, status = 0; 442 439 443 - dev = lowpan_dev(netdev); 440 + dev = lowpan_btle_dev(netdev); 444 441 445 442 memcpy(&ipv6_daddr, &lowpan_cb(skb)->addr, sizeof(ipv6_daddr)); 446 443 ··· 546 543 static 
int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) 547 544 { 548 545 struct sk_buff *local_skb; 549 - struct lowpan_dev *entry; 546 + struct lowpan_btle_dev *entry; 550 547 int err = 0; 551 548 552 549 rcu_read_lock(); 553 550 554 551 list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { 555 552 struct lowpan_peer *pentry; 556 - struct lowpan_dev *dev; 553 + struct lowpan_btle_dev *dev; 557 554 558 555 if (entry->netdev != netdev) 559 556 continue; 560 557 561 - dev = lowpan_dev(entry->netdev); 558 + dev = lowpan_btle_dev(entry->netdev); 562 559 563 560 list_for_each_entry_rcu(pentry, &dev->peers, list) { 564 561 int ret; ··· 726 723 727 724 static void do_notify_peers(struct work_struct *work) 728 725 { 729 - struct lowpan_dev *dev = container_of(work, struct lowpan_dev, 730 - notify_peers.work); 726 + struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev, 727 + notify_peers.work); 731 728 732 729 netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */ 733 730 } ··· 769 766 } 770 767 771 768 static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, 772 - struct lowpan_dev *dev) 769 + struct lowpan_btle_dev *dev) 773 770 { 774 771 struct lowpan_peer *peer; 775 772 ··· 806 803 return peer->chan; 807 804 } 808 805 809 - static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev) 806 + static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev) 810 807 { 811 808 struct net_device *netdev; 812 809 int err = 0; 813 810 814 - netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)), 811 + netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)), 815 812 IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN, 816 813 netdev_setup); 817 814 if (!netdev) ··· 823 820 SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev); 824 821 SET_NETDEV_DEVTYPE(netdev, &bt_type); 825 822 826 - *dev = lowpan_dev(netdev); 823 + *dev = lowpan_btle_dev(netdev); 827 824 (*dev)->netdev = netdev; 
828 825 (*dev)->hdev = chan->conn->hcon->hdev; 829 826 INIT_LIST_HEAD(&(*dev)->peers); ··· 856 853 857 854 static inline void chan_ready_cb(struct l2cap_chan *chan) 858 855 { 859 - struct lowpan_dev *dev; 856 + struct lowpan_btle_dev *dev; 860 857 861 858 dev = lookup_dev(chan->conn); 862 859 ··· 893 890 894 891 static void delete_netdev(struct work_struct *work) 895 892 { 896 - struct lowpan_dev *entry = container_of(work, struct lowpan_dev, 897 - delete_netdev); 893 + struct lowpan_btle_dev *entry = container_of(work, 894 + struct lowpan_btle_dev, 895 + delete_netdev); 898 896 899 897 lowpan_unregister_netdev(entry->netdev); 900 898 ··· 904 900 905 901 static void chan_close_cb(struct l2cap_chan *chan) 906 902 { 907 - struct lowpan_dev *entry; 908 - struct lowpan_dev *dev = NULL; 903 + struct lowpan_btle_dev *entry; 904 + struct lowpan_btle_dev *dev = NULL; 909 905 struct lowpan_peer *peer; 910 906 int err = -ENOENT; 911 907 bool last = false, remove = true; ··· 925 921 spin_lock(&devices_lock); 926 922 927 923 list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { 928 - dev = lowpan_dev(entry->netdev); 924 + dev = lowpan_btle_dev(entry->netdev); 929 925 peer = __peer_lookup_chan(dev, chan); 930 926 if (peer) { 931 927 last = peer_del(dev, peer); ··· 1135 1131 1136 1132 static void disconnect_all_peers(void) 1137 1133 { 1138 - struct lowpan_dev *entry; 1134 + struct lowpan_btle_dev *entry; 1139 1135 struct lowpan_peer *peer, *tmp_peer, *new_peer; 1140 1136 struct list_head peers; 1141 1137 ··· 1295 1291 1296 1292 static int lowpan_control_show(struct seq_file *f, void *ptr) 1297 1293 { 1298 - struct lowpan_dev *entry; 1294 + struct lowpan_btle_dev *entry; 1299 1295 struct lowpan_peer *peer; 1300 1296 1301 1297 spin_lock(&devices_lock); ··· 1326 1322 1327 1323 static void disconnect_devices(void) 1328 1324 { 1329 - struct lowpan_dev *entry, *tmp, *new_dev; 1325 + struct lowpan_btle_dev *entry, *tmp, *new_dev; 1330 1326 struct list_head devices; 1331 1327 1332 
1328 INIT_LIST_HEAD(&devices); ··· 1364 1360 unsigned long event, void *ptr) 1365 1361 { 1366 1362 struct net_device *netdev = netdev_notifier_info_to_dev(ptr); 1367 - struct lowpan_dev *entry; 1363 + struct lowpan_btle_dev *entry; 1368 1364 1369 1365 if (netdev->type != ARPHRD_6LOWPAN) 1370 1366 return NOTIFY_DONE;
+3 -3
net/ieee802154/6lowpan/6lowpan_i.h
··· 48 48 } 49 49 50 50 /* private device info */ 51 - struct lowpan_dev_info { 51 + struct lowpan_802154_dev { 52 52 struct net_device *wdev; /* wpan device ptr */ 53 53 u16 fragment_tag; 54 54 }; 55 55 56 56 static inline struct 57 - lowpan_dev_info *lowpan_dev_info(const struct net_device *dev) 57 + lowpan_802154_dev *lowpan_802154_dev(const struct net_device *dev) 58 58 { 59 - return (struct lowpan_dev_info *)lowpan_priv(dev)->priv; 59 + return (struct lowpan_802154_dev *)lowpan_dev(dev)->priv; 60 60 } 61 61 62 62 int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
+3 -3
net/ieee802154/6lowpan/core.c
··· 148 148 return -EBUSY; 149 149 } 150 150 151 - lowpan_dev_info(ldev)->wdev = wdev; 151 + lowpan_802154_dev(ldev)->wdev = wdev; 152 152 /* Set the lowpan hardware address to the wpan hardware address. */ 153 153 memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN); 154 154 /* We need headroom for possible wpan_dev_hard_header call and tailroom ··· 173 173 174 174 static void lowpan_dellink(struct net_device *ldev, struct list_head *head) 175 175 { 176 - struct net_device *wdev = lowpan_dev_info(ldev)->wdev; 176 + struct net_device *wdev = lowpan_802154_dev(ldev)->wdev; 177 177 178 178 ASSERT_RTNL(); 179 179 ··· 184 184 185 185 static struct rtnl_link_ops lowpan_link_ops __read_mostly = { 186 186 .kind = "lowpan", 187 - .priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)), 187 + .priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_802154_dev)), 188 188 .setup = lowpan_setup, 189 189 .newlink = lowpan_newlink, 190 190 .dellink = lowpan_dellink,
+7 -7
net/ieee802154/6lowpan/tx.c
··· 84 84 lowpan_alloc_frag(struct sk_buff *skb, int size, 85 85 const struct ieee802154_hdr *master_hdr, bool frag1) 86 86 { 87 - struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev; 87 + struct net_device *wdev = lowpan_802154_dev(skb->dev)->wdev; 88 88 struct sk_buff *frag; 89 89 int rc; 90 90 ··· 148 148 int frag_cap, frag_len, payload_cap, rc; 149 149 int skb_unprocessed, skb_offset; 150 150 151 - frag_tag = htons(lowpan_dev_info(ldev)->fragment_tag); 152 - lowpan_dev_info(ldev)->fragment_tag++; 151 + frag_tag = htons(lowpan_802154_dev(ldev)->fragment_tag); 152 + lowpan_802154_dev(ldev)->fragment_tag++; 153 153 154 154 frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07); 155 155 frag_hdr[1] = dgram_size & 0xff; ··· 208 208 static int lowpan_header(struct sk_buff *skb, struct net_device *ldev, 209 209 u16 *dgram_size, u16 *dgram_offset) 210 210 { 211 - struct wpan_dev *wpan_dev = lowpan_dev_info(ldev)->wdev->ieee802154_ptr; 211 + struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr; 212 212 struct ieee802154_addr sa, da; 213 213 struct ieee802154_mac_cb *cb = mac_cb_init(skb); 214 214 struct lowpan_addr_info info; ··· 248 248 cb->ackreq = wpan_dev->ackreq; 249 249 } 250 250 251 - return wpan_dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, &da, &sa, 252 - 0); 251 + return wpan_dev_hard_header(skb, lowpan_802154_dev(ldev)->wdev, &da, 252 + &sa, 0); 253 253 } 254 254 255 255 netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev) ··· 283 283 max_single = ieee802154_max_payload(&wpan_hdr); 284 284 285 285 if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) { 286 - skb->dev = lowpan_dev_info(ldev)->wdev; 286 + skb->dev = lowpan_802154_dev(ldev)->wdev; 287 287 ldev->stats.tx_packets++; 288 288 ldev->stats.tx_bytes += dgram_size; 289 289 return dev_queue_xmit(skb);