Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET] net/core: Annotations.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Al Viro and committed by David S. Miller
252e3346 448c31aa

+38 -42
+2 -2
net/core/dev.c
··· 1215 1215 { 1216 1216 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 1217 1217 struct packet_type *ptype; 1218 - int type = skb->protocol; 1218 + __be16 type = skb->protocol; 1219 1219 int err; 1220 1220 1221 1221 BUG_ON(skb_shinfo(skb)->frag_list); ··· 1766 1766 struct packet_type *ptype, *pt_prev; 1767 1767 struct net_device *orig_dev; 1768 1768 int ret = NET_RX_DROP; 1769 - unsigned short type; 1769 + __be16 type; 1770 1770 1771 1771 /* if we've gotten here through NAPI, check netpoll */ 1772 1772 if (skb->dev->poll && netpoll_rx(skb))
+3 -3
net/core/filter.c
··· 178 178 load_w: 179 179 ptr = load_pointer(skb, k, 4, &tmp); 180 180 if (ptr != NULL) { 181 - A = ntohl(get_unaligned((u32 *)ptr)); 181 + A = ntohl(get_unaligned((__be32 *)ptr)); 182 182 continue; 183 183 } 184 184 break; ··· 187 187 load_h: 188 188 ptr = load_pointer(skb, k, 2, &tmp); 189 189 if (ptr != NULL) { 190 - A = ntohs(get_unaligned((u16 *)ptr)); 190 + A = ntohs(get_unaligned((__be16 *)ptr)); 191 191 continue; 192 192 } 193 193 break; ··· 261 261 */ 262 262 switch (k-SKF_AD_OFF) { 263 263 case SKF_AD_PROTOCOL: 264 - A = htons(skb->protocol); 264 + A = ntohs(skb->protocol); 265 265 continue; 266 266 case SKF_AD_PKTTYPE: 267 267 A = skb->pkt_type;
+1 -1
net/core/netpoll.c
··· 330 330 struct arphdr *arp; 331 331 unsigned char *arp_ptr; 332 332 int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; 333 - u32 sip, tip; 333 + __be32 sip, tip; 334 334 struct sk_buff *send_skb; 335 335 struct netpoll *np = NULL; 336 336
+32 -36
net/core/pktgen.c
··· 207 207 #define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4) 208 208 209 209 struct flow_state { 210 - __u32 cur_daddr; 210 + __be32 cur_daddr; 211 211 int count; 212 212 }; 213 213 ··· 282 282 /* If we're doing ranges, random or incremental, then this 283 283 * defines the min/max for those ranges. 284 284 */ 285 - __u32 saddr_min; /* inclusive, source IP address */ 286 - __u32 saddr_max; /* exclusive, source IP address */ 287 - __u32 daddr_min; /* inclusive, dest IP address */ 288 - __u32 daddr_max; /* exclusive, dest IP address */ 285 + __be32 saddr_min; /* inclusive, source IP address */ 286 + __be32 saddr_max; /* exclusive, source IP address */ 287 + __be32 daddr_min; /* inclusive, dest IP address */ 288 + __be32 daddr_max; /* exclusive, dest IP address */ 289 289 290 290 __u16 udp_src_min; /* inclusive, source UDP port */ 291 291 __u16 udp_src_max; /* exclusive, source UDP port */ ··· 317 317 318 318 __u32 cur_dst_mac_offset; 319 319 __u32 cur_src_mac_offset; 320 - __u32 cur_saddr; 321 - __u32 cur_daddr; 320 + __be32 cur_saddr; 321 + __be32 cur_daddr; 322 322 __u16 cur_udp_dst; 323 323 __u16 cur_udp_src; 324 324 __u32 cur_pkt_size; ··· 350 350 }; 351 351 352 352 struct pktgen_hdr { 353 - __u32 pgh_magic; 354 - __u32 seq_num; 355 - __u32 tv_sec; 356 - __u32 tv_usec; 353 + __be32 pgh_magic; 354 + __be32 seq_num; 355 + __be32 tv_sec; 356 + __be32 tv_usec; 357 357 }; 358 358 359 359 struct pktgen_thread { ··· 2160 2160 for(i = 0; i < pkt_dev->nr_labels; i++) 2161 2161 if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) 2162 2162 pkt_dev->labels[i] = MPLS_STACK_BOTTOM | 2163 - (pktgen_random() & 2163 + ((__force __be32)pktgen_random() & 2164 2164 htonl(0x000fffff)); 2165 2165 } 2166 2166 ··· 2220 2220 if (pkt_dev->cflows && pkt_dev->flows[flow].count != 0) { 2221 2221 pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr; 2222 2222 } else { 2223 - 2224 - if ((imn = ntohl(pkt_dev->daddr_min)) < (imx = 2225 - ntohl(pkt_dev-> 2226 - daddr_max))) 2227 - { 2223 + imn 
= ntohl(pkt_dev->daddr_min); 2224 + imx = ntohl(pkt_dev->daddr_max); 2225 + if (imn < imx) { 2228 2226 __u32 t; 2227 + __be32 s; 2229 2228 if (pkt_dev->flags & F_IPDST_RND) { 2230 2229 2231 - t = ((pktgen_random() % (imx - imn)) + 2232 - imn); 2233 - t = htonl(t); 2230 + t = pktgen_random() % (imx - imn) + imn; 2231 + s = htonl(t); 2234 2232 2235 - while (LOOPBACK(t) || MULTICAST(t) 2236 - || BADCLASS(t) || ZERONET(t) 2237 - || LOCAL_MCAST(t)) { 2238 - t = ((pktgen_random() % 2239 - (imx - imn)) + imn); 2240 - t = htonl(t); 2233 + while (LOOPBACK(s) || MULTICAST(s) 2234 + || BADCLASS(s) || ZERONET(s) 2235 + || LOCAL_MCAST(s)) { 2236 + t = (pktgen_random() % 2237 + (imx - imn)) + imn; 2238 + s = htonl(t); 2241 2239 } 2242 - pkt_dev->cur_daddr = t; 2243 - } 2244 - 2245 - else { 2240 + pkt_dev->cur_daddr = s; 2241 + } else { 2246 2242 t = ntohl(pkt_dev->cur_daddr); 2247 2243 t++; 2248 2244 if (t > imx) { ··· 2266 2270 2267 2271 for (i = 0; i < 4; i++) { 2268 2272 pkt_dev->cur_in6_daddr.s6_addr32[i] = 2269 - ((pktgen_random() | 2273 + (((__force __be32)pktgen_random() | 2270 2274 pkt_dev->min_in6_daddr.s6_addr32[i]) & 2271 2275 pkt_dev->max_in6_daddr.s6_addr32[i]); 2272 2276 } ··· 2373 2377 udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); 2374 2378 2375 2379 memcpy(eth, pkt_dev->hh, 12); 2376 - *(u16 *) & eth[12] = protocol; 2380 + *(__be16 *) & eth[12] = protocol; 2377 2381 2378 2382 /* Eth + IPh + UDPh + mpls */ 2379 2383 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - ··· 2493 2497 char suffix[16]; 2494 2498 unsigned int prefixlen = 0; 2495 2499 unsigned int suffixlen = 0; 2496 - __u32 tmp; 2500 + __be32 tmp; 2497 2501 2498 2502 for (i = 0; i < 16; i++) 2499 2503 ip[i] = 0; ··· 2709 2713 udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); 2710 2714 2711 2715 memcpy(eth, pkt_dev->hh, 12); 2712 - *(u16 *) & eth[12] = protocol; 2716 + *(__be16 *) & eth[12] = protocol; 2713 2717 2714 2718 /* Eth + IPh + UDPh + mpls */ 2715 2719 datalen = 
pkt_dev->cur_pkt_size - 14 - ··· 2728 2732 udph->len = htons(datalen + sizeof(struct udphdr)); 2729 2733 udph->check = 0; /* No checksum */ 2730 2734 2731 - *(u32 *) iph = __constant_htonl(0x60000000); /* Version + flow */ 2735 + *(__be32 *) iph = __constant_htonl(0x60000000); /* Version + flow */ 2732 2736 2733 2737 if (pkt_dev->traffic_class) { 2734 2738 /* Version + traffic class + flow (0) */ 2735 - *(u32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20)); 2739 + *(__be32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20)); 2736 2740 } 2737 2741 2738 2742 iph->hop_limit = 32;