Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net.h/skbuff.h: Remove extern from function prototypes

There is a mix of function prototypes with and without extern
in the kernel sources. Standardize on not using extern for
function prototypes.

Function prototypes don't need to be written with extern.
extern is assumed by the compiler. Its use is as unnecessary as
using auto to declare automatic/local variables in a block.

Signed-off-by: Joe Perches <joe@perches.com>

+146 -182
+37 -45
include/linux/net.h
··· 195 195 SOCK_WAKE_URG, 196 196 }; 197 197 198 - extern int sock_wake_async(struct socket *sk, int how, int band); 199 - extern int sock_register(const struct net_proto_family *fam); 200 - extern void sock_unregister(int family); 201 - extern int __sock_create(struct net *net, int family, int type, int proto, 202 - struct socket **res, int kern); 203 - extern int sock_create(int family, int type, int proto, 204 - struct socket **res); 205 - extern int sock_create_kern(int family, int type, int proto, 206 - struct socket **res); 207 - extern int sock_create_lite(int family, int type, int proto, 208 - struct socket **res); 209 - extern void sock_release(struct socket *sock); 210 - extern int sock_sendmsg(struct socket *sock, struct msghdr *msg, 211 - size_t len); 212 - extern int sock_recvmsg(struct socket *sock, struct msghdr *msg, 213 - size_t size, int flags); 214 - extern struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname); 215 - extern struct socket *sockfd_lookup(int fd, int *err); 216 - extern struct socket *sock_from_file(struct file *file, int *err); 198 + int sock_wake_async(struct socket *sk, int how, int band); 199 + int sock_register(const struct net_proto_family *fam); 200 + void sock_unregister(int family); 201 + int __sock_create(struct net *net, int family, int type, int proto, 202 + struct socket **res, int kern); 203 + int sock_create(int family, int type, int proto, struct socket **res); 204 + int sock_create_kern(int family, int type, int proto, struct socket **res); 205 + int sock_create_lite(int family, int type, int proto, struct socket **res); 206 + void sock_release(struct socket *sock); 207 + int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len); 208 + int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, 209 + int flags); 210 + struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname); 211 + struct socket *sockfd_lookup(int fd, int *err); 212 + struct 
socket *sock_from_file(struct file *file, int *err); 217 213 #define sockfd_put(sock) fput(sock->file) 218 - extern int net_ratelimit(void); 214 + int net_ratelimit(void); 219 215 220 216 #define net_ratelimited_function(function, ...) \ 221 217 do { \ ··· 239 243 #define net_random() prandom_u32() 240 244 #define net_srandom(seed) prandom_seed((__force u32)(seed)) 241 245 242 - extern int kernel_sendmsg(struct socket *sock, struct msghdr *msg, 243 - struct kvec *vec, size_t num, size_t len); 244 - extern int kernel_recvmsg(struct socket *sock, struct msghdr *msg, 245 - struct kvec *vec, size_t num, 246 - size_t len, int flags); 246 + int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, 247 + size_t num, size_t len); 248 + int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, 249 + size_t num, size_t len, int flags); 247 250 248 - extern int kernel_bind(struct socket *sock, struct sockaddr *addr, 249 - int addrlen); 250 - extern int kernel_listen(struct socket *sock, int backlog); 251 - extern int kernel_accept(struct socket *sock, struct socket **newsock, 252 - int flags); 253 - extern int kernel_connect(struct socket *sock, struct sockaddr *addr, 254 - int addrlen, int flags); 255 - extern int kernel_getsockname(struct socket *sock, struct sockaddr *addr, 256 - int *addrlen); 257 - extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr, 258 - int *addrlen); 259 - extern int kernel_getsockopt(struct socket *sock, int level, int optname, 260 - char *optval, int *optlen); 261 - extern int kernel_setsockopt(struct socket *sock, int level, int optname, 262 - char *optval, unsigned int optlen); 263 - extern int kernel_sendpage(struct socket *sock, struct page *page, int offset, 264 - size_t size, int flags); 265 - extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); 266 - extern int kernel_sock_shutdown(struct socket *sock, 267 - enum sock_shutdown_cmd how); 251 + int 
kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen); 252 + int kernel_listen(struct socket *sock, int backlog); 253 + int kernel_accept(struct socket *sock, struct socket **newsock, int flags); 254 + int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, 255 + int flags); 256 + int kernel_getsockname(struct socket *sock, struct sockaddr *addr, 257 + int *addrlen); 258 + int kernel_getpeername(struct socket *sock, struct sockaddr *addr, 259 + int *addrlen); 260 + int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval, 261 + int *optlen); 262 + int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval, 263 + unsigned int optlen); 264 + int kernel_sendpage(struct socket *sock, struct page *page, int offset, 265 + size_t size, int flags); 266 + int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); 267 + int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); 268 268 269 269 #define MODULE_ALIAS_NETPROTO(proto) \ 270 270 MODULE_ALIAS("net-pf-" __stringify(proto))
+109 -137
include/linux/skbuff.h
··· 585 585 skb->_skb_refdst = (unsigned long)dst; 586 586 } 587 587 588 - extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, 589 - bool force); 588 + void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, 589 + bool force); 590 590 591 591 /** 592 592 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference ··· 634 634 return (struct rtable *)skb_dst(skb); 635 635 } 636 636 637 - extern void kfree_skb(struct sk_buff *skb); 638 - extern void kfree_skb_list(struct sk_buff *segs); 639 - extern void skb_tx_error(struct sk_buff *skb); 640 - extern void consume_skb(struct sk_buff *skb); 641 - extern void __kfree_skb(struct sk_buff *skb); 637 + void kfree_skb(struct sk_buff *skb); 638 + void kfree_skb_list(struct sk_buff *segs); 639 + void skb_tx_error(struct sk_buff *skb); 640 + void consume_skb(struct sk_buff *skb); 641 + void __kfree_skb(struct sk_buff *skb); 642 642 extern struct kmem_cache *skbuff_head_cache; 643 643 644 - extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen); 645 - extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 646 - bool *fragstolen, int *delta_truesize); 644 + void kfree_skb_partial(struct sk_buff *skb, bool head_stolen); 645 + bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 646 + bool *fragstolen, int *delta_truesize); 647 647 648 - extern struct sk_buff *__alloc_skb(unsigned int size, 649 - gfp_t priority, int flags, int node); 650 - extern struct sk_buff *build_skb(void *data, unsigned int frag_size); 648 + struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, 649 + int node); 650 + struct sk_buff *build_skb(void *data, unsigned int frag_size); 651 651 static inline struct sk_buff *alloc_skb(unsigned int size, 652 652 gfp_t priority) 653 653 { ··· 660 660 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); 661 661 } 662 662 663 - extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node); 663 + 
struct sk_buff *__alloc_skb_head(gfp_t priority, int node); 664 664 static inline struct sk_buff *alloc_skb_head(gfp_t priority) 665 665 { 666 666 return __alloc_skb_head(priority, -1); 667 667 } 668 668 669 - extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 670 - extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 671 - extern struct sk_buff *skb_clone(struct sk_buff *skb, 672 - gfp_t priority); 673 - extern struct sk_buff *skb_copy(const struct sk_buff *skb, 674 - gfp_t priority); 675 - extern struct sk_buff *__pskb_copy(struct sk_buff *skb, 676 - int headroom, gfp_t gfp_mask); 669 + struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 670 + int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 671 + struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); 672 + struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); 673 + struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask); 677 674 678 - extern int pskb_expand_head(struct sk_buff *skb, 679 - int nhead, int ntail, 680 - gfp_t gfp_mask); 681 - extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, 682 - unsigned int headroom); 683 - extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 684 - int newheadroom, int newtailroom, 685 - gfp_t priority); 686 - extern int skb_to_sgvec(struct sk_buff *skb, 687 - struct scatterlist *sg, int offset, 688 - int len); 689 - extern int skb_cow_data(struct sk_buff *skb, int tailbits, 690 - struct sk_buff **trailer); 691 - extern int skb_pad(struct sk_buff *skb, int pad); 675 + int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); 676 + struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, 677 + unsigned int headroom); 678 + struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, 679 + int newtailroom, gfp_t priority); 680 + int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, 681 + 
int len); 682 + int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); 683 + int skb_pad(struct sk_buff *skb, int pad); 692 684 #define dev_kfree_skb(a) consume_skb(a) 693 685 694 - extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 695 - int getfrag(void *from, char *to, int offset, 696 - int len,int odd, struct sk_buff *skb), 697 - void *from, int length); 686 + int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 687 + int getfrag(void *from, char *to, int offset, 688 + int len, int odd, struct sk_buff *skb), 689 + void *from, int length); 698 690 699 691 struct skb_seq_state { 700 692 __u32 lower_offset; ··· 698 706 __u8 *frag_data; 699 707 }; 700 708 701 - extern void skb_prepare_seq_read(struct sk_buff *skb, 702 - unsigned int from, unsigned int to, 703 - struct skb_seq_state *st); 704 - extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 705 - struct skb_seq_state *st); 706 - extern void skb_abort_seq_read(struct skb_seq_state *st); 709 + void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 710 + unsigned int to, struct skb_seq_state *st); 711 + unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 712 + struct skb_seq_state *st); 713 + void skb_abort_seq_read(struct skb_seq_state *st); 707 714 708 - extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 709 - unsigned int to, struct ts_config *config, 710 - struct ts_state *state); 715 + unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 716 + unsigned int to, struct ts_config *config, 717 + struct ts_state *state); 711 718 712 - extern void __skb_get_rxhash(struct sk_buff *skb); 719 + void __skb_get_rxhash(struct sk_buff *skb); 713 720 static inline __u32 skb_get_rxhash(struct sk_buff *skb) 714 721 { 715 722 if (!skb->l4_rxhash) ··· 1086 1095 * The "__skb_xxxx()" functions are the non-atomic ones that 1087 1096 * can only be called with interrupts disabled. 
1088 1097 */ 1089 - extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list); 1098 + void skb_insert(struct sk_buff *old, struct sk_buff *newsk, 1099 + struct sk_buff_head *list); 1090 1100 static inline void __skb_insert(struct sk_buff *newsk, 1091 1101 struct sk_buff *prev, struct sk_buff *next, 1092 1102 struct sk_buff_head *list) ··· 1193 1201 __skb_insert(newsk, prev, prev->next, list); 1194 1202 } 1195 1203 1196 - extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, 1197 - struct sk_buff_head *list); 1204 + void skb_append(struct sk_buff *old, struct sk_buff *newsk, 1205 + struct sk_buff_head *list); 1198 1206 1199 1207 static inline void __skb_queue_before(struct sk_buff_head *list, 1200 1208 struct sk_buff *next, ··· 1213 1221 * 1214 1222 * A buffer cannot be placed on two lists at the same time. 1215 1223 */ 1216 - extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); 1224 + void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); 1217 1225 static inline void __skb_queue_head(struct sk_buff_head *list, 1218 1226 struct sk_buff *newsk) 1219 1227 { ··· 1230 1238 * 1231 1239 * A buffer cannot be placed on two lists at the same time. 1232 1240 */ 1233 - extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); 1241 + void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); 1234 1242 static inline void __skb_queue_tail(struct sk_buff_head *list, 1235 1243 struct sk_buff *newsk) 1236 1244 { ··· 1241 1249 * remove sk_buff from list. _Must_ be called atomically, and with 1242 1250 * the list known.. 
1243 1251 */ 1244 - extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list); 1252 + void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list); 1245 1253 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 1246 1254 { 1247 1255 struct sk_buff *next, *prev; ··· 1262 1270 * so must be used with appropriate locks held only. The head item is 1263 1271 * returned or %NULL if the list is empty. 1264 1272 */ 1265 - extern struct sk_buff *skb_dequeue(struct sk_buff_head *list); 1273 + struct sk_buff *skb_dequeue(struct sk_buff_head *list); 1266 1274 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) 1267 1275 { 1268 1276 struct sk_buff *skb = skb_peek(list); ··· 1279 1287 * so must be used with appropriate locks held only. The tail item is 1280 1288 * returned or %NULL if the list is empty. 1281 1289 */ 1282 - extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); 1290 + struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); 1283 1291 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) 1284 1292 { 1285 1293 struct sk_buff *skb = skb_peek_tail(list); ··· 1365 1373 skb_shinfo(skb)->nr_frags = i + 1; 1366 1374 } 1367 1375 1368 - extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, 1369 - int off, int size, unsigned int truesize); 1376 + void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, 1377 + int size, unsigned int truesize); 1370 1378 1371 1379 #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) 1372 1380 #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb)) ··· 1410 1418 /* 1411 1419 * Add data to an sk_buff 1412 1420 */ 1413 - extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len); 1421 + unsigned char *skb_put(struct sk_buff *skb, unsigned int len); 1414 1422 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) 1415 1423 { 1416 1424 unsigned char *tmp = 
skb_tail_pointer(skb); ··· 1420 1428 return tmp; 1421 1429 } 1422 1430 1423 - extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len); 1431 + unsigned char *skb_push(struct sk_buff *skb, unsigned int len); 1424 1432 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) 1425 1433 { 1426 1434 skb->data -= len; ··· 1428 1436 return skb->data; 1429 1437 } 1430 1438 1431 - extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len); 1439 + unsigned char *skb_pull(struct sk_buff *skb, unsigned int len); 1432 1440 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) 1433 1441 { 1434 1442 skb->len -= len; ··· 1441 1449 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); 1442 1450 } 1443 1451 1444 - extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); 1452 + unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); 1445 1453 1446 1454 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) 1447 1455 { ··· 1745 1753 #define NET_SKB_PAD max(32, L1_CACHE_BYTES) 1746 1754 #endif 1747 1755 1748 - extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); 1756 + int ___pskb_trim(struct sk_buff *skb, unsigned int len); 1749 1757 1750 1758 static inline void __skb_trim(struct sk_buff *skb, unsigned int len) 1751 1759 { ··· 1757 1765 skb_set_tail_pointer(skb, len); 1758 1766 } 1759 1767 1760 - extern void skb_trim(struct sk_buff *skb, unsigned int len); 1768 + void skb_trim(struct sk_buff *skb, unsigned int len); 1761 1769 1762 1770 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) 1763 1771 { ··· 1830 1838 * the list and one reference dropped. This function does not take the 1831 1839 * list lock and the caller must hold the relevant locks to use it. 
1832 1840 */ 1833 - extern void skb_queue_purge(struct sk_buff_head *list); 1841 + void skb_queue_purge(struct sk_buff_head *list); 1834 1842 static inline void __skb_queue_purge(struct sk_buff_head *list) 1835 1843 { 1836 1844 struct sk_buff *skb; ··· 1842 1850 #define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER) 1843 1851 #define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE 1844 1852 1845 - extern void *netdev_alloc_frag(unsigned int fragsz); 1853 + void *netdev_alloc_frag(unsigned int fragsz); 1846 1854 1847 - extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 1848 - unsigned int length, 1849 - gfp_t gfp_mask); 1855 + struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, 1856 + gfp_t gfp_mask); 1850 1857 1851 1858 /** 1852 1859 * netdev_alloc_skb - allocate an skbuff for rx on a specific device ··· 2333 2342 #define skb_walk_frags(skb, iter) \ 2334 2343 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) 2335 2344 2336 - extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, 2337 - int *peeked, int *off, int *err); 2338 - extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, 2339 - int noblock, int *err); 2340 - extern unsigned int datagram_poll(struct file *file, struct socket *sock, 2341 - struct poll_table_struct *wait); 2342 - extern int skb_copy_datagram_iovec(const struct sk_buff *from, 2343 - int offset, struct iovec *to, 2344 - int size); 2345 - extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, 2346 - int hlen, 2347 - struct iovec *iov); 2348 - extern int skb_copy_datagram_from_iovec(struct sk_buff *skb, 2349 - int offset, 2350 - const struct iovec *from, 2351 - int from_offset, 2352 - int len); 2353 - extern int zerocopy_sg_from_iovec(struct sk_buff *skb, 2354 - const struct iovec *frm, 2355 - int offset, 2356 - size_t count); 2357 - extern int skb_copy_datagram_const_iovec(const struct sk_buff *from, 2358 - int 
offset, 2359 - const struct iovec *to, 2360 - int to_offset, 2361 - int size); 2362 - extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); 2363 - extern void skb_free_datagram_locked(struct sock *sk, 2364 - struct sk_buff *skb); 2365 - extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, 2366 - unsigned int flags); 2367 - extern __wsum skb_checksum(const struct sk_buff *skb, int offset, 2368 - int len, __wsum csum); 2369 - extern int skb_copy_bits(const struct sk_buff *skb, int offset, 2370 - void *to, int len); 2371 - extern int skb_store_bits(struct sk_buff *skb, int offset, 2372 - const void *from, int len); 2373 - extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, 2374 - int offset, u8 *to, int len, 2375 - __wsum csum); 2376 - extern int skb_splice_bits(struct sk_buff *skb, 2377 - unsigned int offset, 2378 - struct pipe_inode_info *pipe, 2379 - unsigned int len, 2380 - unsigned int flags); 2381 - extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); 2382 - extern void skb_split(struct sk_buff *skb, 2383 - struct sk_buff *skb1, const u32 len); 2384 - extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, 2385 - int shiftlen); 2386 - extern void skb_scrub_packet(struct sk_buff *skb, bool xnet); 2345 + struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, 2346 + int *peeked, int *off, int *err); 2347 + struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, 2348 + int *err); 2349 + unsigned int datagram_poll(struct file *file, struct socket *sock, 2350 + struct poll_table_struct *wait); 2351 + int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, 2352 + struct iovec *to, int size); 2353 + int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, 2354 + struct iovec *iov); 2355 + int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, 2356 + const struct iovec *from, int from_offset, 2357 + int len); 2358 + int 
zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm, 2359 + int offset, size_t count); 2360 + int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset, 2361 + const struct iovec *to, int to_offset, 2362 + int size); 2363 + void skb_free_datagram(struct sock *sk, struct sk_buff *skb); 2364 + void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); 2365 + int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); 2366 + __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, 2367 + __wsum csum); 2368 + int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); 2369 + int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); 2370 + __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, 2371 + int len, __wsum csum); 2372 + int skb_splice_bits(struct sk_buff *skb, unsigned int offset, 2373 + struct pipe_inode_info *pipe, unsigned int len, 2374 + unsigned int flags); 2375 + void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); 2376 + void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); 2377 + int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); 2378 + void skb_scrub_packet(struct sk_buff *skb, bool xnet); 2387 2379 2388 - extern struct sk_buff *skb_segment(struct sk_buff *skb, 2389 - netdev_features_t features); 2380 + struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); 2390 2381 2391 2382 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 2392 2383 int len, void *buffer) ··· 2413 2440 memcpy(skb->data + offset, from, len); 2414 2441 } 2415 2442 2416 - extern void skb_init(void); 2443 + void skb_init(void); 2417 2444 2418 2445 static inline ktime_t skb_get_ktime(const struct sk_buff *skb) 2419 2446 { ··· 2456 2483 return ktime_set(0, 0); 2457 2484 } 2458 2485 2459 - extern void skb_timestamping_init(void); 2486 + void 
skb_timestamping_init(void); 2460 2487 2461 2488 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING 2462 2489 2463 - extern void skb_clone_tx_timestamp(struct sk_buff *skb); 2464 - extern bool skb_defer_rx_timestamp(struct sk_buff *skb); 2490 + void skb_clone_tx_timestamp(struct sk_buff *skb); 2491 + bool skb_defer_rx_timestamp(struct sk_buff *skb); 2465 2492 2466 2493 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */ 2467 2494 ··· 2502 2529 * generates a software time stamp (otherwise), then queues the clone 2503 2530 * to the error queue of the socket. Errors are silently ignored. 2504 2531 */ 2505 - extern void skb_tstamp_tx(struct sk_buff *orig_skb, 2506 - struct skb_shared_hwtstamps *hwtstamps); 2532 + void skb_tstamp_tx(struct sk_buff *orig_skb, 2533 + struct skb_shared_hwtstamps *hwtstamps); 2507 2534 2508 2535 static inline void sw_tx_timestamp(struct sk_buff *skb) 2509 2536 { ··· 2535 2562 */ 2536 2563 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); 2537 2564 2538 - extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); 2539 - extern __sum16 __skb_checksum_complete(struct sk_buff *skb); 2565 + __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); 2566 + __sum16 __skb_checksum_complete(struct sk_buff *skb); 2540 2567 2541 2568 static inline int skb_csum_unnecessary(const struct sk_buff *skb) 2542 2569 { ··· 2566 2593 } 2567 2594 2568 2595 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 2569 - extern void nf_conntrack_destroy(struct nf_conntrack *nfct); 2596 + void nf_conntrack_destroy(struct nf_conntrack *nfct); 2570 2597 static inline void nf_conntrack_put(struct nf_conntrack *nfct) 2571 2598 { 2572 2599 if (nfct && atomic_dec_and_test(&nfct->use)) ··· 2705 2732 return skb->queue_mapping != 0; 2706 2733 } 2707 2734 2708 - extern u16 __skb_tx_hash(const struct net_device *dev, 2709 - const struct sk_buff *skb, 2710 - unsigned int num_tx_queues); 2735 + u16 __skb_tx_hash(const struct net_device *dev, const 
struct sk_buff *skb, 2736 + unsigned int num_tx_queues); 2711 2737 2712 2738 #ifdef CONFIG_XFRM 2713 2739 static inline struct sec_path *skb_sec_path(struct sk_buff *skb) ··· 2760 2788 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; 2761 2789 } 2762 2790 2763 - extern void __skb_warn_lro_forwarding(const struct sk_buff *skb); 2791 + void __skb_warn_lro_forwarding(const struct sk_buff *skb); 2764 2792 2765 2793 static inline bool skb_warn_if_lro(const struct sk_buff *skb) 2766 2794 {