Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/skbuff: fix up kernel-doc placement

There are several skb_* functions where the locked and unlocked
functions are confusingly documented. For several of them, the
kernel-doc for the unlocked version is placed above the locked version,
which to the casual reader makes it seem like the locked version "takes
no locks and you must therefore hold required locks before calling it."

One can see, for example, that this link claims to document
skb_queue_head(), while instead describing __skb_queue_head().

https://www.kernel.org/doc/html/latest/networking/kapi.html#c.skb_queue_head

The correct documentation for skb_queue_head() is also included further
down the page.

This diff tested via:

$ scripts/kernel-doc -rst include/linux/skbuff.h net/core/skbuff.c

No new warnings were seen, and the output makes a little more sense.

Signed-off-by: Brian Norris <briannorris@chromium.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Brian Norris and committed by
David S. Miller
4ea7b0cf 317d8e2f

+6 -6
+6 -6
include/linux/skbuff.h
···
  *
  * A buffer cannot be placed on two lists at the same time.
  */
-void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_head(struct sk_buff_head *list,
 				    struct sk_buff *newsk)
 {
 	__skb_queue_after(list, (struct sk_buff *)list, newsk);
 }
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
 
 /**
  * __skb_queue_tail - queue a buffer at the list tail
···
  *
  * A buffer cannot be placed on two lists at the same time.
  */
-void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_tail(struct sk_buff_head *list,
 				    struct sk_buff *newsk)
 {
 	__skb_queue_before(list, (struct sk_buff *)list, newsk);
 }
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 
 /*
  * remove sk_buff from list. _Must_ be called atomically, and with
···
  * so must be used with appropriate locks held only. The head item is
  * returned or %NULL if the list is empty.
  */
-struct sk_buff *skb_dequeue(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 {
 	struct sk_buff *skb = skb_peek(list);
···
 		__skb_unlink(skb, list);
 	return skb;
 }
+struct sk_buff *skb_dequeue(struct sk_buff_head *list);
 
 /**
  * __skb_dequeue_tail - remove from the tail of the queue
···
  * so must be used with appropriate locks held only. The tail item is
  * returned or %NULL if the list is empty.
  */
-struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
 {
 	struct sk_buff *skb = skb_peek_tail(list);
···
 		__skb_unlink(skb, list);
 	return skb;
 }
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
 
 
 static inline bool skb_is_nonlinear(const struct sk_buff *skb)
···
  * the list and one reference dropped. This function does not take the
  * list lock and the caller must hold the relevant locks to use it.
  */
-void skb_queue_purge(struct sk_buff_head *list);
 static inline void __skb_queue_purge(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
 	while ((skb = __skb_dequeue(list)) != NULL)
 		kfree_skb(skb);
 }
+void skb_queue_purge(struct sk_buff_head *list);
 
 unsigned int skb_rbtree_purge(struct rb_root *root);
···
 }
 
 /**
- * skb_put_padto - increase size and pad an skbuff up to a minimal size
+ * __skb_put_padto - increase size and pad an skbuff up to a minimal size
  * @skb: buffer to pad
  * @len: minimal length
  * @free_on_error: free buffer on error