Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

udp: Remove udp_offloads

Now that the UDP encapsulation GRO functions have been moved to the UDP
socket we no longer need the udp_offload infrastructure, so remove it.

Signed-off-by: Tom Herbert <tom@herbertland.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Tom Herbert and committed by
David S. Miller
46aa2f30 4a0090a9

-83
-17
include/linux/netdevice.h
··· 2159 2159 struct list_head list; 2160 2160 }; 2161 2161 2162 - struct udp_offload; 2163 - 2164 - struct udp_offload_callbacks { 2165 - struct sk_buff **(*gro_receive)(struct sk_buff **head, 2166 - struct sk_buff *skb, 2167 - struct udp_offload *uoff); 2168 - int (*gro_complete)(struct sk_buff *skb, 2169 - int nhoff, 2170 - struct udp_offload *uoff); 2171 - }; 2172 - 2173 - struct udp_offload { 2174 - __be16 port; 2175 - u8 ipproto; 2176 - struct udp_offload_callbacks callbacks; 2177 - }; 2178 - 2179 2162 /* often modified stats are per-CPU, other are shared (netdev->stats) */ 2180 2163 struct pcpu_sw_netstats { 2181 2164 u64 rx_packets;
-3
include/net/protocol.h
··· 107 107 void inet_register_protosw(struct inet_protosw *p); 108 108 void inet_unregister_protosw(struct inet_protosw *p); 109 109 110 - int udp_add_offload(struct net *net, struct udp_offload *prot); 111 - void udp_del_offload(struct udp_offload *prot); 112 - 113 110 #if IS_ENABLED(CONFIG_IPV6) 114 111 int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num); 115 112 int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
-63
net/ipv4/udp_offload.c
··· 14 14 #include <net/udp.h> 15 15 #include <net/protocol.h> 16 16 17 - static DEFINE_SPINLOCK(udp_offload_lock); 18 - static struct udp_offload_priv __rcu *udp_offload_base __read_mostly; 19 - 20 - #define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock)) 21 - 22 - struct udp_offload_priv { 23 - struct udp_offload *offload; 24 - possible_net_t net; 25 - struct rcu_head rcu; 26 - struct udp_offload_priv __rcu *next; 27 - }; 28 - 29 17 static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, 30 18 netdev_features_t features, 31 19 struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, ··· 242 254 return segs; 243 255 } 244 256 245 - int udp_add_offload(struct net *net, struct udp_offload *uo) 246 - { 247 - struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC); 248 - 249 - if (!new_offload) 250 - return -ENOMEM; 251 - 252 - write_pnet(&new_offload->net, net); 253 - new_offload->offload = uo; 254 - 255 - spin_lock(&udp_offload_lock); 256 - new_offload->next = udp_offload_base; 257 - rcu_assign_pointer(udp_offload_base, new_offload); 258 - spin_unlock(&udp_offload_lock); 259 - 260 - return 0; 261 - } 262 - EXPORT_SYMBOL(udp_add_offload); 263 - 264 - static void udp_offload_free_routine(struct rcu_head *head) 265 - { 266 - struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu); 267 - kfree(ou_priv); 268 - } 269 - 270 - void udp_del_offload(struct udp_offload *uo) 271 - { 272 - struct udp_offload_priv __rcu **head = &udp_offload_base; 273 - struct udp_offload_priv *uo_priv; 274 - 275 - spin_lock(&udp_offload_lock); 276 - 277 - uo_priv = udp_deref_protected(*head); 278 - for (; uo_priv != NULL; 279 - uo_priv = udp_deref_protected(*head)) { 280 - if (uo_priv->offload == uo) { 281 - rcu_assign_pointer(*head, 282 - udp_deref_protected(uo_priv->next)); 283 - goto unlock; 284 - } 285 - head = &uo_priv->next; 286 - } 287 - pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port)); 288 - unlock: 289 - spin_unlock(&udp_offload_lock); 290 - if (uo_priv) 291 - call_rcu(&uo_priv->rcu, udp_offload_free_routine); 292 - } 293 - EXPORT_SYMBOL(udp_del_offload); 294 - 295 257 struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, 296 258 struct udphdr *uh, udp_lookup_t lookup) 297 259 {