Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix use-after-free in IPSEC input parsing, destination address
pointer was loaded before pskb_may_pull() which can change the SKB
data pointers. From Florian Westphal.

2) Stack out-of-bounds read in xfrm_state_find(), from Steffen
Klassert.

3) IPVS state of SKB is not properly reset when moving between
namespaces, from Ye Yin.

4) Fix crash in asix driver suspend and resume, from Andrey Konovalov.

5) Don't deliver ipv6 l2tp tunnel packets to ipv4 l2tp tunnels, and
vice versa, from Guillaume Nault.

6) Fix DSACK undo on non-dup ACKs, from Priyaranjan Jha.

7) Fix regression in bond_xmit_hash()'s behavior after the TCP port
selection changes back in 4.2, from Hangbin Liu.

8) Two divide by zero bugs in USB networking drivers when parsing
descriptors, from Bjorn Mork.

9) Fix bonding slaves being stuck in BOND_LINK_FAIL state, from Jay
Vosburgh.

10) Missing skb_reset_mac_header() in qmi_wwan, from Kristian Evensen.

11) Fix the destruction of tc action object races properly, from Cong
Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (31 commits)
cls_u32: use tcf_exts_get_net() before call_rcu()
cls_tcindex: use tcf_exts_get_net() before call_rcu()
cls_rsvp: use tcf_exts_get_net() before call_rcu()
cls_route: use tcf_exts_get_net() before call_rcu()
cls_matchall: use tcf_exts_get_net() before call_rcu()
cls_fw: use tcf_exts_get_net() before call_rcu()
cls_flower: use tcf_exts_get_net() before call_rcu()
cls_flow: use tcf_exts_get_net() before call_rcu()
cls_cgroup: use tcf_exts_get_net() before call_rcu()
cls_bpf: use tcf_exts_get_net() before call_rcu()
cls_basic: use tcf_exts_get_net() before call_rcu()
net_sched: introduce tcf_exts_get_net() and tcf_exts_put_net()
Revert "net_sched: hold netns refcnt for each action"
net: usb: asix: fill null-ptr-deref in asix_suspend
Revert "net: usb: asix: fill null-ptr-deref in asix_suspend"
qmi_wwan: Add missing skb_reset_mac_header-call
bonding: fix slave stuck in BOND_LINK_FAIL state
qrtr: Move to postcore_initcall
net: qmi_wwan: fix divide by 0 on bad descriptors
net: cdc_ether: fix divide by 0 on bad descriptors
...

+289 -150
+2 -1
drivers/net/bonding/bond_main.c
··· 2042 2042 2043 2043 bond_for_each_slave_rcu(bond, slave, iter) { 2044 2044 slave->new_link = BOND_LINK_NOCHANGE; 2045 + slave->link_new_state = slave->link; 2045 2046 2046 2047 link_state = bond_check_dev_link(bond, slave->dev, 0); 2047 2048 ··· 3254 3253 hash ^= (hash >> 16); 3255 3254 hash ^= (hash >> 8); 3256 3255 3257 - return hash; 3256 + return hash >> 1; 3258 3257 } 3259 3258 3260 3259 /*-------------------------- Device entry points ----------------------------*/
+3 -3
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
··· 37 37 38 38 #define T4FW_VERSION_MAJOR 0x01 39 39 #define T4FW_VERSION_MINOR 0x10 40 - #define T4FW_VERSION_MICRO 0x2D 40 + #define T4FW_VERSION_MICRO 0x3F 41 41 #define T4FW_VERSION_BUILD 0x00 42 42 43 43 #define T4FW_MIN_VERSION_MAJOR 0x01 ··· 46 46 47 47 #define T5FW_VERSION_MAJOR 0x01 48 48 #define T5FW_VERSION_MINOR 0x10 49 - #define T5FW_VERSION_MICRO 0x2D 49 + #define T5FW_VERSION_MICRO 0x3F 50 50 #define T5FW_VERSION_BUILD 0x00 51 51 52 52 #define T5FW_MIN_VERSION_MAJOR 0x00 ··· 55 55 56 56 #define T6FW_VERSION_MAJOR 0x01 57 57 #define T6FW_VERSION_MINOR 0x10 58 - #define T6FW_VERSION_MICRO 0x2D 58 + #define T6FW_VERSION_MICRO 0x3F 59 59 #define T6FW_VERSION_BUILD 0x00 60 60 61 61 #define T6FW_MIN_VERSION_MAJOR 0x00
+4
drivers/net/ethernet/marvell/mvpp2.c
··· 6747 6747 for (i = 0; i < port->nqvecs; i++) { 6748 6748 struct mvpp2_queue_vector *qv = port->qvecs + i; 6749 6749 6750 + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) 6751 + irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); 6752 + 6750 6753 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); 6751 6754 if (err) 6752 6755 goto err; ··· 6779 6776 struct mvpp2_queue_vector *qv = port->qvecs + i; 6780 6777 6781 6778 irq_set_affinity_hint(qv->irq, NULL); 6779 + irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); 6782 6780 free_irq(qv->irq, qv); 6783 6781 } 6784 6782 }
+8 -5
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
··· 365 365 struct mlx5e_l2_hash_node *hn) 366 366 { 367 367 u8 action = hn->action; 368 + u8 mac_addr[ETH_ALEN]; 368 369 int l2_err = 0; 370 + 371 + ether_addr_copy(mac_addr, hn->ai.addr); 369 372 370 373 switch (action) { 371 374 case MLX5E_ACTION_ADD: 372 375 mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH); 373 - if (!is_multicast_ether_addr(hn->ai.addr)) { 374 - l2_err = mlx5_mpfs_add_mac(priv->mdev, hn->ai.addr); 376 + if (!is_multicast_ether_addr(mac_addr)) { 377 + l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr); 375 378 hn->mpfs = !l2_err; 376 379 } 377 380 hn->action = MLX5E_ACTION_NONE; 378 381 break; 379 382 380 383 case MLX5E_ACTION_DEL: 381 - if (!is_multicast_ether_addr(hn->ai.addr) && hn->mpfs) 382 - l2_err = mlx5_mpfs_del_mac(priv->mdev, hn->ai.addr); 384 + if (!is_multicast_ether_addr(mac_addr) && hn->mpfs) 385 + l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr); 383 386 mlx5e_del_l2_flow_rule(priv, &hn->ai); 384 387 mlx5e_del_l2_from_hash(hn); 385 388 break; ··· 390 387 391 388 if (l2_err) 392 389 netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n", 393 - action == MLX5E_ACTION_ADD ? "add" : "del", hn->ai.addr, l2_err); 390 + action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err); 394 391 } 395 392 396 393 static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+2 -2
drivers/net/usb/asix_devices.c
··· 626 626 struct usbnet *dev = usb_get_intfdata(intf); 627 627 struct asix_common_private *priv = dev->driver_priv; 628 628 629 - if (priv->suspend) 629 + if (priv && priv->suspend) 630 630 priv->suspend(dev); 631 631 632 632 return usbnet_suspend(intf, message); ··· 678 678 struct usbnet *dev = usb_get_intfdata(intf); 679 679 struct asix_common_private *priv = dev->driver_priv; 680 680 681 - if (priv->resume) 681 + if (priv && priv->resume) 682 682 priv->resume(dev); 683 683 684 684 return usbnet_resume(intf);
+1 -1
drivers/net/usb/cdc_ether.c
··· 230 230 goto bad_desc; 231 231 } 232 232 233 - if (header.usb_cdc_ether_desc) { 233 + if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) { 234 234 dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize); 235 235 /* because of Zaurus, we may be ignoring the host 236 236 * side link address we were given.
+2 -1
drivers/net/usb/qmi_wwan.c
··· 499 499 return 1; 500 500 } 501 501 if (rawip) { 502 + skb_reset_mac_header(skb); 502 503 skb->dev = dev->net; /* normally set by eth_type_trans */ 503 504 skb->protocol = proto; 504 505 return 1; ··· 682 681 } 683 682 684 683 /* errors aren't fatal - we can live with the dynamic address */ 685 - if (cdc_ether) { 684 + if (cdc_ether && cdc_ether->wMaxSegmentSize) { 686 685 dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize); 687 686 usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress); 688 687 }
+7
include/linux/skbuff.h
··· 3770 3770 #endif 3771 3771 } 3772 3772 3773 + static inline void ipvs_reset(struct sk_buff *skb) 3774 + { 3775 + #if IS_ENABLED(CONFIG_IP_VS) 3776 + skb->ipvs_property = 0; 3777 + #endif 3778 + } 3779 + 3773 3780 /* Note: This doesn't put any conntrack and bridge info in dst. */ 3774 3781 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, 3775 3782 bool copy)
+1 -3
include/net/act_api.h
··· 14 14 struct tcf_idrinfo { 15 15 spinlock_t lock; 16 16 struct idr action_idr; 17 - struct net *net; 18 17 }; 19 18 20 19 struct tc_action_ops; ··· 105 106 106 107 static inline 107 108 int tc_action_net_init(struct tc_action_net *tn, 108 - const struct tc_action_ops *ops, struct net *net) 109 + const struct tc_action_ops *ops) 109 110 { 110 111 int err = 0; 111 112 ··· 113 114 if (!tn->idrinfo) 114 115 return -ENOMEM; 115 116 tn->ops = ops; 116 - tn->idrinfo->net = net; 117 117 spin_lock_init(&tn->idrinfo->lock); 118 118 idr_init(&tn->idrinfo->action_idr); 119 119 return err;
+24
include/net/pkt_cls.h
··· 94 94 __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ 95 95 int nr_actions; 96 96 struct tc_action **actions; 97 + struct net *net; 97 98 #endif 98 99 /* Map to export classifier specific extension TLV types to the 99 100 * generic extensions API. Unsupported extensions must be set to 0. ··· 108 107 #ifdef CONFIG_NET_CLS_ACT 109 108 exts->type = 0; 110 109 exts->nr_actions = 0; 110 + exts->net = NULL; 111 111 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *), 112 112 GFP_KERNEL); 113 113 if (!exts->actions) ··· 117 115 exts->action = action; 118 116 exts->police = police; 119 117 return 0; 118 + } 119 + 120 + /* Return false if the netns is being destroyed in cleanup_net(). Callers 121 + * need to do cleanup synchronously in this case, otherwise may race with 122 + * tc_action_net_exit(). Return true for other cases. 123 + */ 124 + static inline bool tcf_exts_get_net(struct tcf_exts *exts) 125 + { 126 + #ifdef CONFIG_NET_CLS_ACT 127 + exts->net = maybe_get_net(exts->net); 128 + return exts->net != NULL; 129 + #else 130 + return true; 131 + #endif 132 + } 133 + 134 + static inline void tcf_exts_put_net(struct tcf_exts *exts) 135 + { 136 + #ifdef CONFIG_NET_CLS_ACT 137 + if (exts->net) 138 + put_net(exts->net); 139 + #endif 120 140 } 121 141 122 142 static inline void tcf_exts_to_list(const struct tcf_exts *exts,
+1
net/core/skbuff.c
··· 4864 4864 if (!xnet) 4865 4865 return; 4866 4866 4867 + ipvs_reset(skb); 4867 4868 skb_orphan(skb); 4868 4869 skb->mark = 0; 4869 4870 }
+1 -1
net/ipv4/tcp_input.c
··· 115 115 116 116 #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) 117 117 #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) 118 - #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) 118 + #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK) 119 119 #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) 120 120 121 121 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
+9 -15
net/l2tp/l2tp_ip.c
··· 123 123 unsigned char *ptr, *optr; 124 124 struct l2tp_session *session; 125 125 struct l2tp_tunnel *tunnel = NULL; 126 + struct iphdr *iph; 126 127 int length; 127 128 128 129 if (!pskb_may_pull(skb, 4)) ··· 179 178 goto discard; 180 179 181 180 tunnel_id = ntohl(*(__be32 *) &skb->data[4]); 182 - tunnel = l2tp_tunnel_find(net, tunnel_id); 183 - if (tunnel) { 184 - sk = tunnel->sock; 185 - sock_hold(sk); 186 - } else { 187 - struct iphdr *iph = (struct iphdr *) skb_network_header(skb); 181 + iph = (struct iphdr *)skb_network_header(skb); 188 182 189 - read_lock_bh(&l2tp_ip_lock); 190 - sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, 191 - inet_iif(skb), tunnel_id); 192 - if (!sk) { 193 - read_unlock_bh(&l2tp_ip_lock); 194 - goto discard; 195 - } 196 - 197 - sock_hold(sk); 183 + read_lock_bh(&l2tp_ip_lock); 184 + sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb), 185 + tunnel_id); 186 + if (!sk) { 198 187 read_unlock_bh(&l2tp_ip_lock); 188 + goto discard; 199 189 } 190 + sock_hold(sk); 191 + read_unlock_bh(&l2tp_ip_lock); 200 192 201 193 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) 202 194 goto discard_put;
+9 -15
net/l2tp/l2tp_ip6.c
··· 136 136 unsigned char *ptr, *optr; 137 137 struct l2tp_session *session; 138 138 struct l2tp_tunnel *tunnel = NULL; 139 + struct ipv6hdr *iph; 139 140 int length; 140 141 141 142 if (!pskb_may_pull(skb, 4)) ··· 193 192 goto discard; 194 193 195 194 tunnel_id = ntohl(*(__be32 *) &skb->data[4]); 196 - tunnel = l2tp_tunnel_find(net, tunnel_id); 197 - if (tunnel) { 198 - sk = tunnel->sock; 199 - sock_hold(sk); 200 - } else { 201 - struct ipv6hdr *iph = ipv6_hdr(skb); 195 + iph = ipv6_hdr(skb); 202 196 203 - read_lock_bh(&l2tp_ip6_lock); 204 - sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr, 205 - inet6_iif(skb), tunnel_id); 206 - if (!sk) { 207 - read_unlock_bh(&l2tp_ip6_lock); 208 - goto discard; 209 - } 210 - 211 - sock_hold(sk); 197 + read_lock_bh(&l2tp_ip6_lock); 198 + sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr, 199 + inet6_iif(skb), tunnel_id); 200 + if (!sk) { 212 201 read_unlock_bh(&l2tp_ip6_lock); 202 + goto discard; 213 203 } 204 + sock_hold(sk); 205 + read_unlock_bh(&l2tp_ip6_lock); 214 206 215 207 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 216 208 goto discard_put;
+1 -1
net/qrtr/qrtr.c
··· 1085 1085 1086 1086 return 0; 1087 1087 } 1088 - module_init(qrtr_proto_init); 1088 + postcore_initcall(qrtr_proto_init); 1089 1089 1090 1090 static void __exit qrtr_proto_fini(void) 1091 1091 {
-2
net/sched/act_api.c
··· 78 78 spin_lock_bh(&idrinfo->lock); 79 79 idr_remove_ext(&idrinfo->action_idr, p->tcfa_index); 80 80 spin_unlock_bh(&idrinfo->lock); 81 - put_net(idrinfo->net); 82 81 gen_kill_estimator(&p->tcfa_rate_est); 83 82 free_tcf(p); 84 83 } ··· 336 337 p->idrinfo = idrinfo; 337 338 p->ops = ops; 338 339 INIT_LIST_HEAD(&p->list); 339 - get_net(idrinfo->net); 340 340 *a = p; 341 341 return 0; 342 342 }
+1 -1
net/sched/act_bpf.c
··· 398 398 { 399 399 struct tc_action_net *tn = net_generic(net, bpf_net_id); 400 400 401 - return tc_action_net_init(tn, &act_bpf_ops, net); 401 + return tc_action_net_init(tn, &act_bpf_ops); 402 402 } 403 403 404 404 static void __net_exit bpf_exit_net(struct net *net)
+1 -1
net/sched/act_connmark.c
··· 206 206 { 207 207 struct tc_action_net *tn = net_generic(net, connmark_net_id); 208 208 209 - return tc_action_net_init(tn, &act_connmark_ops, net); 209 + return tc_action_net_init(tn, &act_connmark_ops); 210 210 } 211 211 212 212 static void __net_exit connmark_exit_net(struct net *net)
+1 -1
net/sched/act_csum.c
··· 626 626 { 627 627 struct tc_action_net *tn = net_generic(net, csum_net_id); 628 628 629 - return tc_action_net_init(tn, &act_csum_ops, net); 629 + return tc_action_net_init(tn, &act_csum_ops); 630 630 } 631 631 632 632 static void __net_exit csum_exit_net(struct net *net)
+1 -1
net/sched/act_gact.c
··· 232 232 { 233 233 struct tc_action_net *tn = net_generic(net, gact_net_id); 234 234 235 - return tc_action_net_init(tn, &act_gact_ops, net); 235 + return tc_action_net_init(tn, &act_gact_ops); 236 236 } 237 237 238 238 static void __net_exit gact_exit_net(struct net *net)
+1 -1
net/sched/act_ife.c
··· 818 818 { 819 819 struct tc_action_net *tn = net_generic(net, ife_net_id); 820 820 821 - return tc_action_net_init(tn, &act_ife_ops, net); 821 + return tc_action_net_init(tn, &act_ife_ops); 822 822 } 823 823 824 824 static void __net_exit ife_exit_net(struct net *net)
+2 -2
net/sched/act_ipt.c
··· 334 334 { 335 335 struct tc_action_net *tn = net_generic(net, ipt_net_id); 336 336 337 - return tc_action_net_init(tn, &act_ipt_ops, net); 337 + return tc_action_net_init(tn, &act_ipt_ops); 338 338 } 339 339 340 340 static void __net_exit ipt_exit_net(struct net *net) ··· 384 384 { 385 385 struct tc_action_net *tn = net_generic(net, xt_net_id); 386 386 387 - return tc_action_net_init(tn, &act_xt_ops, net); 387 + return tc_action_net_init(tn, &act_xt_ops); 388 388 } 389 389 390 390 static void __net_exit xt_exit_net(struct net *net)
+1 -1
net/sched/act_mirred.c
··· 343 343 { 344 344 struct tc_action_net *tn = net_generic(net, mirred_net_id); 345 345 346 - return tc_action_net_init(tn, &act_mirred_ops, net); 346 + return tc_action_net_init(tn, &act_mirred_ops); 347 347 } 348 348 349 349 static void __net_exit mirred_exit_net(struct net *net)
+1 -1
net/sched/act_nat.c
··· 307 307 { 308 308 struct tc_action_net *tn = net_generic(net, nat_net_id); 309 309 310 - return tc_action_net_init(tn, &act_nat_ops, net); 310 + return tc_action_net_init(tn, &act_nat_ops); 311 311 } 312 312 313 313 static void __net_exit nat_exit_net(struct net *net)
+1 -1
net/sched/act_pedit.c
··· 450 450 { 451 451 struct tc_action_net *tn = net_generic(net, pedit_net_id); 452 452 453 - return tc_action_net_init(tn, &act_pedit_ops, net); 453 + return tc_action_net_init(tn, &act_pedit_ops); 454 454 } 455 455 456 456 static void __net_exit pedit_exit_net(struct net *net)
+1 -1
net/sched/act_police.c
··· 331 331 { 332 332 struct tc_action_net *tn = net_generic(net, police_net_id); 333 333 334 - return tc_action_net_init(tn, &act_police_ops, net); 334 + return tc_action_net_init(tn, &act_police_ops); 335 335 } 336 336 337 337 static void __net_exit police_exit_net(struct net *net)
+1 -1
net/sched/act_sample.c
··· 240 240 { 241 241 struct tc_action_net *tn = net_generic(net, sample_net_id); 242 242 243 - return tc_action_net_init(tn, &act_sample_ops, net); 243 + return tc_action_net_init(tn, &act_sample_ops); 244 244 } 245 245 246 246 static void __net_exit sample_exit_net(struct net *net)
+1 -1
net/sched/act_simple.c
··· 201 201 { 202 202 struct tc_action_net *tn = net_generic(net, simp_net_id); 203 203 204 - return tc_action_net_init(tn, &act_simp_ops, net); 204 + return tc_action_net_init(tn, &act_simp_ops); 205 205 } 206 206 207 207 static void __net_exit simp_exit_net(struct net *net)
+1 -1
net/sched/act_skbedit.c
··· 238 238 { 239 239 struct tc_action_net *tn = net_generic(net, skbedit_net_id); 240 240 241 - return tc_action_net_init(tn, &act_skbedit_ops, net); 241 + return tc_action_net_init(tn, &act_skbedit_ops); 242 242 } 243 243 244 244 static void __net_exit skbedit_exit_net(struct net *net)
+1 -1
net/sched/act_skbmod.c
··· 263 263 { 264 264 struct tc_action_net *tn = net_generic(net, skbmod_net_id); 265 265 266 - return tc_action_net_init(tn, &act_skbmod_ops, net); 266 + return tc_action_net_init(tn, &act_skbmod_ops); 267 267 } 268 268 269 269 static void __net_exit skbmod_exit_net(struct net *net)
+1 -1
net/sched/act_tunnel_key.c
··· 322 322 { 323 323 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); 324 324 325 - return tc_action_net_init(tn, &act_tunnel_key_ops, net); 325 + return tc_action_net_init(tn, &act_tunnel_key_ops); 326 326 } 327 327 328 328 static void __net_exit tunnel_key_exit_net(struct net *net)
+1 -1
net/sched/act_vlan.c
··· 269 269 { 270 270 struct tc_action_net *tn = net_generic(net, vlan_net_id); 271 271 272 - return tc_action_net_init(tn, &act_vlan_ops, net); 272 + return tc_action_net_init(tn, &act_vlan_ops); 273 273 } 274 274 275 275 static void __net_exit vlan_exit_net(struct net *net)
+1
net/sched/cls_api.c
··· 927 927 exts->actions[i++] = act; 928 928 exts->nr_actions = i; 929 929 } 930 + exts->net = net; 930 931 } 931 932 #else 932 933 if ((exts->action && tb[exts->action]) ||
+15 -5
net/sched/cls_basic.c
··· 85 85 return 0; 86 86 } 87 87 88 + static void __basic_delete_filter(struct basic_filter *f) 89 + { 90 + tcf_exts_destroy(&f->exts); 91 + tcf_em_tree_destroy(&f->ematches); 92 + tcf_exts_put_net(&f->exts); 93 + kfree(f); 94 + } 95 + 88 96 static void basic_delete_filter_work(struct work_struct *work) 89 97 { 90 98 struct basic_filter *f = container_of(work, struct basic_filter, work); 91 99 92 100 rtnl_lock(); 93 - tcf_exts_destroy(&f->exts); 94 - tcf_em_tree_destroy(&f->ematches); 101 + __basic_delete_filter(f); 95 102 rtnl_unlock(); 96 - 97 - kfree(f); 98 103 } 99 104 100 105 static void basic_delete_filter(struct rcu_head *head) ··· 118 113 list_for_each_entry_safe(f, n, &head->flist, link) { 119 114 list_del_rcu(&f->link); 120 115 tcf_unbind_filter(tp, &f->res); 121 - call_rcu(&f->rcu, basic_delete_filter); 116 + if (tcf_exts_get_net(&f->exts)) 117 + call_rcu(&f->rcu, basic_delete_filter); 118 + else 119 + __basic_delete_filter(f); 122 120 } 123 121 kfree_rcu(head, rcu); 124 122 } ··· 133 125 134 126 list_del_rcu(&f->link); 135 127 tcf_unbind_filter(tp, &f->res); 128 + tcf_exts_get_net(&f->exts); 136 129 call_rcu(&f->rcu, basic_delete_filter); 137 130 *last = list_empty(&head->flist); 138 131 return 0; ··· 228 219 if (fold) { 229 220 list_replace_rcu(&fold->link, &fnew->link); 230 221 tcf_unbind_filter(tp, &fold->res); 222 + tcf_exts_get_net(&fold->exts); 231 223 call_rcu(&fold->rcu, basic_delete_filter); 232 224 } else { 233 225 list_add_rcu(&fnew->link, &head->flist);
+6 -1
net/sched/cls_bpf.c
··· 249 249 static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog) 250 250 { 251 251 tcf_exts_destroy(&prog->exts); 252 + tcf_exts_put_net(&prog->exts); 252 253 253 254 if (cls_bpf_is_ebpf(prog)) 254 255 bpf_prog_put(prog->filter); ··· 283 282 cls_bpf_stop_offload(tp, prog); 284 283 list_del_rcu(&prog->link); 285 284 tcf_unbind_filter(tp, &prog->res); 286 - call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu); 285 + if (tcf_exts_get_net(&prog->exts)) 286 + call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu); 287 + else 288 + __cls_bpf_delete_prog(prog); 287 289 } 288 290 289 291 static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last) ··· 520 516 if (oldprog) { 521 517 list_replace_rcu(&oldprog->link, &prog->link); 522 518 tcf_unbind_filter(tp, &oldprog->res); 519 + tcf_exts_get_net(&oldprog->exts); 523 520 call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu); 524 521 } else { 525 522 list_add_rcu(&prog->link, &head->plist);
+18 -6
net/sched/cls_cgroup.c
··· 60 60 [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, 61 61 }; 62 62 63 + static void __cls_cgroup_destroy(struct cls_cgroup_head *head) 64 + { 65 + tcf_exts_destroy(&head->exts); 66 + tcf_em_tree_destroy(&head->ematches); 67 + tcf_exts_put_net(&head->exts); 68 + kfree(head); 69 + } 70 + 63 71 static void cls_cgroup_destroy_work(struct work_struct *work) 64 72 { 65 73 struct cls_cgroup_head *head = container_of(work, 66 74 struct cls_cgroup_head, 67 75 work); 68 76 rtnl_lock(); 69 - tcf_exts_destroy(&head->exts); 70 - tcf_em_tree_destroy(&head->ematches); 71 - kfree(head); 77 + __cls_cgroup_destroy(head); 72 78 rtnl_unlock(); 73 79 } 74 80 ··· 130 124 goto errout; 131 125 132 126 rcu_assign_pointer(tp->root, new); 133 - if (head) 127 + if (head) { 128 + tcf_exts_get_net(&head->exts); 134 129 call_rcu(&head->rcu, cls_cgroup_destroy_rcu); 130 + } 135 131 return 0; 136 132 errout: 137 133 tcf_exts_destroy(&new->exts); ··· 146 138 struct cls_cgroup_head *head = rtnl_dereference(tp->root); 147 139 148 140 /* Head can still be NULL due to cls_cgroup_init(). */ 149 - if (head) 150 - call_rcu(&head->rcu, cls_cgroup_destroy_rcu); 141 + if (head) { 142 + if (tcf_exts_get_net(&head->exts)) 143 + call_rcu(&head->rcu, cls_cgroup_destroy_rcu); 144 + else 145 + __cls_cgroup_destroy(head); 146 + } 151 147 } 152 148 153 149 static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last)
+18 -6
net/sched/cls_flow.c
··· 372 372 [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, 373 373 }; 374 374 375 + static void __flow_destroy_filter(struct flow_filter *f) 376 + { 377 + del_timer_sync(&f->perturb_timer); 378 + tcf_exts_destroy(&f->exts); 379 + tcf_em_tree_destroy(&f->ematches); 380 + tcf_exts_put_net(&f->exts); 381 + kfree(f); 382 + } 383 + 375 384 static void flow_destroy_filter_work(struct work_struct *work) 376 385 { 377 386 struct flow_filter *f = container_of(work, struct flow_filter, work); 378 387 379 388 rtnl_lock(); 380 - del_timer_sync(&f->perturb_timer); 381 - tcf_exts_destroy(&f->exts); 382 - tcf_em_tree_destroy(&f->ematches); 383 - kfree(f); 389 + __flow_destroy_filter(f); 384 390 rtnl_unlock(); 385 391 } 386 392 ··· 558 552 559 553 *arg = fnew; 560 554 561 - if (fold) 555 + if (fold) { 556 + tcf_exts_get_net(&fold->exts); 562 557 call_rcu(&fold->rcu, flow_destroy_filter); 558 + } 563 559 return 0; 564 560 565 561 err2: ··· 578 570 struct flow_filter *f = arg; 579 571 580 572 list_del_rcu(&f->list); 573 + tcf_exts_get_net(&f->exts); 581 574 call_rcu(&f->rcu, flow_destroy_filter); 582 575 *last = list_empty(&head->filters); 583 576 return 0; ··· 603 594 604 595 list_for_each_entry_safe(f, next, &head->filters, list) { 605 596 list_del_rcu(&f->list); 606 - call_rcu(&f->rcu, flow_destroy_filter); 597 + if (tcf_exts_get_net(&f->exts)) 598 + call_rcu(&f->rcu, flow_destroy_filter); 599 + else 600 + __flow_destroy_filter(f); 607 601 } 608 602 kfree_rcu(head, rcu); 609 603 }
+13 -3
net/sched/cls_flower.c
··· 218 218 return 0; 219 219 } 220 220 221 + static void __fl_destroy_filter(struct cls_fl_filter *f) 222 + { 223 + tcf_exts_destroy(&f->exts); 224 + tcf_exts_put_net(&f->exts); 225 + kfree(f); 226 + } 227 + 221 228 static void fl_destroy_filter_work(struct work_struct *work) 222 229 { 223 230 struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work); 224 231 225 232 rtnl_lock(); 226 - tcf_exts_destroy(&f->exts); 227 - kfree(f); 233 + __fl_destroy_filter(f); 228 234 rtnl_unlock(); 229 235 } 230 236 ··· 324 318 if (!tc_skip_hw(f->flags)) 325 319 fl_hw_destroy_filter(tp, f); 326 320 tcf_unbind_filter(tp, &f->res); 327 - call_rcu(&f->rcu, fl_destroy_filter); 321 + if (tcf_exts_get_net(&f->exts)) 322 + call_rcu(&f->rcu, fl_destroy_filter); 323 + else 324 + __fl_destroy_filter(f); 328 325 } 329 326 330 327 static void fl_destroy_sleepable(struct work_struct *work) ··· 997 988 idr_replace_ext(&head->handle_idr, fnew, fnew->handle); 998 989 list_replace_rcu(&fold->list, &fnew->list); 999 990 tcf_unbind_filter(tp, &fold->res); 991 + tcf_exts_get_net(&fold->exts); 1000 992 call_rcu(&fold->rcu, fl_destroy_filter); 1001 993 } else { 1002 994 list_add_tail_rcu(&fnew->list, &head->filters);
+14 -3
net/sched/cls_fw.c
··· 122 122 return 0; 123 123 } 124 124 125 + static void __fw_delete_filter(struct fw_filter *f) 126 + { 127 + tcf_exts_destroy(&f->exts); 128 + tcf_exts_put_net(&f->exts); 129 + kfree(f); 130 + } 131 + 125 132 static void fw_delete_filter_work(struct work_struct *work) 126 133 { 127 134 struct fw_filter *f = container_of(work, struct fw_filter, work); 128 135 129 136 rtnl_lock(); 130 - tcf_exts_destroy(&f->exts); 131 - kfree(f); 137 + __fw_delete_filter(f); 132 138 rtnl_unlock(); 133 139 } 134 140 ··· 160 154 RCU_INIT_POINTER(head->ht[h], 161 155 rtnl_dereference(f->next)); 162 156 tcf_unbind_filter(tp, &f->res); 163 - call_rcu(&f->rcu, fw_delete_filter); 157 + if (tcf_exts_get_net(&f->exts)) 158 + call_rcu(&f->rcu, fw_delete_filter); 159 + else 160 + __fw_delete_filter(f); 164 161 } 165 162 } 166 163 kfree_rcu(head, rcu); ··· 188 179 if (pfp == f) { 189 180 RCU_INIT_POINTER(*fp, rtnl_dereference(f->next)); 190 181 tcf_unbind_filter(tp, &f->res); 182 + tcf_exts_get_net(&f->exts); 191 183 call_rcu(&f->rcu, fw_delete_filter); 192 184 ret = 0; 193 185 break; ··· 309 299 RCU_INIT_POINTER(fnew->next, rtnl_dereference(pfp->next)); 310 300 rcu_assign_pointer(*fp, fnew); 311 301 tcf_unbind_filter(tp, &f->res); 302 + tcf_exts_get_net(&f->exts); 312 303 call_rcu(&f->rcu, fw_delete_filter); 313 304 314 305 *arg = fnew;
+12 -3
net/sched/cls_matchall.c
··· 44 44 return 0; 45 45 } 46 46 47 + static void __mall_destroy(struct cls_mall_head *head) 48 + { 49 + tcf_exts_destroy(&head->exts); 50 + tcf_exts_put_net(&head->exts); 51 + kfree(head); 52 + } 53 + 47 54 static void mall_destroy_work(struct work_struct *work) 48 55 { 49 56 struct cls_mall_head *head = container_of(work, struct cls_mall_head, 50 57 work); 51 58 rtnl_lock(); 52 - tcf_exts_destroy(&head->exts); 53 - kfree(head); 59 + __mall_destroy(head); 54 60 rtnl_unlock(); 55 61 } 56 62 ··· 115 109 if (tc_should_offload(dev, head->flags)) 116 110 mall_destroy_hw_filter(tp, head, (unsigned long) head); 117 111 118 - call_rcu(&head->rcu, mall_destroy_rcu); 112 + if (tcf_exts_get_net(&head->exts)) 113 + call_rcu(&head->rcu, mall_destroy_rcu); 114 + else 115 + __mall_destroy(head); 119 116 } 120 117 121 118 static void *mall_get(struct tcf_proto *tp, u32 handle)
+14 -3
net/sched/cls_route.c
··· 257 257 return 0; 258 258 } 259 259 260 + static void __route4_delete_filter(struct route4_filter *f) 261 + { 262 + tcf_exts_destroy(&f->exts); 263 + tcf_exts_put_net(&f->exts); 264 + kfree(f); 265 + } 266 + 260 267 static void route4_delete_filter_work(struct work_struct *work) 261 268 { 262 269 struct route4_filter *f = container_of(work, struct route4_filter, work); 263 270 264 271 rtnl_lock(); 265 - tcf_exts_destroy(&f->exts); 266 - kfree(f); 272 + __route4_delete_filter(f); 267 273 rtnl_unlock(); 268 274 } 269 275 ··· 303 297 next = rtnl_dereference(f->next); 304 298 RCU_INIT_POINTER(b->ht[h2], next); 305 299 tcf_unbind_filter(tp, &f->res); 306 - call_rcu(&f->rcu, route4_delete_filter); 300 + if (tcf_exts_get_net(&f->exts)) 301 + call_rcu(&f->rcu, route4_delete_filter); 302 + else 303 + __route4_delete_filter(f); 307 304 } 308 305 } 309 306 RCU_INIT_POINTER(head->table[h1], NULL); ··· 347 338 348 339 /* Delete it */ 349 340 tcf_unbind_filter(tp, &f->res); 341 + tcf_exts_get_net(&f->exts); 350 342 call_rcu(&f->rcu, route4_delete_filter); 351 343 352 344 /* Strip RTNL protected tree */ ··· 551 541 *arg = f; 552 542 if (fold) { 553 543 tcf_unbind_filter(tp, &fold->res); 544 + tcf_exts_get_net(&fold->exts); 554 545 call_rcu(&fold->rcu, route4_delete_filter); 555 546 } 556 547 return 0;
+12 -3
net/sched/cls_rsvp.h
··· 285 285 return -ENOBUFS; 286 286 } 287 287 288 + static void __rsvp_delete_filter(struct rsvp_filter *f) 289 + { 290 + tcf_exts_destroy(&f->exts); 291 + tcf_exts_put_net(&f->exts); 292 + kfree(f); 293 + } 294 + 288 295 static void rsvp_delete_filter_work(struct work_struct *work) 289 296 { 290 297 struct rsvp_filter *f = container_of(work, struct rsvp_filter, work); 291 298 292 299 rtnl_lock(); 293 - tcf_exts_destroy(&f->exts); 294 - kfree(f); 300 + __rsvp_delete_filter(f); 295 301 rtnl_unlock(); 296 302 } 297 303 ··· 316 310 * grace period, since converted-to-rcu actions are relying on that 317 311 * in cleanup() callback 318 312 */ 319 - call_rcu(&f->rcu, rsvp_delete_filter_rcu); 313 + if (tcf_exts_get_net(&f->exts)) 314 + call_rcu(&f->rcu, rsvp_delete_filter_rcu); 315 + else 316 + __rsvp_delete_filter(f); 320 317 } 321 318 322 319 static void rsvp_destroy(struct tcf_proto *tp)
+26 -7
net/sched/cls_tcindex.c
··· 139 139 return 0; 140 140 } 141 141 142 + static void __tcindex_destroy_rexts(struct tcindex_filter_result *r) 143 + { 144 + tcf_exts_destroy(&r->exts); 145 + tcf_exts_put_net(&r->exts); 146 + } 147 + 142 148 static void tcindex_destroy_rexts_work(struct work_struct *work) 143 149 { 144 150 struct tcindex_filter_result *r; 145 151 146 152 r = container_of(work, struct tcindex_filter_result, work); 147 153 rtnl_lock(); 148 - tcf_exts_destroy(&r->exts); 154 + __tcindex_destroy_rexts(r); 149 155 rtnl_unlock(); 150 156 } 151 157 ··· 164 158 tcf_queue_work(&r->work); 165 159 } 166 160 161 + static void __tcindex_destroy_fexts(struct tcindex_filter *f) 162 + { 163 + tcf_exts_destroy(&f->result.exts); 164 + tcf_exts_put_net(&f->result.exts); 165 + kfree(f); 166 + } 167 + 167 168 static void tcindex_destroy_fexts_work(struct work_struct *work) 168 169 { 169 170 struct tcindex_filter *f = container_of(work, struct tcindex_filter, 170 171 work); 171 172 172 173 rtnl_lock(); 173 - tcf_exts_destroy(&f->result.exts); 174 - kfree(f); 174 + __tcindex_destroy_fexts(f); 175 175 rtnl_unlock(); 176 176 } 177 177 ··· 222 210 * grace period, since converted-to-rcu actions are relying on that 223 211 * in cleanup() callback 224 212 */ 225 - if (f) 226 - call_rcu(&f->rcu, tcindex_destroy_fexts); 227 - else 228 - call_rcu(&r->rcu, tcindex_destroy_rexts); 213 + if (f) { 214 + if (tcf_exts_get_net(&f->result.exts)) 215 + call_rcu(&f->rcu, tcindex_destroy_fexts); 216 + else 217 + __tcindex_destroy_fexts(f); 218 + } else { 219 + if (tcf_exts_get_net(&r->exts)) 220 + call_rcu(&r->rcu, tcindex_destroy_rexts); 221 + else 222 + __tcindex_destroy_rexts(r); 223 + } 229 224 230 225 *last = false; 231 226 return 0;
+7 -1
net/sched/cls_u32.c
··· 399 399 bool free_pf) 400 400 { 401 401 tcf_exts_destroy(&n->exts); 402 + tcf_exts_put_net(&n->exts); 402 403 if (n->ht_down) 403 404 n->ht_down->refcnt--; 404 405 #ifdef CONFIG_CLS_U32_PERF ··· 477 476 RCU_INIT_POINTER(*kp, key->next); 478 477 479 478 tcf_unbind_filter(tp, &key->res); 479 + tcf_exts_get_net(&key->exts); 480 480 call_rcu(&key->rcu, u32_delete_key_freepf_rcu); 481 481 return 0; 482 482 } ··· 590 588 rtnl_dereference(n->next)); 591 589 tcf_unbind_filter(tp, &n->res); 592 590 u32_remove_hw_knode(tp, n->handle); 593 - call_rcu(&n->rcu, u32_delete_key_freepf_rcu); 591 + if (tcf_exts_get_net(&n->exts)) 592 + call_rcu(&n->rcu, u32_delete_key_freepf_rcu); 593 + else 594 + u32_destroy_key(n->tp, n, true); 594 595 } 595 596 } 596 597 } ··· 954 949 955 950 u32_replace_knode(tp, tp_c, new); 956 951 tcf_unbind_filter(tp, &n->res); 952 + tcf_exts_get_net(&n->exts); 957 953 call_rcu(&n->rcu, u32_delete_key_rcu); 958 954 return 0; 959 955 }
+2 -2
net/xfrm/xfrm_input.c
··· 266 266 goto lock; 267 267 } 268 268 269 - daddr = (xfrm_address_t *)(skb_network_header(skb) + 270 - XFRM_SPI_SKB_CB(skb)->daddroff); 271 269 family = XFRM_SPI_SKB_CB(skb)->family; 272 270 273 271 /* if tunnel is present override skb->mark value with tunnel i_key */ ··· 292 294 goto drop; 293 295 } 294 296 297 + daddr = (xfrm_address_t *)(skb_network_header(skb) + 298 + XFRM_SPI_SKB_CB(skb)->daddroff); 295 299 do { 296 300 if (skb->sp->len == XFRM_MAX_DEPTH) { 297 301 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
+39 -40
net/xfrm/xfrm_policy.c
··· 1361 1361 struct net *net = xp_net(policy); 1362 1362 int nx; 1363 1363 int i, error; 1364 - xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family); 1365 - xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family); 1366 1364 xfrm_address_t tmp; 1367 1365 1368 1366 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) { 1369 1367 struct xfrm_state *x; 1370 - xfrm_address_t *remote = daddr; 1371 - xfrm_address_t *local = saddr; 1368 + xfrm_address_t *local; 1369 + xfrm_address_t *remote; 1372 1370 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i]; 1373 1371 1374 - if (tmpl->mode == XFRM_MODE_TUNNEL || 1375 - tmpl->mode == XFRM_MODE_BEET) { 1376 - remote = &tmpl->id.daddr; 1377 - local = &tmpl->saddr; 1378 - if (xfrm_addr_any(local, tmpl->encap_family)) { 1379 - error = xfrm_get_saddr(net, fl->flowi_oif, 1380 - &tmp, remote, 1381 - tmpl->encap_family, 0); 1382 - if (error) 1383 - goto fail; 1384 - local = &tmp; 1385 - } 1372 + remote = &tmpl->id.daddr; 1373 + local = &tmpl->saddr; 1374 + if (xfrm_addr_any(local, tmpl->encap_family)) { 1375 + error = xfrm_get_saddr(net, fl->flowi_oif, 1376 + &tmp, remote, 1377 + tmpl->encap_family, 0); 1378 + if (error) 1379 + goto fail; 1380 + local = &tmp; 1386 1381 } 1387 1382 1388 1383 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family); 1389 1384 1390 1385 if (x && x->km.state == XFRM_STATE_VALID) { 1391 1386 xfrm[nx++] = x; 1392 - daddr = remote; 1393 - saddr = local; 1394 1387 continue; 1395 1388 } 1396 1389 if (x) { ··· 1780 1787 put_online_cpus(); 1781 1788 } 1782 1789 1783 - static bool xfrm_pol_dead(struct xfrm_dst *xdst) 1790 + static bool xfrm_xdst_can_reuse(struct xfrm_dst *xdst, 1791 + struct xfrm_state * const xfrm[], 1792 + int num) 1784 1793 { 1785 - unsigned int num_pols = xdst->num_pols; 1786 - unsigned int pol_dead = 0, i; 1794 + const struct dst_entry *dst = &xdst->u.dst; 1795 + int i; 1787 1796 1788 - for (i = 0; i < num_pols; i++) 1789 - pol_dead |= xdst->pols[i]->walk.dead; 1797 + if (xdst->num_xfrms != num) 
1798 + return false; 1790 1799 1791 - /* Mark DST_OBSOLETE_DEAD to fail the next xfrm_dst_check() */ 1792 - if (pol_dead) 1793 - xdst->u.dst.obsolete = DST_OBSOLETE_DEAD; 1800 + for (i = 0; i < num; i++) { 1801 + if (!dst || dst->xfrm != xfrm[i]) 1802 + return false; 1803 + dst = dst->child; 1804 + } 1794 1805 1795 - return pol_dead; 1806 + return xfrm_bundle_ok(xdst); 1796 1807 } 1797 1808 1798 1809 static struct xfrm_dst * ··· 1810 1813 struct dst_entry *dst; 1811 1814 int err; 1812 1815 1813 - xdst = this_cpu_read(xfrm_last_dst); 1814 - if (xdst && 1815 - xdst->u.dst.dev == dst_orig->dev && 1816 - xdst->num_pols == num_pols && 1817 - !xfrm_pol_dead(xdst) && 1818 - memcmp(xdst->pols, pols, 1819 - sizeof(struct xfrm_policy *) * num_pols) == 0 && 1820 - xfrm_bundle_ok(xdst)) { 1821 - dst_hold(&xdst->u.dst); 1822 - return xdst; 1823 - } 1824 - 1825 - old = xdst; 1826 1816 /* Try to instantiate a bundle */ 1827 1817 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); 1828 1818 if (err <= 0) { ··· 1817 1833 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1818 1834 return ERR_PTR(err); 1819 1835 } 1836 + 1837 + xdst = this_cpu_read(xfrm_last_dst); 1838 + if (xdst && 1839 + xdst->u.dst.dev == dst_orig->dev && 1840 + xdst->num_pols == num_pols && 1841 + memcmp(xdst->pols, pols, 1842 + sizeof(struct xfrm_policy *) * num_pols) == 0 && 1843 + xfrm_xdst_can_reuse(xdst, xfrm, err)) { 1844 + dst_hold(&xdst->u.dst); 1845 + while (err > 0) 1846 + xfrm_state_put(xfrm[--err]); 1847 + return xdst; 1848 + } 1849 + 1850 + old = xdst; 1820 1851 1821 1852 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig); 1822 1853 if (IS_ERR(dst)) {