Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'wireguard-fixes-for-5-12-rc1'

Jason Donenfeld says:

====================
wireguard fixes for 5.12-rc1

This series has a collection of fixes that have piled up for a little
while now, that I unfortunately didn't get a chance to send out earlier.

1) Removes unlikely() from IS_ERR(), since it's already implied.

2) Remove a bogus sparse annotation that hasn't been needed for years.

3) Addition of a test in the test suite for stressing parallel ndo_start_xmit.

4) Slight struct reordering in preparation for subsequent fix.

5) If skb->protocol is bogus, we no longer attempt to send icmp messages.

6) Massive memory usage fix, hit by larger deployments.

7) Fix typo in kconfig dependency logic.

(1) and (2) are tiny cleanups, and (3) is just a test, so if you're
trying to reduce churn, you could opt not to backport these. But (4), (5), (6),
and (7) fix problems and should be applied to stable. IMO, it's probably
easiest to just apply them all to stable.
====================

Link: https://lore.kernel.org/r/20210222162549.3252778-1-Jason@zx2c4.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+170 -105
+1 -1
drivers/net/Kconfig
··· 88 88 select CRYPTO_CURVE25519_X86 if X86 && 64BIT 89 89 select ARM_CRYPTO if ARM 90 90 select ARM64_CRYPTO if ARM64 91 - select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON 91 + select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON) 92 92 select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON 93 93 select CRYPTO_POLY1305_ARM if ARM 94 94 select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
+11 -10
drivers/net/wireguard/device.c
··· 138 138 else if (skb->protocol == htons(ETH_P_IPV6)) 139 139 net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n", 140 140 dev->name, &ipv6_hdr(skb)->daddr); 141 - goto err; 141 + goto err_icmp; 142 142 } 143 143 144 144 family = READ_ONCE(peer->endpoint.addr.sa_family); ··· 157 157 } else { 158 158 struct sk_buff *segs = skb_gso_segment(skb, 0); 159 159 160 - if (unlikely(IS_ERR(segs))) { 160 + if (IS_ERR(segs)) { 161 161 ret = PTR_ERR(segs); 162 162 goto err_peer; 163 163 } ··· 201 201 202 202 err_peer: 203 203 wg_peer_put(peer); 204 - err: 205 - ++dev->stats.tx_errors; 204 + err_icmp: 206 205 if (skb->protocol == htons(ETH_P_IP)) 207 206 icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); 208 207 else if (skb->protocol == htons(ETH_P_IPV6)) 209 208 icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); 209 + err: 210 + ++dev->stats.tx_errors; 210 211 kfree_skb(skb); 211 212 return ret; 212 213 } ··· 235 234 destroy_workqueue(wg->handshake_receive_wq); 236 235 destroy_workqueue(wg->handshake_send_wq); 237 236 destroy_workqueue(wg->packet_crypt_wq); 238 - wg_packet_queue_free(&wg->decrypt_queue, true); 239 - wg_packet_queue_free(&wg->encrypt_queue, true); 237 + wg_packet_queue_free(&wg->decrypt_queue); 238 + wg_packet_queue_free(&wg->encrypt_queue); 240 239 rcu_barrier(); /* Wait for all the peers to be actually freed. 
*/ 241 240 wg_ratelimiter_uninit(); 242 241 memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); ··· 338 337 goto err_destroy_handshake_send; 339 338 340 339 ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker, 341 - true, MAX_QUEUED_PACKETS); 340 + MAX_QUEUED_PACKETS); 342 341 if (ret < 0) 343 342 goto err_destroy_packet_crypt; 344 343 345 344 ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker, 346 - true, MAX_QUEUED_PACKETS); 345 + MAX_QUEUED_PACKETS); 347 346 if (ret < 0) 348 347 goto err_free_encrypt_queue; 349 348 ··· 368 367 err_uninit_ratelimiter: 369 368 wg_ratelimiter_uninit(); 370 369 err_free_decrypt_queue: 371 - wg_packet_queue_free(&wg->decrypt_queue, true); 370 + wg_packet_queue_free(&wg->decrypt_queue); 372 371 err_free_encrypt_queue: 373 - wg_packet_queue_free(&wg->encrypt_queue, true); 372 + wg_packet_queue_free(&wg->encrypt_queue); 374 373 err_destroy_packet_crypt: 375 374 destroy_workqueue(wg->packet_crypt_wq); 376 375 err_destroy_handshake_send:
+8 -7
drivers/net/wireguard/device.h
··· 27 27 28 28 struct crypt_queue { 29 29 struct ptr_ring ring; 30 - union { 31 - struct { 32 - struct multicore_worker __percpu *worker; 33 - int last_cpu; 34 - }; 35 - struct work_struct work; 36 - }; 30 + struct multicore_worker __percpu *worker; 31 + int last_cpu; 32 + }; 33 + 34 + struct prev_queue { 35 + struct sk_buff *head, *tail, *peeked; 36 + struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff. 37 + atomic_t count; 37 38 }; 38 39 39 40 struct wg_device {
+9 -19
drivers/net/wireguard/peer.c
··· 32 32 peer = kzalloc(sizeof(*peer), GFP_KERNEL); 33 33 if (unlikely(!peer)) 34 34 return ERR_PTR(ret); 35 - peer->device = wg; 35 + if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) 36 + goto err; 36 37 38 + peer->device = wg; 37 39 wg_noise_handshake_init(&peer->handshake, &wg->static_identity, 38 40 public_key, preshared_key, peer); 39 - if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) 40 - goto err_1; 41 - if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, 42 - MAX_QUEUED_PACKETS)) 43 - goto err_2; 44 - if (wg_packet_queue_init(&peer->rx_queue, NULL, false, 45 - MAX_QUEUED_PACKETS)) 46 - goto err_3; 47 - 48 41 peer->internal_id = atomic64_inc_return(&peer_counter); 49 42 peer->serial_work_cpu = nr_cpumask_bits; 50 43 wg_cookie_init(&peer->latest_cookie); 51 44 wg_timers_init(peer); 52 45 wg_cookie_checker_precompute_peer_keys(peer); 53 46 spin_lock_init(&peer->keypairs.keypair_update_lock); 54 - INIT_WORK(&peer->transmit_handshake_work, 55 - wg_packet_handshake_send_worker); 47 + INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker); 48 + INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker); 49 + wg_prev_queue_init(&peer->tx_queue); 50 + wg_prev_queue_init(&peer->rx_queue); 56 51 rwlock_init(&peer->endpoint_lock); 57 52 kref_init(&peer->refcount); 58 53 skb_queue_head_init(&peer->staged_packet_queue); ··· 63 68 pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id); 64 69 return peer; 65 70 66 - err_3: 67 - wg_packet_queue_free(&peer->tx_queue, false); 68 - err_2: 69 - dst_cache_destroy(&peer->endpoint_cache); 70 - err_1: 71 + err: 71 72 kfree(peer); 72 73 return ERR_PTR(ret); 73 74 } ··· 188 197 struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu); 189 198 190 199 dst_cache_destroy(&peer->endpoint_cache); 191 - wg_packet_queue_free(&peer->rx_queue, false); 192 - wg_packet_queue_free(&peer->tx_queue, false); 200 + WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || 
wg_prev_queue_peek(&peer->rx_queue)); 193 201 194 202 /* The final zeroing takes care of clearing any remaining handshake key 195 203 * material and other potentially sensitive information.
+4 -4
drivers/net/wireguard/peer.h
··· 36 36 37 37 struct wg_peer { 38 38 struct wg_device *device; 39 - struct crypt_queue tx_queue, rx_queue; 39 + struct prev_queue tx_queue, rx_queue; 40 40 struct sk_buff_head staged_packet_queue; 41 41 int serial_work_cpu; 42 + bool is_dead; 42 43 struct noise_keypairs keypairs; 43 44 struct endpoint endpoint; 44 45 struct dst_cache endpoint_cache; 45 46 rwlock_t endpoint_lock; 46 47 struct noise_handshake handshake; 47 48 atomic64_t last_sent_handshake; 48 - struct work_struct transmit_handshake_work, clear_peer_work; 49 + struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work; 49 50 struct cookie latest_cookie; 50 51 struct hlist_node pubkey_hash; 51 52 u64 rx_bytes, tx_bytes; ··· 62 61 struct rcu_head rcu; 63 62 struct list_head peer_list; 64 63 struct list_head allowedips_list; 65 - u64 internal_id; 66 64 struct napi_struct napi; 67 - bool is_dead; 65 + u64 internal_id; 68 66 }; 69 67 70 68 struct wg_peer *wg_peer_create(struct wg_device *wg,
+69 -17
drivers/net/wireguard/queueing.c
··· 9 9 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) 10 10 { 11 11 int cpu; 12 - struct multicore_worker __percpu *worker = 13 - alloc_percpu(struct multicore_worker); 12 + struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker); 14 13 15 14 if (!worker) 16 15 return NULL; ··· 22 23 } 23 24 24 25 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, 25 - bool multicore, unsigned int len) 26 + unsigned int len) 26 27 { 27 28 int ret; 28 29 ··· 30 31 ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); 31 32 if (ret) 32 33 return ret; 33 - if (function) { 34 - if (multicore) { 35 - queue->worker = wg_packet_percpu_multicore_worker_alloc( 36 - function, queue); 37 - if (!queue->worker) { 38 - ptr_ring_cleanup(&queue->ring, NULL); 39 - return -ENOMEM; 40 - } 41 - } else { 42 - INIT_WORK(&queue->work, function); 43 - } 34 + queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); 35 + if (!queue->worker) { 36 + ptr_ring_cleanup(&queue->ring, NULL); 37 + return -ENOMEM; 44 38 } 45 39 return 0; 46 40 } 47 41 48 - void wg_packet_queue_free(struct crypt_queue *queue, bool multicore) 42 + void wg_packet_queue_free(struct crypt_queue *queue) 49 43 { 50 - if (multicore) 51 - free_percpu(queue->worker); 44 + free_percpu(queue->worker); 52 45 WARN_ON(!__ptr_ring_empty(&queue->ring)); 53 46 ptr_ring_cleanup(&queue->ring, NULL); 54 47 } 48 + 49 + #define NEXT(skb) ((skb)->prev) 50 + #define STUB(queue) ((struct sk_buff *)&queue->empty) 51 + 52 + void wg_prev_queue_init(struct prev_queue *queue) 53 + { 54 + NEXT(STUB(queue)) = NULL; 55 + queue->head = queue->tail = STUB(queue); 56 + queue->peeked = NULL; 57 + atomic_set(&queue->count, 0); 58 + BUILD_BUG_ON( 59 + offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) - 60 + offsetof(struct prev_queue, empty) || 61 + offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) - 62 + offsetof(struct prev_queue, 
empty)); 63 + } 64 + 65 + static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) 66 + { 67 + WRITE_ONCE(NEXT(skb), NULL); 68 + WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb); 69 + } 70 + 71 + bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) 72 + { 73 + if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS)) 74 + return false; 75 + __wg_prev_queue_enqueue(queue, skb); 76 + return true; 77 + } 78 + 79 + struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue) 80 + { 81 + struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail)); 82 + 83 + if (tail == STUB(queue)) { 84 + if (!next) 85 + return NULL; 86 + queue->tail = next; 87 + tail = next; 88 + next = smp_load_acquire(&NEXT(next)); 89 + } 90 + if (next) { 91 + queue->tail = next; 92 + atomic_dec(&queue->count); 93 + return tail; 94 + } 95 + if (tail != READ_ONCE(queue->head)) 96 + return NULL; 97 + __wg_prev_queue_enqueue(queue, STUB(queue)); 98 + next = smp_load_acquire(&NEXT(tail)); 99 + if (next) { 100 + queue->tail = next; 101 + atomic_dec(&queue->count); 102 + return tail; 103 + } 104 + return NULL; 105 + } 106 + 107 + #undef NEXT 108 + #undef STUB
+33 -12
drivers/net/wireguard/queueing.h
··· 17 17 struct wg_peer; 18 18 struct multicore_worker; 19 19 struct crypt_queue; 20 + struct prev_queue; 20 21 struct sk_buff; 21 22 22 23 /* queueing.c APIs: */ 23 24 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, 24 - bool multicore, unsigned int len); 25 - void wg_packet_queue_free(struct crypt_queue *queue, bool multicore); 25 + unsigned int len); 26 + void wg_packet_queue_free(struct crypt_queue *queue); 26 27 struct multicore_worker __percpu * 27 28 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); 28 29 ··· 136 135 return cpu; 137 136 } 138 137 138 + void wg_prev_queue_init(struct prev_queue *queue); 139 + 140 + /* Multi producer */ 141 + bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb); 142 + 143 + /* Single consumer */ 144 + struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue); 145 + 146 + /* Single consumer */ 147 + static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue) 148 + { 149 + if (queue->peeked) 150 + return queue->peeked; 151 + queue->peeked = wg_prev_queue_dequeue(queue); 152 + return queue->peeked; 153 + } 154 + 155 + /* Single consumer */ 156 + static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue) 157 + { 158 + queue->peeked = NULL; 159 + } 160 + 139 161 static inline int wg_queue_enqueue_per_device_and_peer( 140 - struct crypt_queue *device_queue, struct crypt_queue *peer_queue, 162 + struct crypt_queue *device_queue, struct prev_queue *peer_queue, 141 163 struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu) 142 164 { 143 165 int cpu; ··· 169 145 /* We first queue this up for the peer ingestion, but the consumer 170 146 * will wait for the state to change to CRYPTED or DEAD before. 
171 147 */ 172 - if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb))) 148 + if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb))) 173 149 return -ENOSPC; 150 + 174 151 /* Then we queue it up in the device queue, which consumes the 175 152 * packet as soon as it can. 176 153 */ ··· 182 157 return 0; 183 158 } 184 159 185 - static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue, 186 - struct sk_buff *skb, 187 - enum packet_state state) 160 + static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state) 188 161 { 189 162 /* We take a reference, because as soon as we call atomic_set, the 190 163 * peer can be freed from below us. ··· 190 167 struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); 191 168 192 169 atomic_set_release(&PACKET_CB(skb)->state, state); 193 - queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, 194 - peer->internal_id), 195 - peer->device->packet_crypt_wq, &queue->work); 170 + queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), 171 + peer->device->packet_crypt_wq, &peer->transmit_packet_work); 196 172 wg_peer_put(peer); 197 173 } 198 174 199 - static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb, 200 - enum packet_state state) 175 + static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state) 201 176 { 202 177 /* We take a reference, because as soon as we call atomic_set, the 203 178 * peer can be freed from below us.
+6 -10
drivers/net/wireguard/receive.c
··· 444 444 int wg_packet_rx_poll(struct napi_struct *napi, int budget) 445 445 { 446 446 struct wg_peer *peer = container_of(napi, struct wg_peer, napi); 447 - struct crypt_queue *queue = &peer->rx_queue; 448 447 struct noise_keypair *keypair; 449 448 struct endpoint endpoint; 450 449 enum packet_state state; ··· 454 455 if (unlikely(budget <= 0)) 455 456 return 0; 456 457 457 - while ((skb = __ptr_ring_peek(&queue->ring)) != NULL && 458 + while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL && 458 459 (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != 459 460 PACKET_STATE_UNCRYPTED) { 460 - __ptr_ring_discard_one(&queue->ring); 461 - peer = PACKET_PEER(skb); 461 + wg_prev_queue_drop_peeked(&peer->rx_queue); 462 462 keypair = PACKET_CB(skb)->keypair; 463 463 free = true; 464 464 ··· 506 508 enum packet_state state = 507 509 likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ? 508 510 PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; 509 - wg_queue_enqueue_per_peer_napi(skb, state); 511 + wg_queue_enqueue_per_peer_rx(skb, state); 510 512 if (need_resched()) 511 513 cond_resched(); 512 514 } ··· 529 531 if (unlikely(READ_ONCE(peer->is_dead))) 530 532 goto err; 531 533 532 - ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, 533 - &peer->rx_queue, skb, 534 - wg->packet_crypt_wq, 535 - &wg->decrypt_queue.last_cpu); 534 + ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb, 535 + wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu); 536 536 if (unlikely(ret == -EPIPE)) 537 - wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD); 537 + wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD); 538 538 if (likely(!ret || ret == -EPIPE)) { 539 539 rcu_read_unlock_bh(); 540 540 return;
+11 -20
drivers/net/wireguard/send.c
··· 239 239 wg_packet_send_staged_packets(peer); 240 240 } 241 241 242 - static void wg_packet_create_data_done(struct sk_buff *first, 243 - struct wg_peer *peer) 242 + static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first) 244 243 { 245 244 struct sk_buff *skb, *next; 246 245 bool is_keepalive, data_sent = false; ··· 261 262 262 263 void wg_packet_tx_worker(struct work_struct *work) 263 264 { 264 - struct crypt_queue *queue = container_of(work, struct crypt_queue, 265 - work); 265 + struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work); 266 266 struct noise_keypair *keypair; 267 267 enum packet_state state; 268 268 struct sk_buff *first; 269 - struct wg_peer *peer; 270 269 271 - while ((first = __ptr_ring_peek(&queue->ring)) != NULL && 270 + while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL && 272 271 (state = atomic_read_acquire(&PACKET_CB(first)->state)) != 273 272 PACKET_STATE_UNCRYPTED) { 274 - __ptr_ring_discard_one(&queue->ring); 275 - peer = PACKET_PEER(first); 273 + wg_prev_queue_drop_peeked(&peer->tx_queue); 276 274 keypair = PACKET_CB(first)->keypair; 277 275 278 276 if (likely(state == PACKET_STATE_CRYPTED)) 279 - wg_packet_create_data_done(first, peer); 277 + wg_packet_create_data_done(peer, first); 280 278 else 281 279 kfree_skb_list(first); 282 280 ··· 302 306 break; 303 307 } 304 308 } 305 - wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, 306 - state); 309 + wg_queue_enqueue_per_peer_tx(first, state); 307 310 if (need_resched()) 308 311 cond_resched(); 309 312 } 310 313 } 311 314 312 - static void wg_packet_create_data(struct sk_buff *first) 315 + static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first) 313 316 { 314 - struct wg_peer *peer = PACKET_PEER(first); 315 317 struct wg_device *wg = peer->device; 316 318 int ret = -EINVAL; 317 319 ··· 317 323 if (unlikely(READ_ONCE(peer->is_dead))) 318 324 goto err; 319 325 320 - ret = 
wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, 321 - &peer->tx_queue, first, 322 - wg->packet_crypt_wq, 323 - &wg->encrypt_queue.last_cpu); 326 + ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first, 327 + wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu); 324 328 if (unlikely(ret == -EPIPE)) 325 - wg_queue_enqueue_per_peer(&peer->tx_queue, first, 326 - PACKET_STATE_DEAD); 329 + wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD); 327 330 err: 328 331 rcu_read_unlock_bh(); 329 332 if (likely(!ret || ret == -EPIPE)) ··· 384 393 packets.prev->next = NULL; 385 394 wg_peer_get(keypair->entry.peer); 386 395 PACKET_CB(packets.next)->keypair = keypair; 387 - wg_packet_create_data(packets.next); 396 + wg_packet_create_data(peer, packets.next); 388 397 return; 389 398 390 399 out_invalid:
+4 -4
drivers/net/wireguard/socket.c
··· 53 53 if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, 54 54 fl.saddr, RT_SCOPE_HOST))) { 55 55 endpoint->src4.s_addr = 0; 56 - *(__force __be32 *)&endpoint->src_if4 = 0; 56 + endpoint->src_if4 = 0; 57 57 fl.saddr = 0; 58 58 if (cache) 59 59 dst_cache_reset(cache); ··· 63 63 PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) && 64 64 rt->dst.dev->ifindex != endpoint->src_if4)))) { 65 65 endpoint->src4.s_addr = 0; 66 - *(__force __be32 *)&endpoint->src_if4 = 0; 66 + endpoint->src_if4 = 0; 67 67 fl.saddr = 0; 68 68 if (cache) 69 69 dst_cache_reset(cache); ··· 71 71 ip_rt_put(rt); 72 72 rt = ip_route_output_flow(sock_net(sock), &fl, sock); 73 73 } 74 - if (unlikely(IS_ERR(rt))) { 74 + if (IS_ERR(rt)) { 75 75 ret = PTR_ERR(rt); 76 76 net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", 77 77 wg->dev->name, &endpoint->addr, ret); ··· 138 138 } 139 139 dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl, 140 140 NULL); 141 - if (unlikely(IS_ERR(dst))) { 141 + if (IS_ERR(dst)) { 142 142 ret = PTR_ERR(dst); 143 143 net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", 144 144 wg->dev->name, &endpoint->addr, ret);
+14 -1
tools/testing/selftests/wireguard/netns.sh
··· 39 39 ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } 40 40 ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } 41 41 sleep() { read -t "$1" -N 1 || true; } 42 - waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } 42 + waitiperf() { pretty "${1//*-}" "wait for iperf:${3:-5201} pid $2"; while [[ $(ss -N "$1" -tlpH "sport = ${3:-5201}") != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } 43 43 waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; } 44 44 waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } 45 45 ··· 141 141 n2 iperf3 -s -1 -B fd00::2 & 142 142 waitiperf $netns2 $! 143 143 n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 144 + 145 + # TCP over IPv4, in parallel 146 + for max in 4 5 50; do 147 + local pids=( ) 148 + for ((i=0; i < max; ++i)) do 149 + n2 iperf3 -p $(( 5200 + i )) -s -1 -B 192.168.241.2 & 150 + pids+=( $! ); waitiperf $netns2 $! $(( 5200 + i )) 151 + done 152 + for ((i=0; i < max; ++i)) do 153 + n1 iperf3 -Z -t 3 -p $(( 5200 + i )) -c 192.168.241.2 & 154 + done 155 + wait "${pids[@]}" 156 + done 144 157 } 145 158 146 159 [[ $(ip1 link show dev wg0) =~ mtu\ ([0-9]+) ]] && orig_mtu="${BASH_REMATCH[1]}"