Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net

+92 -71
+1 -1
MAINTAINERS
··· 5930 5930 5931 5931 GDB KERNEL DEBUGGING HELPER SCRIPTS 5932 5932 M: Jan Kiszka <jan.kiszka@siemens.com> 5933 - M: Kieran Bingham <kieran@bingham.xyz> 5933 + M: Kieran Bingham <kbingham@kernel.org> 5934 5934 S: Supported 5935 5935 F: scripts/gdb/ 5936 5936
-1
arch/parisc/Kconfig
··· 11 11 select ARCH_HAS_ELF_RANDOMIZE 12 12 select ARCH_HAS_STRICT_KERNEL_RWX 13 13 select ARCH_HAS_UBSAN_SANITIZE_ALL 14 - select ARCH_WANTS_UBSAN_NO_NULL 15 14 select ARCH_SUPPORTS_MEMORY_FAILURE 16 15 select RTC_CLASS 17 16 select RTC_DRV_GENERIC
-1
arch/s390/Kconfig
··· 106 106 select ARCH_USE_BUILTIN_BSWAP 107 107 select ARCH_USE_CMPXCHG_LOCKREF 108 108 select ARCH_WANTS_DYNAMIC_TASK_STRUCT 109 - select ARCH_WANTS_UBSAN_NO_NULL 110 109 select ARCH_WANT_IPC_PARSE_VERSION 111 110 select BUILDTIME_EXTABLE_SORT 112 111 select CLONE_BACKWARDS2
+14 -1
drivers/block/zram/zram_drv.c
··· 298 298 zram->backing_dev = NULL; 299 299 zram->old_block_size = 0; 300 300 zram->bdev = NULL; 301 - 301 + zram->disk->queue->backing_dev_info->capabilities |= 302 + BDI_CAP_SYNCHRONOUS_IO; 302 303 kvfree(zram->bitmap); 303 304 zram->bitmap = NULL; 304 305 } ··· 401 400 zram->backing_dev = backing_dev; 402 401 zram->bitmap = bitmap; 403 402 zram->nr_pages = nr_pages; 403 + /* 404 + * With writeback feature, zram does asynchronous IO so it's no longer 405 + * synchronous device so let's remove synchronous io flag. Otherwise, 406 + * upper layer(e.g., swap) could wait IO completion rather than 407 + * (submit and return), which will cause system sluggish. 408 + * Furthermore, when the IO function returns(e.g., swap_readpage), 409 + * upper layer expects IO was done so it could deallocate the page 410 + * freely but in fact, IO is going on so finally could cause 411 + * use-after-free when the IO is really done. 412 + */ 413 + zram->disk->queue->backing_dev_info->capabilities &= 414 + ~BDI_CAP_SYNCHRONOUS_IO; 404 415 up_write(&zram->init_lock); 405 416 406 417 pr_info("setup backing device %s\n", file_name);
+28 -13
drivers/i2c/busses/i2c-xlp9xx.c
··· 191 191 if (priv->len_recv) { 192 192 /* read length byte */ 193 193 rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO); 194 + 195 + /* 196 + * We expect at least 2 interrupts for I2C_M_RECV_LEN 197 + * transactions. The length is updated during the first 198 + * interrupt, and the buffer contents are only copied 199 + * during subsequent interrupts. If in case the interrupts 200 + * get merged we would complete the transaction without 201 + * copying out the bytes from RX fifo. To avoid this now we 202 + * drain the fifo as and when data is available. 203 + * We drained the rlen byte already, decrement total length 204 + * by one. 205 + */ 206 + 207 + len--; 194 208 if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) { 195 209 rlen = 0; /*abort transfer */ 196 210 priv->msg_buf_remaining = 0; 197 211 priv->msg_len = 0; 198 - } else { 199 - *buf++ = rlen; 200 - if (priv->client_pec) 201 - ++rlen; /* account for error check byte */ 202 - /* update remaining bytes and message length */ 203 - priv->msg_buf_remaining = rlen; 204 - priv->msg_len = rlen + 1; 212 + xlp9xx_i2c_update_rlen(priv); 213 + return; 205 214 } 215 + 216 + *buf++ = rlen; 217 + if (priv->client_pec) 218 + ++rlen; /* account for error check byte */ 219 + /* update remaining bytes and message length */ 220 + priv->msg_buf_remaining = rlen; 221 + priv->msg_len = rlen + 1; 206 222 xlp9xx_i2c_update_rlen(priv); 207 223 priv->len_recv = false; 208 - } else { 209 - len = min(priv->msg_buf_remaining, len); 210 - for (i = 0; i < len; i++, buf++) 211 - *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO); 212 - 213 - priv->msg_buf_remaining -= len; 214 224 } 215 225 226 + len = min(priv->msg_buf_remaining, len); 227 + for (i = 0; i < len; i++, buf++) 228 + *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO); 229 + 230 + priv->msg_buf_remaining -= len; 216 231 priv->msg_buf = buf; 217 232 218 233 if (priv->msg_buf_remaining)
+11 -14
drivers/net/ethernet/ti/cpsw.c
··· 2358 2358 int i; 2359 2359 2360 2360 for (i = 0; i < cpsw->data.slaves; i++) { 2361 - if (vid == cpsw->slaves[i].port_vlan) 2362 - return -EINVAL; 2361 + if (vid == cpsw->slaves[i].port_vlan) { 2362 + ret = -EINVAL; 2363 + goto err; 2364 + } 2363 2365 } 2364 2366 } 2365 2367 2366 2368 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); 2367 2369 ret = cpsw_add_vlan_ale_entry(priv, vid); 2368 - 2370 + err: 2369 2371 pm_runtime_put(cpsw->dev); 2370 2372 return ret; 2371 2373 } ··· 2393 2391 2394 2392 for (i = 0; i < cpsw->data.slaves; i++) { 2395 2393 if (vid == cpsw->slaves[i].port_vlan) 2396 - return -EINVAL; 2394 + goto err; 2397 2395 } 2398 2396 } 2399 2397 2400 2398 dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); 2401 2399 ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0); 2402 - if (ret != 0) 2403 - return ret; 2404 - 2405 - ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, 2406 - HOST_PORT_NUM, ALE_VLAN, vid); 2407 - if (ret != 0) 2408 - return ret; 2409 - 2410 - ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 2411 - 0, ALE_VLAN, vid); 2400 + ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, 2401 + HOST_PORT_NUM, ALE_VLAN, vid); 2402 + ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 2403 + 0, ALE_VLAN, vid); 2404 + err: 2412 2405 pm_runtime_put(cpsw->dev); 2413 2406 return ret; 2414 2407 }
+1 -1
drivers/net/ethernet/ti/cpsw_ale.c
··· 394 394 395 395 idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); 396 396 if (idx < 0) 397 - return -EINVAL; 397 + return -ENOENT; 398 398 399 399 cpsw_ale_read(ale, idx, ale_entry); 400 400
+4 -4
drivers/net/xen-netfront.c
··· 895 895 struct sk_buff *skb, 896 896 struct sk_buff_head *list) 897 897 { 898 - struct skb_shared_info *shinfo = skb_shinfo(skb); 899 898 RING_IDX cons = queue->rx.rsp_cons; 900 899 struct sk_buff *nskb; 901 900 ··· 903 904 RING_GET_RESPONSE(&queue->rx, ++cons); 904 905 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; 905 906 906 - if (shinfo->nr_frags == MAX_SKB_FRAGS) { 907 + if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { 907 908 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; 908 909 909 910 BUG_ON(pull_to <= skb_headlen(skb)); 910 911 __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 911 912 } 912 - BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); 913 + BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); 913 914 914 - skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag), 915 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 916 + skb_frag_page(nfrag), 915 917 rx->offset, rx->status, PAGE_SIZE); 916 918 917 919 skb_shinfo(nskb)->nr_frags = 0;
+9 -6
kernel/bpf/cpumap.c
··· 69 69 }; 70 70 71 71 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, 72 - struct xdp_bulk_queue *bq); 72 + struct xdp_bulk_queue *bq, bool in_napi_ctx); 73 73 74 74 static u64 cpu_map_bitmap_size(const union bpf_attr *attr) 75 75 { ··· 375 375 struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu); 376 376 377 377 /* No concurrent bq_enqueue can run at this point */ 378 - bq_flush_to_queue(rcpu, bq); 378 + bq_flush_to_queue(rcpu, bq, false); 379 379 } 380 380 free_percpu(rcpu->bulkq); 381 381 /* Cannot kthread_stop() here, last put free rcpu resources */ ··· 558 558 }; 559 559 560 560 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, 561 - struct xdp_bulk_queue *bq) 561 + struct xdp_bulk_queue *bq, bool in_napi_ctx) 562 562 { 563 563 unsigned int processed = 0, drops = 0; 564 564 const int to_cpu = rcpu->cpu; ··· 578 578 err = __ptr_ring_produce(q, xdpf); 579 579 if (err) { 580 580 drops++; 581 - xdp_return_frame_rx_napi(xdpf); 581 + if (likely(in_napi_ctx)) 582 + xdp_return_frame_rx_napi(xdpf); 583 + else 584 + xdp_return_frame(xdpf); 582 585 } 583 586 processed++; 584 587 } ··· 601 598 struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); 602 599 603 600 if (unlikely(bq->count == CPU_MAP_BULK_SIZE)) 604 - bq_flush_to_queue(rcpu, bq); 601 + bq_flush_to_queue(rcpu, bq, true); 605 602 606 603 /* Notice, xdp_buff/page MUST be queued here, long enough for 607 604 * driver to code invoking us to finished, due to driver ··· 664 661 665 662 /* Flush all frames in bulkq to real queue */ 666 663 bq = this_cpu_ptr(rcpu->bulkq); 667 - bq_flush_to_queue(rcpu, bq); 664 + bq_flush_to_queue(rcpu, bq, true); 668 665 669 666 /* If already running, costs spin_lock_irqsave + smb_mb */ 670 667 wake_up_process(rcpu->kthread);
+9 -5
kernel/bpf/devmap.c
··· 217 217 } 218 218 219 219 static int bq_xmit_all(struct bpf_dtab_netdev *obj, 220 - struct xdp_bulk_queue *bq, u32 flags) 220 + struct xdp_bulk_queue *bq, u32 flags, 221 + bool in_napi_ctx) 221 222 { 222 223 struct net_device *dev = obj->dev; 223 224 int sent = 0, drops = 0, err = 0; ··· 255 254 struct xdp_frame *xdpf = bq->q[i]; 256 255 257 256 /* RX path under NAPI protection, can return frames faster */ 258 - xdp_return_frame_rx_napi(xdpf); 257 + if (likely(in_napi_ctx)) 258 + xdp_return_frame_rx_napi(xdpf); 259 + else 260 + xdp_return_frame(xdpf); 259 261 drops++; 260 262 } 261 263 goto out; ··· 290 286 __clear_bit(bit, bitmap); 291 287 292 288 bq = this_cpu_ptr(dev->bulkq); 293 - bq_xmit_all(dev, bq, XDP_XMIT_FLUSH); 289 + bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true); 294 290 } 295 291 } 296 292 ··· 320 316 struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq); 321 317 322 318 if (unlikely(bq->count == DEV_MAP_BULK_SIZE)) 323 - bq_xmit_all(obj, bq, 0); 319 + bq_xmit_all(obj, bq, 0, true); 324 320 325 321 /* Ingress dev_rx will be the same for all xdp_frame's in 326 322 * bulk_queue, because bq stored per-CPU and must be flushed ··· 389 385 __clear_bit(dev->bit, bitmap); 390 386 391 387 bq = per_cpu_ptr(dev->bulkq, cpu); 392 - bq_xmit_all(dev, bq, XDP_XMIT_FLUSH); 388 + bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false); 393 389 } 394 390 } 395 391 }
+6 -3
kernel/bpf/sockmap.c
··· 1045 1045 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1046 1046 1047 1047 while (msg_data_left(msg)) { 1048 - struct sk_msg_buff *m; 1048 + struct sk_msg_buff *m = NULL; 1049 1049 bool enospc = false; 1050 1050 int copy; 1051 1051 1052 1052 if (sk->sk_err) { 1053 - err = sk->sk_err; 1053 + err = -sk->sk_err; 1054 1054 goto out_err; 1055 1055 } 1056 1056 ··· 1113 1113 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1114 1114 wait_for_memory: 1115 1115 err = sk_stream_wait_memory(sk, &timeo); 1116 - if (err) 1116 + if (err) { 1117 + if (m && m != psock->cork) 1118 + free_start_sg(sk, m); 1117 1119 goto out_err; 1120 + } 1118 1121 } 1119 1122 out_err: 1120 1123 if (err < 0)
-11
lib/Kconfig.ubsan
··· 1 1 config ARCH_HAS_UBSAN_SANITIZE_ALL 2 2 bool 3 3 4 - config ARCH_WANTS_UBSAN_NO_NULL 5 - def_bool n 6 - 7 4 config UBSAN 8 5 bool "Undefined behaviour sanity checker" 9 6 help ··· 35 38 This option enables detection of unaligned memory accesses. 36 39 Enabling this option on architectures that support unaligned 37 40 accesses may produce a lot of false positives. 38 - 39 - config UBSAN_NULL 40 - bool "Enable checking of null pointers" 41 - depends on UBSAN 42 - default y if !ARCH_WANTS_UBSAN_NO_NULL 43 - help 44 - This option enables detection of memory accesses via a 45 - null pointer. 46 41 47 42 config TEST_UBSAN 48 43 tristate "Module for testing for undefined behavior detection"
+3
mm/memory.c
··· 4395 4395 return -EINVAL; 4396 4396 4397 4397 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); 4398 + if (!maddr) 4399 + return -ENOMEM; 4400 + 4398 4401 if (write) 4399 4402 memcpy_toio(maddr + offset, buf, len); 4400 4403 else
+1 -1
samples/bpf/xdp_redirect_cpu_kern.c
··· 14 14 #include <uapi/linux/bpf.h> 15 15 #include "bpf_helpers.h" 16 16 17 - #define MAX_CPUS 12 /* WARNING - sync with _user.c */ 17 + #define MAX_CPUS 64 /* WARNING - sync with _user.c */ 18 18 19 19 /* Special map type that can XDP_REDIRECT frames to another CPU */ 20 20 struct bpf_map_def SEC("maps") cpu_map = {
+2 -2
samples/bpf/xdp_redirect_cpu_user.c
··· 19 19 #include <arpa/inet.h> 20 20 #include <linux/if_link.h> 21 21 22 - #define MAX_CPUS 12 /* WARNING - sync with _kern.c */ 22 + #define MAX_CPUS 64 /* WARNING - sync with _kern.c */ 23 23 24 24 /* How many xdp_progs are defined in _kern.c */ 25 25 #define MAX_PROG 5 ··· 527 527 * procedure. 528 528 */ 529 529 create_cpu_entry(1, 1024, 0, false); 530 - create_cpu_entry(1, 128, 0, false); 530 + create_cpu_entry(1, 8, 0, false); 531 531 create_cpu_entry(1, 16000, 0, false); 532 532 } 533 533
-4
scripts/Makefile.ubsan
··· 14 14 CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment) 15 15 endif 16 16 17 - ifdef CONFIG_UBSAN_NULL 18 - CFLAGS_UBSAN += $(call cc-option, -fsanitize=null) 19 - endif 20 - 21 17 # -fsanitize=* options makes GCC less smart than usual and 22 18 # increase number of 'maybe-uninitialized false-positives 23 19 CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized)
+1 -1
tools/lib/bpf/btf.c
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + // SPDX-License-Identifier: LGPL-2.1 2 2 /* Copyright (c) 2018 Facebook */ 3 3 4 4 #include <stdlib.h>
+1 -1
tools/lib/bpf/btf.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: LGPL-2.1 */ 2 2 /* Copyright (c) 2018 Facebook */ 3 3 4 4 #ifndef __BPF_BTF_H
+1 -1
tools/testing/selftests/bpf/test_sockmap.c
··· 354 354 while (s->bytes_recvd < total_bytes) { 355 355 if (txmsg_cork) { 356 356 timeout.tv_sec = 0; 357 - timeout.tv_usec = 1000; 357 + timeout.tv_usec = 300000; 358 358 } else { 359 359 timeout.tv_sec = 1; 360 360 timeout.tv_usec = 0;