Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Both conflicts were simple overlapping changes.

In the kaweth case, Eric Dumazet's skb_cow() bug fix overlapped the
conversion of the driver in net-next to use in-netdev stats.

Signed-off-by: David S. Miller <davem@davemloft.net>

+575 -245
+8 -2
Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
··· 44 44 }; 45 45 46 46 HiSilicon Hip06/Hip07 PCIe host bridge DT (almost-ECAM) description. 47 + 48 + Some BIOSes place the host controller in a mode where it is ECAM 49 + compliant for all devices other than the root complex. In such cases, 50 + the host controller should be described as below. 51 + 47 52 The properties and their meanings are identical to those described in 48 53 host-generic-pci.txt except as listed below. 49 54 50 55 Properties of the host controller node that differ from 51 56 host-generic-pci.txt: 52 57 53 - - compatible : Must be "hisilicon,pcie-almost-ecam" 58 + - compatible : Must be "hisilicon,hip06-pcie-ecam", or 59 + "hisilicon,hip07-pcie-ecam" 54 60 55 61 - reg : Two entries: First the ECAM configuration space for any 56 62 other bus underneath the root bus. Second, the base ··· 65 59 66 60 Example: 67 61 pcie0: pcie@a0090000 { 68 - compatible = "hisilicon,pcie-almost-ecam"; 62 + compatible = "hisilicon,hip06-pcie-ecam"; 69 63 reg = <0 0xb0000000 0 0x2000000>, /* ECAM configuration space */ 70 64 <0 0xa0090000 0 0x10000>; /* host bridge registers */ 71 65 bus-range = <0 31>;
+16 -2
MAINTAINERS
··· 2592 2592 2593 2593 BPF (Safe dynamic programs and tools) 2594 2594 M: Alexei Starovoitov <ast@kernel.org> 2595 + M: Daniel Borkmann <daniel@iogearbox.net> 2595 2596 L: netdev@vger.kernel.org 2596 2597 L: linux-kernel@vger.kernel.org 2597 2598 S: Supported 2599 + F: arch/x86/net/bpf_jit* 2600 + F: Documentation/networking/filter.txt 2601 + F: include/linux/bpf* 2602 + F: include/linux/filter.h 2603 + F: include/uapi/linux/bpf* 2604 + F: include/uapi/linux/filter.h 2598 2605 F: kernel/bpf/ 2599 - F: tools/testing/selftests/bpf/ 2606 + F: kernel/trace/bpf_trace.c 2600 2607 F: lib/test_bpf.c 2608 + F: net/bpf/ 2609 + F: net/core/filter.c 2610 + F: net/sched/act_bpf.c 2611 + F: net/sched/cls_bpf.c 2612 + F: samples/bpf/ 2613 + F: tools/net/bpf* 2614 + F: tools/testing/selftests/bpf/ 2601 2615 2602 2616 BROADCOM B44 10/100 ETHERNET DRIVER 2603 2617 M: Michael Chan <michael.chan@broadcom.com> ··· 8791 8777 Q: http://patchwork.ozlabs.org/project/netdev/list/ 8792 8778 T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git 8793 8779 T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 8780 + B: mailto:netdev@vger.kernel.org 8794 8781 S: Maintained 8795 8782 F: net/ 8796 8783 F: include/net/ ··· 12506 12491 F: include/linux/clk/ti.h 12507 12492 12508 12493 TI ETHERNET SWITCH DRIVER (CPSW) 12509 - M: Mugunthan V N <mugunthanvnm@ti.com> 12510 12494 R: Grygorii Strashko <grygorii.strashko@ti.com> 12511 12495 L: linux-omap@vger.kernel.org 12512 12496 L: netdev@vger.kernel.org
+4 -4
arch/powerpc/include/asm/exception-64s.h
··· 236 236 mtctr reg; \ 237 237 bctr 238 238 239 - #define BRANCH_LINK_TO_FAR(reg, label) \ 240 - __LOAD_FAR_HANDLER(reg, label); \ 241 - mtctr reg; \ 239 + #define BRANCH_LINK_TO_FAR(label) \ 240 + __LOAD_FAR_HANDLER(r12, label); \ 241 + mtctr r12; \ 242 242 bctrl 243 243 244 244 /* ··· 265 265 #define BRANCH_TO_COMMON(reg, label) \ 266 266 b label 267 267 268 - #define BRANCH_LINK_TO_FAR(reg, label) \ 268 + #define BRANCH_LINK_TO_FAR(label) \ 269 269 bl label 270 270 271 271 #define BRANCH_TO_KVM(reg, label) \
+3 -3
arch/powerpc/kernel/entry_64.S
··· 689 689 690 690 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */ 691 691 692 - lwz r3,GPR1(r1) 692 + ld r3,GPR1(r1) 693 693 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */ 694 694 mr r4,r1 /* src: current exception frame */ 695 695 mr r1,r3 /* Reroute the trampoline frame to r1 */ ··· 703 703 addi r6,r6,8 704 704 bdnz 2b 705 705 706 - /* Do real store operation to complete stwu */ 707 - lwz r5,GPR1(r1) 706 + /* Do real store operation to complete stdu */ 707 + ld r5,GPR1(r1) 708 708 std r8,0(r5) 709 709 710 710 /* Clear _TIF_EMULATE_STACK_STORE flag */
+1 -1
arch/powerpc/kernel/exceptions-64s.S
··· 982 982 EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN) 983 983 EXCEPTION_PROLOG_COMMON_3(0xe60) 984 984 addi r3,r1,STACK_FRAME_OVERHEAD 985 - BRANCH_LINK_TO_FAR(r4, hmi_exception_realmode) 985 + BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */ 986 986 /* Windup the stack. */ 987 987 /* Move original HSRR0 and HSRR1 into the respective regs */ 988 988 ld r9,_MSR(r1)
+2
arch/s390/include/asm/pgtable.h
··· 1051 1051 { 1052 1052 if (!MACHINE_HAS_NX) 1053 1053 pte_val(entry) &= ~_PAGE_NOEXEC; 1054 + if (pte_present(entry)) 1055 + pte_val(entry) &= ~_PAGE_UNUSED; 1054 1056 if (mm_has_pgste(mm)) 1055 1057 ptep_set_pte_at(mm, addr, ptep, entry); 1056 1058 else
+10 -1
block/blk-mq.c
··· 2928 2928 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; 2929 2929 if (!blk_qc_t_is_internal(cookie)) 2930 2930 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); 2931 - else 2931 + else { 2932 2932 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); 2933 + /* 2934 + * With scheduling, if the request has completed, we'll 2935 + * get a NULL return here, as we clear the sched tag when 2936 + * that happens. The request still remains valid, like always, 2937 + * so we should be safe with just the NULL check. 2938 + */ 2939 + if (!rq) 2940 + return false; 2941 + } 2933 2942 2934 2943 return __blk_mq_poll(hctx, rq); 2935 2944 }
+10 -2
block/elevator.c
··· 1098 1098 } 1099 1099 EXPORT_SYMBOL(elevator_change); 1100 1100 1101 + static inline bool elv_support_iosched(struct request_queue *q) 1102 + { 1103 + if (q->mq_ops && q->tag_set && (q->tag_set->flags & 1104 + BLK_MQ_F_NO_SCHED)) 1105 + return false; 1106 + return true; 1107 + } 1108 + 1101 1109 ssize_t elv_iosched_store(struct request_queue *q, const char *name, 1102 1110 size_t count) 1103 1111 { 1104 1112 int ret; 1105 1113 1106 - if (!(q->mq_ops || q->request_fn)) 1114 + if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q)) 1107 1115 return count; 1108 1116 1109 1117 ret = __elevator_change(q, name); ··· 1143 1135 len += sprintf(name+len, "[%s] ", elv->elevator_name); 1144 1136 continue; 1145 1137 } 1146 - if (__e->uses_mq && q->mq_ops) 1138 + if (__e->uses_mq && q->mq_ops && elv_support_iosched(q)) 1147 1139 len += sprintf(name+len, "%s ", __e->elevator_name); 1148 1140 else if (!__e->uses_mq && !q->mq_ops) 1149 1141 len += sprintf(name+len, "%s ", __e->elevator_name);
+1
drivers/acpi/power.c
··· 200 200 return -EINVAL; 201 201 202 202 /* The state of the list is 'on' IFF all resources are 'on'. */ 203 + cur_state = 0; 203 204 list_for_each_entry(entry, list, node) { 204 205 struct acpi_power_resource *resource = entry->resource; 205 206 acpi_handle handle = resource->device.handle;
+1 -1
drivers/block/mtip32xx/mtip32xx.c
··· 3969 3969 dd->tags.reserved_tags = 1; 3970 3970 dd->tags.cmd_size = sizeof(struct mtip_cmd); 3971 3971 dd->tags.numa_node = dd->numa_node; 3972 - dd->tags.flags = BLK_MQ_F_SHOULD_MERGE; 3972 + dd->tags.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_NO_SCHED; 3973 3973 dd->tags.driver_data = dd; 3974 3974 dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS; 3975 3975
+10 -3
drivers/clk/clk-stm32f4.c
··· 429 429 { 0, 2 }, { 1, 4 }, { 2, 6 }, { 3, 8 }, { 0 } 430 430 }; 431 431 432 + static const struct clk_div_table pll_divq_table[] = { 433 + { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, 434 + { 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 }, { 12, 12 }, { 13, 13 }, 435 + { 14, 14 }, { 15, 15 }, 436 + { 0 } 437 + }; 438 + 432 439 static const struct clk_div_table pll_divr_table[] = { 433 440 { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 } 434 441 }; ··· 503 496 504 497 #define MAX_PLL_DIV 3 505 498 static const struct stm32f4_div_data div_data[MAX_PLL_DIV] = { 506 - { 16, 2, 0, pll_divp_table }, 507 - { 24, 4, CLK_DIVIDER_ONE_BASED, NULL }, 508 - { 28, 3, 0, pll_divr_table }, 499 + { 16, 2, 0, pll_divp_table }, 500 + { 24, 4, 0, pll_divq_table }, 501 + { 28, 3, 0, pll_divr_table }, 509 502 }; 510 503 511 504 struct stm32f4_pll_data {
+2
drivers/clk/sunxi-ng/Kconfig
··· 1 1 config SUNXI_CCU 2 2 bool "Clock support for Allwinner SoCs" 3 3 depends on ARCH_SUNXI || COMPILE_TEST 4 + select RESET_CONTROLLER 4 5 default ARCH_SUNXI 5 6 6 7 if SUNXI_CCU ··· 136 135 config SUN9I_A80_CCU 137 136 bool "Support for the Allwinner A80 CCU" 138 137 select SUNXI_CCU_DIV 138 + select SUNXI_CCU_MULT 139 139 select SUNXI_CCU_GATE 140 140 select SUNXI_CCU_NKMP 141 141 select SUNXI_CCU_NM
+11
drivers/clk/sunxi-ng/ccu-sun8i-a33.c
··· 752 752 .num_resets = ARRAY_SIZE(sun8i_a33_ccu_resets), 753 753 }; 754 754 755 + static struct ccu_pll_nb sun8i_a33_pll_cpu_nb = { 756 + .common = &pll_cpux_clk.common, 757 + /* copy from pll_cpux_clk */ 758 + .enable = BIT(31), 759 + .lock = BIT(28), 760 + }; 761 + 755 762 static struct ccu_mux_nb sun8i_a33_cpu_nb = { 756 763 .common = &cpux_clk.common, 757 764 .cm = &cpux_clk.mux, ··· 790 783 791 784 sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc); 792 785 786 + /* Gate then ungate PLL CPU after any rate changes */ 787 + ccu_pll_notifier_register(&sun8i_a33_pll_cpu_nb); 788 + 789 + /* Reparent CPU during PLL CPU rate changes */ 793 790 ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk, 794 791 &sun8i_a33_cpu_nb); 795 792 }
+49
drivers/clk/sunxi-ng/ccu_common.c
··· 14 14 * GNU General Public License for more details. 15 15 */ 16 16 17 + #include <linux/clk.h> 17 18 #include <linux/clk-provider.h> 18 19 #include <linux/iopoll.h> 19 20 #include <linux/slab.h> 20 21 21 22 #include "ccu_common.h" 23 + #include "ccu_gate.h" 22 24 #include "ccu_reset.h" 23 25 24 26 static DEFINE_SPINLOCK(ccu_lock); ··· 39 37 addr = common->base + common->reg; 40 38 41 39 WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000)); 40 + } 41 + 42 + /* 43 + * This clock notifier is called when the frequency of a PLL clock is 44 + * changed. In common PLL designs, changes to the dividers take effect 45 + * almost immediately, while changes to the multipliers (implemented 46 + * as dividers in the feedback loop) take a few cycles to work into 47 + * the feedback loop for the PLL to stablize. 48 + * 49 + * Sometimes when the PLL clock rate is changed, the decrease in the 50 + * divider is too much for the decrease in the multiplier to catch up. 51 + * The PLL clock rate will spike, and in some cases, might lock up 52 + * completely. 53 + * 54 + * This notifier callback will gate and then ungate the clock, 55 + * effectively resetting it, so it proceeds to work. Care must be 56 + * taken to reparent consumers to other temporary clocks during the 57 + * rate change, and that this notifier callback must be the first 58 + * to be registered. 
59 + */ 60 + static int ccu_pll_notifier_cb(struct notifier_block *nb, 61 + unsigned long event, void *data) 62 + { 63 + struct ccu_pll_nb *pll = to_ccu_pll_nb(nb); 64 + int ret = 0; 65 + 66 + if (event != POST_RATE_CHANGE) 67 + goto out; 68 + 69 + ccu_gate_helper_disable(pll->common, pll->enable); 70 + 71 + ret = ccu_gate_helper_enable(pll->common, pll->enable); 72 + if (ret) 73 + goto out; 74 + 75 + ccu_helper_wait_for_lock(pll->common, pll->lock); 76 + 77 + out: 78 + return notifier_from_errno(ret); 79 + } 80 + 81 + int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb) 82 + { 83 + pll_nb->clk_nb.notifier_call = ccu_pll_notifier_cb; 84 + 85 + return clk_notifier_register(pll_nb->common->hw.clk, 86 + &pll_nb->clk_nb); 42 87 } 43 88 44 89 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
+12
drivers/clk/sunxi-ng/ccu_common.h
··· 83 83 84 84 void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock); 85 85 86 + struct ccu_pll_nb { 87 + struct notifier_block clk_nb; 88 + struct ccu_common *common; 89 + 90 + u32 enable; 91 + u32 lock; 92 + }; 93 + 94 + #define to_ccu_pll_nb(_nb) container_of(_nb, struct ccu_pll_nb, clk_nb) 95 + 96 + int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb); 97 + 86 98 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, 87 99 const struct sunxi_ccu_desc *desc); 88 100
+11 -1
drivers/hid/wacom_wac.c
··· 2006 2006 return; 2007 2007 case HID_DG_TOOLSERIALNUMBER: 2008 2008 wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL); 2009 - wacom_wac->serial[0] |= value; 2009 + wacom_wac->serial[0] |= (__u32)value; 2010 2010 return; 2011 2011 case WACOM_HID_WD_SENSE: 2012 2012 wacom_wac->hid_data.sense_state = value; ··· 2175 2175 wacom_wac->hid_data.cc_report = field->report->id; 2176 2176 wacom_wac->hid_data.cc_index = field->index; 2177 2177 wacom_wac->hid_data.cc_value_index = usage->usage_index; 2178 + break; 2179 + case HID_DG_CONTACTID: 2180 + if ((field->logical_maximum - field->logical_minimum) < touch_max) { 2181 + /* 2182 + * The HID descriptor for G11 sensors leaves logical 2183 + * maximum set to '1' despite it being a multitouch 2184 + * device. Override to a sensible number. 2185 + */ 2186 + field->logical_maximum = 255; 2187 + } 2178 2188 break; 2179 2189 } 2180 2190 }
+8
drivers/input/mouse/elantech.c
··· 1118 1118 * Asus UX32VD 0x361f02 00, 15, 0e clickpad 1119 1119 * Avatar AVIU-145A2 0x361f00 ? clickpad 1120 1120 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons 1121 + * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons 1121 1122 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons 1122 1123 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons 1123 1124 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) ··· 1522 1521 .matches = { 1523 1522 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 1524 1523 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"), 1524 + }, 1525 + }, 1526 + { 1527 + /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */ 1528 + .matches = { 1529 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 1530 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"), 1525 1531 }, 1526 1532 }, 1527 1533 {
+11 -1
drivers/mmc/core/sdio_bus.c
··· 267 267 sdio_free_func_cis(func); 268 268 269 269 kfree(func->info); 270 - 270 + kfree(func->tmpbuf); 271 271 kfree(func); 272 272 } 273 273 ··· 281 281 func = kzalloc(sizeof(struct sdio_func), GFP_KERNEL); 282 282 if (!func) 283 283 return ERR_PTR(-ENOMEM); 284 + 285 + /* 286 + * allocate buffer separately to make sure it's properly aligned for 287 + * DMA usage (incl. 64 bit DMA) 288 + */ 289 + func->tmpbuf = kmalloc(4, GFP_KERNEL); 290 + if (!func->tmpbuf) { 291 + kfree(func); 292 + return ERR_PTR(-ENOMEM); 293 + } 284 294 285 295 func->card = card; 286 296
+9 -2
drivers/mmc/host/dw_mmc.c
··· 22 22 #include <linux/ioport.h> 23 23 #include <linux/module.h> 24 24 #include <linux/platform_device.h> 25 + #include <linux/pm_runtime.h> 25 26 #include <linux/seq_file.h> 26 27 #include <linux/slab.h> 27 28 #include <linux/stat.h> ··· 1622 1621 1623 1622 if (card->type == MMC_TYPE_SDIO || 1624 1623 card->type == MMC_TYPE_SD_COMBO) { 1625 - set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1624 + if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) { 1625 + pm_runtime_get_noresume(mmc->parent); 1626 + set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1627 + } 1626 1628 clk_en_a = clk_en_a_old & ~clken_low_pwr; 1627 1629 } else { 1628 - clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1630 + if (test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) { 1631 + pm_runtime_put_noidle(mmc->parent); 1632 + clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1633 + } 1629 1634 clk_en_a = clk_en_a_old | clken_low_pwr; 1630 1635 } 1631 1636
+1
drivers/mmc/host/sdhci-esdhc-imx.c
··· 830 830 831 831 switch (uhs) { 832 832 case MMC_TIMING_UHS_SDR50: 833 + case MMC_TIMING_UHS_DDR50: 833 834 pinctrl = imx_data->pins_100mhz; 834 835 break; 835 836 case MMC_TIMING_UHS_SDR104:
+12 -1
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
··· 581 581 p_params->ets_cbs, 582 582 p_ets->pri_tc_tbl[0], p_params->max_ets_tc); 583 583 584 + if (p_params->ets_enabled && !p_params->max_ets_tc) { 585 + p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES; 586 + DP_VERBOSE(p_hwfn, QED_MSG_DCB, 587 + "ETS params: max_ets_tc is forced to %d\n", 588 + p_params->max_ets_tc); 589 + } 590 + 584 591 /* 8 bit tsa and bw data corresponding to each of the 8 TC's are 585 592 * encoded in a type u32 array of size 2. 586 593 */ ··· 1006 999 u8 pfc_map = 0; 1007 1000 int i; 1008 1001 1002 + *pfc &= ~DCBX_PFC_ERROR_MASK; 1003 + 1009 1004 if (p_params->pfc.willing) 1010 1005 *pfc |= DCBX_PFC_WILLING_MASK; 1011 1006 else ··· 1262 1253 { 1263 1254 struct qed_dcbx_get *dcbx_info; 1264 1255 1265 - dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL); 1256 + dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC); 1266 1257 if (!dcbx_info) 1267 1258 return NULL; 1268 1259 ··· 2079 2070 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; 2080 2071 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) 2081 2072 dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i)); 2073 + 2074 + dcbx_set.config.params.pfc.max_tc = pfc->pfc_cap; 2082 2075 2083 2076 ptt = qed_ptt_acquire(hwfn); 2084 2077 if (!ptt)
-2
drivers/net/phy/dp83640.c
··· 1438 1438 skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; 1439 1439 skb_queue_tail(&dp83640->rx_queue, skb); 1440 1440 schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT); 1441 - } else { 1442 - netif_rx_ni(skb); 1443 1441 } 1444 1442 1445 1443 return true;
+2 -7
drivers/net/usb/ch9200.c
··· 254 254 tx_overhead = 0x40; 255 255 256 256 len = skb->len; 257 - if (skb_headroom(skb) < tx_overhead) { 258 - struct sk_buff *skb2; 259 - 260 - skb2 = skb_copy_expand(skb, tx_overhead, 0, flags); 257 + if (skb_cow_head(skb, tx_overhead)) { 261 258 dev_kfree_skb_any(skb); 262 - skb = skb2; 263 - if (!skb) 264 - return NULL; 259 + return NULL; 265 260 } 266 261 267 262 __skb_push(skb, tx_overhead);
+2 -5
drivers/net/usb/cx82310_eth.c
··· 293 293 { 294 294 int len = skb->len; 295 295 296 - if (skb_headroom(skb) < 2) { 297 - struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags); 296 + if (skb_cow_head(skb, 2)) { 298 297 dev_kfree_skb_any(skb); 299 - skb = skb2; 300 - if (!skb) 301 - return NULL; 298 + return NULL; 302 299 } 303 300 skb_push(skb, 2); 304 301
+6 -12
drivers/net/usb/kaweth.c
··· 801 801 } 802 802 803 803 /* We now decide whether we can put our special header into the sk_buff */ 804 - if (skb_cloned(skb) || skb_headroom(skb) < 2) { 805 - /* no such luck - we make our own */ 806 - struct sk_buff *copied_skb; 807 - copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC); 808 - dev_kfree_skb_irq(skb); 809 - skb = copied_skb; 810 - if (!copied_skb) { 811 - net->stats.tx_errors++; 812 - netif_start_queue(net); 813 - spin_unlock_irq(&kaweth->device_lock); 814 - return NETDEV_TX_OK; 815 - } 804 + if (skb_cow_head(skb, 2)) { 805 + net->stats.tx_errors++; 806 + netif_start_queue(net); 807 + spin_unlock_irq(&kaweth->device_lock); 808 + dev_kfree_skb_any(skb); 809 + return NETDEV_TX_OK; 816 810 } 817 811 818 812 private_header = (__le16 *)__skb_push(skb, 2);
+2 -7
drivers/net/usb/lan78xx.c
··· 2608 2608 { 2609 2609 u32 tx_cmd_a, tx_cmd_b; 2610 2610 2611 - if (skb_headroom(skb) < TX_OVERHEAD) { 2612 - struct sk_buff *skb2; 2613 - 2614 - skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags); 2611 + if (skb_cow_head(skb, TX_OVERHEAD)) { 2615 2612 dev_kfree_skb_any(skb); 2616 - skb = skb2; 2617 - if (!skb) 2618 - return NULL; 2613 + return NULL; 2619 2614 } 2620 2615 2621 2616 if (lan78xx_linearize(skb) < 0)
+2 -6
drivers/net/usb/smsc75xx.c
··· 2204 2204 { 2205 2205 u32 tx_cmd_a, tx_cmd_b; 2206 2206 2207 - if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { 2208 - struct sk_buff *skb2 = 2209 - skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags); 2207 + if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) { 2210 2208 dev_kfree_skb_any(skb); 2211 - skb = skb2; 2212 - if (!skb) 2213 - return NULL; 2209 + return NULL; 2214 2210 } 2215 2211 2216 2212 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
+6 -6
drivers/net/usb/smsc95xx.c
··· 2002 2002 /* We do not advertise SG, so skbs should be already linearized */ 2003 2003 BUG_ON(skb_shinfo(skb)->nr_frags); 2004 2004 2005 - if (skb_headroom(skb) < overhead) { 2006 - struct sk_buff *skb2 = skb_copy_expand(skb, 2007 - overhead, 0, flags); 2005 + /* Make writable and expand header space by overhead if required */ 2006 + if (skb_cow_head(skb, overhead)) { 2007 + /* Must deallocate here as returning NULL to indicate error 2008 + * means the skb won't be deallocated in the caller. 2009 + */ 2008 2010 dev_kfree_skb_any(skb); 2009 - skb = skb2; 2010 - if (!skb) 2011 - return NULL; 2011 + return NULL; 2012 2012 } 2013 2013 2014 2014 if (csum) {
+2 -7
drivers/net/usb/sr9700.c
··· 457 457 458 458 len = skb->len; 459 459 460 - if (skb_headroom(skb) < SR_TX_OVERHEAD) { 461 - struct sk_buff *skb2; 462 - 463 - skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags); 460 + if (skb_cow_head(skb, SR_TX_OVERHEAD)) { 464 461 dev_kfree_skb_any(skb); 465 - skb = skb2; 466 - if (!skb) 467 - return NULL; 462 + return NULL; 468 463 } 469 464 470 465 __skb_push(skb, SR_TX_OVERHEAD);
+15 -8
drivers/nvme/host/core.c
··· 1316 1316 table->entries[state] = target; 1317 1317 1318 1318 /* 1319 + * Don't allow transitions to the deepest state 1320 + * if it's quirked off. 1321 + */ 1322 + if (state == ctrl->npss && 1323 + (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 1324 + continue; 1325 + 1326 + /* 1319 1327 * Is this state a useful non-operational state for 1320 1328 * higher-power states to autonomously transition to? 1321 1329 */ ··· 1395 1387 }; 1396 1388 1397 1389 static const struct nvme_core_quirk_entry core_quirks[] = { 1398 - /* 1399 - * Seen on a Samsung "SM951 NVMe SAMSUNG 256GB": using APST causes 1400 - * the controller to go out to lunch. It dies when the watchdog 1401 - * timer reads CSTS and gets 0xffffffff. 1402 - */ 1403 1390 { 1404 - .vid = 0x144d, 1405 - .fr = "BXW75D0Q", 1391 + /* 1392 + * This Toshiba device seems to die using any APST states. See: 1393 + * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 1394 + */ 1395 + .vid = 0x1179, 1396 + .mn = "THNSF5256GPUK TOSHIBA", 1406 1397 .quirks = NVME_QUIRK_NO_APST, 1407 - }, 1398 + } 1408 1399 }; 1409 1400 1410 1401 /* match is null-terminated but idstr is space-padded. */
+5
drivers/nvme/host/nvme.h
··· 83 83 * APST should not be used. 84 84 */ 85 85 NVME_QUIRK_NO_APST = (1 << 4), 86 + 87 + /* 88 + * The deepest sleep state should not be used. 89 + */ 90 + NVME_QUIRK_NO_DEEPEST_PS = (1 << 5), 86 91 }; 87 92 88 93 /*
+25 -1
drivers/nvme/host/pci.c
··· 19 19 #include <linux/blk-mq-pci.h> 20 20 #include <linux/cpu.h> 21 21 #include <linux/delay.h> 22 + #include <linux/dmi.h> 22 23 #include <linux/errno.h> 23 24 #include <linux/fs.h> 24 25 #include <linux/genhd.h> ··· 1944 1943 return -ENODEV; 1945 1944 } 1946 1945 1946 + static unsigned long check_dell_samsung_bug(struct pci_dev *pdev) 1947 + { 1948 + if (pdev->vendor == 0x144d && pdev->device == 0xa802) { 1949 + /* 1950 + * Several Samsung devices seem to drop off the PCIe bus 1951 + * randomly when APST is on and uses the deepest sleep state. 1952 + * This has been observed on a Samsung "SM951 NVMe SAMSUNG 1953 + * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD 1954 + * 950 PRO 256GB", but it seems to be restricted to two Dell 1955 + * laptops. 1956 + */ 1957 + if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && 1958 + (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || 1959 + dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) 1960 + return NVME_QUIRK_NO_DEEPEST_PS; 1961 + } 1962 + 1963 + return 0; 1964 + } 1965 + 1947 1966 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1948 1967 { 1949 1968 int node, result = -ENOMEM; 1950 1969 struct nvme_dev *dev; 1970 + unsigned long quirks = id->driver_data; 1951 1971 1952 1972 node = dev_to_node(&pdev->dev); 1953 1973 if (node == NUMA_NO_NODE) ··· 2000 1978 if (result) 2001 1979 goto put_pci; 2002 1980 1981 + quirks |= check_dell_samsung_bug(pdev); 1982 + 2003 1983 result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 2004 - id->driver_data); 1984 + quirks); 2005 1985 if (result) 2006 1986 goto release_pools; 2007 1987
+5 -1
drivers/pci/dwc/pcie-hisi.c
··· 380 380 381 381 static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = { 382 382 { 383 - .compatible = "hisilicon,pcie-almost-ecam", 383 + .compatible = "hisilicon,hip06-pcie-ecam", 384 384 .data = (void *) &hisi_pcie_platform_ops, 385 + }, 386 + { 387 + .compatible = "hisilicon,hip07-pcie-ecam", 388 + .data = (void *) &hisi_pcie_platform_ops, 385 389 }, 386 390 {}, 387 391 };
+4 -3
drivers/video/backlight/pwm_bl.c
··· 297 297 } 298 298 299 299 /* 300 - * If the GPIO is configured as input, change the direction to output 301 - * and set the GPIO as active. 300 + * If the GPIO is not known to be already configured as output, that 301 + * is, if gpiod_get_direction returns either GPIOF_DIR_IN or -EINVAL, 302 + * change the direction to output and set the GPIO as active. 302 303 * Do not force the GPIO to active when it was already output as it 303 304 * could cause backlight flickering or we would enable the backlight too 304 305 * early. Leave the decision of the initial backlight state for later. 305 306 */ 306 307 if (pb->enable_gpio && 307 - gpiod_get_direction(pb->enable_gpio) == GPIOF_DIR_IN) 308 + gpiod_get_direction(pb->enable_gpio) != GPIOF_DIR_OUT) 308 309 gpiod_direction_output(pb->enable_gpio, 1); 309 310 310 311 pb->power_supply = devm_regulator_get(&pdev->dev, "power");
+10
fs/cifs/smb1ops.c
··· 1015 1015 return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle; 1016 1016 } 1017 1017 1018 + static bool 1019 + cifs_can_echo(struct TCP_Server_Info *server) 1020 + { 1021 + if (server->tcpStatus == CifsGood) 1022 + return true; 1023 + 1024 + return false; 1025 + } 1026 + 1018 1027 struct smb_version_operations smb1_operations = { 1019 1028 .send_cancel = send_nt_cancel, 1020 1029 .compare_fids = cifs_compare_fids, ··· 1058 1049 .get_dfs_refer = CIFSGetDFSRefer, 1059 1050 .qfs_tcon = cifs_qfs_tcon, 1060 1051 .is_path_accessible = cifs_is_path_accessible, 1052 + .can_echo = cifs_can_echo, 1061 1053 .query_path_info = cifs_query_path_info, 1062 1054 .query_file_info = cifs_query_file_info, 1063 1055 .get_srv_inum = cifs_get_srv_inum,
+1 -1
fs/nfsd/nfs4proc.c
··· 2489 2489 2490 2490 int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op) 2491 2491 { 2492 - if (op->opnum == OP_ILLEGAL) 2492 + if (op->opnum == OP_ILLEGAL || op->status == nfserr_notsupp) 2493 2493 return op_encode_hdr_size * sizeof(__be32); 2494 2494 2495 2495 BUG_ON(OPDESC(op)->op_rsize_bop == NULL);
+1
fs/nsfs.c
··· 91 91 return ERR_PTR(-ENOMEM); 92 92 } 93 93 d_instantiate(dentry, inode); 94 + dentry->d_flags |= DCACHE_RCUACCESS; 94 95 dentry->d_fsdata = (void *)ns->ops; 95 96 d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry); 96 97 if (d) {
+1 -1
include/linux/mmc/sdio_func.h
··· 53 53 unsigned int state; /* function state */ 54 54 #define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */ 55 55 56 - u8 tmpbuf[4]; /* DMA:able scratch buffer */ 56 + u8 *tmpbuf; /* DMA:able scratch buffer */ 57 57 58 58 unsigned num_info; /* number of info strings */ 59 59 const char **info; /* info strings */
+1 -1
include/uapi/linux/ipv6_route.h
··· 35 35 #define RTF_PREF(pref) ((pref) << 27) 36 36 #define RTF_PREF_MASK 0x18000000 37 37 38 - #define RTF_PCPU 0x40000000 38 + #define RTF_PCPU 0x40000000 /* read-only: can not be set by user */ 39 39 #define RTF_LOCAL 0x80000000 40 40 41 41
+14 -2
kernel/trace/ring_buffer.c
··· 3405 3405 int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 3406 3406 { 3407 3407 struct ring_buffer_per_cpu *cpu_buffer; 3408 + struct buffer_page *reader; 3409 + struct buffer_page *head_page; 3410 + struct buffer_page *commit_page; 3411 + unsigned commit; 3408 3412 3409 3413 cpu_buffer = iter->cpu_buffer; 3410 3414 3411 - return iter->head_page == cpu_buffer->commit_page && 3412 - iter->head == rb_commit_index(cpu_buffer); 3415 + /* Remember, trace recording is off when iterator is in use */ 3416 + reader = cpu_buffer->reader_page; 3417 + head_page = cpu_buffer->head_page; 3418 + commit_page = cpu_buffer->commit_page; 3419 + commit = rb_page_commit(commit_page); 3420 + 3421 + return ((iter->head_page == commit_page && iter->head == commit) || 3422 + (iter->head_page == reader && commit_page == head_page && 3423 + head_page->read == commit && 3424 + iter->head == rb_page_commit(cpu_buffer->reader_page))); 3413 3425 } 3414 3426 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 3415 3427
+5 -3
kernel/trace/trace.c
··· 6733 6733 return ret; 6734 6734 6735 6735 out_reg: 6736 + ret = alloc_snapshot(&global_trace); 6737 + if (ret < 0) 6738 + goto out; 6739 + 6736 6740 ret = register_ftrace_function_probe(glob, ops, count); 6737 6741 6738 - if (ret >= 0) 6739 - alloc_snapshot(&global_trace); 6740 - 6742 + out: 6741 6743 return ret < 0 ? ret : 0; 6742 6744 } 6743 6745
+1 -1
mm/migrate.c
··· 184 184 unlock_page(page); 185 185 put_page(page); 186 186 } else { 187 - putback_lru_page(page); 188 187 dec_node_page_state(page, NR_ISOLATED_ANON + 189 188 page_is_file_cache(page)); 189 + putback_lru_page(page); 190 190 } 191 191 } 192 192 }
+20 -23
mm/page_alloc.c
··· 1090 1090 { 1091 1091 int migratetype = 0; 1092 1092 int batch_free = 0; 1093 - unsigned long nr_scanned, flags; 1093 + unsigned long nr_scanned; 1094 1094 bool isolated_pageblocks; 1095 1095 1096 - spin_lock_irqsave(&zone->lock, flags); 1096 + spin_lock(&zone->lock); 1097 1097 isolated_pageblocks = has_isolate_pageblock(zone); 1098 1098 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); 1099 1099 if (nr_scanned) ··· 1142 1142 trace_mm_page_pcpu_drain(page, 0, mt); 1143 1143 } while (--count && --batch_free && !list_empty(list)); 1144 1144 } 1145 - spin_unlock_irqrestore(&zone->lock, flags); 1145 + spin_unlock(&zone->lock); 1146 1146 } 1147 1147 1148 1148 static void free_one_page(struct zone *zone, ··· 1150 1150 unsigned int order, 1151 1151 int migratetype) 1152 1152 { 1153 - unsigned long nr_scanned, flags; 1154 - spin_lock_irqsave(&zone->lock, flags); 1155 - __count_vm_events(PGFREE, 1 << order); 1153 + unsigned long nr_scanned; 1154 + spin_lock(&zone->lock); 1156 1155 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); 1157 1156 if (nr_scanned) 1158 1157 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned); ··· 1161 1162 migratetype = get_pfnblock_migratetype(page, pfn); 1162 1163 } 1163 1164 __free_one_page(page, pfn, zone, order, migratetype); 1164 - spin_unlock_irqrestore(&zone->lock, flags); 1165 + spin_unlock(&zone->lock); 1165 1166 } 1166 1167 1167 1168 static void __meminit __init_single_page(struct page *page, unsigned long pfn, ··· 1239 1240 1240 1241 static void __free_pages_ok(struct page *page, unsigned int order) 1241 1242 { 1243 + unsigned long flags; 1242 1244 int migratetype; 1243 1245 unsigned long pfn = page_to_pfn(page); 1244 1246 ··· 1247 1247 return; 1248 1248 1249 1249 migratetype = get_pfnblock_migratetype(page, pfn); 1250 + local_irq_save(flags); 1251 + __count_vm_events(PGFREE, 1 << order); 1250 1252 free_one_page(page_zone(page), page, pfn, order, migratetype); 1253 + 
local_irq_restore(flags); 1251 1254 } 1252 1255 1253 1256 static void __init __free_pages_boot_core(struct page *page, unsigned int order) ··· 2222 2219 int migratetype, bool cold) 2223 2220 { 2224 2221 int i, alloced = 0; 2225 - unsigned long flags; 2226 2222 2227 - spin_lock_irqsave(&zone->lock, flags); 2223 + spin_lock(&zone->lock); 2228 2224 for (i = 0; i < count; ++i) { 2229 2225 struct page *page = __rmqueue(zone, order, migratetype); 2230 2226 if (unlikely(page == NULL)) ··· 2259 2257 * pages added to the pcp list. 2260 2258 */ 2261 2259 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 2262 - spin_unlock_irqrestore(&zone->lock, flags); 2260 + spin_unlock(&zone->lock); 2263 2261 return alloced; 2264 2262 } 2265 2263 ··· 2487 2485 { 2488 2486 struct zone *zone = page_zone(page); 2489 2487 struct per_cpu_pages *pcp; 2488 + unsigned long flags; 2490 2489 unsigned long pfn = page_to_pfn(page); 2491 2490 int migratetype; 2492 - 2493 - if (in_interrupt()) { 2494 - __free_pages_ok(page, 0); 2495 - return; 2496 - } 2497 2491 2498 2492 if (!free_pcp_prepare(page)) 2499 2493 return; 2500 2494 2501 2495 migratetype = get_pfnblock_migratetype(page, pfn); 2502 2496 set_pcppage_migratetype(page, migratetype); 2503 - preempt_disable(); 2497 + local_irq_save(flags); 2498 + __count_vm_event(PGFREE); 2504 2499 2505 2500 /* 2506 2501 * We only track unmovable, reclaimable and movable on pcp lists. 
··· 2514 2515 migratetype = MIGRATE_MOVABLE; 2515 2516 } 2516 2517 2517 - __count_vm_event(PGFREE); 2518 2518 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2519 2519 if (!cold) 2520 2520 list_add(&page->lru, &pcp->lists[migratetype]); ··· 2527 2529 } 2528 2530 2529 2531 out: 2530 - preempt_enable(); 2532 + local_irq_restore(flags); 2531 2533 } 2532 2534 2533 2535 /* ··· 2652 2654 { 2653 2655 struct page *page; 2654 2656 2655 - VM_BUG_ON(in_interrupt()); 2656 - 2657 2657 do { 2658 2658 if (list_empty(list)) { 2659 2659 pcp->count += rmqueue_bulk(zone, 0, ··· 2682 2686 struct list_head *list; 2683 2687 bool cold = ((gfp_flags & __GFP_COLD) != 0); 2684 2688 struct page *page; 2689 + unsigned long flags; 2685 2690 2686 - preempt_disable(); 2691 + local_irq_save(flags); 2687 2692 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2688 2693 list = &pcp->lists[migratetype]; 2689 2694 page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list); ··· 2692 2695 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2693 2696 zone_statistics(preferred_zone, zone); 2694 2697 } 2695 - preempt_enable(); 2698 + local_irq_restore(flags); 2696 2699 return page; 2697 2700 } 2698 2701 ··· 2708 2711 unsigned long flags; 2709 2712 struct page *page; 2710 2713 2711 - if (likely(order == 0) && !in_interrupt()) { 2714 + if (likely(order == 0)) { 2712 2715 page = rmqueue_pcplist(preferred_zone, zone, order, 2713 2716 gfp_flags, migratetype); 2714 2717 goto out;
+1 -2
mm/vmstat.c
··· 1768 1768 { 1769 1769 int ret __maybe_unused; 1770 1770 1771 - mm_percpu_wq = alloc_workqueue("mm_percpu_wq", 1772 - WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); 1771 + mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0); 1773 1772 1774 1773 #ifdef CONFIG_SMP 1775 1774 ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
+8 -2
net/core/netpoll.c
··· 105 105 while ((skb = skb_dequeue(&npinfo->txq))) { 106 106 struct net_device *dev = skb->dev; 107 107 struct netdev_queue *txq; 108 + unsigned int q_index; 108 109 109 110 if (!netif_device_present(dev) || !netif_running(dev)) { 110 111 kfree_skb(skb); 111 112 continue; 112 113 } 113 114 114 - txq = skb_get_tx_queue(dev, skb); 115 - 116 115 local_irq_save(flags); 116 + /* check if skb->queue_mapping is still valid */ 117 + q_index = skb_get_queue_mapping(skb); 118 + if (unlikely(q_index >= dev->real_num_tx_queues)) { 119 + q_index = q_index % dev->real_num_tx_queues; 120 + skb_set_queue_mapping(skb, q_index); 121 + } 122 + txq = netdev_get_tx_queue(dev, q_index); 117 123 HARD_TX_LOCK(dev, txq, smp_processor_id()); 118 124 if (netif_xmit_frozen_or_stopped(txq) || 119 125 netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
-1
net/ipv6/exthdrs.c
··· 388 388 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, 389 389 ((&hdr->segments_left) - 390 390 skb_network_header(skb))); 391 - kfree_skb(skb); 392 391 return -1; 393 392 } 394 393
+6 -7
net/ipv6/ip6mr.c
··· 774 774 * Delete a VIF entry 775 775 */ 776 776 777 - static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) 777 + static int mif6_delete(struct mr6_table *mrt, int vifi, int notify, 778 + struct list_head *head) 778 779 { 779 780 struct mif_device *v; 780 781 struct net_device *dev; ··· 821 820 dev->ifindex, &in6_dev->cnf); 822 821 } 823 822 824 - if (v->flags & MIFF_REGISTER) 823 + if ((v->flags & MIFF_REGISTER) && !notify) 825 824 unregister_netdevice_queue(dev, head); 826 825 827 826 dev_put(dev); ··· 1332 1331 struct mr6_table *mrt; 1333 1332 struct mif_device *v; 1334 1333 int ct; 1335 - LIST_HEAD(list); 1336 1334 1337 1335 if (event != NETDEV_UNREGISTER) 1338 1336 return NOTIFY_DONE; ··· 1340 1340 v = &mrt->vif6_table[0]; 1341 1341 for (ct = 0; ct < mrt->maxvif; ct++, v++) { 1342 1342 if (v->dev == dev) 1343 - mif6_delete(mrt, ct, &list); 1343 + mif6_delete(mrt, ct, 1, NULL); 1344 1344 } 1345 1345 } 1346 - unregister_netdevice_many(&list); 1347 1346 1348 1347 return NOTIFY_DONE; 1349 1348 } ··· 1551 1552 for (i = 0; i < mrt->maxvif; i++) { 1552 1553 if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC)) 1553 1554 continue; 1554 - mif6_delete(mrt, i, &list); 1555 + mif6_delete(mrt, i, 0, &list); 1555 1556 } 1556 1557 unregister_netdevice_many(&list); 1557 1558 ··· 1707 1708 if (copy_from_user(&mifi, optval, sizeof(mifi_t))) 1708 1709 return -EFAULT; 1709 1710 rtnl_lock(); 1710 - ret = mif6_delete(mrt, mifi, NULL); 1711 + ret = mif6_delete(mrt, mifi, 0, NULL); 1711 1712 rtnl_unlock(); 1712 1713 return ret; 1713 1714
+4
net/ipv6/route.c
··· 1854 1854 int addr_type; 1855 1855 int err = -EINVAL; 1856 1856 1857 + /* RTF_PCPU is an internal flag; can not be set by userspace */ 1858 + if (cfg->fc_flags & RTF_PCPU) 1859 + goto out; 1860 + 1857 1861 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128) 1858 1862 goto out; 1859 1863 #ifndef CONFIG_IPV6_SUBTREES
+3
net/ipv6/seg6.c
··· 53 53 struct sr6_tlv *tlv; 54 54 unsigned int tlv_len; 55 55 56 + if (trailing < sizeof(*tlv)) 57 + return false; 58 + 56 59 tlv = (struct sr6_tlv *)((unsigned char *)srh + tlv_offset); 57 60 tlv_len = sizeof(*tlv) + tlv->len; 58 61
+64 -29
net/key/af_key.c
··· 63 63 } u; 64 64 struct sk_buff *skb; 65 65 } dump; 66 + struct mutex dump_lock; 66 67 }; 68 + 69 + static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len, 70 + xfrm_address_t *saddr, xfrm_address_t *daddr, 71 + u16 *family); 67 72 68 73 static inline struct pfkey_sock *pfkey_sk(struct sock *sk) 69 74 { ··· 144 139 { 145 140 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 146 141 struct sock *sk; 142 + struct pfkey_sock *pfk; 147 143 int err; 148 144 149 145 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) ··· 158 152 sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, kern); 159 153 if (sk == NULL) 160 154 goto out; 155 + 156 + pfk = pfkey_sk(sk); 157 + mutex_init(&pfk->dump_lock); 161 158 162 159 sock->ops = &pfkey_ops; 163 160 sock_init_data(sock, sk); ··· 290 281 struct sadb_msg *hdr; 291 282 int rc; 292 283 284 + mutex_lock(&pfk->dump_lock); 285 + if (!pfk->dump.dump) { 286 + rc = 0; 287 + goto out; 288 + } 289 + 293 290 rc = pfk->dump.dump(pfk); 294 - if (rc == -ENOBUFS) 295 - return 0; 291 + if (rc == -ENOBUFS) { 292 + rc = 0; 293 + goto out; 294 + } 296 295 297 296 if (pfk->dump.skb) { 298 - if (!pfkey_can_dump(&pfk->sk)) 299 - return 0; 297 + if (!pfkey_can_dump(&pfk->sk)) { 298 + rc = 0; 299 + goto out; 300 + } 300 301 301 302 hdr = (struct sadb_msg *) pfk->dump.skb->data; 302 303 hdr->sadb_msg_seq = 0; ··· 317 298 } 318 299 319 300 pfkey_terminate_dump(pfk); 301 + 302 + out: 303 + mutex_unlock(&pfk->dump_lock); 320 304 return rc; 321 305 } 322 306 ··· 1815 1793 struct xfrm_address_filter *filter = NULL; 1816 1794 struct pfkey_sock *pfk = pfkey_sk(sk); 1817 1795 1818 - if (pfk->dump.dump != NULL) 1796 + mutex_lock(&pfk->dump_lock); 1797 + if (pfk->dump.dump != NULL) { 1798 + mutex_unlock(&pfk->dump_lock); 1819 1799 return -EBUSY; 1800 + } 1820 1801 1821 1802 proto = pfkey_satype2proto(hdr->sadb_msg_satype); 1822 - if (proto == 0) 1803 + if (proto == 0) { 1804 + mutex_unlock(&pfk->dump_lock); 1823 1805 return -EINVAL; 1806 + } 1824 
1807 1825 1808 if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { 1826 1809 struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; 1827 1810 1828 1811 filter = kmalloc(sizeof(*filter), GFP_KERNEL); 1829 - if (filter == NULL) 1812 + if (filter == NULL) { 1813 + mutex_unlock(&pfk->dump_lock); 1830 1814 return -ENOMEM; 1815 + } 1831 1816 1832 1817 memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr, 1833 1818 sizeof(xfrm_address_t)); ··· 1850 1821 pfk->dump.dump = pfkey_dump_sa; 1851 1822 pfk->dump.done = pfkey_dump_sa_done; 1852 1823 xfrm_state_walk_init(&pfk->dump.u.state, proto, filter); 1824 + mutex_unlock(&pfk->dump_lock); 1853 1825 1854 1826 return pfkey_do_dump(pfk); 1855 1827 } ··· 1943 1913 1944 1914 /* addresses present only in tunnel mode */ 1945 1915 if (t->mode == XFRM_MODE_TUNNEL) { 1946 - u8 *sa = (u8 *) (rq + 1); 1947 - int family, socklen; 1916 + int err; 1948 1917 1949 - family = pfkey_sockaddr_extract((struct sockaddr *)sa, 1950 - &t->saddr); 1951 - if (!family) 1952 - return -EINVAL; 1953 - 1954 - socklen = pfkey_sockaddr_len(family); 1955 - if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen), 1956 - &t->id.daddr) != family) 1957 - return -EINVAL; 1958 - t->encap_family = family; 1918 + err = parse_sockaddr_pair( 1919 + (struct sockaddr *)(rq + 1), 1920 + rq->sadb_x_ipsecrequest_len - sizeof(*rq), 1921 + &t->saddr, &t->id.daddr, &t->encap_family); 1922 + if (err) 1923 + return err; 1959 1924 } else 1960 1925 t->encap_family = xp->family; 1961 1926 ··· 1970 1945 if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy)) 1971 1946 return -EINVAL; 1972 1947 1973 - while (len >= sizeof(struct sadb_x_ipsecrequest)) { 1948 + while (len >= sizeof(*rq)) { 1949 + if (len < rq->sadb_x_ipsecrequest_len || 1950 + rq->sadb_x_ipsecrequest_len < sizeof(*rq)) 1951 + return -EINVAL; 1952 + 1974 1953 if ((err = parse_ipsecrequest(xp, rq)) < 0) 1975 1954 return err; 1976 1955 len -= rq->sadb_x_ipsecrequest_len; ··· 2437 2408 return err; 2438 2409 } 2439 
2410 2440 - #ifdef CONFIG_NET_KEY_MIGRATE 2441 2411 static int pfkey_sockaddr_pair_size(sa_family_t family) 2442 2412 { 2443 2413 return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2); ··· 2448 2420 { 2449 2421 int af, socklen; 2450 2422 2451 - if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family)) 2423 + if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family)) 2452 2424 return -EINVAL; 2453 2425 2454 2426 af = pfkey_sockaddr_extract(sa, saddr); ··· 2464 2436 return 0; 2465 2437 } 2466 2438 2439 + #ifdef CONFIG_NET_KEY_MIGRATE 2467 2440 static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len, 2468 2441 struct xfrm_migrate *m) 2469 2442 { ··· 2472 2443 struct sadb_x_ipsecrequest *rq2; 2473 2444 int mode; 2474 2445 2475 - if (len <= sizeof(struct sadb_x_ipsecrequest) || 2476 - len < rq1->sadb_x_ipsecrequest_len) 2446 + if (len < sizeof(*rq1) || 2447 + len < rq1->sadb_x_ipsecrequest_len || 2448 + rq1->sadb_x_ipsecrequest_len < sizeof(*rq1)) 2477 2449 return -EINVAL; 2478 2450 2479 2451 /* old endoints */ 2480 2452 err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1), 2481 - rq1->sadb_x_ipsecrequest_len, 2453 + rq1->sadb_x_ipsecrequest_len - sizeof(*rq1), 2482 2454 &m->old_saddr, &m->old_daddr, 2483 2455 &m->old_family); 2484 2456 if (err) ··· 2488 2458 rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len); 2489 2459 len -= rq1->sadb_x_ipsecrequest_len; 2490 2460 2491 - if (len <= sizeof(struct sadb_x_ipsecrequest) || 2492 - len < rq2->sadb_x_ipsecrequest_len) 2461 + if (len <= sizeof(*rq2) || 2462 + len < rq2->sadb_x_ipsecrequest_len || 2463 + rq2->sadb_x_ipsecrequest_len < sizeof(*rq2)) 2493 2464 return -EINVAL; 2494 2465 2495 2466 /* new endpoints */ 2496 2467 err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1), 2497 - rq2->sadb_x_ipsecrequest_len, 2468 + rq2->sadb_x_ipsecrequest_len - sizeof(*rq2), 2498 2469 &m->new_saddr, &m->new_daddr, 2499 2470 &m->new_family); 2500 2471 if (err) ··· 2710 2679 { 2711 
2680 struct pfkey_sock *pfk = pfkey_sk(sk); 2712 2681 2713 - if (pfk->dump.dump != NULL) 2682 + mutex_lock(&pfk->dump_lock); 2683 + if (pfk->dump.dump != NULL) { 2684 + mutex_unlock(&pfk->dump_lock); 2714 2685 return -EBUSY; 2686 + } 2715 2687 2716 2688 pfk->dump.msg_version = hdr->sadb_msg_version; 2717 2689 pfk->dump.msg_portid = hdr->sadb_msg_pid; 2718 2690 pfk->dump.dump = pfkey_dump_sp; 2719 2691 pfk->dump.done = pfkey_dump_sp_done; 2720 2692 xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN); 2693 + mutex_unlock(&pfk->dump_lock); 2721 2694 2722 2695 return pfkey_do_dump(pfk); 2723 2696 }
+68 -18
net/mac80211/rx.c
··· 197 197 return len; 198 198 } 199 199 200 + static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, 201 + struct sk_buff *skb, 202 + int rtap_vendor_space) 203 + { 204 + struct { 205 + struct ieee80211_hdr_3addr hdr; 206 + u8 category; 207 + u8 action_code; 208 + } __packed action; 209 + 210 + if (!sdata) 211 + return; 212 + 213 + BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1); 214 + 215 + if (skb->len < rtap_vendor_space + sizeof(action) + 216 + VHT_MUMIMO_GROUPS_DATA_LEN) 217 + return; 218 + 219 + if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr)) 220 + return; 221 + 222 + skb_copy_bits(skb, rtap_vendor_space, &action, sizeof(action)); 223 + 224 + if (!ieee80211_is_action(action.hdr.frame_control)) 225 + return; 226 + 227 + if (action.category != WLAN_CATEGORY_VHT) 228 + return; 229 + 230 + if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT) 231 + return; 232 + 233 + if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr)) 234 + return; 235 + 236 + skb = skb_copy(skb, GFP_ATOMIC); 237 + if (!skb) 238 + return; 239 + 240 + skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 241 + skb_queue_tail(&sdata->skb_queue, skb); 242 + ieee80211_queue_work(&sdata->local->hw, &sdata->work); 243 + } 244 + 200 245 /* 201 246 * ieee80211_add_rx_radiotap_header - add radiotap header 202 247 * ··· 549 504 struct net_device *prev_dev = NULL; 550 505 int present_fcs_len = 0; 551 506 unsigned int rtap_vendor_space = 0; 552 - struct ieee80211_mgmt *mgmt; 553 507 struct ieee80211_sub_if_data *monitor_sdata = 554 508 rcu_dereference(local->monitor_sdata); 555 509 ··· 594 550 rtap_vendor_space); 595 551 return origskb; 596 552 } 553 + 554 + ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space); 597 555 598 556 /* room for the radiotap header based on driver features */ 599 557 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb); ··· 650 604 651 605 prev_dev = sdata->dev; 652 606 
ieee80211_rx_stats(sdata->dev, skb->len); 653 - } 654 - 655 - mgmt = (void *)skb->data; 656 - if (monitor_sdata && 657 - skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN && 658 - ieee80211_is_action(mgmt->frame_control) && 659 - mgmt->u.action.category == WLAN_CATEGORY_VHT && 660 - mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT && 661 - is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) && 662 - ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) { 663 - struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC); 664 - 665 - if (mu_skb) { 666 - mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 667 - skb_queue_tail(&monitor_sdata->skb_queue, mu_skb); 668 - ieee80211_queue_work(&local->hw, &monitor_sdata->work); 669 - } 670 607 } 671 608 672 609 if (prev_dev) { ··· 3627 3598 !ether_addr_equal(bssid, hdr->addr1)) 3628 3599 return false; 3629 3600 } 3601 + 3602 + /* 3603 + * 802.11-2016 Table 9-26 says that for data frames, A1 must be 3604 + * the BSSID - we've checked that already but may have accepted 3605 + * the wildcard (ff:ff:ff:ff:ff:ff). 3606 + * 3607 + * It also says: 3608 + * The BSSID of the Data frame is determined as follows: 3609 + * a) If the STA is contained within an AP or is associated 3610 + * with an AP, the BSSID is the address currently in use 3611 + * by the STA contained in the AP. 3612 + * 3613 + * So we should not accept data frames with an address that's 3614 + * multicast. 3615 + * 3616 + * Accepting it also opens a security problem because stations 3617 + * could encrypt it with the GTK and inject traffic that way. 3618 + */ 3619 + if (ieee80211_is_data(hdr->frame_control) && multicast) 3620 + return false; 3621 + 3630 3622 return true; 3631 3623 case NL80211_IFTYPE_WDS: 3632 3624 if (bssid || !ieee80211_is_data(hdr->frame_control))
+3 -1
net/qrtr/qrtr.c
··· 658 658 } 659 659 660 660 if (plen != len) { 661 - skb_pad(skb, plen - len); 661 + rc = skb_pad(skb, plen - len); 662 + if (rc) 663 + goto out_node; 662 664 skb_put(skb, plen - len); 663 665 } 664 666
+32 -23
net/sched/act_api.c
··· 529 529 return err; 530 530 } 531 531 532 - static int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb) 532 + static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb) 533 533 { 534 - a->act_cookie = kzalloc(sizeof(*a->act_cookie), GFP_KERNEL); 535 - if (!a->act_cookie) 536 - return -ENOMEM; 534 + struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL); 535 + if (!c) 536 + return NULL; 537 537 538 - a->act_cookie->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL); 539 - if (!a->act_cookie->data) { 540 - kfree(a->act_cookie); 541 - return -ENOMEM; 538 + c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL); 539 + if (!c->data) { 540 + kfree(c); 541 + return NULL; 542 542 } 543 - a->act_cookie->len = nla_len(tb[TCA_ACT_COOKIE]); 543 + c->len = nla_len(tb[TCA_ACT_COOKIE]); 544 544 545 - return 0; 545 + return c; 546 546 } 547 547 548 548 struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, ··· 551 551 { 552 552 struct tc_action *a; 553 553 struct tc_action_ops *a_o; 554 + struct tc_cookie *cookie = NULL; 554 555 char act_name[IFNAMSIZ]; 555 556 struct nlattr *tb[TCA_ACT_MAX + 1]; 556 557 struct nlattr *kind; ··· 567 566 goto err_out; 568 567 if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) 569 568 goto err_out; 569 + if (tb[TCA_ACT_COOKIE]) { 570 + int cklen = nla_len(tb[TCA_ACT_COOKIE]); 571 + 572 + if (cklen > TC_COOKIE_MAX_SIZE) 573 + goto err_out; 574 + 575 + cookie = nla_memdup_cookie(tb); 576 + if (!cookie) { 577 + err = -ENOMEM; 578 + goto err_out; 579 + } 580 + } 570 581 } else { 571 582 err = -EINVAL; 572 583 if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) ··· 617 604 if (err < 0) 618 605 goto err_mod; 619 606 620 - if (tb[TCA_ACT_COOKIE]) { 621 - int cklen = nla_len(tb[TCA_ACT_COOKIE]); 622 - 623 - if (cklen > TC_COOKIE_MAX_SIZE) { 624 - err = -EINVAL; 625 - tcf_hash_release(a, bind); 626 - goto err_mod; 607 + if (name == NULL && tb[TCA_ACT_COOKIE]) { 608 + if (a->act_cookie) { 609 + kfree(a->act_cookie->data); 
610 + kfree(a->act_cookie); 627 611 } 628 - 629 - if (nla_memdup_cookie(a, tb) < 0) { 630 - err = -ENOMEM; 631 - tcf_hash_release(a, bind); 632 - goto err_mod; 633 - } 612 + a->act_cookie = cookie; 634 613 } 635 614 636 615 /* module count goes up only when brand new policy is created ··· 637 632 err_mod: 638 633 module_put(a_o->owner); 639 634 err_out: 635 + if (cookie) { 636 + kfree(cookie->data); 637 + kfree(cookie); 638 + } 640 639 return ERR_PTR(err); 641 640 } 642 641
+1 -1
security/keys/gc.c
··· 46 46 * immediately unlinked. 47 47 */ 48 48 struct key_type key_type_dead = { 49 - .name = "dead", 49 + .name = ".dead", 50 50 }; 51 51 52 52 /*
+11 -9
security/keys/keyctl.c
··· 273 273 * Create and join an anonymous session keyring or join a named session 274 274 * keyring, creating it if necessary. A named session keyring must have Search 275 275 * permission for it to be joined. Session keyrings without this permit will 276 - * be skipped over. 276 + * be skipped over. It is not permitted for userspace to create or join 277 + * keyrings whose name begin with a dot. 277 278 * 278 279 * If successful, the ID of the joined session keyring will be returned. 279 280 */ ··· 291 290 ret = PTR_ERR(name); 292 291 goto error; 293 292 } 293 + 294 + ret = -EPERM; 295 + if (name[0] == '.') 296 + goto error_name; 294 297 } 295 298 296 299 /* join the session */ 297 300 ret = join_session_keyring(name); 301 + error_name: 298 302 kfree(name); 299 - 300 303 error: 301 304 return ret; 302 305 } ··· 1258 1253 * Read or set the default keyring in which request_key() will cache keys and 1259 1254 * return the old setting. 1260 1255 * 1261 - * If a process keyring is specified then this will be created if it doesn't 1262 - * yet exist. The old setting will be returned if successful. 1256 + * If a thread or process keyring is specified then it will be created if it 1257 + * doesn't yet exist. The old setting will be returned if successful. 1263 1258 */ 1264 1259 long keyctl_set_reqkey_keyring(int reqkey_defl) 1265 1260 { ··· 1284 1279 1285 1280 case KEY_REQKEY_DEFL_PROCESS_KEYRING: 1286 1281 ret = install_process_keyring_to_cred(new); 1287 - if (ret < 0) { 1288 - if (ret != -EEXIST) 1289 - goto error; 1290 - ret = 0; 1291 - } 1282 + if (ret < 0) 1283 + goto error; 1292 1284 goto set; 1293 1285 1294 1286 case KEY_REQKEY_DEFL_DEFAULT:
+27 -17
security/keys/process_keys.c
··· 128 128 } 129 129 130 130 /* 131 - * Install a fresh thread keyring directly to new credentials. This keyring is 132 - * allowed to overrun the quota. 131 + * Install a thread keyring to the given credentials struct if it didn't have 132 + * one already. This is allowed to overrun the quota. 133 + * 134 + * Return: 0 if a thread keyring is now present; -errno on failure. 133 135 */ 134 136 int install_thread_keyring_to_cred(struct cred *new) 135 137 { 136 138 struct key *keyring; 139 + 140 + if (new->thread_keyring) 141 + return 0; 137 142 138 143 keyring = keyring_alloc("_tid", new->uid, new->gid, new, 139 144 KEY_POS_ALL | KEY_USR_VIEW, ··· 152 147 } 153 148 154 149 /* 155 - * Install a fresh thread keyring, discarding the old one. 150 + * Install a thread keyring to the current task if it didn't have one already. 151 + * 152 + * Return: 0 if a thread keyring is now present; -errno on failure. 156 153 */ 157 154 static int install_thread_keyring(void) 158 155 { ··· 164 157 new = prepare_creds(); 165 158 if (!new) 166 159 return -ENOMEM; 167 - 168 - BUG_ON(new->thread_keyring); 169 160 170 161 ret = install_thread_keyring_to_cred(new); 171 162 if (ret < 0) { ··· 175 170 } 176 171 177 172 /* 178 - * Install a process keyring directly to a credentials struct. 173 + * Install a process keyring to the given credentials struct if it didn't have 174 + * one already. This is allowed to overrun the quota. 179 175 * 180 - * Returns -EEXIST if there was already a process keyring, 0 if one installed, 181 - * and other value on any other error 176 + * Return: 0 if a process keyring is now present; -errno on failure. 
182 177 */ 183 178 int install_process_keyring_to_cred(struct cred *new) 184 179 { 185 180 struct key *keyring; 186 181 187 182 if (new->process_keyring) 188 - return -EEXIST; 183 + return 0; 189 184 190 185 keyring = keyring_alloc("_pid", new->uid, new->gid, new, 191 186 KEY_POS_ALL | KEY_USR_VIEW, ··· 199 194 } 200 195 201 196 /* 202 - * Make sure a process keyring is installed for the current process. The 203 - * existing process keyring is not replaced. 197 + * Install a process keyring to the current task if it didn't have one already. 204 198 * 205 - * Returns 0 if there is a process keyring by the end of this function, some 206 - * error otherwise. 199 + * Return: 0 if a process keyring is now present; -errno on failure. 207 200 */ 208 201 static int install_process_keyring(void) 209 202 { ··· 215 212 ret = install_process_keyring_to_cred(new); 216 213 if (ret < 0) { 217 214 abort_creds(new); 218 - return ret != -EEXIST ? ret : 0; 215 + return ret; 219 216 } 220 217 221 218 return commit_creds(new); 222 219 } 223 220 224 221 /* 225 - * Install a session keyring directly to a credentials struct. 222 + * Install the given keyring as the session keyring of the given credentials 223 + * struct, replacing the existing one if any. If the given keyring is NULL, 224 + * then install a new anonymous session keyring. 225 + * 226 + * Return: 0 on success; -errno on failure. 226 227 */ 227 228 int install_session_keyring_to_cred(struct cred *cred, struct key *keyring) 228 229 { ··· 261 254 } 262 255 263 256 /* 264 - * Install a session keyring, discarding the old one. If a keyring is not 265 - * supplied, an empty one is invented. 257 + * Install the given keyring as the session keyring of the current task, 258 + * replacing the existing one if any. If the given keyring is NULL, then 259 + * install a new anonymous session keyring. 260 + * 261 + * Return: 0 on success; -errno on failure. 266 262 */ 267 263 static int install_session_keyring(struct key *keyring) 268 264 {
+2 -2
tools/testing/selftests/bpf/test_maps.c
··· 282 282 { 283 283 unsigned int nr_cpus = bpf_num_possible_cpus(); 284 284 int key, next_key, fd, i; 285 - long values[nr_cpus]; 285 + long long values[nr_cpus]; 286 286 287 287 fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 288 288 sizeof(values[0]), 2, 0); ··· 340 340 * allocator more than anything else 341 341 */ 342 342 unsigned int nr_keys = 2000; 343 - long values[nr_cpus]; 343 + long long values[nr_cpus]; 344 344 int key, fd, i; 345 345 346 346 fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
+20 -2
tools/testing/selftests/net/psock_fanout.c
··· 75 75 { 76 76 int fd, val; 77 77 78 - fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP)); 78 + fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_IP)); 79 79 if (fd < 0) { 80 80 perror("socket packet"); 81 81 exit(1); ··· 93 93 94 94 pair_udp_setfilter(fd); 95 95 return fd; 96 + } 97 + 98 + static void sock_fanout_set_cbpf(int fd) 99 + { 100 + struct sock_filter bpf_filter[] = { 101 + BPF_STMT(BPF_LD+BPF_B+BPF_ABS, 80), /* ldb [80] */ 102 + BPF_STMT(BPF_RET+BPF_A, 0), /* ret A */ 103 + }; 104 + struct sock_fprog bpf_prog; 105 + 106 + bpf_prog.filter = bpf_filter; 107 + bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter); 108 + 109 + if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &bpf_prog, 110 + sizeof(bpf_prog))) { 111 + perror("fanout data cbpf"); 112 + exit(1); 113 + } 96 114 } 97 115 98 116 static void sock_fanout_set_ebpf(int fd) ··· 288 270 exit(1); 289 271 } 290 272 if (type == PACKET_FANOUT_CBPF) 291 - sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA); 273 + sock_fanout_set_cbpf(fds[0]); 292 274 else if (type == PACKET_FANOUT_EBPF) 293 275 sock_fanout_set_ebpf(fds[0]); 294 276
+3 -10
tools/testing/selftests/net/psock_lib.h
··· 38 38 # define __maybe_unused __attribute__ ((__unused__)) 39 39 #endif 40 40 41 - static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum) 41 + static __maybe_unused void pair_udp_setfilter(int fd) 42 42 { 43 43 /* the filter below checks for all of the following conditions that 44 44 * are based on the contents of create_payload() ··· 76 76 }; 77 77 struct sock_fprog bpf_prog; 78 78 79 - if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA) 80 - bpf_filter[5].code = 0x16; /* RET A */ 81 - 82 79 bpf_prog.filter = bpf_filter; 83 80 bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter); 84 - if (setsockopt(fd, lvl, optnum, &bpf_prog, 81 + 82 + if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog, 85 83 sizeof(bpf_prog))) { 86 84 perror("setsockopt SO_ATTACH_FILTER"); 87 85 exit(1); 88 86 } 89 - } 90 - 91 - static __maybe_unused void pair_udp_setfilter(int fd) 92 - { 93 - sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER); 94 87 } 95 88 96 89 static __maybe_unused void pair_udp_open(int fds[], uint16_t port)