Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge remote-tracking branch 'tip/perf/urgent' into perf/core

To pick up fixes.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

+812 -444
+1
Documentation/devicetree/bindings/net/dsa/b53.txt
···
       "brcm,bcm53128"
       "brcm,bcm5365"
       "brcm,bcm5395"
+      "brcm,bcm5389"
       "brcm,bcm5397"
       "brcm,bcm5398"
+1 -1
Documentation/i2c/busses/i2c-ocores
···
 
 Supported adapters:
   * OpenCores.org I2C controller by Richard Herveille (see datasheet link)
-    Datasheet: http://www.opencores.org/projects.cgi/web/i2c/overview
+    https://opencores.org/project/i2c/overview
 
 Author: Peter Korsgaard <jacmet@sunsite.dk>
 
+8
MAINTAINERS
···
 S:    Supported
 F:    drivers/char/xillybus/
 
+XLP9XX I2C DRIVER
+M:    George Cherian <george.cherian@cavium.com>
+M:    Jan Glauber <jglauber@cavium.com>
+L:    linux-i2c@vger.kernel.org
+W:    http://www.cavium.com
+S:    Supported
+F:    drivers/i2c/busses/i2c-xlp9xx.c
+
 XRA1403 GPIO EXPANDER
 M:    Nandor Han <nandor.han@ge.com>
 M:    Semi Malinen <semi.malinen@ge.com>
+4
arch/mips/kernel/process.c
···
     if (value & ~known_bits)
         return -EOPNOTSUPP;
 
+    /* Setting FRE without FR is not supported.  */
+    if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
+        return -EOPNOTSUPP;
+
     /* Avoid inadvertently triggering emulation */
     if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
         !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
+1 -1
arch/mips/kernel/ptrace.c
···
             break;
         }
 #endif
-        tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
+        tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
         break;
     case PC:
         tmp = regs->cp0_epc;
+1 -1
arch/mips/kernel/ptrace32.c
···
                      addr & 1);
             break;
         }
-        tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
+        tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
         break;
     case PC:
         tmp = regs->cp0_epc;
+1 -1
arch/s390/purgatory/Makefile
···
 KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
 KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
 KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
-KBUILD_CFLAGS += -c -MD -Os -m64
+KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float
 KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
 
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+2 -2
drivers/atm/zatm.c
···
 }
 
 
-static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd,
-                                   int offset, int swap)
+static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
+                         int swap)
 {
     unsigned char buf[ZEPROM_SIZE];
     struct zatm_dev *zatm_dev;
+2 -2
drivers/crypto/inside-secure/safexcel.c
···
           EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
     writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
 
-    memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
-           EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
+    memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
+              EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
 
     eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
                           EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+29 -15
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
···
     for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
         struct amdgpu_crtc *acrtc = NULL;
         struct amdgpu_dm_connector *aconnector = NULL;
-        struct drm_connector_state *new_con_state = NULL;
-        struct dm_connector_state *dm_conn_state = NULL;
+        struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
+        struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
         struct drm_plane_state *new_plane_state = NULL;
 
         new_stream = NULL;
···
         /* TODO This hack should go away */
         if (aconnector && enable) {
             // Make sure fake sink is created in plug-in scenario
-            new_con_state = drm_atomic_get_connector_state(state,
+            drm_new_conn_state = drm_atomic_get_new_connector_state(state,
                                                            &aconnector->base);
+            drm_old_conn_state = drm_atomic_get_old_connector_state(state,
+                                                           &aconnector->base);
 
-            if (IS_ERR(new_con_state)) {
-                ret = PTR_ERR_OR_ZERO(new_con_state);
+
+            if (IS_ERR(drm_new_conn_state)) {
+                ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
                 break;
             }
 
-            dm_conn_state = to_dm_connector_state(new_con_state);
+            dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
+            dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
 
             new_stream = create_stream_for_sink(aconnector,
                                                 &new_crtc_state->mode,
-                                                dm_conn_state);
+                                                dm_new_conn_state);
 
             /*
              * we can have no stream on ACTION_SET if a display
···
          * We want to do dc stream updates that do not require a
          * full modeset below.
          */
-        if (!enable || !aconnector || modereset_required(new_crtc_state))
+        if (!(enable && aconnector && new_crtc_state->enable &&
+              new_crtc_state->active))
             continue;
         /*
          * Given above conditions, the dc state cannot be NULL because:
-         * 1. We're attempting to enable a CRTC. Which has a...
-         * 2. Valid connector attached, and
-         * 3. User does not want to reset it (disable or mark inactive,
-         *    which can happen on a CRTC that's already disabled).
-         * => It currently exists.
+         * 1. We're in the process of enabling CRTCs (just been added
+         *    to the dc context, or already is on the context)
+         * 2. Has a valid connector attached, and
+         * 3. Is currently active and enabled.
+         * => The dc stream state currently exists.
          */
         BUG_ON(dm_new_crtc_state->stream == NULL);
 
-        /* Color managment settings */
-        if (dm_new_crtc_state->base.color_mgmt_changed) {
+        /* Scaling or underscan settings */
+        if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
+            update_stream_scaling_settings(
+                &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+
+        /*
+         * Color management settings. We also update color properties
+         * when a modeset is needed, to ensure it gets reprogrammed.
+         */
+        if (dm_new_crtc_state->base.color_mgmt_changed ||
+            drm_atomic_crtc_needs_modeset(new_crtc_state)) {
             ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
             if (ret)
                 goto fail;
+4 -11
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
···
     return ret;
 }
 
-void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
+void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
 {
     mutex_lock(&hdmi->mutex);
 
···
         dw_hdmi_update_phy_mask(hdmi);
     }
     mutex_unlock(&hdmi->mutex);
-}
-
-void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense)
-{
-    struct dw_hdmi *hdmi = dev_get_drvdata(dev);
-
-    __dw_hdmi_setup_rx_sense(hdmi, hpd, rx_sense);
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense);
 
···
      */
     if (intr_stat &
         (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) {
-        __dw_hdmi_setup_rx_sense(hdmi,
-                                 phy_stat & HDMI_PHY_HPD,
-                                 phy_stat & HDMI_PHY_RX_SENSE);
+        dw_hdmi_setup_rx_sense(hdmi,
+                               phy_stat & HDMI_PHY_HPD,
+                               phy_stat & HDMI_PHY_RX_SENSE);
 
         if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0)
             cec_notifier_set_phys_addr(hdmi->cec_notifier,
+1
drivers/gpu/drm/drm_dp_helper.c
···
 static const u16 psr_setup_time_us[] = {
     PSR_SETUP_TIME(330),
     PSR_SETUP_TIME(275),
+    PSR_SETUP_TIME(220),
     PSR_SETUP_TIME(165),
     PSR_SETUP_TIME(110),
     PSR_SETUP_TIME(55),
+11 -4
drivers/gpu/drm/i915/i915_query.c
···
  * Copyright © 2018 Intel Corporation
  */
 
+#include <linux/nospec.h>
+
 #include "i915_drv.h"
 #include "i915_query.h"
 #include <uapi/drm/i915_drm.h>
···
 
     for (i = 0; i < args->num_items; i++, user_item_ptr++) {
         struct drm_i915_query_item item;
-        u64 func_idx;
+        unsigned long func_idx;
         int ret;
 
         if (copy_from_user(&item, user_item_ptr, sizeof(item)))
···
         if (item.query_id == 0)
             return -EINVAL;
 
+        if (overflows_type(item.query_id - 1, unsigned long))
+            return -EINVAL;
+
         func_idx = item.query_id - 1;
 
-        if (func_idx < ARRAY_SIZE(i915_query_funcs))
+        ret = -EINVAL;
+        if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
+            func_idx = array_index_nospec(func_idx,
+                                          ARRAY_SIZE(i915_query_funcs));
             ret = i915_query_funcs[func_idx](dev_priv, &item);
-        else
-            ret = -EINVAL;
+        }
 
         /* Only write the length back to userspace if they differ. */
         if (ret != item.length && put_user(ret, &user_item_ptr->length))
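Note: this hunk is an instance of the standard Spectre-v1 hardening pattern. A bounds check alone does not stop a mispredicted branch from speculatively indexing past a table, so the user-supplied index is clamped with array_index_nospec() after the check. A minimal sketch, with a hypothetical handler table (handlers, dispatch are illustrative names, not from the patch):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/nospec.h>

typedef int (*query_func_t)(void);
static query_func_t handlers[4];	/* hypothetical dispatch table */

static int dispatch(unsigned long idx)
{
	if (idx >= ARRAY_SIZE(handlers))
		return -EINVAL;

	/* Clamp idx under speculation so the table load cannot be
	 * steered out of bounds even when the branch mispredicts. */
	idx = array_index_nospec(idx, ARRAY_SIZE(handlers));
	return handlers[idx]();
}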
+40 -11
drivers/gpu/drm/i915/intel_lvds.c
···
     return NOTIFY_OK;
 }
 
+static int
+intel_lvds_connector_register(struct drm_connector *connector)
+{
+    struct intel_lvds_connector *lvds = to_lvds_connector(connector);
+    int ret;
+
+    ret = intel_connector_register(connector);
+    if (ret)
+        return ret;
+
+    lvds->lid_notifier.notifier_call = intel_lid_notify;
+    if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
+        DRM_DEBUG_KMS("lid notifier registration failed\n");
+        lvds->lid_notifier.notifier_call = NULL;
+    }
+
+    return 0;
+}
+
+static void
+intel_lvds_connector_unregister(struct drm_connector *connector)
+{
+    struct intel_lvds_connector *lvds = to_lvds_connector(connector);
+
+    if (lvds->lid_notifier.notifier_call)
+        acpi_lid_notifier_unregister(&lvds->lid_notifier);
+
+    intel_connector_unregister(connector);
+}
+
 /**
  * intel_lvds_destroy - unregister and free LVDS structures
  * @connector: connector to free
···
 {
     struct intel_lvds_connector *lvds_connector =
         to_lvds_connector(connector);
-
-    if (lvds_connector->lid_notifier.notifier_call)
-        acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
 
     if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
         kfree(lvds_connector->base.edid);
···
     .fill_modes = drm_helper_probe_single_connector_modes,
     .atomic_get_property = intel_digital_connector_atomic_get_property,
     .atomic_set_property = intel_digital_connector_atomic_set_property,
-    .late_register = intel_connector_register,
-    .early_unregister = intel_connector_unregister,
+    .late_register = intel_lvds_connector_register,
+    .early_unregister = intel_lvds_connector_unregister,
     .destroy = intel_lvds_destroy,
     .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
     .atomic_duplicate_state = intel_digital_connector_duplicate_state,
···
         .matches = {
             DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
             DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
+        },
+    },
+    {
+        .callback = intel_no_lvds_dmi_callback,
+        .ident = "Radiant P845",
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
+            DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
         },
     },
 
···
               lvds_encoder->is_dual_link ? "dual" : "single");
 
     lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
-
-    lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
-    if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
-        DRM_DEBUG_KMS("lid notifier registration failed\n");
-        lvds_connector->lid_notifier.notifier_call = NULL;
-    }
 
     return;
 
+1 -1
drivers/gpu/drm/meson/meson_dw_hdmi.c
···
     if (stat & HDMITX_TOP_INTR_HPD_RISE)
         hpd_connected = true;
 
-    dw_hdmi_setup_rx_sense(dw_hdmi->dev, hpd_connected,
+    dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected,
                            hpd_connected);
 
     drm_helper_hpd_irq_event(dw_hdmi->encoder.dev);
+4 -1
drivers/gpu/drm/omapdrm/dss/sdi.c
···
                         struct dispc_clock_info *dispc_cinfo)
 {
     int i;
-    struct sdi_clk_calc_ctx ctx = { .sdi = sdi };
+    struct sdi_clk_calc_ctx ctx;
 
     /*
      * DSS fclk gives us very few possibilities, so finding a good pixel
···
         bool ok;
 
         memset(&ctx, 0, sizeof(ctx));
+
+        ctx.sdi = sdi;
+
         if (pclk > 1000 * i * i * i)
             ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu);
         else
+3 -3
drivers/hwtracing/intel_th/msu.c
···
         /* Reset the page to write-back before releasing */
         set_memory_wb((unsigned long)win->block[i].bdesc, 1);
 #endif
-        dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc,
-                          win->block[i].addr);
+        dma_free_coherent(msc_dev(msc)->parent->parent, size,
+                          win->block[i].bdesc, win->block[i].addr);
     }
     kfree(win);
 
···
         /* Reset the page to write-back before releasing */
         set_memory_wb((unsigned long)win->block[i].bdesc, 1);
 #endif
-        dma_free_coherent(msc_dev(win->msc), PAGE_SIZE,
+        dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
                           win->block[i].bdesc, win->block[i].addr);
     }
 
+4 -3
drivers/hwtracing/stm/core.c
···
 #include <linux/stm.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/vmalloc.h>
 #include "stm.h"
 
 #include <uapi/linux/stm.h>
···
 {
     struct stm_device *stm = to_stm_device(dev);
 
-    kfree(stm);
+    vfree(stm);
 }
 
 int stm_register_device(struct device *parent, struct stm_data *stm_data,
···
         return -EINVAL;
 
     nmasters = stm_data->sw_end - stm_data->sw_start + 1;
-    stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
+    stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
     if (!stm)
         return -ENOMEM;
 
···
     /* matches device_initialize() above */
     put_device(&stm->dev);
 err_free:
-    kfree(stm);
+    vfree(stm);
 
     return err;
 }
+1 -1
drivers/i2c/busses/i2c-ocores.c
···
 /*
  * i2c-ocores.c: I2C bus driver for OpenCores I2C controller
- * (http://www.opencores.org/projects.cgi/web/i2c/overview).
+ * (https://opencores.org/project/i2c/overview)
  *
  * Peter Korsgaard <jacmet@sunsite.dk>
  *
+1
drivers/iio/adc/Kconfig
···
     depends on ARCH_AT91 || COMPILE_TEST
     depends on HAS_IOMEM
     depends on HAS_DMA
+    select IIO_BUFFER
     select IIO_TRIGGERED_BUFFER
     help
       Say yes here to build support for Atmel SAMA5D2 ADC which is
+24 -51
drivers/iio/adc/ad7793.c
···
 static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0,
                     33, 0, 17, 16, 12, 10, 8, 6, 4};
 
-static ssize_t ad7793_read_frequency(struct device *dev,
-        struct device_attribute *attr,
-        char *buf)
-{
-    struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-    struct ad7793_state *st = iio_priv(indio_dev);
-
-    return sprintf(buf, "%d\n",
-           st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
-}
-
-static ssize_t ad7793_write_frequency(struct device *dev,
-        struct device_attribute *attr,
-        const char *buf,
-        size_t len)
-{
-    struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-    struct ad7793_state *st = iio_priv(indio_dev);
-    long lval;
-    int i, ret;
-
-    ret = kstrtol(buf, 10, &lval);
-    if (ret)
-        return ret;
-
-    if (lval == 0)
-        return -EINVAL;
-
-    for (i = 0; i < 16; i++)
-        if (lval == st->chip_info->sample_freq_avail[i])
-            break;
-    if (i == 16)
-        return -EINVAL;
-
-    ret = iio_device_claim_direct_mode(indio_dev);
-    if (ret)
-        return ret;
-    st->mode &= ~AD7793_MODE_RATE(-1);
-    st->mode |= AD7793_MODE_RATE(i);
-    ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode);
-    iio_device_release_direct_mode(indio_dev);
-
-    return len;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
-        ad7793_read_frequency,
-        ad7793_write_frequency);
-
 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
     "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");
···
         ad7793_show_scale_available, NULL, 0);
 
 static struct attribute *ad7793_attributes[] = {
-    &iio_dev_attr_sampling_frequency.dev_attr.attr,
     &iio_const_attr_sampling_frequency_available.dev_attr.attr,
     &iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
     NULL
···
 };
 
 static struct attribute *ad7797_attributes[] = {
-    &iio_dev_attr_sampling_frequency.dev_attr.attr,
     &iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr,
     NULL
 };
···
             *val -= offset;
         }
         return IIO_VAL_INT;
+    case IIO_CHAN_INFO_SAMP_FREQ:
+        *val = st->chip_info
+                   ->sample_freq_avail[AD7793_MODE_RATE(st->mode)];
+        return IIO_VAL_INT;
     }
     return -EINVAL;
 }
···
             ad7793_calibrate_all(st);
             break;
         }
+        break;
+    case IIO_CHAN_INFO_SAMP_FREQ:
+        if (!val) {
+            ret = -EINVAL;
+            break;
+        }
+
+        for (i = 0; i < 16; i++)
+            if (val == st->chip_info->sample_freq_avail[i])
+                break;
+
+        if (i == 16) {
+            ret = -EINVAL;
+            break;
+        }
+
+        st->mode &= ~AD7793_MODE_RATE(-1);
+        st->mode |= AD7793_MODE_RATE(i);
+        ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode),
+                        st->mode);
         break;
     default:
         ret = -EINVAL;
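Note: the hunk above moves sampling-frequency handling from driver-private sysfs attributes into the IIO core's channel interface. Once a channel advertises IIO_CHAN_INFO_SAMP_FREQ, the core creates the sampling_frequency attribute and routes it through read_raw()/write_raw() with proper locking. A hypothetical channel spec showing the flag (names are illustrative, not from the patch):

#include <linux/bitops.h>
#include <linux/iio/iio.h>

/* sampling_frequency is advertised via the info mask instead of a
 * hand-rolled IIO_DEV_ATTR_SAMP_FREQ attribute. */
static const struct iio_chan_spec my_chan = {
	.type = IIO_VOLTAGE,
	.indexed = 1,
	.channel = 0,
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
};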
+37 -4
drivers/iio/adc/at91-sama5d2_adc.c
···
                + AT91_SAMA5D2_DIFF_CHAN_CNT + 1),
 };
 
+static int at91_adc_chan_xlate(struct iio_dev *indio_dev, int chan)
+{
+    int i;
+
+    for (i = 0; i < indio_dev->num_channels; i++) {
+        if (indio_dev->channels[i].scan_index == chan)
+            return i;
+    }
+    return -EINVAL;
+}
+
+static inline struct iio_chan_spec const *
+at91_adc_chan_get(struct iio_dev *indio_dev, int chan)
+{
+    int index = at91_adc_chan_xlate(indio_dev, chan);
+
+    if (index < 0)
+        return NULL;
+    return indio_dev->channels + index;
+}
+
 static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
 {
     struct iio_dev *indio = iio_trigger_get_drvdata(trig);
···
     at91_adc_writel(st, AT91_SAMA5D2_TRGR, status);
 
     for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
-        struct iio_chan_spec const *chan = indio->channels + bit;
+        struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
 
+        if (!chan)
+            continue;
         if (state) {
             at91_adc_writel(st, AT91_SAMA5D2_CHER,
                             BIT(chan->channel));
···
 
     for_each_set_bit(bit, indio_dev->active_scan_mask,
                      indio_dev->num_channels) {
-        struct iio_chan_spec const *chan = indio_dev->channels + bit;
+        struct iio_chan_spec const *chan =
+                    at91_adc_chan_get(indio_dev, bit);
+
+        if (!chan)
+            continue;
 
         st->dma_st.rx_buf_sz += chan->scan_type.storagebits / 8;
     }
···
      */
     for_each_set_bit(bit, indio_dev->active_scan_mask,
                      indio_dev->num_channels) {
-        struct iio_chan_spec const *chan = indio_dev->channels + bit;
+        struct iio_chan_spec const *chan =
+                    at91_adc_chan_get(indio_dev, bit);
 
+        if (!chan)
+            continue;
         if (st->dma_st.dma_chan)
             at91_adc_readl(st, chan->address);
     }
···
 
     for_each_set_bit(bit, indio_dev->active_scan_mask,
                      indio_dev->num_channels) {
-        struct iio_chan_spec const *chan = indio_dev->channels + bit;
+        struct iio_chan_spec const *chan =
+                    at91_adc_chan_get(indio_dev, bit);
 
+        if (!chan)
+            continue;
         st->buffer[i] = at91_adc_readl(st, chan->address);
         i++;
     }
+14 -3
drivers/iio/adc/stm32-dfsdm-adc.c
···
      * Leave as soon as if exact resolution if reached.
      * Otherwise the higher resolution below 32 bits is kept.
      */
+    fl->res = 0;
     for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
         for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
             if (fast)
···
         }
     }
 
-    if (!fl->fosr)
+    if (!fl->res)
         return -EINVAL;
 
     return 0;
···
     struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
     struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
     struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
-    unsigned int spi_freq = adc->spi_freq;
+    unsigned int spi_freq;
     int ret = -EINVAL;
 
     switch (mask) {
···
     case IIO_CHAN_INFO_SAMP_FREQ:
         if (!val)
             return -EINVAL;
-        if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+
+        switch (ch->src) {
+        case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL:
             spi_freq = adc->dfsdm->spi_master_freq;
+            break;
+        case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING:
+        case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING:
+            spi_freq = adc->dfsdm->spi_master_freq / 2;
+            break;
+        default:
+            spi_freq = adc->spi_freq;
+        }
 
         if (spi_freq % val)
             dev_warn(&indio_dev->dev,
+1 -1
drivers/iio/buffer/industrialio-buffer-dma.c
···
  * Should be used as the set_length callback for iio_buffer_access_ops
  * struct for DMA buffers.
  */
-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length)
+int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
 {
     /* Avoid an invalid state */
     if (length < 2)
+9 -2
drivers/iio/buffer/kfifo_buf.c
···
 #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
 
 static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
-                int bytes_per_datum, int length)
+            size_t bytes_per_datum, unsigned int length)
 {
     if ((length == 0) || (bytes_per_datum == 0))
+        return -EINVAL;
+
+    /*
+     * Make sure we don't overflow an unsigned int after kfifo rounds up to
+     * the next power of 2.
+     */
+    if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
         return -EINVAL;
 
     return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
···
     return 0;
 }
 
-static int iio_set_length_kfifo(struct iio_buffer *r, int length)
+static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
 {
     /* Avoid an invalid state */
     if (length < 2)
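Note: the guard above exists because kfifo rounds the requested element count up to the next power of two before multiplying by the element size, so the overflow check must use the rounded value. A minimal sketch of the same validation, assuming illustrative names:

#include <linux/kernel.h>
#include <linux/log2.h>

/* Returns true when the (rounded-up) allocation still fits in an
 * unsigned int worth of bytes. */
static bool fifo_request_fits(size_t bytes_per_datum, unsigned int length)
{
	if (!bytes_per_datum || !length)
		return false;

	return roundup_pow_of_two(length) <= UINT_MAX / bytes_per_datum;
}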
+4 -4
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
···
 #ifdef CONFIG_PM
     int ret;
 
-    atomic_set(&st->user_requested_state, state);
-
     if (atomic_add_unless(&st->runtime_pm_enable, 1, 1))
         pm_runtime_enable(&st->pdev->dev);
 
-    if (state)
+    if (state) {
+        atomic_inc(&st->user_requested_state);
         ret = pm_runtime_get_sync(&st->pdev->dev);
-    else {
+    } else {
+        atomic_dec(&st->user_requested_state);
         pm_runtime_mark_last_busy(&st->pdev->dev);
         pm_runtime_use_autosuspend(&st->pdev->dev);
         ret = pm_runtime_put_autosuspend(&st->pdev->dev);
+1 -1
drivers/infiniband/core/cache.c
···
         return -EINVAL;
 
     if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
-        return -EAGAIN;
+        return -EINVAL;
 
     memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
     if (attr) {
+54 -1
drivers/infiniband/hw/bnxt_re/main.c
···
     bnxt_re_ib_unreg(rdev, false);
 }
 
+static void bnxt_re_stop_irq(void *handle)
+{
+    struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+    struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+    struct bnxt_qplib_nq *nq;
+    int indx;
+
+    for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
+        nq = &rdev->nq[indx - 1];
+        bnxt_qplib_nq_stop_irq(nq, false);
+    }
+
+    bnxt_qplib_rcfw_stop_irq(rcfw, false);
+}
+
+static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+{
+    struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+    struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
+    struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+    struct bnxt_qplib_nq *nq;
+    int indx, rc;
+
+    if (!ent) {
+        /* Not setting the f/w timeout bit in rcfw.
+         * During the driver unload the first command
+         * to f/w will timeout and that will set the
+         * timeout bit.
+         */
+        dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
+        return;
+    }
+
+    /* Vectors may change after restart, so update with new vectors
+     * in device sctructure.
+     */
+    for (indx = 0; indx < rdev->num_msix; indx++)
+        rdev->msix_entries[indx].vector = ent[indx].vector;
+
+    bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
+                              false);
+    for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
+        nq = &rdev->nq[indx - 1];
+        rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
+                                     msix_ent[indx].vector, false);
+        if (rc)
+            dev_warn(rdev_to_dev(rdev),
+                     "Failed to reinit NQ index %d\n", indx - 1);
+    }
+}
+
 static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
     .ulp_async_notifier = NULL,
     .ulp_stop = bnxt_re_stop,
     .ulp_start = bnxt_re_start,
     .ulp_sriov_config = bnxt_re_sriov_config,
-    .ulp_shutdown = bnxt_re_shutdown
+    .ulp_shutdown = bnxt_re_shutdown,
+    .ulp_irq_stop = bnxt_re_stop_irq,
+    .ulp_irq_restart = bnxt_re_start_irq
 };
 
 /* RoCE -> Net driver */
+61 -35
drivers/infiniband/hw/bnxt_re/qplib_fp.c
···
     return IRQ_HANDLED;
 }
 
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
+{
+    tasklet_disable(&nq->worker);
+    /* Mask h/w interrupt */
+    NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+    /* Sync with last running IRQ handler */
+    synchronize_irq(nq->vector);
+    if (kill)
+        tasklet_kill(&nq->worker);
+    if (nq->requested) {
+        irq_set_affinity_hint(nq->vector, NULL);
+        free_irq(nq->vector, nq);
+        nq->requested = false;
+    }
+}
+
 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
 {
     if (nq->cqn_wq) {
         destroy_workqueue(nq->cqn_wq);
         nq->cqn_wq = NULL;
     }
-    /* Make sure the HW is stopped! */
-    synchronize_irq(nq->vector);
-    tasklet_disable(&nq->worker);
-    tasklet_kill(&nq->worker);
 
-    if (nq->requested) {
-        irq_set_affinity_hint(nq->vector, NULL);
-        free_irq(nq->vector, nq);
-        nq->requested = false;
-    }
+    /* Make sure the HW is stopped! */
+    bnxt_qplib_nq_stop_irq(nq, true);
+
     if (nq->bar_reg_iomem)
         iounmap(nq->bar_reg_iomem);
     nq->bar_reg_iomem = NULL;
···
     nq->cqn_handler = NULL;
     nq->srqn_handler = NULL;
     nq->vector = 0;
+}
+
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+                            int msix_vector, bool need_init)
+{
+    int rc;
+
+    if (nq->requested)
+        return -EFAULT;
+
+    nq->vector = msix_vector;
+    if (need_init)
+        tasklet_init(&nq->worker, bnxt_qplib_service_nq,
+                     (unsigned long)nq);
+    else
+        tasklet_enable(&nq->worker);
+
+    snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
+    rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
+    if (rc)
+        return rc;
+
+    cpumask_clear(&nq->mask);
+    cpumask_set_cpu(nq_indx, &nq->mask);
+    rc = irq_set_affinity_hint(nq->vector, &nq->mask);
+    if (rc) {
+        dev_warn(&nq->pdev->dev,
+                 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
+                 nq->vector, nq_indx);
+    }
+    nq->requested = true;
+    NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+
+    return rc;
 }
 
 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
···
     resource_size_t nq_base;
     int rc = -1;
 
-    nq->pdev = pdev;
-    nq->vector = msix_vector;
     if (cqn_handler)
         nq->cqn_handler = cqn_handler;
 
     if (srqn_handler)
         nq->srqn_handler = srqn_handler;
 
-    tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
-
     /* Have a task to schedule CQ notifiers in post send case */
     nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
     if (!nq->cqn_wq)
-        goto fail;
+        return -ENOMEM;
 
-    nq->requested = false;
-    memset(nq->name, 0, 32);
-    sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
-    rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
-    if (rc) {
-        dev_err(&nq->pdev->dev,
-                "Failed to request IRQ for NQ: %#x", rc);
-        goto fail;
-    }
-
-    cpumask_clear(&nq->mask);
-    cpumask_set_cpu(nq_idx, &nq->mask);
-    rc = irq_set_affinity_hint(nq->vector, &nq->mask);
-    if (rc) {
-        dev_warn(&nq->pdev->dev,
-                 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
-                 nq->vector, nq_idx);
-    }
-
-    nq->requested = true;
     nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
     nq->bar_reg_off = bar_reg_offset;
     nq_base = pci_resource_start(pdev, nq->bar_reg);
···
         rc = -ENOMEM;
         goto fail;
     }
-    NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+
+    rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
+    if (rc) {
+        dev_err(&nq->pdev->dev,
+                "QPLIB: Failed to request irq for nq-idx %d", nq_idx);
+        goto fail;
+    }
 
     return 0;
 fail:
+3
drivers/infiniband/hw/bnxt_re/qplib_fp.h
···
     struct bnxt_qplib_cq *cq;
 };
 
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+                            int msix_vector, bool need_init);
 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
                          int nq_idx, int msix_vector, int bar_reg_offset,
                          int (*cqn_handler)(struct bnxt_qplib_nq *nq,
+43 -18
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
···
     return -ENOMEM;
 }
 
-void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
 {
-    unsigned long indx;
-
-    /* Make sure the HW channel is stopped! */
-    synchronize_irq(rcfw->vector);
     tasklet_disable(&rcfw->worker);
-    tasklet_kill(&rcfw->worker);
+    /* Mask h/w interrupts */
+    CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
+            rcfw->creq.max_elements);
+    /* Sync with last running IRQ-handler */
+    synchronize_irq(rcfw->vector);
+    if (kill)
+        tasklet_kill(&rcfw->worker);
 
     if (rcfw->requested) {
         free_irq(rcfw->vector, rcfw);
         rcfw->requested = false;
     }
+}
+
+void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+{
+    unsigned long indx;
+
+    bnxt_qplib_rcfw_stop_irq(rcfw, true);
+
     if (rcfw->cmdq_bar_reg_iomem)
         iounmap(rcfw->cmdq_bar_reg_iomem);
     rcfw->cmdq_bar_reg_iomem = NULL;
···
 
     rcfw->aeq_handler = NULL;
     rcfw->vector = 0;
+}
+
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+                              bool need_init)
+{
+    int rc;
+
+    if (rcfw->requested)
+        return -EFAULT;
+
+    rcfw->vector = msix_vector;
+    if (need_init)
+        tasklet_init(&rcfw->worker,
+                     bnxt_qplib_service_creq, (unsigned long)rcfw);
+    else
+        tasklet_enable(&rcfw->worker);
+    rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
+                     "bnxt_qplib_creq", rcfw);
+    if (rc)
+        return rc;
+    rcfw->requested = true;
+    CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
+                  rcfw->creq.max_elements);
+
+    return 0;
 }
 
 int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
···
     rcfw->creq_qp_event_processed = 0;
     rcfw->creq_func_event_processed = 0;
 
-    rcfw->vector = msix_vector;
     if (aeq_handler)
         rcfw->aeq_handler = aeq_handler;
+    init_waitqueue_head(&rcfw->waitq);
 
-    tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
-                 (unsigned long)rcfw);
-
-    rcfw->requested = false;
-    rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
-                     "bnxt_qplib_creq", rcfw);
+    rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
     if (rc) {
         dev_err(&rcfw->pdev->dev,
                 "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
         bnxt_qplib_disable_rcfw_channel(rcfw);
         return rc;
     }
-    rcfw->requested = true;
-
-    init_waitqueue_head(&rcfw->waitq);
-
-    CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
 
     init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
     init.cmdq_size_cmdq_lvl = cpu_to_le16(
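Note: the qplib_fp.c and qplib_rcfw.c hunks both split IRQ teardown/bring-up out of channel enable/disable so the ULP can quiesce and re-arm interrupts across a firmware reset without destroying the tasklet. A minimal sketch of that stop/start split, with a hypothetical context struct and handler names:

#include <linux/errno.h>
#include <linux/interrupt.h>

struct irq_ctx {
	struct tasklet_struct worker;
	int vector;
	bool requested;
};

static void ctx_stop_irq(struct irq_ctx *ctx, bool kill)
{
	tasklet_disable(&ctx->worker);	/* block further tasklet runs */
	synchronize_irq(ctx->vector);	/* wait out a running handler */
	if (kill)			/* only on final teardown */
		tasklet_kill(&ctx->worker);
	if (ctx->requested) {
		free_irq(ctx->vector, ctx);
		ctx->requested = false;
	}
}

static int ctx_start_irq(struct irq_ctx *ctx, int vector, bool need_init,
			 void (*fn)(unsigned long), irq_handler_t handler)
{
	int rc;

	if (ctx->requested)
		return -EFAULT;
	ctx->vector = vector;
	if (need_init)			/* first bring-up */
		tasklet_init(&ctx->worker, fn, (unsigned long)ctx);
	else				/* pairs with tasklet_disable() */
		tasklet_enable(&ctx->worker);
	rc = request_irq(ctx->vector, handler, 0, "irq_ctx", ctx);
	if (!rc)
		ctx->requested = true;
	return rc;
}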
+3
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
···
 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
 int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
                                   struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
 void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+                              bool need_init);
 int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
                                    struct bnxt_qplib_rcfw *rcfw,
                                    int msix_vector,
+1 -1
drivers/infiniband/ulp/srpt/Kconfig
···
 config INFINIBAND_SRPT
     tristate "InfiniBand SCSI RDMA Protocol target support"
-    depends on INFINIBAND_ADDR_TRANS && TARGET_CORE
+    depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
     ---help---
 
       Support for the SCSI RDMA Protocol (SRP) Target driver. The
+11 -11
drivers/input/mouse/elan_i2c_smbus.c
···
                                       bool max_baseline, u8 *value)
 {
     int error;
-    u8 val[3];
+    u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
     error = i2c_smbus_read_block_data(client,
                                       max_baseline ?
···
                                    bool iap, u8 *version)
 {
     int error;
-    u8 val[3];
+    u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
     error = i2c_smbus_read_block_data(client,
                                       iap ? ETP_SMBUS_IAP_VERSION_CMD :
···
                                       u8 *clickpad)
 {
     int error;
-    u8 val[3];
+    u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
     error = i2c_smbus_read_block_data(client,
                                       ETP_SMBUS_SM_VERSION_CMD, val);
···
 static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
 {
     int error;
-    u8 val[3];
+    u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
     error = i2c_smbus_read_block_data(client,
                                       ETP_SMBUS_UNIQUEID_CMD, val);
···
                                     bool iap, u16 *csum)
 {
     int error;
-    u8 val[3];
+    u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
     error = i2c_smbus_read_block_data(client,
                                       iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
···
 {
     int ret;
     int error;
-    u8 val[3];
+    u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
     ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
     if (ret != 3) {
···
 {
     int ret;
     int error;
-    u8 val[3];
+    u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
     ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val);
     if (ret != 3) {
···
 {
     int ret;
     int error;
-    u8 val[3];
+    u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
     ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val);
     if (ret != 3) {
···
 {
     int error;
     u16 constant;
-    u8 val[3];
+    u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
     error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
     if (error < 0) {
···
     int len;
     int error;
     enum tp_mode mode;
-    u8 val[3];
+    u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
     u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
     u16 password;
 
···
     struct device *dev = &client->dev;
     int error;
     u16 result;
-    u8 val[3];
+    u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
 
     /*
      * Due to the limitation of smbus protocol limiting
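Note: every hunk in this file is the same fix: i2c_smbus_read_block_data() copies up to I2C_SMBUS_BLOCK_MAX (32) bytes, with the length chosen by the device, so a 3-byte stack array can be overrun by a misbehaving or malicious peripheral. A hypothetical helper showing the safe sizing (names are illustrative):

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/string.h>

static int read_three_byte_reply(struct i2c_client *client, u8 cmd, u8 *out)
{
	u8 val[I2C_SMBUS_BLOCK_MAX] = { 0 };	/* worst-case sized */
	int len;

	len = i2c_smbus_read_block_data(client, cmd, val);
	if (len < 0)
		return len;
	if (len < 3)
		return -EIO;

	memcpy(out, val, 3);	/* only the expected bytes leave the helper */
	return 0;
}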
+6
drivers/input/mouse/synaptics.c
···
     "LEN0048", /* X1 Carbon 3 */
     "LEN0046", /* X250 */
     "LEN004a", /* W541 */
+    "LEN0071", /* T480 */
+    "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
+    "LEN0073", /* X1 Carbon G5 (Elantech) */
+    "LEN0092", /* X1 Carbon 6 */
+    "LEN0096", /* X280 */
+    "LEN0097", /* X280 -> ALPS trackpoint */
     "LEN200f", /* T450s */
     NULL
 };
+13
drivers/net/dsa/b53/b53_common.c
···
         .duplex_reg = B53_DUPLEX_STAT_FE,
     },
     {
+        .chip_id = BCM5389_DEVICE_ID,
+        .dev_name = "BCM5389",
+        .vlans = 4096,
+        .enabled_ports = 0x1f,
+        .arl_entries = 4,
+        .cpu_port = B53_CPU_PORT,
+        .vta_regs = B53_VTA_REGS,
+        .duplex_reg = B53_DUPLEX_STAT_GE,
+        .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+        .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+    },
+    {
         .chip_id = BCM5395_DEVICE_ID,
         .dev_name = "BCM5395",
         .vlans = 4096,
···
         else
             dev->chip_id = BCM5365_DEVICE_ID;
         break;
+    case BCM5389_DEVICE_ID:
     case BCM5395_DEVICE_ID:
     case BCM5397_DEVICE_ID:
     case BCM5398_DEVICE_ID:
+4 -1
drivers/net/dsa/b53/b53_mdio.c
···
 #define B53_BRCM_OUI_1    0x0143bc00
 #define B53_BRCM_OUI_2    0x03625c00
 #define B53_BRCM_OUI_3    0x00406000
+#define B53_BRCM_OUI_4    0x01410c00
 
 static int b53_mdio_probe(struct mdio_device *mdiodev)
 {
···
      */
     if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
         (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
-        (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) {
+        (phy_id & 0xfffffc00) != B53_BRCM_OUI_3 &&
+        (phy_id & 0xfffffc00) != B53_BRCM_OUI_4) {
         dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
         return -ENODEV;
     }
···
     { .compatible = "brcm,bcm53125" },
     { .compatible = "brcm,bcm53128" },
     { .compatible = "brcm,bcm5365" },
+    { .compatible = "brcm,bcm5389" },
     { .compatible = "brcm,bcm5395" },
     { .compatible = "brcm,bcm5397" },
     { .compatible = "brcm,bcm5398" },
+1
drivers/net/dsa/b53/b53_priv.h
···
 enum {
     BCM5325_DEVICE_ID = 0x25,
     BCM5365_DEVICE_ID = 0x65,
+    BCM5389_DEVICE_ID = 0x89,
     BCM5395_DEVICE_ID = 0x95,
     BCM5397_DEVICE_ID = 0x97,
     BCM5398_DEVICE_ID = 0x98,
+3 -1
drivers/net/ethernet/emulex/benet/be_main.c
···
         if ((val & POST_STAGE_FAT_LOG_START)
             != POST_STAGE_FAT_LOG_START &&
             (val & POST_STAGE_ARMFW_UE)
-            != POST_STAGE_ARMFW_UE)
+            != POST_STAGE_ARMFW_UE &&
+            (val & POST_STAGE_RECOVERABLE_ERR)
+            != POST_STAGE_RECOVERABLE_ERR)
             return;
     }
 
+4 -5
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
···
 {
     const struct tc_action *a;
     LIST_HEAD(actions);
-    int err;
 
     if (!tcf_exts_has_actions(exts))
         return -EINVAL;
···
 
             if (!dev)
                 return -EINVAL;
-            err = handle_redirect_action(adapter, dev->ifindex, queue,
-                                         action);
-            if (err == 0)
-                return err;
+            return handle_redirect_action(adapter, dev->ifindex,
+                                          queue, action);
         }
+
+        return -EINVAL;
     }
 
     return -EINVAL;
+5
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
···
             NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
             return -EINVAL;
         }
+        if (is_vlan_dev(upper_dev) &&
+            vlan_dev_vlan_id(upper_dev) == 1) {
+            NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
+            return -EINVAL;
+        }
         break;
     case NETDEV_CHANGEUPPER:
         upper_dev = info->upper_dev;
+1 -1
drivers/net/ethernet/natsemi/sonic.c
···
     for (i = 0; i < SONIC_NUM_RRS; i++) {
         dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
                                           SONIC_RBSIZE, DMA_FROM_DEVICE);
-        if (!laddr) {
+        if (dma_mapping_error(lp->device, laddr)) {
             while(i > 0) { /* free any that were mapped successfully */
                 i--;
                 dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
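Note: a DMA address of 0 can be a perfectly valid bus address on some platforms, so mapping failures must be detected with dma_mapping_error(), never by comparing the handle against 0. A minimal sketch of the corrected pattern, with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_rx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *laddr)
{
	*laddr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *laddr))	/* not: if (!*laddr) */
		return -ENOMEM;
	return 0;
}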
+2 -2
drivers/net/ethernet/socionext/netsec.c
···
     if (ret)
         goto unreg_napi;
 
-    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
-        dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
+    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
+        dev_warn(&pdev->dev, "Failed to set DMA mask\n");
 
     ret = register_netdev(ndev);
     if (ret) {
+12 -10
drivers/net/ethernet/ti/davinci_emac.c
···
     if (IS_ERR(priv->txchan)) {
         dev_err(&pdev->dev, "error initializing tx dma channel\n");
         rc = PTR_ERR(priv->txchan);
-        goto no_cpdma_chan;
+        goto err_free_dma;
     }
 
     priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
···
     if (IS_ERR(priv->rxchan)) {
         dev_err(&pdev->dev, "error initializing rx dma channel\n");
         rc = PTR_ERR(priv->rxchan);
-        goto no_cpdma_chan;
+        goto err_free_txchan;
     }
 
     res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
     if (!res) {
         dev_err(&pdev->dev, "error getting irq res\n");
         rc = -ENOENT;
-        goto no_cpdma_chan;
+        goto err_free_rxchan;
     }
     ndev->irq = res->start;
 
···
         pm_runtime_put_noidle(&pdev->dev);
         dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
                 __func__, rc);
-        goto no_cpdma_chan;
+        goto err_napi_del;
     }
 
     /* register the network device */
···
         dev_err(&pdev->dev, "error in register_netdev\n");
         rc = -ENODEV;
         pm_runtime_put(&pdev->dev);
-        goto no_cpdma_chan;
+        goto err_napi_del;
     }
 
···
 
     return 0;
 
-no_cpdma_chan:
-    if (priv->txchan)
-        cpdma_chan_destroy(priv->txchan);
-    if (priv->rxchan)
-        cpdma_chan_destroy(priv->rxchan);
+err_napi_del:
+    netif_napi_del(&priv->napi);
+err_free_rxchan:
+    cpdma_chan_destroy(priv->rxchan);
+err_free_txchan:
+    cpdma_chan_destroy(priv->txchan);
+err_free_dma:
     cpdma_ctlr_destroy(priv->dma);
 no_pdata:
     if (of_phy_is_fixed_link(np))
+9 -6
drivers/net/tun.c
···
     else
         *skb_xdp = 0;
 
-    preempt_disable();
+    local_bh_disable();
     rcu_read_lock();
     xdp_prog = rcu_dereference(tun->xdp_prog);
     if (xdp_prog && !*skb_xdp) {
···
             if (err)
                 goto err_redirect;
             rcu_read_unlock();
-            preempt_enable();
+            local_bh_enable();
             return NULL;
         case XDP_TX:
             get_page(alloc_frag->page);
···
                 goto err_redirect;
             tun_xdp_flush(tun->dev);
             rcu_read_unlock();
-            preempt_enable();
+            local_bh_enable();
             return NULL;
         case XDP_PASS:
             delta = orig_data - xdp.data;
···
     skb = build_skb(buf, buflen);
     if (!skb) {
         rcu_read_unlock();
-        preempt_enable();
+        local_bh_enable();
         return ERR_PTR(-ENOMEM);
     }
 
···
     alloc_frag->offset += buflen;
 
     rcu_read_unlock();
-    preempt_enable();
+    local_bh_enable();
 
     return skb;
 
···
     put_page(alloc_frag->page);
 err_xdp:
     rcu_read_unlock();
-    preempt_enable();
+    local_bh_enable();
     this_cpu_inc(tun->pcpu_stats->rx_dropped);
     return NULL;
 }
···
         struct bpf_prog *xdp_prog;
         int ret;
 
+        local_bh_disable();
         rcu_read_lock();
         xdp_prog = rcu_dereference(tun->xdp_prog);
         if (xdp_prog) {
             ret = do_xdp_generic(xdp_prog, skb);
             if (ret != XDP_PASS) {
                 rcu_read_unlock();
+                local_bh_enable();
                 return total_len;
             }
         }
         rcu_read_unlock();
+        local_bh_enable();
     }
 
     rcu_read_lock();
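Note: the tun hunks swap preempt_disable() for local_bh_disable() because the generic XDP and redirect paths touch per-CPU state that the NAPI softirq also uses; disabling preemption alone still lets a softirq interrupt the section on the same CPU. Shape of the fixed critical section, assuming xdp_prog and skb are already set up:

#include <linux/bottom_half.h>
#include <linux/netdevice.h>

static int run_generic_xdp(struct bpf_prog *xdp_prog, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();	/* keep the NAPI softirq off this CPU */
	rcu_read_lock();
	ret = do_xdp_generic(xdp_prog, skb);
	rcu_read_unlock();
	local_bh_enable();

	return ret;
}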
+1 -1
drivers/net/usb/cdc_mbim.c
···
  */
 static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = {
     .description = "CDC MBIM",
-    .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
+    .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
     .bind = cdc_mbim_bind,
     .unbind = cdc_mbim_unbind,
     .manage_power = cdc_mbim_manage_power,
+1
drivers/net/usb/qmi_wwan.c
···
     {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
     {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)},    /* YUGA CLM920-NC5 */
     {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
+    {QMI_FIXED_INTF(0x0846, 0x68d3, 8)},    /* Netgear Aircard 779S */
     {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
     {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
     {QMI_FIXED_INTF(0x1435, 0xd181, 3)},    /* Wistron NeWeb D18Q1 */
+5 -5
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
···
                                      struct iwl_trans *trans)
 {
     struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    int max_irqs, num_irqs, i, ret, nr_online_cpus;
+    int max_irqs, num_irqs, i, ret;
     u16 pci_cmd;
 
     if (!trans->cfg->mq_rx_supported)
         goto enable_msi;
 
-    nr_online_cpus = num_online_cpus();
-    max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
+    max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
     for (i = 0; i < max_irqs; i++)
         trans_pcie->msix_entries[i].entry = i;
 
···
      * Two interrupts less: non rx causes shared with FBQ and RSS.
      * More than two interrupts: we will use fewer RSS queues.
      */
-    if (num_irqs <= nr_online_cpus) {
+    if (num_irqs <= max_irqs - 2) {
         trans_pcie->trans->num_rx_queues = num_irqs + 1;
         trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
             IWL_SHARED_IRQ_FIRST_RSS;
-    } else if (num_irqs == nr_online_cpus + 1) {
+    } else if (num_irqs == max_irqs - 1) {
         trans_pcie->trans->num_rx_queues = num_irqs;
         trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
     } else {
         trans_pcie->trans->num_rx_queues = num_irqs - 1;
     }
+    WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
 
     trans_pcie->alloc_vecs = num_irqs;
     trans_pcie->msix_enabled = true;
+3 -4
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
···
 
     /*
      * Determine IFS values
-     * - Use TXOP_BACKOFF for probe and management frames except beacons
+     * - Use TXOP_BACKOFF for management frames except beacons
      * - Use TXOP_SIFS for fragment bursts
      * - Use TXOP_HTTXOP for everything else
      *
      * Note: rt2800 devices won't use CTS protection (if used)
      * for frames not transmitted with TXOP_HTTXOP
      */
-    if ((ieee80211_is_mgmt(hdr->frame_control) &&
-         !ieee80211_is_beacon(hdr->frame_control)) ||
-        (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
+    if (ieee80211_is_mgmt(hdr->frame_control) &&
+        !ieee80211_is_beacon(hdr->frame_control))
         txdesc->u.ht.txop = TXOP_BACKOFF;
     else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
         txdesc->u.ht.txop = TXOP_SIFS;
+1 -1
drivers/nvme/host/Kconfig
···
 
 config NVME_RDMA
     tristate "NVM Express over Fabrics RDMA host driver"
-    depends on INFINIBAND_ADDR_TRANS && BLOCK
+    depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
     select NVME_CORE
     select NVME_FABRICS
     select SG_POOL
+1 -1
drivers/nvme/host/core.c
···
     if (ns->lba_shift == 0)
         ns->lba_shift = 9;
     ns->noiob = le16_to_cpu(id->noiob);
-    ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
     ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
+    ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
     /* the PI implementation requires metadata equal t10 pi tuple size */
     if (ns->ms == sizeof(struct t10_pi_tuple))
         ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+1 -1
drivers/nvme/target/Kconfig
···
 
 config NVME_TARGET_RDMA
     tristate "NVMe over Fabrics RDMA target support"
-    depends on INFINIBAND_ADDR_TRANS
+    depends on INFINIBAND && INFINIBAND_ADDR_TRANS
     depends on NVME_TARGET
     select SGL_ALLOC
     help
+13 -10
drivers/platform/x86/asus-wmi.c
···
 
 static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
 
+static bool ashs_present(void)
+{
+    int i = 0;
+    while (ashs_ids[i]) {
+        if (acpi_dev_found(ashs_ids[i++]))
+            return true;
+    }
+    return false;
+}
+
 struct bios_args {
     u32 arg0;
     u32 arg1;
···
 
 static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
 {
+    if (asus->driver->wlan_ctrl_by_user && ashs_present())
+        return;
+
     asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
     asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
     asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
···
 
     pr_info("Number of fans: %d\n", asus->asus_hwmon_num_fans);
     return 0;
-}
-
-static bool ashs_present(void)
-{
-    int i = 0;
-    while (ashs_ids[i]) {
-        if (acpi_dev_found(ashs_ids[i++]))
-            return true;
-    }
-    return false;
 }
 
 /*
+5 -2
drivers/s390/block/dasd.c
···
     cqr->callback_data = req;
     cqr->status = DASD_CQR_FILLED;
     cqr->dq = dq;
-    req->completion_data = cqr;
+    *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
+
     blk_mq_start_request(req);
     spin_lock(&block->queue_lock);
     list_add_tail(&cqr->blocklist, &block->ccw_queue);
···
  */
 enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
 {
-    struct dasd_ccw_req *cqr = req->completion_data;
     struct dasd_block *block = req->q->queuedata;
     struct dasd_device *device;
+    struct dasd_ccw_req *cqr;
     unsigned long flags;
     int rc = 0;
 
+    cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
     if (!cqr)
         return BLK_EH_NOT_HANDLED;
 
···
     int rc;
 
     block->tag_set.ops = &dasd_mq_ops;
+    block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
     block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
     block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
     block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+20 -2
drivers/scsi/scsi_transport_srp.c
···
     struct transport_container rport_attr_cont;
 };
 
+static int scsi_is_srp_rport(const struct device *dev);
+
 #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
 
 #define dev_to_rport(d)    container_of(d, struct srp_rport, dev)
···
     return dev_to_shost(r->dev.parent);
 }
 
+static int find_child_rport(struct device *dev, void *data)
+{
+    struct device **child = data;
+
+    if (scsi_is_srp_rport(dev)) {
+        WARN_ON_ONCE(*child);
+        *child = dev;
+    }
+    return 0;
+}
+
 static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
 {
-    return transport_class_to_srp_rport(&shost->shost_gendev);
+    struct device *child = NULL;
+
+    WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
+                                       find_child_rport) < 0);
+    return child ? dev_to_rport(child) : NULL;
 }
 
 /**
···
     struct srp_rport *rport = shost_to_rport(shost);
 
     pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
-    return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
+    return rport && rport->fast_io_fail_tmo < 0 &&
+        rport->dev_loss_tmo < 0 &&
         i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
         BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
 }
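Note: the hunk above replaces a transport-class lookup with a direct walk of the host's child devices, which also makes a NULL result (no rport yet) representable. A minimal sketch of that child-walk pattern, assuming is_wanted_dev() is a predicate like scsi_is_srp_rport():

#include <linux/device.h>

static bool is_wanted_dev(struct device *dev);	/* assumed predicate */

static int match_wanted(struct device *dev, void *data)
{
	struct device **child = data;

	if (is_wanted_dev(dev)) {
		WARN_ON_ONCE(*child);	/* expect at most one match */
		*child = dev;
	}
	return 0;	/* returning 0 keeps the iteration going */
}

static struct device *find_wanted_child(struct device *parent)
{
	struct device *child = NULL;

	device_for_each_child(parent, &child, match_wanted);
	return child;	/* may be NULL; callers must handle that */
}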
-36
drivers/soc/lantiq/gphy.c
···
     struct clk *gphy_clk_gate;
     struct reset_control *gphy_reset;
     struct reset_control *gphy_reset2;
-    struct notifier_block gphy_reboot_nb;
     void __iomem *membase;
     char *fw_name;
 };
···
     {},
 };
 MODULE_DEVICE_TABLE(of, xway_gphy_match);
-
-static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb)
-{
-    return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb);
-}
-
-static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb,
-                                   unsigned long code, void *unused)
-{
-    struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb);
-
-    if (priv) {
-        reset_control_assert(priv->gphy_reset);
-        reset_control_assert(priv->gphy_reset2);
-    }
-
-    return NOTIFY_DONE;
-}
 
 static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv,
                           dma_addr_t *dev_addr)
···
     reset_control_deassert(priv->gphy_reset);
     reset_control_deassert(priv->gphy_reset2);
 
-    /* assert the gphy reset because it can hang after a reboot: */
-    priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify;
-    priv->gphy_reboot_nb.priority = -1;
-
-    ret = register_reboot_notifier(&priv->gphy_reboot_nb);
-    if (ret)
-        dev_warn(dev, "Failed to register reboot notifier\n");
-
     platform_set_drvdata(pdev, priv);
 
     return ret;
···
 
 static int xway_gphy_remove(struct platform_device *pdev)
 {
-    struct device *dev = &pdev->dev;
     struct xway_gphy_priv *priv = platform_get_drvdata(pdev);
-    int ret;
-
-    reset_control_assert(priv->gphy_reset);
-    reset_control_assert(priv->gphy_reset2);
 
     iowrite32be(0, priv->membase);
 
     clk_disable_unprepare(priv->gphy_clk_gate);
-
-    ret = unregister_reboot_notifier(&priv->gphy_reboot_nb);
-    if (ret)
-        dev_warn(dev, "Failed to unregister reboot notifier\n");
 
     return 0;
 }
+1 -1
drivers/staging/lustre/lnet/Kconfig
···
 
 config LNET_XPRT_IB
     tristate "LNET infiniband support"
-    depends on LNET && PCI && INFINIBAND_ADDR_TRANS
+    depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
     default LNET && INFINIBAND
     help
       This option allows the LNET users to use infiniband as an
+1 -1
drivers/thunderbolt/icm.c
···
         /* Map empty entries to null UUID */
         uuid[0] = 0;
         uuid[1] = 0;
-    } else {
+    } else if (uuid[0] != 0 || uuid[1] != 0) {
         /* Upper two DWs are always one's */
         uuid[2] = 0xffffffff;
         uuid[3] = 0xffffffff;
+10 -15
drivers/vfio/vfio_iommu_type1.c
··· 404 404 { 405 405 unsigned long pfn = 0; 406 406 long ret, pinned = 0, lock_acct = 0; 407 + bool rsvd; 407 408 dma_addr_t iova = vaddr - dma->vaddr + dma->iova; 408 409 409 410 /* This code path is only user initiated */ ··· 415 414 if (ret) 416 415 return ret; 417 416 418 - if (is_invalid_reserved_pfn(*pfn_base)) { 419 - struct vm_area_struct *vma; 420 - 421 - down_read(&current->mm->mmap_sem); 422 - vma = find_vma_intersection(current->mm, vaddr, vaddr + 1); 423 - pinned = min_t(long, npage, vma_pages(vma)); 424 - up_read(&current->mm->mmap_sem); 425 - return pinned; 426 - } 427 - 428 417 pinned++; 418 + rsvd = is_invalid_reserved_pfn(*pfn_base); 429 419 430 420 /* 431 421 * Reserved pages aren't counted against the user, externally pinned 432 422 * pages are already counted against the user. 433 423 */ 434 - if (!vfio_find_vpfn(dma, iova)) { 424 + if (!rsvd && !vfio_find_vpfn(dma, iova)) { 435 425 if (!lock_cap && current->mm->locked_vm + 1 > limit) { 436 426 put_pfn(*pfn_base, dma->prot); 437 427 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, ··· 442 450 if (ret) 443 451 break; 444 452 445 - if (pfn != *pfn_base + pinned) { 453 + if (pfn != *pfn_base + pinned || 454 + rsvd != is_invalid_reserved_pfn(pfn)) { 446 455 put_pfn(pfn, dma->prot); 447 456 break; 448 457 } 449 458 450 - if (!vfio_find_vpfn(dma, iova)) { 459 + if (!rsvd && !vfio_find_vpfn(dma, iova)) { 451 460 if (!lock_cap && 452 461 current->mm->locked_vm + lock_acct + 1 > limit) { 453 462 put_pfn(pfn, dma->prot); ··· 466 473 467 474 unpin_out: 468 475 if (ret) { 469 - for (pfn = *pfn_base ; pinned ; pfn++, pinned--) 470 - put_pfn(pfn, dma->prot); 476 + if (!rsvd) { 477 + for (pfn = *pfn_base ; pinned ; pfn++, pinned--) 478 + put_pfn(pfn, dma->prot); 479 + } 471 480 472 481 return ret; 473 482 }
+24 -13
drivers/vhost/net.c
··· 105 105 /* vhost zerocopy support fields below: */ 106 106 /* last used idx for outstanding DMA zerocopy buffers */ 107 107 int upend_idx; 108 - /* first used idx for DMA done zerocopy buffers */ 108 + /* For TX, first used idx for DMA done zerocopy buffers 109 + * For RX, number of batched heads 110 + */ 109 111 int done_idx; 110 112 /* an array of userspace buffers info */ 111 113 struct ubuf_info *ubuf_info; ··· 628 626 return skb_queue_empty(&sk->sk_receive_queue); 629 627 } 630 628 629 + static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq) 630 + { 631 + struct vhost_virtqueue *vq = &nvq->vq; 632 + struct vhost_dev *dev = vq->dev; 633 + 634 + if (!nvq->done_idx) 635 + return; 636 + 637 + vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx); 638 + nvq->done_idx = 0; 639 + } 640 + 631 641 static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) 632 642 { 633 643 struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX]; ··· 649 635 int len = peek_head_len(rvq, sk); 650 636 651 637 if (!len && vq->busyloop_timeout) { 638 + /* Flush batched heads first */ 639 + vhost_rx_signal_used(rvq); 652 640 /* Both tx vq and rx socket were polled here */ 653 641 mutex_lock_nested(&vq->mutex, 1); 654 642 vhost_disable_notify(&net->dev, vq); ··· 778 762 }; 779 763 size_t total_len = 0; 780 764 int err, mergeable; 781 - s16 headcount, nheads = 0; 765 + s16 headcount; 782 766 size_t vhost_hlen, sock_hlen; 783 767 size_t vhost_len, sock_len; 784 768 struct socket *sock; ··· 806 790 while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) { 807 791 sock_len += sock_hlen; 808 792 vhost_len = sock_len + vhost_hlen; 809 - headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len, 810 - &in, vq_log, &log, 793 + headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, 794 + vhost_len, &in, vq_log, &log, 811 795 likely(mergeable) ? UIO_MAXIOV : 1); 812 796 /* On error, stop handling until the next kick. */ 813 797 if (unlikely(headcount < 0)) ··· 878 862 vhost_discard_vq_desc(vq, headcount); 879 863 goto out; 880 864 } 881 - nheads += headcount; 882 - if (nheads > VHOST_RX_BATCH) { 883 - vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, 884 - nheads); 885 - nheads = 0; 886 - } 865 + nvq->done_idx += headcount; 866 + if (nvq->done_idx > VHOST_RX_BATCH) 867 + vhost_rx_signal_used(nvq); 887 868 if (unlikely(vq_log)) 888 869 vhost_log_write(vq, vq_log, log, vhost_len); 889 870 total_len += vhost_len; ··· 891 878 } 892 879 vhost_net_enable_vq(net, vq); 893 880 out: 894 - if (nheads) 895 - vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, 896 - nheads); 881 + vhost_rx_signal_used(nvq); 897 882 mutex_unlock(&vq->mutex); 898 883 } 899 884
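The vhost change above stops open-coding the used-ring flush and funnels it through vhost_rx_signal_used(), which drains nvq->done_idx with a single signal. A small stand-alone C sketch of the same batch-and-flush idea (BATCH, struct ring and signal_used are illustrative stand-ins, not vhost symbols):

/* Hedged sketch of the batch-and-flush pattern behind done_idx above. */
#include <stdio.h>

#define BATCH 64

struct ring { int done_idx; };

/* Flush any batched entries to the consumer and signal it once. */
static void signal_used(struct ring *r)
{
	if (!r->done_idx)
		return;
	printf("signalling %d used heads in one go\n", r->done_idx);
	r->done_idx = 0;
}

int main(void)
{
	struct ring r = { 0 };

	for (int i = 0; i < 200; i++) {
		r.done_idx++;			/* one head consumed */
		if (r.done_idx > BATCH)		/* amortize the signal cost */
			signal_used(&r);
	}
	signal_used(&r);	/* final flush, as on the out: path above */
	return 0;
}

The final flush after the loop mirrors the out: label in handle_rx(), which signals whatever remains batched before releasing the mutex.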
+3 -7
fs/afs/security.c
··· 372 372 mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file"); 373 373 374 374 if (S_ISDIR(inode->i_mode)) { 375 - if (mask & MAY_EXEC) { 375 + if (mask & (MAY_EXEC | MAY_READ | MAY_CHDIR)) { 376 376 if (!(access & AFS_ACE_LOOKUP)) 377 377 goto permission_denied; 378 - } else if (mask & MAY_READ) { 379 - if (!(access & AFS_ACE_LOOKUP)) 380 - goto permission_denied; 381 - } else if (mask & MAY_WRITE) { 378 + } 379 + if (mask & MAY_WRITE) { 382 380 if (!(access & (AFS_ACE_DELETE | /* rmdir, unlink, rename from */ 383 381 AFS_ACE_INSERT))) /* create, mkdir, symlink, rename to */ 384 382 goto permission_denied; 385 - } else { 386 - BUG(); 387 383 } 388 384 } else { 389 385 if (!(access & AFS_ACE_LOOKUP))
+10 -9
fs/afs/vlclient.c
··· 23 23 struct afs_uvldbentry__xdr *uvldb; 24 24 struct afs_vldb_entry *entry; 25 25 bool new_only = false; 26 - u32 tmp, nr_servers; 26 + u32 tmp, nr_servers, vlflags; 27 27 int i, ret; 28 28 29 29 _enter(""); ··· 55 55 new_only = true; 56 56 } 57 57 58 + vlflags = ntohl(uvldb->flags); 58 59 for (i = 0; i < nr_servers; i++) { 59 60 struct afs_uuid__xdr *xdr; 60 61 struct afs_uuid *uuid; ··· 65 64 if (tmp & AFS_VLSF_DONTUSE || 66 65 (new_only && !(tmp & AFS_VLSF_NEWREPSITE))) 67 66 continue; 68 - if (tmp & AFS_VLSF_RWVOL) 67 + if (tmp & AFS_VLSF_RWVOL) { 69 68 entry->fs_mask[i] |= AFS_VOL_VTM_RW; 69 + if (vlflags & AFS_VLF_BACKEXISTS) 70 + entry->fs_mask[i] |= AFS_VOL_VTM_BAK; 71 + } 70 72 if (tmp & AFS_VLSF_ROVOL) 71 73 entry->fs_mask[i] |= AFS_VOL_VTM_RO; 72 - if (tmp & AFS_VLSF_BACKVOL) 73 - entry->fs_mask[i] |= AFS_VOL_VTM_BAK; 74 74 if (!entry->fs_mask[i]) 75 75 continue; 76 76 ··· 91 89 for (i = 0; i < AFS_MAXTYPES; i++) 92 90 entry->vid[i] = ntohl(uvldb->volumeId[i]); 93 91 94 - tmp = ntohl(uvldb->flags); 95 - if (tmp & AFS_VLF_RWEXISTS) 92 + if (vlflags & AFS_VLF_RWEXISTS) 96 93 __set_bit(AFS_VLDB_HAS_RW, &entry->flags); 97 - if (tmp & AFS_VLF_ROEXISTS) 94 + if (vlflags & AFS_VLF_ROEXISTS) 98 95 __set_bit(AFS_VLDB_HAS_RO, &entry->flags); 99 - if (tmp & AFS_VLF_BACKEXISTS) 96 + if (vlflags & AFS_VLF_BACKEXISTS) 100 97 __set_bit(AFS_VLDB_HAS_BAK, &entry->flags); 101 98 102 - if (!(tmp & (AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS | AFS_VLF_BACKEXISTS))) { 99 + if (!(vlflags & (AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS | AFS_VLF_BACKEXISTS))) { 103 100 entry->error = -ENOMEDIUM; 104 101 __set_bit(AFS_VLDB_QUERY_ERROR, &entry->flags); 105 102 }
+1 -1
fs/cifs/Kconfig
··· 197 197 198 198 config CIFS_SMB_DIRECT 199 199 bool "SMB Direct support (Experimental)" 200 - depends on CIFS=m && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND_ADDR_TRANS=y 200 + depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y 201 201 help 202 202 Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1. 203 203 SMB Direct allows transferring SMB packets over RDMA. If unsure,
+1
fs/inode.c
··· 178 178 mapping->a_ops = &empty_aops; 179 179 mapping->host = inode; 180 180 mapping->flags = 0; 181 + mapping->wb_err = 0; 181 182 atomic_set(&mapping->i_mmap_writable, 0); 182 183 mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); 183 184 mapping->private_data = NULL;
+1 -1
include/drm/bridge/dw_hdmi.h
··· 151 151 struct drm_encoder *encoder, 152 152 const struct dw_hdmi_plat_data *plat_data); 153 153 154 - void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense); 154 + void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense); 155 155 156 156 void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); 157 157 void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
+3 -3
include/linux/iio/buffer_impl.h
··· 53 53 int (*request_update)(struct iio_buffer *buffer); 54 54 55 55 int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); 56 - int (*set_length)(struct iio_buffer *buffer, int length); 56 + int (*set_length)(struct iio_buffer *buffer, unsigned int length); 57 57 58 58 int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); 59 59 int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); ··· 72 72 */ 73 73 struct iio_buffer { 74 74 /** @length: Number of datums in buffer. */ 75 - int length; 75 + unsigned int length; 76 76 77 77 /** @bytes_per_datum: Size of individual datum including timestamp. */ 78 - int bytes_per_datum; 78 + size_t bytes_per_datum; 79 79 80 80 /** 81 81 * @access: Buffer access functions associated with the
+2
include/uapi/linux/bpf.h
··· 1017 1017 __aligned_u64 map_ids; 1018 1018 char name[BPF_OBJ_NAME_LEN]; 1019 1019 __u32 ifindex; 1020 + __u32 :32; 1020 1021 __u64 netns_dev; 1021 1022 __u64 netns_ino; 1022 1023 } __attribute__((aligned(8))); ··· 1031 1030 __u32 map_flags; 1032 1031 char name[BPF_OBJ_NAME_LEN]; 1033 1032 __u32 ifindex; 1033 + __u32 :32; 1034 1034 __u64 netns_dev; 1035 1035 __u64 netns_ino; 1036 1036 } __attribute__((aligned(8)));
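Both bpf_prog_info and bpf_map_info gain an unnamed __u32 :32 bitfield so the __u64 fields that follow sit at the same offset on 32-bit and 64-bit ABIs; __attribute__((aligned(8))) only pins the struct's overall alignment and size, not where an inner u64 lands. A hedged demo with a trimmed stand-in struct (not the real uapi layout):

/* With the explicit pad, netns_dev is at offset 8 on every ABI; without
 * it, an ILP32 compiler (u64 alignment 4) would place it at offset 4. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct info_padded {
	uint32_t ifindex;
	uint32_t :32;		/* explicit hole, identical on all ABIs */
	uint64_t netns_dev;
} __attribute__((aligned(8)));

int main(void)
{
	printf("netns_dev offset: %zu\n", offsetof(struct info_padded, netns_dev));
	return 0;
}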
+31 -14
kernel/sched/core.c
··· 881 881 } 882 882 883 883 #ifdef CONFIG_SMP 884 + 885 + static inline bool is_per_cpu_kthread(struct task_struct *p) 886 + { 887 + if (!(p->flags & PF_KTHREAD)) 888 + return false; 889 + 890 + if (p->nr_cpus_allowed != 1) 891 + return false; 892 + 893 + return true; 894 + } 895 + 896 + /* 897 + * Per-CPU kthreads are allowed to run on !active && online CPUs, see 898 + * __set_cpus_allowed_ptr() and select_fallback_rq(). 899 + */ 900 + static inline bool is_cpu_allowed(struct task_struct *p, int cpu) 901 + { 902 + if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) 903 + return false; 904 + 905 + if (is_per_cpu_kthread(p)) 906 + return cpu_online(cpu); 907 + 908 + return cpu_active(cpu); 909 + } 910 + 884 911 /* 885 912 * This is how migration works: 886 913 * ··· 965 938 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, 966 939 struct task_struct *p, int dest_cpu) 967 940 { 968 - if (p->flags & PF_KTHREAD) { 969 - if (unlikely(!cpu_online(dest_cpu))) 970 - return rq; 971 - } else { 972 - if (unlikely(!cpu_active(dest_cpu))) 973 - return rq; 974 - } 975 - 976 941 /* Affinity changed (again). */ 977 - if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) 942 + if (!is_cpu_allowed(p, dest_cpu)) 978 943 return rq; 979 944 980 945 update_rq_clock(rq); ··· 1495 1476 for (;;) { 1496 1477 /* Any allowed, online CPU? */ 1497 1478 for_each_cpu(dest_cpu, &p->cpus_allowed) { 1498 - if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu)) 1479 + if (!is_cpu_allowed(p, dest_cpu)) 1499 1480 continue; 1500 - if (!cpu_online(dest_cpu)) 1501 - continue; 1481 + 1502 1482 goto out; 1503 1483 } 1504 1484 ··· 1560 1542 * [ this allows ->select_task() to simply return task_cpu(p) and 1561 1543 * not worry about this generic constraint ] 1562 1544 */ 1563 - if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) || 1564 - !cpu_online(cpu))) 1545 + if (unlikely(!is_cpu_allowed(p, cpu))) 1565 1546 cpu = select_fallback_rq(task_cpu(p), p); 1566 1547 1567 1548 return cpu;
+3 -3
kernel/sched/deadline.c
··· 1259 1259 1260 1260 rq = task_rq_lock(p, &rf); 1261 1261 1262 + sched_clock_tick(); 1263 + update_rq_clock(rq); 1264 + 1262 1265 if (!dl_task(p) || p->state == TASK_DEAD) { 1263 1266 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 1264 1267 ··· 1280 1277 } 1281 1278 if (dl_se->dl_non_contending == 0) 1282 1279 goto unlock; 1283 - 1284 - sched_clock_tick(); 1285 - update_rq_clock(rq); 1286 1280 1287 1281 sub_running_bw(dl_se, &rq->dl); 1288 1282 dl_se->dl_non_contending = 0;
+1 -1
kernel/sched/sched.h
··· 983 983 } 984 984 985 985 /* 986 - * See rt task throttoling, which is the only time a skip 986 + * See rt task throttling, which is the only time a skip 987 987 * request is cancelled. 988 988 */ 989 989 static inline void rq_clock_cancel_skipupdate(struct rq *rq)
+6 -6
kernel/trace/trace.c
··· 893 893 EXPORT_SYMBOL_GPL(__trace_bputs); 894 894 895 895 #ifdef CONFIG_TRACER_SNAPSHOT 896 - static void tracing_snapshot_instance(struct trace_array *tr) 896 + void tracing_snapshot_instance(struct trace_array *tr) 897 897 { 898 898 struct tracer *tracer = tr->current_trace; 899 899 unsigned long flags; ··· 949 949 struct trace_buffer *size_buf, int cpu_id); 950 950 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); 951 951 952 - static int alloc_snapshot(struct trace_array *tr) 952 + int tracing_alloc_snapshot_instance(struct trace_array *tr) 953 953 { 954 954 int ret; 955 955 ··· 995 995 struct trace_array *tr = &global_trace; 996 996 int ret; 997 997 998 - ret = alloc_snapshot(tr); 998 + ret = tracing_alloc_snapshot_instance(tr); 999 999 WARN_ON(ret < 0); 1000 1000 1001 1001 return ret; ··· 5408 5408 5409 5409 #ifdef CONFIG_TRACER_MAX_TRACE 5410 5410 if (t->use_max_tr && !had_max_tr) { 5411 - ret = alloc_snapshot(tr); 5411 + ret = tracing_alloc_snapshot_instance(tr); 5412 5412 if (ret < 0) 5413 5413 goto out; 5414 5414 } ··· 6451 6451 } 6452 6452 #endif 6453 6453 if (!tr->allocated_snapshot) { 6454 - ret = alloc_snapshot(tr); 6454 + ret = tracing_alloc_snapshot_instance(tr); 6455 6455 if (ret < 0) 6456 6456 break; 6457 6457 } ··· 7179 7179 return ret; 7180 7180 7181 7181 out_reg: 7182 - ret = alloc_snapshot(tr); 7182 + ret = tracing_alloc_snapshot_instance(tr); 7183 7183 if (ret < 0) 7184 7184 goto out; 7185 7185
+11
kernel/trace/trace.h
··· 1817 1817 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } 1818 1818 #endif 1819 1819 1820 + #ifdef CONFIG_TRACER_SNAPSHOT 1821 + void tracing_snapshot_instance(struct trace_array *tr); 1822 + int tracing_alloc_snapshot_instance(struct trace_array *tr); 1823 + #else 1824 + static inline void tracing_snapshot_instance(struct trace_array *tr) { } 1825 + static inline int tracing_alloc_snapshot_instance(struct trace_array *tr) 1826 + { 1827 + return 0; 1828 + } 1829 + #endif 1830 + 1820 1831 extern struct trace_iterator *tracepoint_print_iter; 1821 1832 1822 1833 #endif /* _LINUX_KERNEL_TRACE_H */
+11 -4
kernel/trace/trace_events_trigger.c
··· 483 483 struct trace_event_file *file; 484 484 485 485 list_for_each_entry(file, &tr->events, list) { 486 - struct event_trigger_data *data; 487 - list_for_each_entry_rcu(data, &file->triggers, list) { 486 + struct event_trigger_data *data, *n; 487 + list_for_each_entry_safe(data, n, &file->triggers, list) { 488 488 trace_event_trigger_enable_disable(file, 0); 489 + list_del_rcu(&data->list); 489 490 if (data->ops->free) 490 491 data->ops->free(data->ops, data); 491 492 } ··· 643 642 trigger_data->count = -1; 644 643 trigger_data->ops = trigger_ops; 645 644 trigger_data->cmd_ops = cmd_ops; 645 + trigger_data->private_data = file; 646 646 INIT_LIST_HEAD(&trigger_data->list); 647 647 INIT_LIST_HEAD(&trigger_data->named_list); 648 648 ··· 1055 1053 snapshot_trigger(struct event_trigger_data *data, void *rec, 1056 1054 struct ring_buffer_event *event) 1057 1055 { 1058 - tracing_snapshot(); 1056 + struct trace_event_file *file = data->private_data; 1057 + 1058 + if (file) 1059 + tracing_snapshot_instance(file->tr); 1060 + else 1061 + tracing_snapshot(); 1059 1062 } 1060 1063 1061 1064 static void ··· 1083 1076 { 1084 1077 int ret = register_trigger(glob, ops, data, file); 1085 1078 1086 - if (ret > 0 && tracing_alloc_snapshot() != 0) { 1079 + if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) { 1087 1080 unregister_trigger(glob, ops, data, file); 1088 1081 ret = 0; 1089 1082 }
+1 -1
mm/huge_memory.c
··· 2431 2431 __split_huge_page_tail(head, i, lruvec, list); 2432 2432 /* Some pages can be beyond i_size: drop them from page cache */ 2433 2433 if (head[i].index >= end) { 2434 - __ClearPageDirty(head + i); 2434 + ClearPageDirty(head + i); 2435 2435 __delete_from_page_cache(head + i, NULL); 2436 2436 if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) 2437 2437 shmem_uncharge(head->mapping->host, 1);
+1 -1
mm/vmscan.c
··· 1418 1418 return ret; 1419 1419 1420 1420 mapping = page_mapping(page); 1421 - migrate_dirty = mapping && mapping->a_ops->migratepage; 1421 + migrate_dirty = !mapping || mapping->a_ops->migratepage; 1422 1422 unlock_page(page); 1423 1423 if (!migrate_dirty) 1424 1424 return ret;
+1 -1
net/9p/Kconfig
··· 32 32 33 33 34 34 config NET_9P_RDMA 35 - depends on INET && INFINIBAND_ADDR_TRANS 35 + depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS 36 36 tristate "9P RDMA Transport (Experimental)" 37 37 help 38 38 This builds support for an RDMA transport.
+2 -1
net/bridge/netfilter/ebtables.c
··· 1954 1954 int off, pad = 0; 1955 1955 unsigned int size_kern, match_size = mwt->match_size; 1956 1956 1957 - strlcpy(name, mwt->u.name, sizeof(name)); 1957 + if (strscpy(name, mwt->u.name, sizeof(name)) < 0) 1958 + return -EINVAL; 1958 1959 1959 1960 if (state->buf_kern_start) 1960 1961 dst = state->buf_kern_start + state->buf_kern_offset;
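strscpy(), unlike strlcpy(), reports truncation as -E2BIG instead of silently returning the source length, which lets ebtables reject an oversized match name outright. A userspace stand-in with the same return convention (my_strscpy is an invented name; the kernel helper differs in implementation detail):

/* Hedged sketch of strscpy() semantics: copy what fits, always
 * NUL-terminate, return -E2BIG on truncation, else the copied length. */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (len == size) {		/* src + NUL does not fit */
		if (size) {
			memcpy(dst, src, size - 1);
			dst[size - 1] = '\0';
		}
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char name[8];

	if (my_strscpy(name, "way-too-long-match-name", sizeof(name)) < 0)
		fprintf(stderr, "rejecting oversized name (-EINVAL path)\n");
	return 0;
}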
+3 -3
net/core/net-sysfs.c
··· 1214 1214 cpumask_var_t mask; 1215 1215 unsigned long index; 1216 1216 1217 - if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) 1218 - return -ENOMEM; 1219 - 1220 1217 index = get_netdev_queue_index(queue); 1221 1218 1222 1219 if (dev->num_tc) { ··· 1222 1225 if (tc < 0) 1223 1226 return -EINVAL; 1224 1227 } 1228 + 1229 + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) 1230 + return -ENOMEM; 1225 1231 1226 1232 rcu_read_lock(); 1227 1233 dev_maps = rcu_dereference(dev->xps_maps);
+4 -4
net/ipv4/ip_tunnel.c
··· 328 328 329 329 if (tdev) { 330 330 hlen = tdev->hard_header_len + tdev->needed_headroom; 331 - mtu = tdev->mtu; 331 + mtu = min(tdev->mtu, IP_MAX_MTU); 332 332 } 333 333 334 334 dev->needed_headroom = t_hlen + hlen; ··· 362 362 nt = netdev_priv(dev); 363 363 t_hlen = nt->hlen + sizeof(struct iphdr); 364 364 dev->min_mtu = ETH_MIN_MTU; 365 - dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; 365 + dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; 366 366 ip_tunnel_add(itn, nt); 367 367 return nt; 368 368 ··· 930 930 { 931 931 struct ip_tunnel *tunnel = netdev_priv(dev); 932 932 int t_hlen = tunnel->hlen + sizeof(struct iphdr); 933 - int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; 933 + int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; 934 934 935 935 if (new_mtu < ETH_MIN_MTU) 936 936 return -EINVAL; ··· 1107 1107 1108 1108 mtu = ip_tunnel_bind_dev(dev); 1109 1109 if (tb[IFLA_MTU]) { 1110 - unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen; 1110 + unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen; 1111 1111 1112 1112 mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, 1113 1113 (unsigned int)(max - sizeof(struct iphdr)));
+8 -3
net/ipv6/ip6_tunnel.c
··· 1692 1692 if (new_mtu < ETH_MIN_MTU) 1693 1693 return -EINVAL; 1694 1694 } 1695 - if (new_mtu > 0xFFF8 - dev->hard_header_len) 1696 - return -EINVAL; 1695 + if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) { 1696 + if (new_mtu > IP6_MAX_MTU - dev->hard_header_len) 1697 + return -EINVAL; 1698 + } else { 1699 + if (new_mtu > IP_MAX_MTU - dev->hard_header_len) 1700 + return -EINVAL; 1701 + } 1697 1702 dev->mtu = new_mtu; 1698 1703 return 0; 1699 1704 } ··· 1846 1841 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1847 1842 dev->mtu -= 8; 1848 1843 dev->min_mtu = ETH_MIN_MTU; 1849 - dev->max_mtu = 0xFFF8 - dev->hard_header_len; 1844 + dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len; 1850 1845 1851 1846 return 0; 1852 1847
+2 -2
net/ipv6/seg6_iptunnel.c
··· 103 103 hdrlen = (osrh->hdrlen + 1) << 3; 104 104 tot_len = hdrlen + sizeof(*hdr); 105 105 106 - err = skb_cow_head(skb, tot_len); 106 + err = skb_cow_head(skb, tot_len + skb->mac_len); 107 107 if (unlikely(err)) 108 108 return err; 109 109 ··· 161 161 162 162 hdrlen = (osrh->hdrlen + 1) << 3; 163 163 164 - err = skb_cow_head(skb, hdrlen); 164 + err = skb_cow_head(skb, hdrlen + skb->mac_len); 165 165 if (unlikely(err)) 166 166 return err; 167 167
+3 -2
net/ipv6/sit.c
··· 1371 1371 dev->hard_header_len = LL_MAX_HEADER + t_hlen; 1372 1372 dev->mtu = ETH_DATA_LEN - t_hlen; 1373 1373 dev->min_mtu = IPV6_MIN_MTU; 1374 - dev->max_mtu = 0xFFF8 - t_hlen; 1374 + dev->max_mtu = IP6_MAX_MTU - t_hlen; 1375 1375 dev->flags = IFF_NOARP; 1376 1376 netif_keep_dst(dev); 1377 1377 dev->addr_len = 4; ··· 1583 1583 if (tb[IFLA_MTU]) { 1584 1584 u32 mtu = nla_get_u32(tb[IFLA_MTU]); 1585 1585 1586 - if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len) 1586 + if (mtu >= IPV6_MIN_MTU && 1587 + mtu <= IP6_MAX_MTU - dev->hard_header_len) 1587 1588 dev->mtu = mtu; 1588 1589 } 1589 1590
+1 -1
net/ipv6/xfrm6_policy.c
··· 126 126 struct flowi6 *fl6 = &fl->u.ip6; 127 127 int onlyproto = 0; 128 128 const struct ipv6hdr *hdr = ipv6_hdr(skb); 129 - u16 offset = sizeof(*hdr); 129 + u32 offset = sizeof(*hdr); 130 130 struct ipv6_opt_hdr *exthdr; 131 131 const unsigned char *nh = skb_network_header(skb); 132 132 u16 nhoff = IP6CB(skb)->nhoff;
+1 -1
net/kcm/kcmsock.c
··· 1671 1671 __module_get(newsock->ops->owner); 1672 1672 1673 1673 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL, 1674 - &kcm_proto, true); 1674 + &kcm_proto, false); 1675 1675 if (!newsk) { 1676 1676 sock_release(newsock); 1677 1677 return ERR_PTR(-ENOMEM);
+1 -1
net/ncsi/ncsi-netlink.c
··· 215 215 static int ncsi_pkg_info_all_nl(struct sk_buff *skb, 216 216 struct netlink_callback *cb) 217 217 { 218 - struct nlattr *attrs[NCSI_ATTR_MAX]; 218 + struct nlattr *attrs[NCSI_ATTR_MAX + 1]; 219 219 struct ncsi_package *np, *package; 220 220 struct ncsi_dev_priv *ndp; 221 221 unsigned int package_id;
+15 -6
net/netfilter/ipvs/ip_vs_ctl.c
··· 2381 2381 struct ipvs_sync_daemon_cfg cfg; 2382 2382 2383 2383 memset(&cfg, 0, sizeof(cfg)); 2384 - strlcpy(cfg.mcast_ifn, dm->mcast_ifn, 2385 - sizeof(cfg.mcast_ifn)); 2384 + ret = -EINVAL; 2385 + if (strscpy(cfg.mcast_ifn, dm->mcast_ifn, 2386 + sizeof(cfg.mcast_ifn)) <= 0) 2387 + goto out_dec; 2386 2388 cfg.syncid = dm->syncid; 2387 2389 ret = start_sync_thread(ipvs, &cfg, dm->state); 2388 2390 } else { ··· 2422 2420 } 2423 2421 } 2424 2422 2423 + if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) && 2424 + strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) == 2425 + IP_VS_SCHEDNAME_MAXLEN) { 2426 + ret = -EINVAL; 2427 + goto out_unlock; 2428 + } 2429 + 2425 2430 /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */ 2426 2431 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP && 2427 2432 usvc.protocol != IPPROTO_SCTP) { 2428 - pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n", 2433 + pr_err("set_ctl: invalid protocol: %d %pI4:%d\n", 2429 2434 usvc.protocol, &usvc.addr.ip, 2430 - ntohs(usvc.port), usvc.sched_name); 2435 + ntohs(usvc.port)); 2431 2436 ret = -EFAULT; 2432 2437 goto out_unlock; 2433 2438 } ··· 2856 2847 static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = { 2857 2848 [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 }, 2858 2849 [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING, 2859 - .len = IP_VS_IFNAME_MAXLEN }, 2850 + .len = IP_VS_IFNAME_MAXLEN - 1 }, 2860 2851 [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 }, 2861 2852 [IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 }, 2862 2853 [IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 }, ··· 2874 2865 [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 }, 2875 2866 [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 }, 2876 2867 [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING, 2877 - .len = IP_VS_SCHEDNAME_MAXLEN }, 2868 + .len = IP_VS_SCHEDNAME_MAXLEN - 1 }, 2878 2869 [IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING, 2879 2870 .len = IP_VS_PENAME_MAXLEN }, 2880 2871 [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,
+5 -3
net/netfilter/nf_tables_api.c
··· 1298 1298 rcu_assign_pointer(chain->stats, newstats); 1299 1299 synchronize_rcu(); 1300 1300 free_percpu(oldstats); 1301 - } else 1301 + } else { 1302 1302 rcu_assign_pointer(chain->stats, newstats); 1303 + static_branch_inc(&nft_counters_enabled); 1304 + } 1303 1305 } 1304 1306 1305 1307 static void nf_tables_chain_destroy(struct nft_ctx *ctx) ··· 4708 4706 if (idx > s_idx) 4709 4707 memset(&cb->args[1], 0, 4710 4708 sizeof(cb->args) - sizeof(cb->args[0])); 4711 - if (filter && filter->table[0] && 4709 + if (filter && filter->table && 4712 4710 strcmp(filter->table, table->name)) 4713 4711 goto cont; 4714 4712 if (filter && ··· 5382 5380 if (idx > s_idx) 5383 5381 memset(&cb->args[1], 0, 5384 5382 sizeof(cb->args) - sizeof(cb->args[0])); 5385 - if (filter && filter->table[0] && 5383 + if (filter && filter->table && 5386 5384 strcmp(filter->table, table->name)) 5387 5385 goto cont; 5388 5386
+2 -2
net/netfilter/nf_tables_core.c
··· 126 126 if (!base_chain->stats) 127 127 return; 128 128 129 + local_bh_disable(); 129 130 stats = this_cpu_ptr(rcu_dereference(base_chain->stats)); 130 131 if (stats) { 131 - local_bh_disable(); 132 132 u64_stats_update_begin(&stats->syncp); 133 133 stats->pkts++; 134 134 stats->bytes += pkt->skb->len; 135 135 u64_stats_update_end(&stats->syncp); 136 - local_bh_enable(); 137 136 } 137 + local_bh_enable(); 138 138 } 139 139 140 140 struct nft_jumpstack {
+12 -8
net/netfilter/nft_ct.c
··· 880 880 struct nft_object *obj, bool reset) 881 881 { 882 882 const struct nft_ct_helper_obj *priv = nft_obj_data(obj); 883 - const struct nf_conntrack_helper *helper = priv->helper4; 883 + const struct nf_conntrack_helper *helper; 884 884 u16 family; 885 + 886 + if (priv->helper4 && priv->helper6) { 887 + family = NFPROTO_INET; 888 + helper = priv->helper4; 889 + } else if (priv->helper6) { 890 + family = NFPROTO_IPV6; 891 + helper = priv->helper6; 892 + } else { 893 + family = NFPROTO_IPV4; 894 + helper = priv->helper4; 895 + } 885 896 886 897 if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name)) 887 898 return -1; 888 899 889 900 if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto)) 890 901 return -1; 891 - 892 - if (priv->helper4 && priv->helper6) 893 - family = NFPROTO_INET; 894 - else if (priv->helper6) 895 - family = NFPROTO_IPV6; 896 - else 897 - family = NFPROTO_IPV4; 898 902 899 903 if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family))) 900 904 return -1;
+24 -14
net/netfilter/nft_limit.c
··· 51 51 return !limit->invert; 52 52 } 53 53 54 + /* Use same default as in iptables. */ 55 + #define NFT_LIMIT_PKT_BURST_DEFAULT 5 56 + 54 57 static int nft_limit_init(struct nft_limit *limit, 55 - const struct nlattr * const tb[]) 58 + const struct nlattr * const tb[], bool pkts) 56 59 { 57 - u64 unit; 60 + u64 unit, tokens; 58 61 59 62 if (tb[NFTA_LIMIT_RATE] == NULL || 60 63 tb[NFTA_LIMIT_UNIT] == NULL) ··· 71 68 72 69 if (tb[NFTA_LIMIT_BURST]) 73 70 limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST])); 74 - else 75 - limit->burst = 0; 71 + 72 + if (pkts && limit->burst == 0) 73 + limit->burst = NFT_LIMIT_PKT_BURST_DEFAULT; 76 74 77 75 if (limit->rate + limit->burst < limit->rate) 78 76 return -EOVERFLOW; 79 77 80 - /* The token bucket size limits the number of tokens can be 81 - * accumulated. tokens_max specifies the bucket size. 82 - * tokens_max = unit * (rate + burst) / rate. 83 - */ 84 - limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst), 85 - limit->rate); 78 + if (pkts) { 79 + tokens = div_u64(limit->nsecs, limit->rate) * limit->burst; 80 + } else { 81 + /* The token bucket size limits the number of tokens that can 82 + * be accumulated. tokens_max specifies the bucket size. 83 + * tokens_max = unit * (rate + burst) / rate. 84 + */ 85 + tokens = div_u64(limit->nsecs * (limit->rate + limit->burst), 86 + limit->rate); 87 + } 88 + 89 + limit->tokens = tokens; 86 90 limit->tokens_max = limit->tokens; 87 91 88 92 if (tb[NFTA_LIMIT_FLAGS]) { ··· 154 144 struct nft_limit_pkts *priv = nft_expr_priv(expr); 155 145 int err; 156 146 157 - err = nft_limit_init(&priv->limit, tb); 147 + err = nft_limit_init(&priv->limit, tb, true); 158 148 if (err < 0) 159 149 return err; 160 150 ··· 195 185 { 196 186 struct nft_limit *priv = nft_expr_priv(expr); 197 187 198 - return nft_limit_init(priv, tb); 188 + return nft_limit_init(priv, tb, false); 199 189 } 200 190 201 191 static int nft_limit_bytes_dump(struct sk_buff *skb, ··· 256 246 struct nft_limit_pkts *priv = nft_obj_data(obj); 257 247 int err; 258 248 259 - err = nft_limit_init(&priv->limit, tb); 249 + err = nft_limit_init(&priv->limit, tb, true); 260 250 if (err < 0) 261 251 return err; 262 252 ··· 299 289 { 300 290 struct nft_limit *priv = nft_obj_data(obj); 301 291 302 - return nft_limit_init(priv, tb); 292 + return nft_limit_init(priv, tb, false); 303 293 } 304 294 305 295 static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
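The split above gives packet limits a bucket of burst whole packets (each packet costing nsecs/rate tokens) while byte limits keep the original rate-plus-burst scaling. A quick arithmetic demo, with plain 64-bit division standing in for div_u64() and the values being arbitrary examples:

/* Hedged demo of the two token-bucket sizes computed above. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t nsecs = 1000000000ULL;	/* one-second unit */
	uint64_t rate  = 10;		/* 10 per second */
	uint64_t burst = 5;		/* iptables-style default */

	/* packet limit: each packet costs nsecs/rate; bucket = burst packets */
	uint64_t pkt_tokens  = (nsecs / rate) * burst;

	/* byte limit: bucket scaled so rate + burst units fit in one period */
	uint64_t byte_tokens = nsecs * (rate + burst) / rate;

	printf("pkt bucket:  %" PRIu64 " ns of credit\n", pkt_tokens);
	printf("byte bucket: %" PRIu64 " ns of credit\n", byte_tokens);
	return 0;
}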
+8 -6
net/netfilter/nft_meta.c
··· 234 234 struct sk_buff *skb = pkt->skb; 235 235 u32 *sreg = &regs->data[meta->sreg]; 236 236 u32 value = *sreg; 237 - u8 pkt_type; 237 + u8 value8; 238 238 239 239 switch (meta->key) { 240 240 case NFT_META_MARK: ··· 244 244 skb->priority = value; 245 245 break; 246 246 case NFT_META_PKTTYPE: 247 - pkt_type = nft_reg_load8(sreg); 247 + value8 = nft_reg_load8(sreg); 248 248 249 - if (skb->pkt_type != pkt_type && 250 - skb_pkt_type_ok(pkt_type) && 249 + if (skb->pkt_type != value8 && 250 + skb_pkt_type_ok(value8) && 251 251 skb_pkt_type_ok(skb->pkt_type)) 252 - skb->pkt_type = pkt_type; 252 + skb->pkt_type = value8; 253 253 break; 254 254 case NFT_META_NFTRACE: 255 - skb->nf_trace = !!value; 255 + value8 = nft_reg_load8(sreg); 256 + 257 + skb->nf_trace = !!value8; 256 258 break; 257 259 default: 258 260 WARN_ON(1);
+1 -1
net/rds/Kconfig
··· 8 8 9 9 config RDS_RDMA 10 10 tristate "RDS over Infiniband" 11 - depends on RDS && INFINIBAND_ADDR_TRANS 11 + depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS 12 12 ---help--- 13 13 Allow RDS to use Infiniband as a transport. 14 14 This transport supports RDMA operations.
+1 -1
net/sched/cls_flower.c
··· 977 977 return 0; 978 978 979 979 errout_idr: 980 - if (fnew->handle) 980 + if (!fold) 981 981 idr_remove(&head->handle_idr, fnew->handle); 982 982 errout: 983 983 tcf_exts_destroy(&fnew->exts);
+1 -1
net/sunrpc/Kconfig
··· 50 50 51 51 config SUNRPC_XPRT_RDMA 52 52 tristate "RPC-over-RDMA transport" 53 - depends on SUNRPC && INFINIBAND_ADDR_TRANS 53 + depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS 54 54 default SUNRPC && INFINIBAND 55 55 select SG_POOL 56 56 help
+2 -3
net/xfrm/xfrm_policy.c
··· 1658 1658 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len; 1659 1659 } 1660 1660 1661 - out: 1662 1661 return &xdst0->u.dst; 1663 1662 1664 1663 put_states: ··· 1666 1667 free_dst: 1667 1668 if (xdst0) 1668 1669 dst_release_immediate(&xdst0->u.dst); 1669 - xdst0 = ERR_PTR(err); 1670 - goto out; 1670 + 1671 + return ERR_PTR(err); 1671 1672 } 1672 1673 1673 1674 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
+1 -1
security/selinux/ss/services.c
··· 1494 1494 scontext_len, &context, def_sid); 1495 1495 if (rc == -EINVAL && force) { 1496 1496 context.str = str; 1497 - context.len = scontext_len; 1497 + context.len = strlen(str) + 1; 1498 1498 str = NULL; 1499 1499 } else if (rc) 1500 1500 goto out_unlock;
+14 -6
tools/arch/x86/include/asm/cpufeatures.h
··· 198 198 #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ 199 199 #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ 200 200 #define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */ 201 - 202 201 #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 203 202 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 204 203 #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ ··· 206 207 #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ 207 208 #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ 208 209 #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */ 209 - 210 + #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ 211 + #define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */ 210 212 #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ 211 213 #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ 212 214 #define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */ 213 - 214 215 #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ 215 216 #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ 217 + #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */ 218 + #define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */ 219 + #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */ 220 + #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ 221 + #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ 222 + #define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */ 216 223 217 224 /* Virtualization flags: Linux defined, word 8 */ 218 225 #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ ··· 279 274 #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ 280 275 #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ 281 276 #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ 282 - #define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */ 283 - #define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */ 284 - #define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */ 277 + #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ 278 + #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ 279 + #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ 280 + #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ 285 281 286 282 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ 287 283 #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ ··· 340 334 #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ 341 335 #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ 342 336 #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ 337 + #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ 343 338 
344 339 /* 345 340 * BUG word(s) ··· 370 363 #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ 371 364 #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ 372 365 #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ 366 + #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ 373 367 374 368 #endif /* _ASM_X86_CPUFEATURES_H */
+2
tools/include/uapi/linux/bpf.h
··· 1017 1017 __aligned_u64 map_ids; 1018 1018 char name[BPF_OBJ_NAME_LEN]; 1019 1019 __u32 ifindex; 1020 + __u32 :32; 1020 1021 __u64 netns_dev; 1021 1022 __u64 netns_ino; 1022 1023 } __attribute__((aligned(8))); ··· 1031 1030 __u32 map_flags; 1032 1031 char name[BPF_OBJ_NAME_LEN]; 1033 1032 __u32 ifindex; 1033 + __u32 :32; 1034 1034 __u64 netns_dev; 1035 1035 __u64 netns_ino; 1036 1036 } __attribute__((aligned(8)));
+12
tools/include/uapi/linux/prctl.h
··· 207 207 # define PR_SVE_VL_LEN_MASK 0xffff 208 208 # define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */ 209 209 210 + /* Per task speculation control */ 211 + #define PR_GET_SPECULATION_CTRL 52 212 + #define PR_SET_SPECULATION_CTRL 53 213 + /* Speculation control variants */ 214 + # define PR_SPEC_STORE_BYPASS 0 215 + /* Return and control values for PR_SET/GET_SPECULATION_CTRL */ 216 + # define PR_SPEC_NOT_AFFECTED 0 217 + # define PR_SPEC_PRCTL (1UL << 0) 218 + # define PR_SPEC_ENABLE (1UL << 1) 219 + # define PR_SPEC_DISABLE (1UL << 2) 220 + # define PR_SPEC_FORCE_DISABLE (1UL << 3) 221 + 210 222 #endif /* _LINUX_PRCTL_H */
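These constants pair with the new PR_GET/SET_SPECULATION_CTRL prctl calls. A hedged usage sketch: it only succeeds on a kernel that implements speculative-store-bypass control (hence the errno check), and the constants are defined locally in case the installed libc headers predate this change:

/* Query, then try to disable, speculative store bypass for this task. */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL 52
#define PR_SET_SPECULATION_CTRL 53
#define PR_SPEC_STORE_BYPASS    0
#define PR_SPEC_DISABLE         (1UL << 2)
#endif

int main(void)
{
	long ctrl = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (ctrl < 0) {
		fprintf(stderr, "not supported: %s\n", strerror(errno));
		return 1;
	}
	printf("SSB control word: 0x%lx\n", ctrl);

	/* Opt this task out of speculative store bypass, if permitted. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");
	return 0;
}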
+9 -1
tools/perf/Documentation/perf.data-file-format.txt
··· 111 111 A structure defining the number of CPUs. 112 112 113 113 struct nr_cpus { 114 - uint32_t nr_cpus_online; 115 114 uint32_t nr_cpus_available; /* CPUs not yet onlined */ 115 + uint32_t nr_cpus_online; 116 116 }; 117 117 118 118 HEADER_CPUDESC = 8, ··· 153 153 HEADER_CPU_TOPOLOGY = 13, 154 154 155 155 String lists defining the core and CPU threads topology. 156 + The string lists are followed by a variable length array 157 + which contains core_id and socket_id of each cpu. 158 + The number of entries can be determined by the size of the 159 + section minus the sizes of both string lists. 156 160 157 161 struct { 158 162 struct perf_header_string_list cores; /* Variable length */ 159 163 struct perf_header_string_list threads; /* Variable length */ 164 + struct { 165 + uint32_t core_id; 166 + uint32_t socket_id; 167 + } cpus[nr]; /* Variable length records */ 160 168 }; 161 169 162 170 Example:
+24 -6
tools/perf/tests/topology.c
··· 70 70 session = perf_session__new(&data, false, NULL); 71 71 TEST_ASSERT_VAL("can't get session", session); 72 72 73 + /* On platforms with large numbers of CPUs process_cpu_topology() 74 + * might issue an error while reading the perf.data file section 75 + * HEADER_CPU_TOPOLOGY, in which case the cpu_topology_map pointed to 76 + * by member cpu is a NULL pointer. 77 + * Example: On s390 78 + * CPU 0 is on core_id 0 and physical_package_id 6 79 + * CPU 1 is on core_id 1 and physical_package_id 3 80 + * 81 + * Core_id and physical_package_id are platform and architecture 82 + * dependent and might have higher numbers than the CPU id. 83 + * This actually depends on the configuration. 84 + * 85 + * In this case process_cpu_topology() prints the error message: 86 + * "socket_id number is too big. You may need to upgrade the 87 + * perf tool." 88 + * 89 + * This is the reason why this test might be skipped. 90 + */ 91 + if (!session->header.env.cpu) 92 + return TEST_SKIP; 93 + 73 94 for (i = 0; i < session->header.env.nr_cpus_avail; i++) { 74 95 if (!cpu_map__has(map, i)) 75 96 continue; ··· 116 95 { 117 96 char path[PATH_MAX]; 118 97 struct cpu_map *map; 119 - int ret = -1; 98 + int ret = TEST_FAIL; 120 99 121 100 TEST_ASSERT_VAL("can't get templ file", !get_temp(path)); 122 101 ··· 131 110 goto free_path; 132 111 } 133 112 134 - if (check_cpu_topology(path, map)) 135 - goto free_map; 136 - ret = 0; 137 - 138 - free_map: 113 + ret = check_cpu_topology(path, map); 139 114 cpu_map__put(map); 115 + 140 116 free_path: 141 117 unlink(path); 142 118 return ret;
+1 -1
tools/perf/trace/beauty/prctl_option.sh
··· 1 1 #!/bin/sh 2 2 3 - header_dir=$1 3 + [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ 4 4 5 5 printf "static const char *prctl_options[] = {\n" 6 6 regex='^#define[[:space:]]+PR_([GS]ET\w+)[[:space:]]*([[:xdigit:]]+).*'
+10 -2
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
··· 96 96 /* Nothing to do, might as well just return */ 97 97 if (decoder->packet_count == 0) 98 98 return 0; 99 + /* 100 + * The queueing process in function cs_etm_decoder__buffer_packet() 101 + * increments the tail *before* using it. This is somewhat counter- 102 + * intuitive but it has the advantage of centralizing tail management 103 + * at a single location. Because of that we need to follow the same 104 + * heuristic with the head, i.e. we increment it before using its 105 + * value. Otherwise the first element of the packet queue is not 106 + * used. 107 + */ 108 + decoder->head = (decoder->head + 1) & (MAX_BUFFER - 1); 99 109 100 110 *packet = decoder->packet_buffer[decoder->head]; 101 - 102 - decoder->head = (decoder->head + 1) & (MAX_BUFFER - 1); 103 111 104 112 decoder->packet_count--; 105 113
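Since the producer bumps the tail before storing, the consumer must bump the head before loading, or the first queued packet is never consumed; that is exactly the off-by-one this hunk fixes. A self-contained sketch of the convention (MAX_BUFFER shrunk to 4 for brevity):

/* Increment-before-use on both ends keeps producer and consumer paired. */
#include <stdio.h>

#define MAX_BUFFER 4	/* power of two, as in the decoder */

int main(void)
{
	int buf[MAX_BUFFER];
	unsigned int head = 0, tail = 0;

	for (int v = 10; v < 13; v++) {		/* queue three packets */
		tail = (tail + 1) & (MAX_BUFFER - 1);
		buf[tail] = v;
	}
	for (int i = 0; i < 3; i++) {		/* drain them in order */
		head = (head + 1) & (MAX_BUFFER - 1);
		printf("got %d\n", buf[head]);
	}
	return 0;
}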
+18
tools/perf/util/intel-pt-decoder/insn.h
··· 208 208 return insn_offset_displacement(insn) + insn->displacement.nbytes; 209 209 } 210 210 211 + #define POP_SS_OPCODE 0x1f 212 + #define MOV_SREG_OPCODE 0x8e 213 + 214 + /* 215 + * Intel SDM Vol.3A 6.8.3 states; 216 + * "Any single-step trap that would be delivered following the MOV to SS 217 + * instruction or POP to SS instruction (because EFLAGS.TF is 1) is 218 + * suppressed." 219 + * This function returns true if @insn is MOV SS or POP SS. On these 220 + * instructions, single stepping is suppressed. 221 + */ 222 + static inline int insn_masking_exception(struct insn *insn) 223 + { 224 + return insn->opcode.bytes[0] == POP_SS_OPCODE || 225 + (insn->opcode.bytes[0] == MOV_SREG_OPCODE && 226 + X86_MODRM_REG(insn->modrm.bytes[0]) == 2); 227 + } 228 + 211 229 #endif /* _ASM_X86_INSN_H */
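insn_masking_exception() needs only the first opcode byte plus the ModRM reg field to spot MOV-to-SS and POP SS, the two instructions whose single-step traps the SDM says are suppressed. A stand-alone check over raw bytes (masks_single_step and MODRM_REG are illustrative helpers, not the insn.h API):

/* Hedged demo of the check above, applied to raw instruction bytes. */
#include <stdio.h>

#define POP_SS_OPCODE   0x1f
#define MOV_SREG_OPCODE 0x8e
#define MODRM_REG(b)    (((b) >> 3) & 7)

static int masks_single_step(unsigned char opc, unsigned char modrm)
{
	return opc == POP_SS_OPCODE ||
	       (opc == MOV_SREG_OPCODE && MODRM_REG(modrm) == 2);
}

int main(void)
{
	/* 8e d0 = mov ss, ax ; 8e d8 = mov ds, ax */
	printf("mov ss: %d\n", masks_single_step(0x8e, 0xd0));
	printf("mov ds: %d\n", masks_single_step(0x8e, 0xd8));
	return 0;
}

Here 0x8e 0xd0 matches because (0xd0 >> 3) & 7 == 2 selects SS, while 0x8e 0xd8 selects DS and does not.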
+2
tools/perf/util/scripting-engines/trace-event-python.c
··· 531 531 PyLong_FromUnsignedLongLong(sample->period)); 532 532 pydict_set_item_string_decref(dict_sample, "phys_addr", 533 533 PyLong_FromUnsignedLongLong(sample->phys_addr)); 534 + pydict_set_item_string_decref(dict_sample, "addr", 535 + PyLong_FromUnsignedLongLong(sample->addr)); 534 536 set_sample_read_in_dict(dict_sample, sample, evsel); 535 537 pydict_set_item_string_decref(dict, "sample", dict_sample); 536 538