Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'acpi-pm' into pm-sleep

+5242 -3286
+3
Documentation/devicetree/bindings/arm/arch_timer.txt
··· 19 19 20 20 - clock-frequency : The frequency of the main counter, in Hz. Optional. 21 21 22 + - always-on : a boolean property. If present, the timer is powered through an 23 + always-on power domain, therefore it never loses context. 24 + 22 25 Example: 23 26 24 27 timer {
+3
Documentation/devicetree/bindings/ata/apm-xgene.txt
··· 24 24 * "sata-phy" for the SATA 6.0Gbps PHY 25 25 26 26 Optional properties: 27 + - dma-coherent : Present if dma operations are coherent 27 28 - status : Shall be "ok" if enabled or "disabled" if disabled. 28 29 Default is "ok". 29 30 ··· 56 55 <0x0 0x1f22e000 0x0 0x1000>, 57 56 <0x0 0x1f227000 0x0 0x1000>; 58 57 interrupts = <0x0 0x87 0x4>; 58 + dma-coherent; 59 59 status = "ok"; 60 60 clocks = <&sataclk 0>; 61 61 phys = <&phy2 0>; ··· 71 69 <0x0 0x1f23e000 0x0 0x1000>, 72 70 <0x0 0x1f237000 0x0 0x1000>; 73 71 interrupts = <0x0 0x88 0x4>; 72 + dma-coherent; 74 73 status = "ok"; 75 74 clocks = <&sataclk 0>; 76 75 phys = <&phy3 0>;
+10 -2
Documentation/devicetree/bindings/net/arc_emac.txt
··· 4 4 - compatible: Should be "snps,arc-emac" 5 5 - reg: Address and length of the register set for the device 6 6 - interrupts: Should contain the EMAC interrupts 7 - - clock-frequency: CPU frequency. It is needed to calculate and set polling 8 - period of EMAC. 9 7 - max-speed: see ethernet.txt file in the same directory. 10 8 - phy: see ethernet.txt file in the same directory. 9 + 10 + Clock handling: 11 + The clock frequency is needed to calculate and set polling period of EMAC. 12 + It must be provided by one of: 13 + - clock-frequency: CPU frequency. 14 + - clocks: reference to the clock supplying the EMAC. 11 15 12 16 Child nodes of the driver are the individual PHY devices connected to the 13 17 MDIO bus. They must have a "reg" property given the PHY address on the MDIO bus. ··· 23 19 reg = <0xc0fc2000 0x3c>; 24 20 interrupts = <6>; 25 21 mac-address = [ 00 11 22 33 44 55 ]; 22 + 26 23 clock-frequency = <80000000>; 24 + /* or */ 25 + clocks = <&emac_clock>; 26 + 27 27 max-speed = <100>; 28 28 phy = <&phy0>; 29 29
+1 -1
Documentation/devicetree/bindings/net/socfpga-dwmac.txt
··· 23 23 interrupt-names = "macirq"; 24 24 mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */ 25 25 clocks = <&emac_0_clk>; 26 - clocks-names = "stmmaceth"; 26 + clock-names = "stmmaceth"; 27 27 };
+1 -1
Documentation/devicetree/bindings/net/stmmac.txt
··· 33 33 - max-frame-size: See ethernet.txt file in the same directory 34 34 - clocks: If present, the first clock should be the GMAC main clock, 35 35 further clocks may be specified in derived bindings. 36 - - clocks-names: One name for each entry in the clocks property, the 36 + - clock-names: One name for each entry in the clocks property, the 37 37 first one should be "stmmaceth". 38 38 39 39 Examples:
+2 -2
Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
··· 83 83 reg = <0xfe61f080 0x4>; 84 84 reg-names = "irqmux"; 85 85 interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>; 86 - interrupts-names = "irqmux"; 86 + interrupt-names = "irqmux"; 87 87 ranges = <0 0xfe610000 0x5000>; 88 88 89 89 PIO0: gpio@fe610000 { ··· 165 165 interrupt-parent = <&PIO3>; 166 166 #interrupt-cells = <2>; 167 167 interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; /* Interrupt line via PIO3-3 */ 168 - interrupts-names = "card-detect"; 168 + interrupt-names = "card-detect"; 169 169 pinctrl-names = "default"; 170 170 pinctrl-0 = <&pinctrl_mmc>; 171 171 };
+1 -1
Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
··· 47 47 reg = <0x100000 0x3000>; 48 48 reg-names "mpu"; 49 49 interrupts = <82>, <83>; 50 - interrupts-names = "tx", "rx"; 50 + interrupt-names = "tx", "rx"; 51 51 op-mode = <0>; /* MCASP_IIS_MODE */ 52 52 tdm-slots = <2>; 53 53 serial-dir = <
+3 -3
Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
··· 13 13 "ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP) 14 14 15 15 - reg - <int> - I2C slave address 16 + - HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply, 17 + DVDD-supply : power supplies for the device as covered in 18 + Documentation/devicetree/bindings/regulator/regulator.txt 16 19 17 20 18 21 Optional properties: ··· 27 24 3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD 28 25 If this node is not mentioned or if the value is unknown, then 29 26 micbias is set to 2.0V. 30 - - HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply, 31 - DVDD-supply : power supplies for the device as covered in 32 - Documentation/devicetree/bindings/regulator/regulator.txt 33 27 34 28 CODEC output pins: 35 29 * HPL
+4 -1
Documentation/input/elantech.txt
··· 504 504 * reg_10 505 505 506 506 bit 7 6 5 4 3 2 1 0 507 - 0 0 0 0 0 0 0 A 507 + 0 0 0 0 R F T A 508 508 509 509 A: 1 = enable absolute tracking 510 + T: 1 = enable two finger mode auto correct 511 + F: 1 = disable ABS Position Filter 512 + R: 1 = enable real hardware resolution 510 513 511 514 6.2 Native absolute mode 6 byte packet format 512 515 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+1 -1
Documentation/networking/scaling.txt
··· 429 429 (therbert@google.com) 430 430 431 431 Accelerated RFS was introduced in 2.6.35. Original patches were 432 - submitted by Ben Hutchings (bhutchings@solarflare.com) 432 + submitted by Ben Hutchings (bwh@kernel.org) 433 433 434 434 Authors: 435 435 Tom Herbert (therbert@google.com)
+15 -6
MAINTAINERS
··· 3485 3485 F: drivers/extcon/ 3486 3486 F: Documentation/extcon/ 3487 3487 3488 + EXYNOS DP DRIVER 3489 + M: Jingoo Han <jg1.han@samsung.com> 3490 + L: dri-devel@lists.freedesktop.org 3491 + S: Maintained 3492 + F: drivers/gpu/drm/exynos/exynos_dp* 3493 + 3488 3494 EXYNOS MIPI DISPLAY DRIVERS 3489 3495 M: Inki Dae <inki.dae@samsung.com> 3490 3496 M: Donghwa Lee <dh09.lee@samsung.com> ··· 3556 3550 F: include/uapi/scsi/fc/ 3557 3551 3558 3552 FILE LOCKING (flock() and fcntl()/lockf()) 3559 - M: Jeff Layton <jlayton@redhat.com> 3553 + M: Jeff Layton <jlayton@poochiereds.net> 3560 3554 M: J. Bruce Fields <bfields@fieldses.org> 3561 3555 L: linux-fsdevel@vger.kernel.org 3562 3556 S: Maintained ··· 5114 5108 5115 5109 KERNEL VIRTUAL MACHINE (KVM) FOR ARM 5116 5110 M: Christoffer Dall <christoffer.dall@linaro.org> 5111 + M: Marc Zyngier <marc.zyngier@arm.com> 5112 + L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 5117 5113 L: kvmarm@lists.cs.columbia.edu 5118 5114 W: http://systems.cs.columbia.edu/projects/kvm-arm 5119 5115 S: Supported 5120 5116 F: arch/arm/include/uapi/asm/kvm* 5121 5117 F: arch/arm/include/asm/kvm* 5122 5118 F: arch/arm/kvm/ 5119 + F: virt/kvm/arm/ 5120 + F: include/kvm/arm_* 5123 5121 5124 5122 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64) 5123 + M: Christoffer Dall <christoffer.dall@linaro.org> 5125 5124 M: Marc Zyngier <marc.zyngier@arm.com> 5126 5125 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 5127 5126 L: kvmarm@lists.cs.columbia.edu ··· 7288 7277 RALINK RT2X00 WIRELESS LAN DRIVER 7289 7278 P: rt2x00 project 7290 7279 M: Ivo van Doorn <IvDoorn@gmail.com> 7291 - M: Gertjan van Wingerde <gwingerde@gmail.com> 7292 7280 M: Helmut Schaa <helmut.schaa@googlemail.com> 7293 7281 L: linux-wireless@vger.kernel.org 7294 7282 L: users@rt2x00.serialmonkey.com (moderated for non-subscribers) ··· 7303 7293 F: drivers/block/brd.c 7304 7294 7305 7295 RANDOM NUMBER DRIVER 7306 - M: Theodore Ts'o" <tytso@mit.edu> 7296 + M: "Theodore Ts'o" <tytso@mit.edu> 7307 7297 S: Maintained 7308 7298 F: drivers/char/random.c 7309 7299 ··· 7684 7674 SAMSUNG SXGBE DRIVERS 7685 7675 M: Byungho An <bh74.an@samsung.com> 7686 7676 M: Girish K S <ks.giri@samsung.com> 7687 - M: Siva Reddy Kallam <siva.kallam@samsung.com> 7688 7677 M: Vipul Pandya <vipul.pandya@samsung.com> 7689 7678 S: Supported 7690 7679 L: netdev@vger.kernel.org ··· 9960 9951 F: drivers/net/hamradio/z8530.h 9961 9952 9962 9953 ZBUD COMPRESSED PAGE ALLOCATOR 9963 - M: Seth Jennings <sjenning@linux.vnet.ibm.com> 9954 + M: Seth Jennings <sjennings@variantweb.net> 9964 9955 L: linux-mm@kvack.org 9965 9956 S: Maintained 9966 9957 F: mm/zbud.c ··· 10005 9996 F: include/linux/zsmalloc.h 10006 9997 10007 9998 ZSWAP COMPRESSED SWAP CACHING 10008 - M: Seth Jennings <sjenning@linux.vnet.ibm.com> 9999 + M: Seth Jennings <sjennings@variantweb.net> 10009 10000 L: linux-mm@kvack.org 10010 10001 S: Maintained 10011 10002 F: mm/zswap.c
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 15 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc3 4 + EXTRAVERSION = -rc5 5 5 NAME = Shuffling Zombie Juror 6 6 7 7 # *DOCUMENTATION*
+5 -3
arch/arc/kernel/entry.S
··· 614 614 615 615 resume_kernel_mode: 616 616 617 - #ifdef CONFIG_PREEMPT 618 - 619 - ; This is a must for preempt_schedule_irq() 617 + ; Disable Interrupts from this point on 618 + ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq() 619 + ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe 620 620 IRQ_DISABLE r9 621 + 622 + #ifdef CONFIG_PREEMPT 621 623 622 624 ; Can't preempt if preemption disabled 623 625 GET_CURR_THR_INFO_FROM_SP r10
+2 -2
arch/arm/boot/dts/am33xx.dtsi
··· 802 802 <0x46000000 0x400000>; 803 803 reg-names = "mpu", "dat"; 804 804 interrupts = <80>, <81>; 805 - interrupts-names = "tx", "rx"; 805 + interrupt-names = "tx", "rx"; 806 806 status = "disabled"; 807 807 dmas = <&edma 8>, 808 808 <&edma 9>; ··· 816 816 <0x46400000 0x400000>; 817 817 reg-names = "mpu", "dat"; 818 818 interrupts = <82>, <83>; 819 - interrupts-names = "tx", "rx"; 819 + interrupt-names = "tx", "rx"; 820 820 status = "disabled"; 821 821 dmas = <&edma 10>, 822 822 <&edma 11>;
+2 -2
arch/arm/boot/dts/am4372.dtsi
··· 691 691 <0x46000000 0x400000>; 692 692 reg-names = "mpu", "dat"; 693 693 interrupts = <80>, <81>; 694 - interrupts-names = "tx", "rx"; 694 + interrupt-names = "tx", "rx"; 695 695 status = "disabled"; 696 696 dmas = <&edma 8>, 697 697 <&edma 9>; ··· 705 705 <0x46400000 0x400000>; 706 706 reg-names = "mpu", "dat"; 707 707 interrupts = <82>, <83>; 708 - interrupts-names = "tx", "rx"; 708 + interrupt-names = "tx", "rx"; 709 709 status = "disabled"; 710 710 dmas = <&edma 10>, 711 711 <&edma 11>;
+5 -5
arch/arm/boot/dts/stih415-pinctrl.dtsi
··· 49 49 reg = <0xfe61f080 0x4>; 50 50 reg-names = "irqmux"; 51 51 interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>; 52 - interrupts-names = "irqmux"; 52 + interrupt-names = "irqmux"; 53 53 ranges = <0 0xfe610000 0x5000>; 54 54 55 55 PIO0: gpio@fe610000 { ··· 187 187 reg = <0xfee0f080 0x4>; 188 188 reg-names = "irqmux"; 189 189 interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>; 190 - interrupts-names = "irqmux"; 190 + interrupt-names = "irqmux"; 191 191 ranges = <0 0xfee00000 0x8000>; 192 192 193 193 PIO5: gpio@fee00000 { ··· 282 282 reg = <0xfe82f080 0x4>; 283 283 reg-names = "irqmux"; 284 284 interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>; 285 - interrupts-names = "irqmux"; 285 + interrupt-names = "irqmux"; 286 286 ranges = <0 0xfe820000 0x8000>; 287 287 288 288 PIO13: gpio@fe820000 { ··· 423 423 reg = <0xfd6bf080 0x4>; 424 424 reg-names = "irqmux"; 425 425 interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; 426 - interrupts-names = "irqmux"; 426 + interrupt-names = "irqmux"; 427 427 ranges = <0 0xfd6b0000 0x3000>; 428 428 429 429 PIO100: gpio@fd6b0000 { ··· 460 460 reg = <0xfd33f080 0x4>; 461 461 reg-names = "irqmux"; 462 462 interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>; 463 - interrupts-names = "irqmux"; 463 + interrupt-names = "irqmux"; 464 464 ranges = <0 0xfd330000 0x5000>; 465 465 466 466 PIO103: gpio@fd330000 {
+5 -5
arch/arm/boot/dts/stih416-pinctrl.dtsi
··· 53 53 reg = <0xfe61f080 0x4>; 54 54 reg-names = "irqmux"; 55 55 interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>; 56 - interrupts-names = "irqmux"; 56 + interrupt-names = "irqmux"; 57 57 ranges = <0 0xfe610000 0x6000>; 58 58 59 59 PIO0: gpio@fe610000 { ··· 201 201 reg = <0xfee0f080 0x4>; 202 202 reg-names = "irqmux"; 203 203 interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>; 204 - interrupts-names = "irqmux"; 204 + interrupt-names = "irqmux"; 205 205 ranges = <0 0xfee00000 0x10000>; 206 206 207 207 PIO5: gpio@fee00000 { ··· 333 333 reg = <0xfe82f080 0x4>; 334 334 reg-names = "irqmux"; 335 335 interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>; 336 - interrupts-names = "irqmux"; 336 + interrupt-names = "irqmux"; 337 337 ranges = <0 0xfe820000 0x6000>; 338 338 339 339 PIO13: gpio@fe820000 { ··· 461 461 reg = <0xfd6bf080 0x4>; 462 462 reg-names = "irqmux"; 463 463 interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; 464 - interrupts-names = "irqmux"; 464 + interrupt-names = "irqmux"; 465 465 ranges = <0 0xfd6b0000 0x3000>; 466 466 467 467 PIO100: gpio@fd6b0000 { ··· 498 498 reg = <0xfd33f080 0x4>; 499 499 reg-names = "irqmux"; 500 500 interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>; 501 - interrupts-names = "irqmux"; 501 + interrupt-names = "irqmux"; 502 502 ranges = <0 0xfd330000 0x5000>; 503 503 504 504 PIO103: gpio@fd330000 {
+1 -1
arch/arm/kvm/Kconfig
··· 23 23 select HAVE_KVM_CPU_RELAX_INTERCEPT 24 24 select KVM_MMIO 25 25 select KVM_ARM_HOST 26 - depends on ARM_VIRT_EXT && ARM_LPAE 26 + depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN 27 27 ---help--- 28 28 Support hosting virtualized guest machines. You will also 29 29 need to select one or more of the processor modules below.
+9 -6
arch/arm/kvm/mmu.c
··· 42 42 static unsigned long hyp_idmap_end; 43 43 static phys_addr_t hyp_idmap_vector; 44 44 45 + #define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t)) 46 + 45 47 #define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x)) 46 48 47 49 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) ··· 295 293 if (boot_hyp_pgd) { 296 294 unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); 297 295 unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); 298 - kfree(boot_hyp_pgd); 296 + free_pages((unsigned long)boot_hyp_pgd, pgd_order); 299 297 boot_hyp_pgd = NULL; 300 298 } 301 299 302 300 if (hyp_pgd) 303 301 unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); 304 302 305 - kfree(init_bounce_page); 303 + free_page((unsigned long)init_bounce_page); 306 304 init_bounce_page = NULL; 307 305 308 306 mutex_unlock(&kvm_hyp_pgd_mutex); ··· 332 330 for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) 333 331 unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); 334 332 335 - kfree(hyp_pgd); 333 + free_pages((unsigned long)hyp_pgd, pgd_order); 336 334 hyp_pgd = NULL; 337 335 } 338 336 ··· 1026 1024 size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start; 1027 1025 phys_addr_t phys_base; 1028 1026 1029 - init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL); 1027 + init_bounce_page = (void *)__get_free_page(GFP_KERNEL); 1030 1028 if (!init_bounce_page) { 1031 1029 kvm_err("Couldn't allocate HYP init bounce page\n"); 1032 1030 err = -ENOMEM; 1033 1031 ··· 1052 1050 (unsigned long)phys_base); 1053 1051 } 1054 1052 1055 - hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); 1056 - boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); 1053 + hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order); 1054 + boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order); 1055 + 1057 1056 if (!hyp_pgd || !boot_hyp_pgd) { 1058 1057 kvm_err("Hyp mode PGD not allocated\n"); 1059 1058 err = -ENOMEM;
+3
arch/arm64/boot/dts/apm-storm.dtsi
··· 307 307 <0x0 0x1f21e000 0x0 0x1000>, 308 308 <0x0 0x1f217000 0x0 0x1000>; 309 309 interrupts = <0x0 0x86 0x4>; 310 + dma-coherent; 310 311 status = "disabled"; 311 312 clocks = <&sata01clk 0>; 312 313 phys = <&phy1 0>; ··· 322 321 <0x0 0x1f22e000 0x0 0x1000>, 323 322 <0x0 0x1f227000 0x0 0x1000>; 324 323 interrupts = <0x0 0x87 0x4>; 324 + dma-coherent; 325 325 status = "ok"; 326 326 clocks = <&sata23clk 0>; 327 327 phys = <&phy2 0>; ··· 336 334 <0x0 0x1f23d000 0x0 0x1000>, 337 335 <0x0 0x1f23e000 0x0 0x1000>; 338 336 interrupts = <0x0 0x88 0x4>; 337 + dma-coherent; 339 338 status = "ok"; 340 339 clocks = <&sata45clk 0>; 341 340 phys = <&phy3 0>;
+2 -4
arch/arm64/kernel/early_printk.c
··· 143 143 } 144 144 /* no options parsing yet */ 145 145 146 - if (paddr) { 147 - set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr); 148 - early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE); 149 - } 146 + if (paddr) 147 + early_base = (void __iomem *)set_fixmap_offset_io(FIX_EARLYCON_MEM_BASE, paddr); 150 148 151 149 printch = match->printch; 152 150 early_console = &early_console_dev;
+1 -1
arch/arm64/kernel/setup.c
··· 396 396 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 397 397 return 0; 398 398 } 399 - arch_initcall(arm64_device_init); 399 + arch_initcall_sync(arm64_device_init); 400 400 401 401 static DEFINE_PER_CPU(struct cpu, cpu_data); 402 402
+33 -2
arch/arm64/mm/dma-mapping.c
··· 22 22 #include <linux/slab.h> 23 23 #include <linux/dma-mapping.h> 24 24 #include <linux/dma-contiguous.h> 25 + #include <linux/of.h> 26 + #include <linux/platform_device.h> 25 27 #include <linux/vmalloc.h> 26 28 #include <linux/swiotlb.h> 29 + #include <linux/amba/bus.h> 27 30 28 31 #include <asm/cacheflush.h> 29 32 ··· 308 305 }; 309 306 EXPORT_SYMBOL(coherent_swiotlb_dma_ops); 310 307 308 + static int dma_bus_notifier(struct notifier_block *nb, 309 + unsigned long event, void *_dev) 310 + { 311 + struct device *dev = _dev; 312 + 313 + if (event != BUS_NOTIFY_ADD_DEVICE) 314 + return NOTIFY_DONE; 315 + 316 + if (of_property_read_bool(dev->of_node, "dma-coherent")) 317 + set_dma_ops(dev, &coherent_swiotlb_dma_ops); 318 + 319 + return NOTIFY_OK; 320 + } 321 + 322 + static struct notifier_block platform_bus_nb = { 323 + .notifier_call = dma_bus_notifier, 324 + }; 325 + 326 + static struct notifier_block amba_bus_nb = { 327 + .notifier_call = dma_bus_notifier, 328 + }; 329 + 311 330 extern int swiotlb_late_init_with_default_size(size_t default_size); 312 331 313 332 static int __init swiotlb_late_init(void) 314 333 { 315 334 size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT); 316 335 317 - dma_ops = &coherent_swiotlb_dma_ops; 336 + /* 337 + * These must be registered before of_platform_populate(). 338 + */ 339 + bus_register_notifier(&platform_bus_type, &platform_bus_nb); 340 + bus_register_notifier(&amba_bustype, &amba_bus_nb); 341 + 342 + dma_ops = &noncoherent_swiotlb_dma_ops; 318 343 319 344 return swiotlb_late_init_with_default_size(swiotlb_size); 320 345 } 321 - subsys_initcall(swiotlb_late_init); 346 + arch_initcall(swiotlb_late_init); 322 347 323 348 #define PREALLOC_DMA_DEBUG_ENTRIES 4096 324 349
+3
arch/arm64/mm/mmu.c
··· 374 374 if (pmd_none(*pmd)) 375 375 return 0; 376 376 377 + if (pmd_sect(*pmd)) 378 + return pfn_valid(pmd_pfn(*pmd)); 379 + 377 380 pte = pte_offset_kernel(pmd, addr); 378 381 if (pte_none(*pte)) 379 382 return 0;
-37
arch/hexagon/include/asm/barrier.h
··· 1 - /* 2 - * Memory barrier definitions for the Hexagon architecture 3 - * 4 - * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 and 8 - * only version 2 as published by the Free Software Foundation. 9 - * 10 - * This program is distributed in the hope that it will be useful, 11 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 - * GNU General Public License for more details. 14 - * 15 - * You should have received a copy of the GNU General Public License 16 - * along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 18 - * 02110-1301, USA. 19 - */ 20 - 21 - #ifndef _ASM_BARRIER_H 22 - #define _ASM_BARRIER_H 23 - 24 - #define rmb() barrier() 25 - #define read_barrier_depends() barrier() 26 - #define wmb() barrier() 27 - #define mb() barrier() 28 - #define smp_rmb() barrier() 29 - #define smp_read_barrier_depends() barrier() 30 - #define smp_wmb() barrier() 31 - #define smp_mb() barrier() 32 - 33 - /* Set a value and use a memory barrier. Used by the scheduler somewhere. */ 34 - #define set_mb(var, value) \ 35 - do { var = value; mb(); } while (0) 36 - 37 - #endif /* _ASM_BARRIER_H */
+2 -1
arch/parisc/include/uapi/asm/Kbuild
··· 1 1 # UAPI Header export list 2 2 include include/uapi/asm-generic/Kbuild.asm 3 3 4 + generic-y += resource.h 5 + 4 6 header-y += bitsperlong.h 5 7 header-y += byteorder.h 6 8 header-y += errno.h ··· 15 13 header-y += pdc.h 16 14 header-y += posix_types.h 17 15 header-y += ptrace.h 18 - header-y += resource.h 19 16 header-y += sembuf.h 20 17 header-y += setup.h 21 18 header-y += shmbuf.h
-7
arch/parisc/include/uapi/asm/resource.h
··· 1 - #ifndef _ASM_PARISC_RESOURCE_H 2 - #define _ASM_PARISC_RESOURCE_H 3 - 4 - #define _STK_LIM_MAX 10 * _STK_LIM 5 - #include <asm-generic/resource.h> 6 - 7 - #endif
+4 -4
arch/powerpc/boot/main.c
··· 139 139 * edit the command line passed to vmlinux (by setting /chosen/bootargs). 140 140 * The buffer is put in it's own section so that tools may locate it easier. 141 141 */ 142 - static char cmdline[COMMAND_LINE_SIZE] 142 + static char cmdline[BOOT_COMMAND_LINE_SIZE] 143 143 __attribute__((__section__("__builtin_cmdline"))); 144 144 145 145 static void prep_cmdline(void *chosen) 146 146 { 147 147 if (cmdline[0] == '\0') 148 - getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1); 148 + getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1); 149 149 150 150 printf("\n\rLinux/PowerPC load: %s", cmdline); 151 151 /* If possible, edit the command line */ 152 152 if (console_ops.edit_cmdline) 153 - console_ops.edit_cmdline(cmdline, COMMAND_LINE_SIZE); 153 + console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE); 154 154 printf("\n\r"); 155 155 156 156 /* Put the command line back into the devtree for the kernel */ ··· 174 174 * built-in command line wasn't set by an external tool */ 175 175 if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0')) 176 176 memmove(cmdline, loader_info.cmdline, 177 - min(loader_info.cmdline_len, COMMAND_LINE_SIZE-1)); 177 + min(loader_info.cmdline_len, BOOT_COMMAND_LINE_SIZE-1)); 178 178 179 179 if (console_ops.open && (console_ops.open() < 0)) 180 180 exit();
+1 -1
arch/powerpc/boot/ops.h
··· 15 15 #include "types.h" 16 16 #include "string.h" 17 17 18 - #define COMMAND_LINE_SIZE 512 18 + #define BOOT_COMMAND_LINE_SIZE 2048 19 19 #define MAX_PATH_LEN 256 20 20 #define MAX_PROP_LEN 256 /* What should this be? */ 21 21
+2 -2
arch/powerpc/boot/ps3.c
··· 47 47 * The buffer is put in it's own section so that tools may locate it easier. 48 48 */ 49 49 50 - static char cmdline[COMMAND_LINE_SIZE] 50 + static char cmdline[BOOT_COMMAND_LINE_SIZE] 51 51 __attribute__((__section__("__builtin_cmdline"))); 52 52 53 53 static void prep_cmdline(void *chosen) 54 54 { 55 55 if (cmdline[0] == '\0') 56 - getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1); 56 + getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1); 57 57 else 58 58 setprop_str(chosen, "bootargs", cmdline); 59 59
+19 -23
arch/powerpc/include/asm/opal.h
··· 41 41 * size except the last one in the list to be as well. 42 42 */ 43 43 struct opal_sg_entry { 44 - void *data; 45 - long length; 44 + __be64 data; 45 + __be64 length; 46 46 }; 47 47 48 - /* sg list */ 48 + /* SG list */ 49 49 struct opal_sg_list { 50 - unsigned long num_entries; 51 - struct opal_sg_list *next; 50 + __be64 length; 51 + __be64 next; 52 52 struct opal_sg_entry entry[]; 53 53 }; 54 54 ··· 858 858 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, 859 859 uint32_t addr, __be32 *data, uint32_t sz); 860 860 861 - int64_t opal_read_elog(uint64_t buffer, size_t size, uint64_t log_id); 862 - int64_t opal_get_elog_size(uint64_t *log_id, size_t *size, uint64_t *elog_type); 861 + int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id); 862 + int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type); 863 863 int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset); 864 864 int64_t opal_send_ack_elog(uint64_t log_id); 865 865 void opal_resend_pending_logs(void); ··· 868 868 int64_t opal_manage_flash(uint8_t op); 869 869 int64_t opal_update_flash(uint64_t blk_list); 870 870 int64_t opal_dump_init(uint8_t dump_type); 871 - int64_t opal_dump_info(uint32_t *dump_id, uint32_t *dump_size); 872 - int64_t opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type); 871 + int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size); 872 + int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type); 873 873 int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer); 874 874 int64_t opal_dump_ack(uint32_t dump_id); 875 875 int64_t opal_dump_resend_notification(void); 876 876 877 - int64_t opal_get_msg(uint64_t buffer, size_t size); 878 - int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token); 877 + int64_t opal_get_msg(uint64_t buffer, uint64_t size); 878 + int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token); 879 
879 int64_t opal_sync_host_reboot(void); 880 880 int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer, 881 - size_t length); 881 + uint64_t length); 882 882 int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer, 883 - size_t length); 883 + uint64_t length); 884 884 int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data); 885 885 886 886 /* Internal functions */ 887 - extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data); 887 + extern int early_init_dt_scan_opal(unsigned long node, const char *uname, 888 + int depth, void *data); 888 889 extern int early_init_dt_scan_recoverable_ranges(unsigned long node, 889 890 const char *uname, int depth, void *data); 890 891 ··· 893 892 extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len); 894 893 895 894 extern void hvc_opal_init_early(void); 896 - 897 - /* Internal functions */ 898 - extern int early_init_dt_scan_opal(unsigned long node, const char *uname, 899 - int depth, void *data); 900 895 901 896 extern int opal_notifier_register(struct notifier_block *nb); 902 897 extern int opal_notifier_unregister(struct notifier_block *nb); ··· 903 906 extern void opal_notifier_disable(void); 904 907 extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val); 905 908 906 - extern int opal_get_chars(uint32_t vtermno, char *buf, int count); 907 - extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len); 908 - 909 909 extern int __opal_async_get_token(void); 910 910 extern int opal_async_get_token_interruptible(void); 911 911 extern int __opal_async_release_token(int token); 912 912 extern int opal_async_release_token(int token); 913 913 extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg); 914 914 extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data); 915 - 916 - extern void hvc_opal_init_early(void); 917 915 918 916 struct rtc_time; 919 917 
extern int opal_set_rtc_time(struct rtc_time *tm); ··· 928 936 extern int opal_resync_timebase(void); 929 937 930 938 extern void opal_lpc_init(void); 939 + 940 + struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, 941 + unsigned long vmalloc_size); 942 + void opal_free_sg_list(struct opal_sg_list *sg); 931 943 932 944 #endif /* __ASSEMBLY__ */ 933 945
+6 -1
arch/powerpc/include/uapi/asm/setup.h
··· 1 - #include <asm-generic/setup.h> 1 + #ifndef _UAPI_ASM_POWERPC_SETUP_H 2 + #define _UAPI_ASM_POWERPC_SETUP_H 3 + 4 + #define COMMAND_LINE_SIZE 2048 5 + 6 + #endif /* _UAPI_ASM_POWERPC_SETUP_H */
+1
arch/powerpc/kernel/ppc_ksyms.c
··· 120 120 EXPORT_SYMBOL(flush_instruction_cache); 121 121 #endif 122 122 EXPORT_SYMBOL(flush_dcache_range); 123 + EXPORT_SYMBOL(flush_icache_range); 123 124 124 125 #ifdef CONFIG_SMP 125 126 #ifdef CONFIG_PPC32
+1 -1
arch/powerpc/kernel/rtas_flash.c
··· 705 705 if (rtas_token("ibm,update-flash-64-and-reboot") == 706 706 RTAS_UNKNOWN_SERVICE) { 707 707 pr_info("rtas_flash: no firmware flash support\n"); 708 - return 1; 708 + return -EINVAL; 709 709 } 710 710 711 711 rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
+17 -1
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 242 242 */ 243 243 .globl kvm_start_guest 244 244 kvm_start_guest: 245 + 246 + /* Set runlatch bit the minute you wake up from nap */ 247 + mfspr r1, SPRN_CTRLF 248 + ori r1, r1, 1 249 + mtspr SPRN_CTRLT, r1 250 + 245 251 ld r2,PACATOC(r13) 246 252 247 253 li r0,KVM_HWTHREAD_IN_KVM ··· 315 309 li r0, KVM_HWTHREAD_IN_NAP 316 310 stb r0, HSTATE_HWTHREAD_STATE(r13) 317 311 kvm_do_nap: 312 + /* Clear the runlatch bit before napping */ 313 + mfspr r2, SPRN_CTRLF 314 + clrrdi r2, r2, 1 315 + mtspr SPRN_CTRLT, r2 316 + 318 317 li r3, LPCR_PECE0 319 318 mfspr r4, SPRN_LPCR 320 319 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 ··· 2010 1999 2011 2000 /* 2012 2001 * Take a nap until a decrementer or external or doobell interrupt 2013 - * occurs, with PECE1, PECE0 and PECEDP set in LPCR 2002 + * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the 2003 + * runlatch bit before napping. 2014 2004 */ 2005 + mfspr r2, SPRN_CTRLF 2006 + clrrdi r2, r2, 1 2007 + mtspr SPRN_CTRLT, r2 2008 + 2015 2009 li r0,1 2016 2010 stb r0,HSTATE_HWTHREAD_REQ(r13) 2017 2011 mfspr r5,SPRN_LPCR
+16 -22
arch/powerpc/mm/hash_native_64.c
··· 82 82 va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); 83 83 va |= penc << 12; 84 84 va |= ssize << 8; 85 - /* Add AVAL part */ 86 - if (psize != apsize) { 87 - /* 88 - * MPSS, 64K base page size and 16MB parge page size 89 - * We don't need all the bits, but rest of the bits 90 - * must be ignored by the processor. 91 - * vpn cover upto 65 bits of va. (0...65) and we need 92 - * 58..64 bits of va. 93 - */ 94 - va |= (vpn & 0xfe); 95 - } 85 + /* 86 + * AVAL bits: 87 + * We don't need all the bits, but rest of the bits 88 + * must be ignored by the processor. 89 + * vpn cover upto 65 bits of va. (0...65) and we need 90 + * 58..64 bits of va. 91 + */ 92 + va |= (vpn & 0xfe); /* AVAL */ 96 93 va |= 1; /* L */ 97 94 asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2) 98 95 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) ··· 130 133 va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); 131 134 va |= penc << 12; 132 135 va |= ssize << 8; 133 - /* Add AVAL part */ 134 - if (psize != apsize) { 135 - /* 136 - * MPSS, 64K base page size and 16MB parge page size 137 - * We don't need all the bits, but rest of the bits 138 - * must be ignored by the processor. 139 - * vpn cover upto 65 bits of va. (0...65) and we need 140 - * 58..64 bits of va. 141 - */ 142 - va |= (vpn & 0xfe); 143 - } 136 + /* 137 + * AVAL bits: 138 + * We don't need all the bits, but rest of the bits 139 + * must be ignored by the processor. 140 + * vpn cover upto 65 bits of va. (0...65) and we need 141 + * 58..64 bits of va. 142 + */ 143 + va |= (vpn & 0xfe); 144 144 va |= 1; /* L */ 145 145 asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)" 146 146 : : "r"(va) : "memory");
+25 -12
arch/powerpc/perf/hv-24x7.c
··· 155 155 return copy_len; 156 156 } 157 157 158 - static unsigned long h_get_24x7_catalog_page(char page[static 4096], 159 - u32 version, u32 index) 158 + static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096, 159 + unsigned long version, 160 + unsigned long index) 160 161 { 161 - WARN_ON(!IS_ALIGNED((unsigned long)page, 4096)); 162 - return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE, 163 - virt_to_phys(page), 162 + pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)", 163 + phys_4096, 164 164 version, 165 165 index); 166 + WARN_ON(!IS_ALIGNED(phys_4096, 4096)); 167 + return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE, 168 + phys_4096, 169 + version, 170 + index); 171 + } 172 + 173 + static unsigned long h_get_24x7_catalog_page(char page[], 174 + u64 version, u32 index) 175 + { 176 + return h_get_24x7_catalog_page_(virt_to_phys(page), 177 + version, index); 166 178 } 167 179 168 180 static ssize_t catalog_read(struct file *filp, struct kobject *kobj, ··· 185 173 ssize_t ret = 0; 186 174 size_t catalog_len = 0, catalog_page_len = 0, page_count = 0; 187 175 loff_t page_offset = 0; 188 - uint32_t catalog_version_num = 0; 176 + uint64_t catalog_version_num = 0; 189 177 void *page = kmem_cache_alloc(hv_page_cache, GFP_USER); 190 178 struct hv_24x7_catalog_page_0 *page_0 = page; 191 179 if (!page) ··· 197 185 goto e_free; 198 186 } 199 187 200 - catalog_version_num = be32_to_cpu(page_0->version); 188 + catalog_version_num = be64_to_cpu(page_0->version); 201 189 catalog_page_len = be32_to_cpu(page_0->length); 202 190 catalog_len = catalog_page_len * 4096; 203 191 ··· 220 208 page, 4096, page_offset * 4096); 221 209 e_free: 222 210 if (hret) 223 - pr_err("h_get_24x7_catalog_page(ver=%d, page=%lld) failed: rc=%ld\n", 224 - catalog_version_num, page_offset, hret); 211 + pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:" 212 + " rc=%ld\n", 213 + catalog_version_num, page_offset, hret); 225 214 kfree(page); 226 215 227 216 pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n", ··· 256 243 static DEVICE_ATTR_RO(_name) 257 244 258 245 PAGE_0_ATTR(catalog_version, "%lld\n", 259 - (unsigned long long)be32_to_cpu(page_0->version)); 246 + (unsigned long long)be64_to_cpu(page_0->version)); 260 247 PAGE_0_ATTR(catalog_len, "%lld\n", 261 248 (unsigned long long)be32_to_cpu(page_0->length) * 4096); 262 249 static BIN_ATTR_RO(catalog, 0/* real length varies */); ··· 498 485 struct hv_perf_caps caps; 499 486 500 487 if (!firmware_has_feature(FW_FEATURE_LPAR)) { 501 - pr_info("not a virtualized system, not enabling\n"); 488 + pr_debug("not a virtualized system, not enabling\n"); 502 489 return -ENODEV; 503 490 } 504 491 505 492 hret = hv_perf_caps_get(&caps); 506 493 if (hret) { 507 - pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n", 494 + pr_debug("could not obtain capabilities, not enabling, rc=%ld\n", 508 495 hret); 509 496 return -ENODEV; 510 497 }
+3 -3
arch/powerpc/perf/hv-gpci.c
··· 78 78 return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT); 79 79 } 80 80 81 - DEVICE_ATTR_RO(kernel_version); 81 + static DEVICE_ATTR_RO(kernel_version); 82 82 HV_CAPS_ATTR(version, "0x%x\n"); 83 83 HV_CAPS_ATTR(ga, "%d\n"); 84 84 HV_CAPS_ATTR(expanded, "%d\n"); ··· 273 273 struct hv_perf_caps caps; 274 274 275 275 if (!firmware_has_feature(FW_FEATURE_LPAR)) { 276 - pr_info("not a virtualized system, not enabling\n"); 276 + pr_debug("not a virtualized system, not enabling\n"); 277 277 return -ENODEV; 278 278 } 279 279 280 280 hret = hv_perf_caps_get(&caps); 281 281 if (hret) { 282 - pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n", 282 + pr_debug("could not obtain capabilities, not enabling, rc=%ld\n", 283 283 hret); 284 284 return -ENODEV; 285 285 }
+11 -83
arch/powerpc/platforms/powernv/opal-dump.c
··· 209 209 .default_attrs = dump_default_attrs, 210 210 }; 211 211 212 - static void free_dump_sg_list(struct opal_sg_list *list) 212 + static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type) 213 213 { 214 - struct opal_sg_list *sg1; 215 - while (list) { 216 - sg1 = list->next; 217 - kfree(list); 218 - list = sg1; 219 - } 220 - list = NULL; 221 - } 222 - 223 - static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump) 224 - { 225 - struct opal_sg_list *sg1, *list = NULL; 226 - void *addr; 227 - int64_t size; 228 - 229 - addr = dump->buffer; 230 - size = dump->size; 231 - 232 - sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL); 233 - if (!sg1) 234 - goto nomem; 235 - 236 - list = sg1; 237 - sg1->num_entries = 0; 238 - while (size > 0) { 239 - /* Translate virtual address to physical address */ 240 - sg1->entry[sg1->num_entries].data = 241 - (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT); 242 - 243 - if (size > PAGE_SIZE) 244 - sg1->entry[sg1->num_entries].length = PAGE_SIZE; 245 - else 246 - sg1->entry[sg1->num_entries].length = size; 247 - 248 - sg1->num_entries++; 249 - if (sg1->num_entries >= SG_ENTRIES_PER_NODE) { 250 - sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL); 251 - if (!sg1->next) 252 - goto nomem; 253 - 254 - sg1 = sg1->next; 255 - sg1->num_entries = 0; 256 - } 257 - addr += PAGE_SIZE; 258 - size -= PAGE_SIZE; 259 - } 260 - return list; 261 - 262 - nomem: 263 - pr_err("%s : Failed to allocate memory\n", __func__); 264 - free_dump_sg_list(list); 265 - return NULL; 266 - } 267 - 268 - static void sglist_to_phy_addr(struct opal_sg_list *list) 269 - { 270 - struct opal_sg_list *sg, *next; 271 - 272 - for (sg = list; sg; sg = next) { 273 - next = sg->next; 274 - /* Don't translate NULL pointer for last entry */ 275 - if (sg->next) 276 - sg->next = (struct opal_sg_list *)__pa(sg->next); 277 - else 278 - sg->next = NULL; 279 - 280 - /* Convert num_entries to length */ 281 - sg->num_entries = 282 - sg->num_entries * sizeof(struct 
opal_sg_entry) + 16; 283 - } 284 - } 285 - 286 - static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type) 287 - { 214 + __be32 id, size, type; 288 215 int rc; 289 - *type = 0xffffffff; 290 216 291 - rc = opal_dump_info2(id, size, type); 217 + type = cpu_to_be32(0xffffffff); 292 218 219 + rc = opal_dump_info2(&id, &size, &type); 293 220 if (rc == OPAL_PARAMETER) 294 - rc = opal_dump_info(id, size); 221 + rc = opal_dump_info(&id, &size); 222 + 223 + *dump_id = be32_to_cpu(id); 224 + *dump_size = be32_to_cpu(size); 225 + *dump_type = be32_to_cpu(type); 295 226 296 227 if (rc) 297 228 pr_warn("%s: Failed to get dump info (%d)\n", ··· 245 314 } 246 315 247 316 /* Generate SG list */ 248 - list = dump_data_to_sglist(dump); 317 + list = opal_vmalloc_to_sg_list(dump->buffer, dump->size); 249 318 if (!list) { 250 319 rc = -ENOMEM; 251 320 goto out; 252 321 } 253 - 254 - /* Translate sg list addr to real address */ 255 - sglist_to_phy_addr(list); 256 322 257 323 /* First entry address */ 258 324 addr = __pa(list); ··· 269 341 __func__, dump->id); 270 342 271 343 /* Free SG list */ 272 - free_dump_sg_list(list); 344 + opal_free_sg_list(list); 273 345 274 346 out: 275 347 return rc;
+9 -2
arch/powerpc/platforms/powernv/opal-elog.c
··· 238 238 239 239 static void elog_work_fn(struct work_struct *work) 240 240 { 241 - size_t elog_size; 241 + __be64 size; 242 + __be64 id; 243 + __be64 type; 244 + uint64_t elog_size; 242 245 uint64_t log_id; 243 246 uint64_t elog_type; 244 247 int rc; 245 248 char name[2+16+1]; 246 249 247 - rc = opal_get_elog_size(&log_id, &elog_size, &elog_type); 250 + rc = opal_get_elog_size(&id, &size, &type); 248 251 if (rc != OPAL_SUCCESS) { 249 252 pr_err("ELOG: Opal log read failed\n"); 250 253 return; 251 254 } 255 + 256 + elog_size = be64_to_cpu(size); 257 + log_id = be64_to_cpu(id); 258 + elog_type = be64_to_cpu(type); 252 259 253 260 BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE); 254 261
+10 -108
arch/powerpc/platforms/powernv/opal-flash.c
··· 79 79 /* XXX: Assume candidate image size is <= 1GB */ 80 80 #define MAX_IMAGE_SIZE 0x40000000 81 81 82 - /* Flash sg list version */ 83 - #define SG_LIST_VERSION (1UL) 84 - 85 82 /* Image status */ 86 83 enum { 87 84 IMAGE_INVALID, ··· 128 131 */ 129 132 static inline void opal_flash_validate(void) 130 133 { 131 - struct validate_flash_t *args_buf = &validate_flash_data; 134 + long ret; 135 + void *buf = validate_flash_data.buf; 136 + __be32 size, result; 132 137 133 - args_buf->status = opal_validate_flash(__pa(args_buf->buf), 134 - &(args_buf->buf_size), 135 - &(args_buf->result)); 138 + ret = opal_validate_flash(__pa(buf), &size, &result); 139 + 140 + validate_flash_data.status = ret; 141 + validate_flash_data.buf_size = be32_to_cpu(size); 142 + validate_flash_data.result = be32_to_cpu(result); 136 143 } 137 144 138 145 /* ··· 269 268 } 270 269 271 270 /* 272 - * Free sg list 273 - */ 274 - static void free_sg_list(struct opal_sg_list *list) 275 - { 276 - struct opal_sg_list *sg1; 277 - while (list) { 278 - sg1 = list->next; 279 - kfree(list); 280 - list = sg1; 281 - } 282 - list = NULL; 283 - } 284 - 285 - /* 286 - * Build candidate image scatter gather list 287 - * 288 - * list format: 289 - * ----------------------------------- 290 - * | VER (8) | Entry length in bytes | 291 - * ----------------------------------- 292 - * | Pointer to next entry | 293 - * ----------------------------------- 294 - * | Address of memory area 1 | 295 - * ----------------------------------- 296 - * | Length of memory area 1 | 297 - * ----------------------------------- 298 - * | ......... | 299 - * ----------------------------------- 300 - * | ......... 
| 301 - * ----------------------------------- 302 - * | Address of memory area N | 303 - * ----------------------------------- 304 - * | Length of memory area N | 305 - * ----------------------------------- 306 - */ 307 - static struct opal_sg_list *image_data_to_sglist(void) 308 - { 309 - struct opal_sg_list *sg1, *list = NULL; 310 - void *addr; 311 - int size; 312 - 313 - addr = image_data.data; 314 - size = image_data.size; 315 - 316 - sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL); 317 - if (!sg1) 318 - return NULL; 319 - 320 - list = sg1; 321 - sg1->num_entries = 0; 322 - while (size > 0) { 323 - /* Translate virtual address to physical address */ 324 - sg1->entry[sg1->num_entries].data = 325 - (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT); 326 - 327 - if (size > PAGE_SIZE) 328 - sg1->entry[sg1->num_entries].length = PAGE_SIZE; 329 - else 330 - sg1->entry[sg1->num_entries].length = size; 331 - 332 - sg1->num_entries++; 333 - if (sg1->num_entries >= SG_ENTRIES_PER_NODE) { 334 - sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL); 335 - if (!sg1->next) { 336 - pr_err("%s : Failed to allocate memory\n", 337 - __func__); 338 - goto nomem; 339 - } 340 - 341 - sg1 = sg1->next; 342 - sg1->num_entries = 0; 343 - } 344 - addr += PAGE_SIZE; 345 - size -= PAGE_SIZE; 346 - } 347 - return list; 348 - nomem: 349 - free_sg_list(list); 350 - return NULL; 351 - } 352 - 353 - /* 354 271 * OPAL update flash 355 272 */ 356 273 static int opal_flash_update(int op) 357 274 { 358 - struct opal_sg_list *sg, *list, *next; 275 + struct opal_sg_list *list; 359 276 unsigned long addr; 360 277 int64_t rc = OPAL_PARAMETER; 361 278 ··· 283 364 goto flash; 284 365 } 285 366 286 - list = image_data_to_sglist(); 367 + list = opal_vmalloc_to_sg_list(image_data.data, image_data.size); 287 368 if (!list) 288 369 goto invalid_img; 289 370 290 371 /* First entry address */ 291 372 addr = __pa(list); 292 - 293 - /* Translate sg list address to absolute */ 294 - for (sg = list; sg; sg = next) { 295 - next = sg->next; 
296 - /* Don't translate NULL pointer for last entry */ 297 - if (sg->next) 298 - sg->next = (struct opal_sg_list *)__pa(sg->next); 299 - else 300 - sg->next = NULL; 301 - 302 - /* 303 - * Convert num_entries to version/length format 304 - * to satisfy OPAL. 305 - */ 306 - sg->num_entries = (SG_LIST_VERSION << 56) | 307 - (sg->num_entries * sizeof(struct opal_sg_entry) + 16); 308 - } 309 373 310 374 pr_alert("FLASH: Image is %u bytes\n", image_data.size); 311 375 pr_alert("FLASH: Image update requested\n");
+23 -9
arch/powerpc/platforms/powernv/opal-sysparam.c
··· 39 39 struct kobj_attribute kobj_attr; 40 40 }; 41 41 42 - static int opal_get_sys_param(u32 param_id, u32 length, void *buffer) 42 + static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer) 43 43 { 44 44 struct opal_msg msg; 45 - int ret, token; 45 + ssize_t ret; 46 + int token; 46 47 47 48 token = opal_async_get_token_interruptible(); 48 49 if (token < 0) { ··· 60 59 61 60 ret = opal_async_wait_response(token, &msg); 62 61 if (ret) { 63 - pr_err("%s: Failed to wait for the async response, %d\n", 62 + pr_err("%s: Failed to wait for the async response, %zd\n", 64 63 __func__, ret); 65 64 goto out_token; 66 65 } ··· 112 111 { 113 112 struct param_attr *attr = container_of(kobj_attr, struct param_attr, 114 113 kobj_attr); 115 - int ret; 114 + ssize_t ret; 116 115 117 116 mutex_lock(&opal_sysparam_mutex); 118 117 ret = opal_get_sys_param(attr->param_id, attr->param_size, ··· 122 121 123 122 memcpy(buf, param_data_buf, attr->param_size); 124 123 124 + ret = attr->param_size; 125 125 out: 126 126 mutex_unlock(&opal_sysparam_mutex); 127 - return ret ? ret : attr->param_size; 127 + return ret; 128 128 } 129 129 130 130 static ssize_t sys_param_store(struct kobject *kobj, ··· 133 131 { 134 132 struct param_attr *attr = container_of(kobj_attr, struct param_attr, 135 133 kobj_attr); 136 - int ret; 134 + ssize_t ret; 135 + 136 + /* MAX_PARAM_DATA_LEN is sizeof(param_data_buf) */ 137 + if (count > MAX_PARAM_DATA_LEN) 138 + count = MAX_PARAM_DATA_LEN; 137 139 138 140 mutex_lock(&opal_sysparam_mutex); 139 141 memcpy(param_data_buf, buf, count); 140 142 ret = opal_set_sys_param(attr->param_id, attr->param_size, 141 143 param_data_buf); 142 144 mutex_unlock(&opal_sysparam_mutex); 143 - return ret ? 
ret : count; 145 + if (!ret) 146 + ret = count; 147 + return ret; 144 148 } 145 149 146 150 void __init opal_sys_param_init(void) ··· 222 214 } 223 215 224 216 if (of_property_read_u32_array(sysparam, "param-len", size, count)) { 225 - pr_err("SYSPARAM: Missing propery param-len in the DT\n"); 217 + pr_err("SYSPARAM: Missing property param-len in the DT\n"); 226 218 goto out_free_perm; 227 219 } 228 220 229 221 230 222 if (of_property_read_u8_array(sysparam, "param-perm", perm, count)) { 231 - pr_err("SYSPARAM: Missing propery param-perm in the DT\n"); 223 + pr_err("SYSPARAM: Missing property param-perm in the DT\n"); 232 224 goto out_free_perm; 233 225 } 234 226 ··· 241 233 242 234 /* For each of the parameters, populate the parameter attributes */ 243 235 for (i = 0; i < count; i++) { 236 + if (size[i] > MAX_PARAM_DATA_LEN) { 237 + pr_warn("SYSPARAM: Not creating parameter %d as size " 238 + "exceeds buffer length\n", i); 239 + continue; 240 + } 241 + 244 242 sysfs_attr_init(&attr[i].kobj_attr.attr); 245 243 attr[i].param_id = id[i]; 246 244 attr[i].param_size = size[i];
+66 -3
arch/powerpc/platforms/powernv/opal.c
··· 242 242 void opal_notifier_enable(void) 243 243 { 244 244 int64_t rc; 245 - uint64_t evt = 0; 245 + __be64 evt = 0; 246 246 247 247 atomic_set(&opal_notifier_hold, 0); 248 248 249 249 /* Process pending events */ 250 250 rc = opal_poll_events(&evt); 251 251 if (rc == OPAL_SUCCESS && evt) 252 - opal_do_notifier(evt); 252 + opal_do_notifier(be64_to_cpu(evt)); 253 253 } 254 254 255 255 void opal_notifier_disable(void) ··· 529 529 530 530 opal_handle_interrupt(virq_to_hw(irq), &events); 531 531 532 - opal_do_notifier(events); 532 + opal_do_notifier(be64_to_cpu(events)); 533 533 534 534 return IRQ_HANDLED; 535 535 } ··· 638 638 639 639 /* Export this so that test modules can use it */ 640 640 EXPORT_SYMBOL_GPL(opal_invalid_call); 641 + 642 + /* Convert a region of vmalloc memory to an opal sg list */ 643 + struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, 644 + unsigned long vmalloc_size) 645 + { 646 + struct opal_sg_list *sg, *first = NULL; 647 + unsigned long i = 0; 648 + 649 + sg = kzalloc(PAGE_SIZE, GFP_KERNEL); 650 + if (!sg) 651 + goto nomem; 652 + 653 + first = sg; 654 + 655 + while (vmalloc_size > 0) { 656 + uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT; 657 + uint64_t length = min(vmalloc_size, PAGE_SIZE); 658 + 659 + sg->entry[i].data = cpu_to_be64(data); 660 + sg->entry[i].length = cpu_to_be64(length); 661 + i++; 662 + 663 + if (i >= SG_ENTRIES_PER_NODE) { 664 + struct opal_sg_list *next; 665 + 666 + next = kzalloc(PAGE_SIZE, GFP_KERNEL); 667 + if (!next) 668 + goto nomem; 669 + 670 + sg->length = cpu_to_be64( 671 + i * sizeof(struct opal_sg_entry) + 16); 672 + i = 0; 673 + sg->next = cpu_to_be64(__pa(next)); 674 + sg = next; 675 + } 676 + 677 + vmalloc_addr += length; 678 + vmalloc_size -= length; 679 + } 680 + 681 + sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16); 682 + 683 + return first; 684 + 685 + nomem: 686 + pr_err("%s : Failed to allocate memory\n", __func__); 687 + opal_free_sg_list(first); 688 + 
return NULL; 689 + } 690 + 691 + void opal_free_sg_list(struct opal_sg_list *sg) 692 + { 693 + while (sg) { 694 + uint64_t next = be64_to_cpu(sg->next); 695 + 696 + kfree(sg); 697 + 698 + if (next) 699 + sg = __va(next); 700 + else 701 + sg = NULL; 702 + } 703 + }
+1 -2
arch/powerpc/platforms/powernv/pci-ioda.c
··· 343 343 pci_name(dev)); 344 344 continue; 345 345 } 346 - pci_dev_get(dev); 347 346 pdn->pcidev = dev; 348 347 pdn->pe_number = pe->pe_number; 349 348 pe->dma_weight += pnv_ioda_dma_weight(dev); ··· 461 462 462 463 pe = &phb->ioda.pe_array[pdn->pe_number]; 463 464 WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); 464 - set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table); 465 + set_iommu_table_base(&pdev->dev, &pe->tce32_table); 465 466 } 466 467 467 468 static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
+46 -2
arch/powerpc/platforms/powernv/setup.c
··· 162 162 } 163 163 164 164 #ifdef CONFIG_KEXEC 165 + static void pnv_kexec_wait_secondaries_down(void) 166 + { 167 + int my_cpu, i, notified = -1; 168 + 169 + my_cpu = get_cpu(); 170 + 171 + for_each_online_cpu(i) { 172 + uint8_t status; 173 + int64_t rc; 174 + 175 + if (i == my_cpu) 176 + continue; 177 + 178 + for (;;) { 179 + rc = opal_query_cpu_status(get_hard_smp_processor_id(i), 180 + &status); 181 + if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED) 182 + break; 183 + barrier(); 184 + if (i != notified) { 185 + printk(KERN_INFO "kexec: waiting for cpu %d " 186 + "(physical %d) to enter OPAL\n", 187 + i, paca[i].hw_cpu_id); 188 + notified = i; 189 + } 190 + } 191 + } 192 + } 193 + 165 194 static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) 166 195 { 167 196 xics_kexec_teardown_cpu(secondary); 168 197 169 - /* Return secondary CPUs to firmware on OPAL v3 */ 170 - if (firmware_has_feature(FW_FEATURE_OPALv3) && secondary) { 198 + /* On OPAL v3, we return all CPUs to firmware */ 199 + 200 + if (!firmware_has_feature(FW_FEATURE_OPALv3)) 201 + return; 202 + 203 + if (secondary) { 204 + /* Return secondary CPUs to firmware on OPAL v3 */ 171 205 mb(); 172 206 get_paca()->kexec_state = KEXEC_STATE_REAL_MODE; 173 207 mb(); 174 208 175 209 /* Return the CPU to OPAL */ 176 210 opal_return_cpu(); 211 + } else if (crash_shutdown) { 212 + /* 213 + * On crash, we don't wait for secondaries to go 214 + * down as they might be unreachable or hung, so 215 + * instead we just wait a bit and move on. 216 + */ 217 + mdelay(1); 218 + } else { 219 + /* Primary waits for the secondaries to have reached OPAL */ 220 + pnv_kexec_wait_secondaries_down(); 177 221 } 178 222 } 179 223 #endif /* CONFIG_KEXEC */
+3
arch/powerpc/platforms/powernv/smp.c
··· 30 30 #include <asm/cputhreads.h> 31 31 #include <asm/xics.h> 32 32 #include <asm/opal.h> 33 + #include <asm/runlatch.h> 33 34 34 35 #include "powernv.h" 35 36 ··· 157 156 */ 158 157 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); 159 158 while (!generic_check_cpu_restart(cpu)) { 159 + ppc64_runlatch_off(); 160 160 power7_nap(); 161 + ppc64_runlatch_on(); 161 162 if (!generic_check_cpu_restart(cpu)) { 162 163 DBG("CPU%d Unexpected exit while offline !\n", cpu); 163 164 /* We may be getting an IPI, so we re-enable
+3 -2
arch/powerpc/platforms/pseries/hotplug-cpu.c
··· 88 88 89 89 static void rtas_stop_self(void) 90 90 { 91 - struct rtas_args args = { 92 - .token = cpu_to_be32(rtas_stop_self_token), 91 + static struct rtas_args args = { 93 92 .nargs = 0, 94 93 .nret = 1, 95 94 .rets = &args.args[0], 96 95 }; 96 + 97 + args.token = cpu_to_be32(rtas_stop_self_token); 97 98 98 99 local_irq_disable(); 99 100
+6 -4
arch/powerpc/platforms/pseries/hotplug-memory.c
··· 100 100 101 101 start_pfn = base >> PAGE_SHIFT; 102 102 103 - if (!pfn_valid(start_pfn)) { 104 - memblock_remove(base, memblock_size); 105 - return 0; 106 - } 103 + lock_device_hotplug(); 104 + 105 + if (!pfn_valid(start_pfn)) 106 + goto out; 107 107 108 108 block_sz = memory_block_size_bytes(); 109 109 sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE; ··· 114 114 base += MIN_MEMORY_BLOCK_SIZE; 115 115 } 116 116 117 + out: 117 118 /* Update memory regions for memory remove */ 118 119 memblock_remove(base, memblock_size); 120 + unlock_device_hotplug(); 119 121 return 0; 120 122 } 121 123
+1 -1
arch/powerpc/sysdev/ppc4xx_pci.c
··· 1058 1058 return 1; 1059 1059 } 1060 1060 1061 - static int apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port) 1061 + static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port) 1062 1062 { 1063 1063 u32 val; 1064 1064
-1
arch/s390/net/bpf_jit_comp.c
··· 276 276 case BPF_S_LD_W_IND: 277 277 case BPF_S_LD_H_IND: 278 278 case BPF_S_LD_B_IND: 279 - case BPF_S_LDX_B_MSH: 280 279 case BPF_S_LD_IMM: 281 280 case BPF_S_LD_MEM: 282 281 case BPF_S_MISC_TXA:
+46 -37
arch/sparc/include/asm/pgtable_64.h
··· 71 71 72 72 #include <linux/sched.h> 73 73 74 + extern unsigned long sparc64_valid_addr_bitmap[]; 75 + 76 + /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ 77 + static inline bool __kern_addr_valid(unsigned long paddr) 78 + { 79 + if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL) 80 + return false; 81 + return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap); 82 + } 83 + 84 + static inline bool kern_addr_valid(unsigned long addr) 85 + { 86 + unsigned long paddr = __pa(addr); 87 + 88 + return __kern_addr_valid(paddr); 89 + } 90 + 74 91 /* Entries per page directory level. */ 75 92 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3)) 76 93 #define PTRS_PER_PMD (1UL << PMD_BITS) ··· 96 79 /* Kernel has a separate 44bit address space. */ 97 80 #define FIRST_USER_ADDRESS 0 98 81 99 - #define pte_ERROR(e) __builtin_trap() 100 - #define pmd_ERROR(e) __builtin_trap() 101 - #define pgd_ERROR(e) __builtin_trap() 82 + #define pmd_ERROR(e) \ 83 + pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \ 84 + __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0)) 85 + #define pgd_ERROR(e) \ 86 + pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \ 87 + __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0)) 102 88 103 89 #endif /* !(__ASSEMBLY__) */ 104 90 ··· 278 258 { 279 259 unsigned long mask, tmp; 280 260 281 - /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347) 282 - * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8) 261 + /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7) 262 + * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8) 283 263 * 284 264 * Even if we use negation tricks the result is still a 6 285 265 * instruction sequence, so don't try to play fancy and just ··· 309 289 " .previous\n" 310 290 : "=r" (mask), "=r" (tmp) 311 291 : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | 312 - _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U | 292 + _PAGE_CP_4U | 
_PAGE_CV_4U | _PAGE_E_4U | 313 293 _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U), 314 294 "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | 315 - _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V | 295 + _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | 316 296 _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V)); 317 297 318 298 return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); ··· 653 633 { 654 634 pte_t pte = __pte(pmd_val(pmd)); 655 635 656 - return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte); 636 + return pte_val(pte) & _PAGE_PMD_HUGE; 657 637 } 658 638 659 639 #ifdef CONFIG_TRANSPARENT_HUGEPAGE ··· 739 719 return __pmd(pte_val(pte)); 740 720 } 741 721 742 - static inline pmd_t pmd_mknotpresent(pmd_t pmd) 743 - { 744 - unsigned long mask; 745 - 746 - if (tlb_type == hypervisor) 747 - mask = _PAGE_PRESENT_4V; 748 - else 749 - mask = _PAGE_PRESENT_4U; 750 - 751 - pmd_val(pmd) &= ~mask; 752 - 753 - return pmd; 754 - } 755 - 756 722 static inline pmd_t pmd_mksplitting(pmd_t pmd) 757 723 { 758 724 pte_t pte = __pte(pmd_val(pmd)); ··· 762 756 } 763 757 764 758 #define pmd_none(pmd) (!pmd_val(pmd)) 759 + 760 + /* pmd_bad() is only called on non-trans-huge PMDs. Our encoding is 761 + * very simple, it's just the physical address. PTE tables are of 762 + * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and 763 + * the top bits outside of the range of any physical address size we 764 + * support are clear as well. We also validate the physical itself. 
765 + */ 766 + #define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) || \ 767 + !__kern_addr_valid(pmd_val(pmd))) 768 + 769 + #define pud_none(pud) (!pud_val(pud)) 770 + 771 + #define pud_bad(pud) ((pud_val(pud) & ~PAGE_MASK) || \ 772 + !__kern_addr_valid(pud_val(pud))) 765 773 766 774 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 767 775 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, ··· 810 790 #define pud_page_vaddr(pud) \ 811 791 ((unsigned long) __va(pud_val(pud))) 812 792 #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) 813 - #define pmd_bad(pmd) (0) 814 793 #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) 815 - #define pud_none(pud) (!pud_val(pud)) 816 - #define pud_bad(pud) (0) 817 794 #define pud_present(pud) (pud_val(pud) != 0U) 818 795 #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) 819 796 ··· 910 893 extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, 911 894 pmd_t *pmd); 912 895 896 + #define __HAVE_ARCH_PMDP_INVALIDATE 897 + extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 898 + pmd_t *pmdp); 899 + 913 900 #define __HAVE_ARCH_PGTABLE_DEPOSIT 914 901 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, 915 902 pgtable_t pgtable); ··· 939 918 #define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT) 940 919 extern pte_t pgoff_to_pte(unsigned long); 941 920 #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) 942 - 943 - extern unsigned long sparc64_valid_addr_bitmap[]; 944 - 945 - /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ 946 - static inline bool kern_addr_valid(unsigned long addr) 947 - { 948 - unsigned long paddr = __pa(addr); 949 - 950 - if ((paddr >> 41UL) != 0UL) 951 - return false; 952 - return test_bit(paddr >> 22, sparc64_valid_addr_bitmap); 953 - } 954 921 955 922 extern int page_in_phys_avail(unsigned long paddr); 956 923
+2 -1
arch/sparc/include/asm/tsb.h
··· 171 171 andcc REG1, REG2, %g0; \ 172 172 be,pt %xcc, 700f; \ 173 173 sethi %hi(4 * 1024 * 1024), REG2; \ 174 - andn REG1, REG2, REG1; \ 174 + brgez,pn REG1, FAIL_LABEL; \ 175 + andn REG1, REG2, REG1; \ 175 176 and VADDR, REG2, REG2; \ 176 177 brlz,pt REG1, PTE_LABEL; \ 177 178 or REG1, REG2, REG1; \
+2 -2
arch/sparc/kernel/head_64.S
··· 282 282 stx %l2, [%l4 + 0x0] 283 283 ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low 284 284 /* 4MB align */ 285 - srlx %l3, 22, %l3 286 - sllx %l3, 22, %l3 285 + srlx %l3, ILOG2_4MB, %l3 286 + sllx %l3, ILOG2_4MB, %l3 287 287 stx %l3, [%l4 + 0x8] 288 288 289 289 /* Leave service as-is, "call-method" */
+1 -1
arch/sparc/kernel/ktlb.S
··· 277 277 #ifdef CONFIG_SPARSEMEM_VMEMMAP 278 278 kvmap_vmemmap: 279 279 sub %g4, %g5, %g5 280 - srlx %g5, 22, %g5 280 + srlx %g5, ILOG2_4MB, %g5 281 281 sethi %hi(vmemmap_table), %g1 282 282 sllx %g5, 3, %g5 283 283 or %g1, %lo(vmemmap_table), %g1
+5 -16
arch/sparc/kernel/nmi.c
··· 68 68 69 69 static void die_nmi(const char *str, struct pt_regs *regs, int do_panic) 70 70 { 71 + int this_cpu = smp_processor_id(); 72 + 71 73 if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 72 74 pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) 73 75 return; 74 76 75 - console_verbose(); 76 - bust_spinlocks(1); 77 - 78 - printk(KERN_EMERG "%s", str); 79 - printk(" on CPU%d, ip %08lx, registers:\n", 80 - smp_processor_id(), regs->tpc); 81 - show_regs(regs); 82 - dump_stack(); 83 - 84 - bust_spinlocks(0); 85 - 86 77 if (do_panic || panic_on_oops) 87 - panic("Non maskable interrupt"); 88 - 89 - nmi_exit(); 90 - local_irq_enable(); 91 - do_exit(SIGBUS); 78 + panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu); 79 + else 80 + WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); 92 81 } 93 82 94 83 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
+3 -3
arch/sparc/kernel/smp_64.c
··· 149 149 #define NUM_ROUNDS 64 /* magic value */ 150 150 #define NUM_ITERS 5 /* likewise */ 151 151 152 - static DEFINE_SPINLOCK(itc_sync_lock); 152 + static DEFINE_RAW_SPINLOCK(itc_sync_lock); 153 153 static unsigned long go[SLAVE + 1]; 154 154 155 155 #define DEBUG_TICK_SYNC 0 ··· 257 257 go[MASTER] = 0; 258 258 membar_safe("#StoreLoad"); 259 259 260 - spin_lock_irqsave(&itc_sync_lock, flags); 260 + raw_spin_lock_irqsave(&itc_sync_lock, flags); 261 261 { 262 262 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) { 263 263 while (!go[MASTER]) ··· 268 268 membar_safe("#StoreLoad"); 269 269 } 270 270 } 271 - spin_unlock_irqrestore(&itc_sync_lock, flags); 271 + raw_spin_unlock_irqrestore(&itc_sync_lock, flags); 272 272 } 273 273 274 274 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
+1 -1
arch/sparc/kernel/sys32.S
··· 44 44 SIGN1(sys32_io_submit, compat_sys_io_submit, %o1) 45 45 SIGN1(sys32_mq_open, compat_sys_mq_open, %o1) 46 46 SIGN1(sys32_select, compat_sys_select, %o0) 47 - SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5) 47 + SIGN1(sys32_futex, compat_sys_futex, %o1) 48 48 SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0) 49 49 SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0) 50 50 SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
+9 -3
arch/sparc/kernel/unaligned_64.c
··· 166 166 unsigned long compute_effective_address(struct pt_regs *regs, 167 167 unsigned int insn, unsigned int rd) 168 168 { 169 + int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; 169 170 unsigned int rs1 = (insn >> 14) & 0x1f; 170 171 unsigned int rs2 = insn & 0x1f; 171 - int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; 172 + unsigned long addr; 172 173 173 174 if (insn & 0x2000) { 174 175 maybe_flush_windows(rs1, 0, rd, from_kernel); 175 - return (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); 176 + addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); 176 177 } else { 177 178 maybe_flush_windows(rs1, rs2, rd, from_kernel); 178 - return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); 179 + addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); 179 180 } 181 + 182 + if (!from_kernel && test_thread_flag(TIF_32BIT)) 183 + addr &= 0xffffffff; 184 + 185 + return addr; 180 186 } 181 187 182 188 /* This is just to make gcc think die_if_kernel does return... */
+53 -31
arch/sparc/mm/fault_64.c
··· 96 96 pte_t *ptep, pte; 97 97 unsigned long pa; 98 98 u32 insn = 0; 99 - unsigned long pstate; 100 99 101 - if (pgd_none(*pgdp)) 102 - goto outret; 100 + if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp))) 101 + goto out; 103 102 pudp = pud_offset(pgdp, tpc); 104 - if (pud_none(*pudp)) 105 - goto outret; 106 - pmdp = pmd_offset(pudp, tpc); 107 - if (pmd_none(*pmdp)) 108 - goto outret; 109 - 110 - /* This disables preemption for us as well. */ 111 - __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); 112 - __asm__ __volatile__("wrpr %0, %1, %%pstate" 113 - : : "r" (pstate), "i" (PSTATE_IE)); 114 - ptep = pte_offset_map(pmdp, tpc); 115 - pte = *ptep; 116 - if (!pte_present(pte)) 103 + if (pud_none(*pudp) || unlikely(pud_bad(*pudp))) 117 104 goto out; 118 105 119 - pa = (pte_pfn(pte) << PAGE_SHIFT); 120 - pa += (tpc & ~PAGE_MASK); 106 + /* This disables preemption for us as well. */ 107 + local_irq_disable(); 121 108 122 - /* Use phys bypass so we don't pollute dtlb/dcache. */ 123 - __asm__ __volatile__("lduwa [%1] %2, %0" 124 - : "=r" (insn) 125 - : "r" (pa), "i" (ASI_PHYS_USE_EC)); 109 + pmdp = pmd_offset(pudp, tpc); 110 + if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp))) 111 + goto out_irq_enable; 126 112 113 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 114 + if (pmd_trans_huge(*pmdp)) { 115 + if (pmd_trans_splitting(*pmdp)) 116 + goto out_irq_enable; 117 + 118 + pa = pmd_pfn(*pmdp) << PAGE_SHIFT; 119 + pa += tpc & ~HPAGE_MASK; 120 + 121 + /* Use phys bypass so we don't pollute dtlb/dcache. */ 122 + __asm__ __volatile__("lduwa [%1] %2, %0" 123 + : "=r" (insn) 124 + : "r" (pa), "i" (ASI_PHYS_USE_EC)); 125 + } else 126 + #endif 127 + { 128 + ptep = pte_offset_map(pmdp, tpc); 129 + pte = *ptep; 130 + if (pte_present(pte)) { 131 + pa = (pte_pfn(pte) << PAGE_SHIFT); 132 + pa += (tpc & ~PAGE_MASK); 133 + 134 + /* Use phys bypass so we don't pollute dtlb/dcache. 
*/ 135 + __asm__ __volatile__("lduwa [%1] %2, %0" 136 + : "=r" (insn) 137 + : "r" (pa), "i" (ASI_PHYS_USE_EC)); 138 + } 139 + pte_unmap(ptep); 140 + } 141 + out_irq_enable: 142 + local_irq_enable(); 127 143 out: 128 - pte_unmap(ptep); 129 - __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); 130 - outret: 131 144 return insn; 132 145 } 133 146 ··· 166 153 } 167 154 168 155 static void do_fault_siginfo(int code, int sig, struct pt_regs *regs, 169 - unsigned int insn, int fault_code) 156 + unsigned long fault_addr, unsigned int insn, 157 + int fault_code) 170 158 { 171 159 unsigned long addr; 172 160 siginfo_t info; ··· 175 161 info.si_code = code; 176 162 info.si_signo = sig; 177 163 info.si_errno = 0; 178 - if (fault_code & FAULT_CODE_ITLB) 164 + if (fault_code & FAULT_CODE_ITLB) { 179 165 addr = regs->tpc; 180 - else 181 - addr = compute_effective_address(regs, insn, 0); 166 + } else { 167 + /* If we were able to probe the faulting instruction, use it 168 + * to compute a precise fault address. Otherwise use the fault 169 + * time provided address which may only have page granularity. 170 + */ 171 + if (insn) 172 + addr = compute_effective_address(regs, insn, 0); 173 + else 174 + addr = fault_addr; 175 + } 182 176 info.si_addr = (void __user *) addr; 183 177 info.si_trapno = 0; 184 178 ··· 261 239 /* The si_code was set to make clear whether 262 240 * this was a SEGV_MAPERR or SEGV_ACCERR fault. 263 241 */ 264 - do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code); 242 + do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code); 265 243 return; 266 244 } 267 245 ··· 547 525 * Send a sigbus, regardless of whether we were in kernel 548 526 * or user mode. 549 527 */ 550 - do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code); 528 + do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code); 551 529 552 530 /* Kernel mode? Handle exceptions or die */ 553 531 if (regs->tstate & TSTATE_PRIV)
+1 -1
arch/sparc/mm/gup.c
··· 73 73 struct page *head, *page, *tail; 74 74 int refs; 75 75 76 - if (!pmd_large(pmd)) 76 + if (!(pmd_val(pmd) & _PAGE_VALID)) 77 77 return 0; 78 78 79 79 if (write && !pmd_write(pmd))
+6 -6
arch/sparc/mm/init_64.c
··· 588 588 int i, tlb_ent = sparc64_highest_locked_tlbent(); 589 589 590 590 tte_vaddr = (unsigned long) KERNBASE; 591 - phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 591 + phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB; 592 592 tte_data = kern_large_tte(phys_page); 593 593 594 594 kern_locked_tte_data = tte_data; ··· 1881 1881 1882 1882 BUILD_BUG_ON(NR_CPUS > 4096); 1883 1883 1884 - kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 1884 + kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB; 1885 1885 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; 1886 1886 1887 1887 /* Invalidate both kernel TSBs. */ ··· 1937 1937 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); 1938 1938 1939 1939 real_end = (unsigned long)_end; 1940 - num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22); 1940 + num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB); 1941 1941 printk("Kernel: Using %d locked TLB entries for main kernel image.\n", 1942 1942 num_kernel_image_mappings); 1943 1943 ··· 2094 2094 2095 2095 if (new_start <= old_start && 2096 2096 new_end >= (old_start + PAGE_SIZE)) { 2097 - set_bit(old_start >> 22, bitmap); 2097 + set_bit(old_start >> ILOG2_4MB, bitmap); 2098 2098 goto do_next_page; 2099 2099 } 2100 2100 } ··· 2143 2143 addr = PAGE_OFFSET + kern_base; 2144 2144 last = PAGE_ALIGN(kern_size) + addr; 2145 2145 while (addr < last) { 2146 - set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap); 2146 + set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap); 2147 2147 addr += PAGE_SIZE; 2148 2148 } 2149 2149 ··· 2267 2267 void *block; 2268 2268 2269 2269 if (!(*vmem_pp & _PAGE_VALID)) { 2270 - block = vmemmap_alloc_block(1UL << 22, node); 2270 + block = vmemmap_alloc_block(1UL << ILOG2_4MB, node); 2271 2271 if (!block) 2272 2272 return -ENOMEM; 2273 2273
+20 -6
arch/sparc/mm/tlb.c
··· 134 134 135 135 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 136 136 static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, 137 - pmd_t pmd, bool exec) 137 + pmd_t pmd) 138 138 { 139 139 unsigned long end; 140 140 pte_t *pte; ··· 142 142 pte = pte_offset_map(&pmd, vaddr); 143 143 end = vaddr + HPAGE_SIZE; 144 144 while (vaddr < end) { 145 - if (pte_val(*pte) & _PAGE_VALID) 145 + if (pte_val(*pte) & _PAGE_VALID) { 146 + bool exec = pte_exec(*pte); 147 + 146 148 tlb_batch_add_one(mm, vaddr, exec); 149 + } 147 150 pte++; 148 151 vaddr += PAGE_SIZE; 149 152 } ··· 180 177 } 181 178 182 179 if (!pmd_none(orig)) { 183 - pte_t orig_pte = __pte(pmd_val(orig)); 184 - bool exec = pte_exec(orig_pte); 185 - 186 180 addr &= HPAGE_MASK; 187 181 if (pmd_trans_huge(orig)) { 182 + pte_t orig_pte = __pte(pmd_val(orig)); 183 + bool exec = pte_exec(orig_pte); 184 + 188 185 tlb_batch_add_one(mm, addr, exec); 189 186 tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec); 190 187 } else { 191 - tlb_batch_pmd_scan(mm, addr, orig, exec); 188 + tlb_batch_pmd_scan(mm, addr, orig); 192 189 } 193 190 } 191 + } 192 + 193 + void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 194 + pmd_t *pmdp) 195 + { 196 + pmd_t entry = *pmdp; 197 + 198 + pmd_val(entry) &= ~_PAGE_VALID; 199 + 200 + set_pmd_at(vma->vm_mm, address, pmdp, entry); 201 + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 194 202 } 195 203 196 204 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+4 -1
arch/x86/Makefile
··· 79 79 UTS_MACHINE := x86_64 80 80 CHECKFLAGS += -D__x86_64__ -m64 81 81 82 + biarch := -m64 82 83 KBUILD_AFLAGS += -m64 83 84 KBUILD_CFLAGS += -m64 84 85 85 86 # Don't autogenerate traditional x87, MMX or SSE instructions 86 - KBUILD_CFLAGS += -mno-mmx -mno-sse -mno-80387 -mno-fp-ret-in-387 87 + KBUILD_CFLAGS += -mno-mmx -mno-sse 88 + KBUILD_CFLAGS += $(call cc-option,-mno-80387) 89 + KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387) 87 90 88 91 # Use -mpreferred-stack-boundary=3 if supported. 89 92 KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
+2 -2
arch/x86/boot/Makefile
··· 71 71 72 72 SETUP_OBJS = $(addprefix $(obj)/,$(setup-y)) 73 73 74 - sed-voffset := -e 's/^\([0-9a-fA-F]*\) . \(_text\|_end\)$$/\#define VO_\2 0x\1/p' 74 + sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|_end\)$$/\#define VO_\2 0x\1/p' 75 75 76 76 quiet_cmd_voffset = VOFFSET $@ 77 77 cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@ ··· 80 80 $(obj)/voffset.h: vmlinux FORCE 81 81 $(call if_changed,voffset) 82 82 83 - sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p' 83 + sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p' 84 84 85 85 quiet_cmd_zoffset = ZOFFSET $@ 86 86 cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
+1 -1
arch/x86/boot/compressed/misc.c
··· 354 354 free(phdrs); 355 355 } 356 356 357 - asmlinkage void *decompress_kernel(void *rmode, memptr heap, 357 + asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, 358 358 unsigned char *input_data, 359 359 unsigned long input_len, 360 360 unsigned char *output,
+1
arch/x86/include/asm/hpet.h
··· 63 63 /* hpet memory map physical address */ 64 64 extern unsigned long hpet_address; 65 65 extern unsigned long force_hpet_address; 66 + extern int boot_hpet_disable; 66 67 extern u8 hpet_blockid; 67 68 extern int hpet_force_user; 68 69 extern u8 hpet_msi_disable;
+1 -1
arch/x86/include/uapi/asm/msr-index.h
··· 384 384 #define MSR_IA32_MISC_ENABLE_MWAIT_BIT 18 385 385 #define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT) 386 386 #define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT 22 387 - #define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT); 387 + #define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) 388 388 #define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT 23 389 389 #define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT) 390 390 #define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT 34
+1 -1
arch/x86/kernel/acpi/sleep.c
··· 31 31 * 32 32 * Wrapper around acpi_enter_sleep_state() to be called by assmebly. 33 33 */ 34 - acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state) 34 + acpi_status asmlinkage __visible x86_acpi_enter_sleep_state(u8 state) 35 35 { 36 36 return acpi_enter_sleep_state(state); 37 37 }
+6 -1
arch/x86/kernel/apic/io_apic.c
··· 2189 2189 cfg->move_in_progress = 0; 2190 2190 } 2191 2191 2192 - asmlinkage void smp_irq_move_cleanup_interrupt(void) 2192 + asmlinkage __visible void smp_irq_move_cleanup_interrupt(void) 2193 2193 { 2194 2194 unsigned vector, me; 2195 2195 ··· 3423 3423 int get_nr_irqs_gsi(void) 3424 3424 { 3425 3425 return nr_irqs_gsi; 3426 + } 3427 + 3428 + unsigned int arch_dynirq_lower_bound(unsigned int from) 3429 + { 3430 + return from < nr_irqs_gsi ? nr_irqs_gsi : from; 3426 3431 } 3427 3432 3428 3433 int __init arch_probe_nr_irqs(void)
+2 -2
arch/x86/kernel/cpu/mcheck/therm_throt.c
··· 429 429 smp_thermal_vector(); 430 430 } 431 431 432 - asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) 432 + asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs) 433 433 { 434 434 entering_irq(); 435 435 __smp_thermal_interrupt(); 436 436 exiting_ack_irq(); 437 437 } 438 438 439 - asmlinkage void smp_trace_thermal_interrupt(struct pt_regs *regs) 439 + asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs) 440 440 { 441 441 entering_irq(); 442 442 trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
+2 -2
arch/x86/kernel/cpu/mcheck/threshold.c
··· 24 24 mce_threshold_vector(); 25 25 } 26 26 27 - asmlinkage void smp_threshold_interrupt(void) 27 + asmlinkage __visible void smp_threshold_interrupt(void) 28 28 { 29 29 entering_irq(); 30 30 __smp_threshold_interrupt(); 31 31 exiting_ack_irq(); 32 32 } 33 33 34 - asmlinkage void smp_trace_threshold_interrupt(void) 34 + asmlinkage __visible void smp_trace_threshold_interrupt(void) 35 35 { 36 36 entering_irq(); 37 37 trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
+2 -1
arch/x86/kernel/cpu/perf_event_intel_rapl.c
··· 543 543 if (phys_id < 0) 544 544 return -1; 545 545 546 - if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits)) 546 + /* protect rdmsrl() to handle virtualization */ 547 + if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits)) 547 548 return -1; 548 549 549 550 pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+16
arch/x86/kernel/early-quirks.c
··· 17 17 #include <asm/dma.h> 18 18 #include <asm/io_apic.h> 19 19 #include <asm/apic.h> 20 + #include <asm/hpet.h> 20 21 #include <asm/iommu.h> 21 22 #include <asm/gart.h> 22 23 #include <asm/irq_remapping.h> ··· 531 530 } 532 531 } 533 532 533 + static void __init force_disable_hpet(int num, int slot, int func) 534 + { 535 + #ifdef CONFIG_HPET_TIMER 536 + boot_hpet_disable = 1; 537 + pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n"); 538 + #endif 539 + } 540 + 541 + 534 542 #define QFLAG_APPLY_ONCE 0x1 535 543 #define QFLAG_APPLIED 0x2 536 544 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) ··· 577 567 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 578 568 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID, 579 569 QFLAG_APPLY_ONCE, intel_graphics_stolen }, 570 + /* 571 + * HPET on current version of Baytrail platform has accuracy 572 + * problems, disable it for now: 573 + */ 574 + { PCI_VENDOR_ID_INTEL, 0x0f00, 575 + PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, 580 576 {} 581 577 }; 582 578
+1 -1
arch/x86/kernel/head32.c
··· 29 29 reserve_ebda_region(); 30 30 } 31 31 32 - asmlinkage void __init i386_start_kernel(void) 32 + asmlinkage __visible void __init i386_start_kernel(void) 33 33 { 34 34 sanitize_boot_params(&boot_params); 35 35
+1 -1
arch/x86/kernel/head64.c
··· 137 137 } 138 138 } 139 139 140 - asmlinkage void __init x86_64_start_kernel(char * real_mode_data) 140 + asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) 141 141 { 142 142 int i; 143 143
+1 -1
arch/x86/kernel/hpet.c
··· 88 88 /* 89 89 * HPET command line enable / disable 90 90 */ 91 - static int boot_hpet_disable; 91 + int boot_hpet_disable; 92 92 int hpet_force_user; 93 93 static int hpet_verbose; 94 94
+1 -1
arch/x86/kernel/process_64.c
··· 52 52 53 53 asmlinkage extern void ret_from_fork(void); 54 54 55 - asmlinkage DEFINE_PER_CPU(unsigned long, old_rsp); 55 + __visible DEFINE_PER_CPU(unsigned long, old_rsp); 56 56 57 57 /* Prints also some state that isn't saved in the pt_regs */ 58 58 void __show_regs(struct pt_regs *regs, int all)
+10
arch/x86/kernel/reboot.c
··· 191 191 }, 192 192 }, 193 193 194 + /* Certec */ 195 + { /* Handle problems with rebooting on Certec BPC600 */ 196 + .callback = set_pci_reboot, 197 + .ident = "Certec BPC600", 198 + .matches = { 199 + DMI_MATCH(DMI_SYS_VENDOR, "Certec"), 200 + DMI_MATCH(DMI_PRODUCT_NAME, "BPC600"), 201 + }, 202 + }, 203 + 194 204 /* Dell */ 195 205 { /* Handle problems with rebooting on Dell DXP061 */ 196 206 .callback = set_bios_reboot,
+1 -1
arch/x86/kernel/smp.c
··· 168 168 * this function calls the 'stop' function on all other CPUs in the system. 169 169 */ 170 170 171 - asmlinkage void smp_reboot_interrupt(void) 171 + asmlinkage __visible void smp_reboot_interrupt(void) 172 172 { 173 173 ack_APIC_irq(); 174 174 irq_enter();
+3 -3
arch/x86/kernel/traps.c
··· 357 357 * for scheduling or signal handling. The actual stack switch is done in 358 358 * entry.S 359 359 */ 360 - asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) 360 + asmlinkage __visible __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) 361 361 { 362 362 struct pt_regs *regs = eregs; 363 363 /* Did already sync */ ··· 601 601 #endif 602 602 } 603 603 604 - asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) 604 + asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void) 605 605 { 606 606 } 607 607 608 - asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) 608 + asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void) 609 609 { 610 610 } 611 611
+13 -4
arch/x86/kernel/vsmp_64.c
··· 26 26 27 27 #define TOPOLOGY_REGISTER_OFFSET 0x10 28 28 29 + /* Flag below is initialized once during vSMP PCI initialization. */ 30 + static int irq_routing_comply = 1; 31 + 29 32 #if defined CONFIG_PCI && defined CONFIG_PARAVIRT 30 33 /* 31 34 * Interrupt control on vSMPowered systems: ··· 36 33 * and vice versa. 37 34 */ 38 35 39 - asmlinkage unsigned long vsmp_save_fl(void) 36 + asmlinkage __visible unsigned long vsmp_save_fl(void) 40 37 { 41 38 unsigned long flags = native_save_fl(); 42 39 ··· 56 53 } 57 54 PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl); 58 55 59 - asmlinkage void vsmp_irq_disable(void) 56 + asmlinkage __visible void vsmp_irq_disable(void) 60 57 { 61 58 unsigned long flags = native_save_fl(); 62 59 ··· 64 61 } 65 62 PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable); 66 63 67 - asmlinkage void vsmp_irq_enable(void) 64 + asmlinkage __visible void vsmp_irq_enable(void) 68 65 { 69 66 unsigned long flags = native_save_fl(); 70 67 ··· 104 101 #ifdef CONFIG_SMP 105 102 if (cap & ctl & BIT(8)) { 106 103 ctl &= ~BIT(8); 104 + 105 + /* Interrupt routing set to ignore */ 106 + irq_routing_comply = 0; 107 + 107 108 #ifdef CONFIG_PROC_FS 108 109 /* Don't let users change irq affinity via procfs */ 109 110 no_irq_affinity = 1; ··· 225 218 { 226 219 /* need to update phys_pkg_id */ 227 220 apic->phys_pkg_id = apicid_phys_pkg_id; 228 - apic->vector_allocation_domain = fill_vector_allocation_domain; 221 + 222 + if (!irq_routing_comply) 223 + apic->vector_allocation_domain = fill_vector_allocation_domain; 229 224 } 230 225 231 226 void __init vsmp_init(void)
+1 -1
arch/x86/kernel/vsyscall_gtod.c
··· 43 43 vdata->monotonic_time_sec = tk->xtime_sec 44 44 + tk->wall_to_monotonic.tv_sec; 45 45 vdata->monotonic_time_snsec = tk->xtime_nsec 46 - + (tk->wall_to_monotonic.tv_nsec 46 + + ((u64)tk->wall_to_monotonic.tv_nsec 47 47 << tk->shift); 48 48 while (vdata->monotonic_time_snsec >= 49 49 (((u64)NSEC_PER_SEC) << tk->shift)) {
+41 -12
arch/x86/kvm/vmx.c
··· 503 503 [number##_HIGH] = VMCS12_OFFSET(name)+4 504 504 505 505 506 - static const unsigned long shadow_read_only_fields[] = { 506 + static unsigned long shadow_read_only_fields[] = { 507 507 /* 508 508 * We do NOT shadow fields that are modified when L0 509 509 * traps and emulates any vmx instruction (e.g. VMPTRLD, ··· 526 526 GUEST_LINEAR_ADDRESS, 527 527 GUEST_PHYSICAL_ADDRESS 528 528 }; 529 - static const int max_shadow_read_only_fields = 529 + static int max_shadow_read_only_fields = 530 530 ARRAY_SIZE(shadow_read_only_fields); 531 531 532 - static const unsigned long shadow_read_write_fields[] = { 532 + static unsigned long shadow_read_write_fields[] = { 533 533 GUEST_RIP, 534 534 GUEST_RSP, 535 535 GUEST_CR0, ··· 558 558 HOST_FS_SELECTOR, 559 559 HOST_GS_SELECTOR 560 560 }; 561 - static const int max_shadow_read_write_fields = 561 + static int max_shadow_read_write_fields = 562 562 ARRAY_SIZE(shadow_read_write_fields); 563 563 564 564 static const unsigned short vmcs_field_to_offset_table[] = { ··· 3009 3009 } 3010 3010 } 3011 3011 3012 + static void init_vmcs_shadow_fields(void) 3013 + { 3014 + int i, j; 3015 + 3016 + /* No checks for read only fields yet */ 3017 + 3018 + for (i = j = 0; i < max_shadow_read_write_fields; i++) { 3019 + switch (shadow_read_write_fields[i]) { 3020 + case GUEST_BNDCFGS: 3021 + if (!vmx_mpx_supported()) 3022 + continue; 3023 + break; 3024 + default: 3025 + break; 3026 + } 3027 + 3028 + if (j < i) 3029 + shadow_read_write_fields[j] = 3030 + shadow_read_write_fields[i]; 3031 + j++; 3032 + } 3033 + max_shadow_read_write_fields = j; 3034 + 3035 + /* shadowed fields guest access without vmexit */ 3036 + for (i = 0; i < max_shadow_read_write_fields; i++) { 3037 + clear_bit(shadow_read_write_fields[i], 3038 + vmx_vmwrite_bitmap); 3039 + clear_bit(shadow_read_write_fields[i], 3040 + vmx_vmread_bitmap); 3041 + } 3042 + for (i = 0; i < max_shadow_read_only_fields; i++) 3043 + clear_bit(shadow_read_only_fields[i], 3044 + 
vmx_vmread_bitmap); 3045 + } 3046 + 3012 3047 static __init int alloc_kvm_area(void) 3013 3048 { 3014 3049 int cpu; ··· 3074 3039 enable_vpid = 0; 3075 3040 if (!cpu_has_vmx_shadow_vmcs()) 3076 3041 enable_shadow_vmcs = 0; 3042 + if (enable_shadow_vmcs) 3043 + init_vmcs_shadow_fields(); 3077 3044 3078 3045 if (!cpu_has_vmx_ept() || 3079 3046 !cpu_has_vmx_ept_4levels()) { ··· 8840 8803 8841 8804 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); 8842 8805 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); 8843 - /* shadowed read/write fields */ 8844 - for (i = 0; i < max_shadow_read_write_fields; i++) { 8845 - clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap); 8846 - clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap); 8847 - } 8848 - /* shadowed read only fields */ 8849 - for (i = 0; i < max_shadow_read_only_fields; i++) 8850 - clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap); 8851 8806 8852 8807 /* 8853 8808 * Allow direct access to the PC debug port (it is often used for I/O
+1 -1
arch/x86/kvm/x86.c
··· 280 280 } 281 281 EXPORT_SYMBOL_GPL(kvm_set_apic_base); 282 282 283 - asmlinkage void kvm_spurious_fault(void) 283 + asmlinkage __visible void kvm_spurious_fault(void) 284 284 { 285 285 /* Fault while not rebooting. We want the trace. */ 286 286 BUG();
+2 -2
arch/x86/lguest/boot.c
··· 233 233 * flags word contains all kind of stuff, but in practice Linux only cares 234 234 * about the interrupt flag. Our "save_flags()" just returns that. 235 235 */ 236 - asmlinkage unsigned long lguest_save_fl(void) 236 + asmlinkage __visible unsigned long lguest_save_fl(void) 237 237 { 238 238 return lguest_data.irq_enabled; 239 239 } 240 240 241 241 /* Interrupts go off... */ 242 - asmlinkage void lguest_irq_disable(void) 242 + asmlinkage __visible void lguest_irq_disable(void) 243 243 { 244 244 lguest_data.irq_enabled = 0; 245 245 }
+1 -1
arch/x86/lib/msr.c
··· 76 76 if (m1.q == m.q) 77 77 return 0; 78 78 79 - err = msr_write(msr, &m); 79 + err = msr_write(msr, &m1); 80 80 if (err) 81 81 return err; 82 82
+8 -8
arch/x86/math-emu/errors.c
··· 302 302 0x242 in div_Xsig.S 303 303 */ 304 304 305 - asmlinkage void FPU_exception(int n) 305 + asmlinkage __visible void FPU_exception(int n) 306 306 { 307 307 int i, int_type; 308 308 ··· 492 492 493 493 /* Invalid arith operation on Valid registers */ 494 494 /* Returns < 0 if the exception is unmasked */ 495 - asmlinkage int arith_invalid(int deststnr) 495 + asmlinkage __visible int arith_invalid(int deststnr) 496 496 { 497 497 498 498 EXCEPTION(EX_Invalid); ··· 507 507 } 508 508 509 509 /* Divide a finite number by zero */ 510 - asmlinkage int FPU_divide_by_zero(int deststnr, u_char sign) 510 + asmlinkage __visible int FPU_divide_by_zero(int deststnr, u_char sign) 511 511 { 512 512 FPU_REG *dest = &st(deststnr); 513 513 int tag = TAG_Valid; ··· 539 539 } 540 540 541 541 /* This may be called often, so keep it lean */ 542 - asmlinkage void set_precision_flag_up(void) 542 + asmlinkage __visible void set_precision_flag_up(void) 543 543 { 544 544 if (control_word & CW_Precision) 545 545 partial_status |= (SW_Precision | SW_C1); /* The masked response */ ··· 548 548 } 549 549 550 550 /* This may be called often, so keep it lean */ 551 - asmlinkage void set_precision_flag_down(void) 551 + asmlinkage __visible void set_precision_flag_down(void) 552 552 { 553 553 if (control_word & CW_Precision) { /* The masked response */ 554 554 partial_status &= ~SW_C1; ··· 557 557 EXCEPTION(EX_Precision); 558 558 } 559 559 560 - asmlinkage int denormal_operand(void) 560 + asmlinkage __visible int denormal_operand(void) 561 561 { 562 562 if (control_word & CW_Denormal) { /* The masked response */ 563 563 partial_status |= SW_Denorm_Op; ··· 568 568 } 569 569 } 570 570 571 - asmlinkage int arith_overflow(FPU_REG *dest) 571 + asmlinkage __visible int arith_overflow(FPU_REG *dest) 572 572 { 573 573 int tag = TAG_Valid; 574 574 ··· 596 596 597 597 } 598 598 599 - asmlinkage int arith_underflow(FPU_REG *dest) 599 + asmlinkage __visible int arith_underflow(FPU_REG *dest) 600 600 { 601 
601 int tag = TAG_Valid; 602 602
+64 -19
arch/x86/platform/efi/early_printk.c
··· 14 14 15 15 static const struct font_desc *font; 16 16 static u32 efi_x, efi_y; 17 + static void *efi_fb; 18 + static bool early_efi_keep; 17 19 18 - static __init void early_efi_clear_scanline(unsigned int y) 20 + /* 21 + * efi earlyprintk need use early_ioremap to map the framebuffer. 22 + * But early_ioremap is not usable for earlyprintk=efi,keep, ioremap should 23 + * be used instead. ioremap will be available after paging_init() which is 24 + * earlier than initcall callbacks. Thus adding this early initcall function 25 + * early_efi_map_fb to map the whole efi framebuffer. 26 + */ 27 + static __init int early_efi_map_fb(void) 19 28 { 20 - unsigned long base, *dst; 21 - u16 len; 29 + unsigned long base, size; 30 + 31 + if (!early_efi_keep) 32 + return 0; 22 33 23 34 base = boot_params.screen_info.lfb_base; 24 - len = boot_params.screen_info.lfb_linelength; 35 + size = boot_params.screen_info.lfb_size; 36 + efi_fb = ioremap(base, size); 25 37 26 - dst = early_ioremap(base + y*len, len); 38 + return efi_fb ? 0 : -ENOMEM; 39 + } 40 + early_initcall(early_efi_map_fb); 41 + 42 + /* 43 + * early_efi_map maps efi framebuffer region [start, start + len -1] 44 + * In case earlyprintk=efi,keep we have the whole framebuffer mapped already 45 + * so just return the offset efi_fb + start. 
46 + */ 47 + static __init_refok void *early_efi_map(unsigned long start, unsigned long len) 48 + { 49 + unsigned long base; 50 + 51 + base = boot_params.screen_info.lfb_base; 52 + 53 + if (efi_fb) 54 + return (efi_fb + start); 55 + else 56 + return early_ioremap(base + start, len); 57 + } 58 + 59 + static __init_refok void early_efi_unmap(void *addr, unsigned long len) 60 + { 61 + if (!efi_fb) 62 + early_iounmap(addr, len); 63 + } 64 + 65 + static void early_efi_clear_scanline(unsigned int y) 66 + { 67 + unsigned long *dst; 68 + u16 len; 69 + 70 + len = boot_params.screen_info.lfb_linelength; 71 + dst = early_efi_map(y*len, len); 27 72 if (!dst) 28 73 return; 29 74 30 75 memset(dst, 0, len); 31 - early_iounmap(dst, len); 76 + early_efi_unmap(dst, len); 32 77 } 33 78 34 - static __init void early_efi_scroll_up(void) 79 + static void early_efi_scroll_up(void) 35 80 { 36 - unsigned long base, *dst, *src; 81 + unsigned long *dst, *src; 37 82 u16 len; 38 83 u32 i, height; 39 84 40 - base = boot_params.screen_info.lfb_base; 41 85 len = boot_params.screen_info.lfb_linelength; 42 86 height = boot_params.screen_info.lfb_height; 43 87 44 88 for (i = 0; i < height - font->height; i++) { 45 - dst = early_ioremap(base + i*len, len); 89 + dst = early_efi_map(i*len, len); 46 90 if (!dst) 47 91 return; 48 92 49 - src = early_ioremap(base + (i + font->height) * len, len); 93 + src = early_efi_map((i + font->height) * len, len); 50 94 if (!src) { 51 - early_iounmap(dst, len); 95 + early_efi_unmap(dst, len); 52 96 return; 53 97 } 54 98 55 99 memmove(dst, src, len); 56 100 57 - early_iounmap(src, len); 58 - early_iounmap(dst, len); 101 + early_efi_unmap(src, len); 102 + early_efi_unmap(dst, len); 59 103 } 60 104 } 61 105 ··· 123 79 } 124 80 } 125 81 126 - static __init void 82 + static void 127 83 early_efi_write(struct console *con, const char *str, unsigned int num) 128 84 { 129 85 struct screen_info *si; 130 - unsigned long base; 131 86 unsigned int len; 132 87 const char *s; 133 
88 void *dst; 134 89 135 - base = boot_params.screen_info.lfb_base; 136 90 si = &boot_params.screen_info; 137 91 len = si->lfb_linelength; 138 92 ··· 151 109 for (h = 0; h < font->height; h++) { 152 110 unsigned int n, x; 153 111 154 - dst = early_ioremap(base + (efi_y + h) * len, len); 112 + dst = early_efi_map((efi_y + h) * len, len); 155 113 if (!dst) 156 114 return; 157 115 ··· 165 123 s++; 166 124 } 167 125 168 - early_iounmap(dst, len); 126 + early_efi_unmap(dst, len); 169 127 } 170 128 171 129 num -= count; ··· 221 179 for (i = 0; i < (yres - efi_y) / font->height; i++) 222 180 early_efi_scroll_up(); 223 181 182 + /* early_console_register will unset CON_BOOT in case ,keep */ 183 + if (!(con->flags & CON_BOOT)) 184 + early_efi_keep = true; 224 185 return 0; 225 186 } 226 187
+1 -1
arch/x86/platform/olpc/olpc-xo1-pm.c
··· 75 75 return 0; 76 76 } 77 77 78 - asmlinkage int xo1_do_sleep(u8 sleep_state) 78 + asmlinkage __visible int xo1_do_sleep(u8 sleep_state) 79 79 { 80 80 void *pgd_addr = __va(read_cr3()); 81 81
+1 -1
arch/x86/power/hibernate_64.c
··· 23 23 extern __visible const void __nosave_begin, __nosave_end; 24 24 25 25 /* Defined in hibernate_asm_64.S */ 26 - extern asmlinkage int restore_image(void); 26 + extern asmlinkage __visible int restore_image(void); 27 27 28 28 /* 29 29 * Address to jump to in the last phase of restore in order to get to the image
+1 -1
arch/x86/xen/enlighten.c
··· 1515 1515 } 1516 1516 1517 1517 /* First C function to be called on Xen boot */ 1518 - asmlinkage void __init xen_start_kernel(void) 1518 + asmlinkage __visible void __init xen_start_kernel(void) 1519 1519 { 1520 1520 struct physdev_set_iopl set_iopl; 1521 1521 int rc;
+3 -3
arch/x86/xen/irq.c
··· 23 23 (void)HYPERVISOR_xen_version(0, NULL); 24 24 } 25 25 26 - asmlinkage unsigned long xen_save_fl(void) 26 + asmlinkage __visible unsigned long xen_save_fl(void) 27 27 { 28 28 struct vcpu_info *vcpu; 29 29 unsigned long flags; ··· 63 63 } 64 64 PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl); 65 65 66 - asmlinkage void xen_irq_disable(void) 66 + asmlinkage __visible void xen_irq_disable(void) 67 67 { 68 68 /* There's a one instruction preempt window here. We need to 69 69 make sure we're don't switch CPUs between getting the vcpu ··· 74 74 } 75 75 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable); 76 76 77 - asmlinkage void xen_irq_enable(void) 77 + asmlinkage __visible void xen_irq_enable(void) 78 78 { 79 79 struct vcpu_info *vcpu; 80 80
+19 -1
arch/xtensa/Kconfig
··· 14 14 select GENERIC_PCI_IOMAP 15 15 select ARCH_WANT_IPC_PARSE_VERSION 16 16 select ARCH_WANT_OPTIONAL_GPIOLIB 17 + select BUILDTIME_EXTABLE_SORT 17 18 select CLONE_BACKWARDS 18 19 select IRQ_DOMAIN 19 20 select HAVE_OPROFILE ··· 190 189 191 190 If in doubt, say Y. 192 191 192 + config HIGHMEM 193 + bool "High Memory Support" 194 + help 195 + Linux can use the full amount of RAM in the system by 196 + default. However, the default MMUv2 setup only maps the 197 + lowermost 128 MB of memory linearly to the areas starting 198 + at 0xd0000000 (cached) and 0xd8000000 (uncached). 199 + When there are more than 128 MB memory in the system not 200 + all of it can be "permanently mapped" by the kernel. 201 + The physical memory that's not permanently mapped is called 202 + "high memory". 203 + 204 + If you are compiling a kernel which will never run on a 205 + machine with more than 128 MB total physical RAM, answer 206 + N here. 207 + 208 + If unsure, say Y. 209 + 193 210 endmenu 194 211 195 212 config XTENSA_CALIBRATE_CCOUNT ··· 243 224 244 225 config XTENSA_PLATFORM_ISS 245 226 bool "ISS" 246 - depends on TTY 247 227 select XTENSA_CALIBRATE_CCOUNT 248 228 select SERIAL_CONSOLE 249 229 help
+11
arch/xtensa/boot/dts/kc705.dts
··· 1 + /dts-v1/; 2 + /include/ "xtfpga.dtsi" 3 + /include/ "xtfpga-flash-128m.dtsi" 4 + 5 + / { 6 + compatible = "cdns,xtensa-kc705"; 7 + memory@0 { 8 + device_type = "memory"; 9 + reg = <0x00000000 0x08000000>; 10 + }; 11 + };
+28
arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
··· 1 + / { 2 + soc { 3 + flash: flash@00000000 { 4 + #address-cells = <1>; 5 + #size-cells = <1>; 6 + compatible = "cfi-flash"; 7 + reg = <0x00000000 0x08000000>; 8 + bank-width = <2>; 9 + device-width = <2>; 10 + partition@0x0 { 11 + label = "data"; 12 + reg = <0x00000000 0x06000000>; 13 + }; 14 + partition@0x6000000 { 15 + label = "boot loader area"; 16 + reg = <0x06000000 0x00800000>; 17 + }; 18 + partition@0x6800000 { 19 + label = "kernel image"; 20 + reg = <0x06800000 0x017e0000>; 21 + }; 22 + partition@0x7fe0000 { 23 + label = "boot environment"; 24 + reg = <0x07fe0000 0x00020000>; 25 + }; 26 + }; 27 + }; 28 + };
+25 -23
arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
··· 1 1 / { 2 - flash: flash@f8000000 { 3 - #address-cells = <1>; 4 - #size-cells = <1>; 5 - compatible = "cfi-flash"; 6 - reg = <0xf8000000 0x01000000>; 7 - bank-width = <2>; 8 - device-width = <2>; 9 - partition@0x0 { 10 - label = "boot loader area"; 11 - reg = <0x00000000 0x00400000>; 2 + soc { 3 + flash: flash@08000000 { 4 + #address-cells = <1>; 5 + #size-cells = <1>; 6 + compatible = "cfi-flash"; 7 + reg = <0x08000000 0x01000000>; 8 + bank-width = <2>; 9 + device-width = <2>; 10 + partition@0x0 { 11 + label = "boot loader area"; 12 + reg = <0x00000000 0x00400000>; 13 + }; 14 + partition@0x400000 { 15 + label = "kernel image"; 16 + reg = <0x00400000 0x00600000>; 17 + }; 18 + partition@0xa00000 { 19 + label = "data"; 20 + reg = <0x00a00000 0x005e0000>; 21 + }; 22 + partition@0xfe0000 { 23 + label = "boot environment"; 24 + reg = <0x00fe0000 0x00020000>; 25 + }; 12 26 }; 13 - partition@0x400000 { 14 - label = "kernel image"; 15 - reg = <0x00400000 0x00600000>; 16 - }; 17 - partition@0xa00000 { 18 - label = "data"; 19 - reg = <0x00a00000 0x005e0000>; 20 - }; 21 - partition@0xfe0000 { 22 - label = "boot environment"; 23 - reg = <0x00fe0000 0x00020000>; 24 - }; 25 - }; 27 + }; 26 28 };
+17 -15
arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
··· 1 1 / { 2 - flash: flash@f8000000 { 3 - #address-cells = <1>; 4 - #size-cells = <1>; 5 - compatible = "cfi-flash"; 6 - reg = <0xf8000000 0x00400000>; 7 - bank-width = <2>; 8 - device-width = <2>; 9 - partition@0x0 { 10 - label = "boot loader area"; 11 - reg = <0x00000000 0x003f0000>; 2 + soc { 3 + flash: flash@08000000 { 4 + #address-cells = <1>; 5 + #size-cells = <1>; 6 + compatible = "cfi-flash"; 7 + reg = <0x08000000 0x00400000>; 8 + bank-width = <2>; 9 + device-width = <2>; 10 + partition@0x0 { 11 + label = "boot loader area"; 12 + reg = <0x00000000 0x003f0000>; 13 + }; 14 + partition@0x3f0000 { 15 + label = "boot environment"; 16 + reg = <0x003f0000 0x00010000>; 17 + }; 12 18 }; 13 - partition@0x3f0000 { 14 - label = "boot environment"; 15 - reg = <0x003f0000 0x00010000>; 16 - }; 17 - }; 19 + }; 18 20 };
+22 -15
arch/xtensa/boot/dts/xtfpga.dtsi
··· 42 42 }; 43 43 }; 44 44 45 - serial0: serial@fd050020 { 46 - device_type = "serial"; 47 - compatible = "ns16550a"; 48 - no-loopback-test; 49 - reg = <0xfd050020 0x20>; 50 - reg-shift = <2>; 51 - interrupts = <0 1>; /* external irq 0 */ 52 - clocks = <&osc>; 53 - }; 45 + soc { 46 + #address-cells = <1>; 47 + #size-cells = <1>; 48 + compatible = "simple-bus"; 49 + ranges = <0x00000000 0xf0000000 0x10000000>; 54 50 55 - enet0: ethoc@fd030000 { 56 - compatible = "opencores,ethoc"; 57 - reg = <0xfd030000 0x4000 0xfd800000 0x4000>; 58 - interrupts = <1 1>; /* external irq 1 */ 59 - local-mac-address = [00 50 c2 13 6f 00]; 60 - clocks = <&osc>; 51 + serial0: serial@0d050020 { 52 + device_type = "serial"; 53 + compatible = "ns16550a"; 54 + no-loopback-test; 55 + reg = <0x0d050020 0x20>; 56 + reg-shift = <2>; 57 + interrupts = <0 1>; /* external irq 0 */ 58 + clocks = <&osc>; 59 + }; 60 + 61 + enet0: ethoc@0d030000 { 62 + compatible = "opencores,ethoc"; 63 + reg = <0x0d030000 0x4000 0x0d800000 0x4000>; 64 + interrupts = <1 1>; /* external irq 1 */ 65 + local-mac-address = [00 50 c2 13 6f 00]; 66 + clocks = <&osc>; 67 + }; 61 68 }; 62 69 };
+2 -11
arch/xtensa/include/asm/bootparam.h
··· 37 37 unsigned long data[0]; /* data */ 38 38 } bp_tag_t; 39 39 40 - typedef struct meminfo { 40 + struct bp_meminfo { 41 41 unsigned long type; 42 42 unsigned long start; 43 43 unsigned long end; 44 - } meminfo_t; 45 - 46 - #define SYSMEM_BANKS_MAX 5 44 + }; 47 45 48 46 #define MEMORY_TYPE_CONVENTIONAL 0x1000 49 47 #define MEMORY_TYPE_NONE 0x2000 50 - 51 - typedef struct sysmem_info { 52 - int nr_banks; 53 - meminfo_t bank[SYSMEM_BANKS_MAX]; 54 - } sysmem_info_t; 55 - 56 - extern sysmem_info_t sysmem; 57 48 58 49 #endif 59 50 #endif
+58
arch/xtensa/include/asm/fixmap.h
··· 1 + /* 2 + * fixmap.h: compile-time virtual memory allocation 3 + * 4 + * This file is subject to the terms and conditions of the GNU General Public 5 + * License. See the file "COPYING" in the main directory of this archive 6 + * for more details. 7 + * 8 + * Copyright (C) 1998 Ingo Molnar 9 + * 10 + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 11 + */ 12 + 13 + #ifndef _ASM_FIXMAP_H 14 + #define _ASM_FIXMAP_H 15 + 16 + #include <asm/pgtable.h> 17 + #ifdef CONFIG_HIGHMEM 18 + #include <linux/threads.h> 19 + #include <asm/kmap_types.h> 20 + #endif 21 + 22 + /* 23 + * Here we define all the compile-time 'special' virtual 24 + * addresses. The point is to have a constant address at 25 + * compile time, but to set the physical address only 26 + * in the boot process. We allocate these special addresses 27 + * from the end of the consistent memory region backwards. 28 + * Also this lets us do fail-safe vmalloc(), we 29 + * can guarantee that these special addresses and 30 + * vmalloc()-ed addresses never overlap. 31 + * 32 + * these 'compile-time allocated' memory buffers are 33 + * fixed-size 4k pages. (or larger if used with an increment 34 + * higher than 1) use fixmap_set(idx,phys) to associate 35 + * physical memory with fixmap indices. 36 + */ 37 + enum fixed_addresses { 38 + #ifdef CONFIG_HIGHMEM 39 + /* reserved pte's for temporary kernel mappings */ 40 + FIX_KMAP_BEGIN, 41 + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, 42 + #endif 43 + __end_of_fixed_addresses 44 + }; 45 + 46 + #define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE) 47 + #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 48 + #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) 49 + 50 + #include <asm-generic/fixmap.h> 51 + 52 + #define kmap_get_fixmap_pte(vaddr) \ 53 + pte_offset_kernel( \ 54 + pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \ 55 + (vaddr) \ 56 + ) 57 + 58 + #endif
+44 -1
arch/xtensa/include/asm/highmem.h
··· 6 6 * this archive for more details. 7 7 * 8 8 * Copyright (C) 2003 - 2005 Tensilica Inc. 9 + * Copyright (C) 2014 Cadence Design Systems Inc. 9 10 */ 10 11 11 12 #ifndef _XTENSA_HIGHMEM_H 12 13 #define _XTENSA_HIGHMEM_H 13 14 14 - extern void flush_cache_kmaps(void); 15 + #include <asm/cacheflush.h> 16 + #include <asm/fixmap.h> 17 + #include <asm/kmap_types.h> 18 + #include <asm/pgtable.h> 19 + 20 + #define PKMAP_BASE (FIXADDR_START - PMD_SIZE) 21 + #define LAST_PKMAP PTRS_PER_PTE 22 + #define LAST_PKMAP_MASK (LAST_PKMAP - 1) 23 + #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) 24 + #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) 25 + 26 + #define kmap_prot PAGE_KERNEL 27 + 28 + extern pte_t *pkmap_page_table; 29 + 30 + void *kmap_high(struct page *page); 31 + void kunmap_high(struct page *page); 32 + 33 + static inline void *kmap(struct page *page) 34 + { 35 + BUG_ON(in_interrupt()); 36 + if (!PageHighMem(page)) 37 + return page_address(page); 38 + return kmap_high(page); 39 + } 40 + 41 + static inline void kunmap(struct page *page) 42 + { 43 + BUG_ON(in_interrupt()); 44 + if (!PageHighMem(page)) 45 + return; 46 + kunmap_high(page); 47 + } 48 + 49 + static inline void flush_cache_kmaps(void) 50 + { 51 + flush_cache_all(); 52 + } 53 + 54 + void *kmap_atomic(struct page *page); 55 + void __kunmap_atomic(void *kvaddr); 56 + 57 + void kmap_init(void); 15 58 16 59 #endif
+4
arch/xtensa/include/asm/pgtable.h
··· 310 310 update_pte(ptep, pteval); 311 311 } 312 312 313 + static inline void set_pte(pte_t *ptep, pte_t pteval) 314 + { 315 + update_pte(ptep, pteval); 316 + } 313 317 314 318 static inline void 315 319 set_pmd(pmd_t *pmdp, pmd_t pmdval)
+38
arch/xtensa/include/asm/sysmem.h
··· 1 + /* 2 + * sysmem-related prototypes. 3 + * 4 + * This file is subject to the terms and conditions of the GNU General Public 5 + * License. See the file "COPYING" in the main directory of this archive 6 + * for more details. 7 + * 8 + * Copyright (C) 2014 Cadence Design Systems Inc. 9 + */ 10 + 11 + #ifndef _XTENSA_SYSMEM_H 12 + #define _XTENSA_SYSMEM_H 13 + 14 + #define SYSMEM_BANKS_MAX 31 15 + 16 + struct meminfo { 17 + unsigned long start; 18 + unsigned long end; 19 + }; 20 + 21 + /* 22 + * Bank array is sorted by .start. 23 + * Banks don't overlap and there's at least one page gap 24 + * between adjacent bank entries. 25 + */ 26 + struct sysmem_info { 27 + int nr_banks; 28 + struct meminfo bank[SYSMEM_BANKS_MAX]; 29 + }; 30 + 31 + extern struct sysmem_info sysmem; 32 + 33 + int add_sysmem_bank(unsigned long start, unsigned long end); 34 + int mem_reserve(unsigned long, unsigned long, int); 35 + void bootmem_init(void); 36 + void zones_init(void); 37 + 38 + #endif /* _XTENSA_SYSMEM_H */
+4 -7
arch/xtensa/include/asm/tlbflush.h
··· 36 36 unsigned long page); 37 37 void local_flush_tlb_range(struct vm_area_struct *vma, 38 38 unsigned long start, unsigned long end); 39 + void local_flush_tlb_kernel_range(unsigned long start, unsigned long end); 39 40 40 41 #ifdef CONFIG_SMP 41 42 ··· 45 44 void flush_tlb_page(struct vm_area_struct *, unsigned long); 46 45 void flush_tlb_range(struct vm_area_struct *, unsigned long, 47 46 unsigned long); 48 - 49 - static inline void flush_tlb_kernel_range(unsigned long start, 50 - unsigned long end) 51 - { 52 - flush_tlb_all(); 53 - } 47 + void flush_tlb_kernel_range(unsigned long start, unsigned long end); 54 48 55 49 #else /* !CONFIG_SMP */ 56 50 ··· 54 58 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) 55 59 #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, \ 56 60 end) 57 - #define flush_tlb_kernel_range(start, end) local_flush_tlb_all() 61 + #define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \ 62 + end) 58 63 59 64 #endif /* CONFIG_SMP */ 60 65
+11 -35
arch/xtensa/kernel/setup.c
··· 50 50 #include <asm/param.h> 51 51 #include <asm/traps.h> 52 52 #include <asm/smp.h> 53 + #include <asm/sysmem.h> 53 54 54 55 #include <platform/hardware.h> 55 56 ··· 89 88 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; 90 89 #endif 91 90 92 - sysmem_info_t __initdata sysmem; 93 - 94 - extern int mem_reserve(unsigned long, unsigned long, int); 95 - extern void bootmem_init(void); 96 - extern void zones_init(void); 97 - 98 91 /* 99 92 * Boot parameter parsing. 100 93 * ··· 108 113 109 114 /* parse current tag */ 110 115 111 - static int __init add_sysmem_bank(unsigned long type, unsigned long start, 112 - unsigned long end) 113 - { 114 - if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) { 115 - printk(KERN_WARNING 116 - "Ignoring memory bank 0x%08lx size %ldKB\n", 117 - start, end - start); 118 - return -EINVAL; 119 - } 120 - sysmem.bank[sysmem.nr_banks].type = type; 121 - sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(start); 122 - sysmem.bank[sysmem.nr_banks].end = end & PAGE_MASK; 123 - sysmem.nr_banks++; 124 - 125 - return 0; 126 - } 127 - 128 116 static int __init parse_tag_mem(const bp_tag_t *tag) 129 117 { 130 - meminfo_t *mi = (meminfo_t *)(tag->data); 118 + struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data); 131 119 132 120 if (mi->type != MEMORY_TYPE_CONVENTIONAL) 133 121 return -1; 134 122 135 - return add_sysmem_bank(mi->type, mi->start, mi->end); 123 + return add_sysmem_bank(mi->start, mi->end); 136 124 } 137 125 138 126 __tagtable(BP_TAG_MEMORY, parse_tag_mem); ··· 124 146 125 147 static int __init parse_tag_initrd(const bp_tag_t* tag) 126 148 { 127 - meminfo_t* mi; 128 - mi = (meminfo_t*)(tag->data); 149 + struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data); 150 + 129 151 initrd_start = (unsigned long)__va(mi->start); 130 152 initrd_end = (unsigned long)__va(mi->end); 131 153 ··· 233 255 return; 234 256 235 257 size &= PAGE_MASK; 236 - add_sysmem_bank(MEMORY_TYPE_CONVENTIONAL, base, base + size); 258 + 
add_sysmem_bank(base, base + size); 237 259 } 238 260 239 261 void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) ··· 270 292 271 293 void __init init_arch(bp_tag_t *bp_start) 272 294 { 273 - sysmem.nr_banks = 0; 274 - 275 295 /* Parse boot parameters */ 276 296 277 297 if (bp_start) ··· 280 304 #endif 281 305 282 306 if (sysmem.nr_banks == 0) { 283 - sysmem.nr_banks = 1; 284 - sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START; 285 - sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START 286 - + PLATFORM_DEFAULT_MEM_SIZE; 307 + add_sysmem_bank(PLATFORM_DEFAULT_MEM_START, 308 + PLATFORM_DEFAULT_MEM_START + 309 + PLATFORM_DEFAULT_MEM_SIZE); 287 310 } 288 311 289 312 #ifdef CONFIG_CMDLINE_BOOL ··· 462 487 #ifdef CONFIG_BLK_DEV_INITRD 463 488 if (initrd_start < initrd_end) { 464 489 initrd_is_mapped = mem_reserve(__pa(initrd_start), 465 - __pa(initrd_end), 0); 490 + __pa(initrd_end), 0) == 0; 466 491 initrd_below_start_ok = 1; 467 492 } else { 468 493 initrd_start = 0; ··· 507 532 __pa(&_Level6InterruptVector_text_end), 0); 508 533 #endif 509 534 535 + parse_early_param(); 510 536 bootmem_init(); 511 537 512 538 unflatten_and_copy_device_tree();
+15
arch/xtensa/kernel/smp.c
··· 496 496 on_each_cpu(ipi_flush_tlb_range, &fd, 1); 497 497 } 498 498 499 + static void ipi_flush_tlb_kernel_range(void *arg) 500 + { 501 + struct flush_data *fd = arg; 502 + local_flush_tlb_kernel_range(fd->addr1, fd->addr2); 503 + } 504 + 505 + void flush_tlb_kernel_range(unsigned long start, unsigned long end) 506 + { 507 + struct flush_data fd = { 508 + .addr1 = start, 509 + .addr2 = end, 510 + }; 511 + on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1); 512 + } 513 + 499 514 /* Cache flush functions */ 500 515 501 516 static void ipi_flush_cache_all(void *arg)
+7
arch/xtensa/kernel/xtensa_ksyms.c
··· 20 20 #include <linux/in6.h> 21 21 22 22 #include <asm/uaccess.h> 23 + #include <asm/cacheflush.h> 23 24 #include <asm/checksum.h> 24 25 #include <asm/dma.h> 25 26 #include <asm/io.h> ··· 106 105 * Architecture-specific symbols 107 106 */ 108 107 EXPORT_SYMBOL(__xtensa_copy_user); 108 + EXPORT_SYMBOL(__invalidate_icache_range); 109 109 110 110 /* 111 111 * Kernel hacking ... ··· 128 126 129 127 #ifdef CONFIG_FUNCTION_TRACER 130 128 EXPORT_SYMBOL(_mcount); 129 + #endif 130 + 131 + EXPORT_SYMBOL(__invalidate_dcache_range); 132 + #if XCHAL_DCACHE_IS_WRITEBACK 133 + EXPORT_SYMBOL(__flush_dcache_range); 131 134 #endif
+1
arch/xtensa/mm/Makefile
··· 4 4 5 5 obj-y := init.o cache.o misc.o 6 6 obj-$(CONFIG_MMU) += fault.o mmu.o tlb.o 7 + obj-$(CONFIG_HIGHMEM) += highmem.o
+6 -1
arch/xtensa/mm/cache.c
··· 59 59 * 60 60 */ 61 61 62 + #if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM) 63 + #error "HIGHMEM is not supported on cores with aliasing cache." 64 + #endif 65 + 62 66 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 63 67 64 68 /* ··· 183 179 #else 184 180 if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags) 185 181 && (vma->vm_flags & VM_EXEC) != 0) { 186 - unsigned long paddr = (unsigned long) page_address(page); 182 + unsigned long paddr = (unsigned long)kmap_atomic(page); 187 183 __flush_dcache_page(paddr); 188 184 __invalidate_icache_page(paddr); 189 185 set_bit(PG_arch_1, &page->flags); 186 + kunmap_atomic((void *)paddr); 190 187 } 191 188 #endif 192 189 }
+72
arch/xtensa/mm/highmem.c
··· 1 + /* 2 + * High memory support for Xtensa architecture 3 + * 4 + * This file is subject to the terms and conditions of the GNU General 5 + * Public License. See the file "COPYING" in the main directory of 6 + * this archive for more details. 7 + * 8 + * Copyright (C) 2014 Cadence Design Systems Inc. 9 + */ 10 + 11 + #include <linux/export.h> 12 + #include <linux/highmem.h> 13 + #include <asm/tlbflush.h> 14 + 15 + static pte_t *kmap_pte; 16 + 17 + void *kmap_atomic(struct page *page) 18 + { 19 + enum fixed_addresses idx; 20 + unsigned long vaddr; 21 + int type; 22 + 23 + pagefault_disable(); 24 + if (!PageHighMem(page)) 25 + return page_address(page); 26 + 27 + type = kmap_atomic_idx_push(); 28 + idx = type + KM_TYPE_NR * smp_processor_id(); 29 + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 30 + #ifdef CONFIG_DEBUG_HIGHMEM 31 + BUG_ON(!pte_none(*(kmap_pte - idx))); 32 + #endif 33 + set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC)); 34 + 35 + return (void *)vaddr; 36 + } 37 + EXPORT_SYMBOL(kmap_atomic); 38 + 39 + void __kunmap_atomic(void *kvaddr) 40 + { 41 + int idx, type; 42 + 43 + if (kvaddr >= (void *)FIXADDR_START && 44 + kvaddr < (void *)FIXADDR_TOP) { 45 + type = kmap_atomic_idx(); 46 + idx = type + KM_TYPE_NR * smp_processor_id(); 47 + 48 + /* 49 + * Force other mappings to Oops if they'll try to access this 50 + * pte without first remap it. Keeping stale mappings around 51 + * is a bad idea also, in case the page changes cacheability 52 + * attributes or becomes a protected page in a hypervisor. 
53 + */ 54 + pte_clear(&init_mm, kvaddr, kmap_pte - idx); 55 + local_flush_tlb_kernel_range((unsigned long)kvaddr, 56 + (unsigned long)kvaddr + PAGE_SIZE); 57 + 58 + kmap_atomic_idx_pop(); 59 + } 60 + 61 + pagefault_enable(); 62 + } 63 + EXPORT_SYMBOL(__kunmap_atomic); 64 + 65 + void __init kmap_init(void) 66 + { 67 + unsigned long kmap_vstart; 68 + 69 + /* cache the first kmap pte */ 70 + kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); 71 + kmap_pte = kmap_get_fixmap_pte(kmap_vstart); 72 + }
+255 -48
arch/xtensa/mm/init.c
··· 8 8 * for more details. 9 9 * 10 10 * Copyright (C) 2001 - 2005 Tensilica Inc. 11 + * Copyright (C) 2014 Cadence Design Systems Inc. 11 12 * 12 13 * Chris Zankel <chris@zankel.net> 13 14 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> ··· 20 19 #include <linux/errno.h> 21 20 #include <linux/bootmem.h> 22 21 #include <linux/gfp.h> 22 + #include <linux/highmem.h> 23 23 #include <linux/swap.h> 24 24 #include <linux/mman.h> 25 25 #include <linux/nodemask.h> ··· 29 27 #include <asm/bootparam.h> 30 28 #include <asm/page.h> 31 29 #include <asm/sections.h> 30 + #include <asm/sysmem.h> 31 + 32 + struct sysmem_info sysmem __initdata; 33 + 34 + static void __init sysmem_dump(void) 35 + { 36 + unsigned i; 37 + 38 + pr_debug("Sysmem:\n"); 39 + for (i = 0; i < sysmem.nr_banks; ++i) 40 + pr_debug(" 0x%08lx - 0x%08lx (%ldK)\n", 41 + sysmem.bank[i].start, sysmem.bank[i].end, 42 + (sysmem.bank[i].end - sysmem.bank[i].start) >> 10); 43 + } 44 + 45 + /* 46 + * Find bank with maximal .start such that bank.start <= start 47 + */ 48 + static inline struct meminfo * __init find_bank(unsigned long start) 49 + { 50 + unsigned i; 51 + struct meminfo *it = NULL; 52 + 53 + for (i = 0; i < sysmem.nr_banks; ++i) 54 + if (sysmem.bank[i].start <= start) 55 + it = sysmem.bank + i; 56 + else 57 + break; 58 + return it; 59 + } 60 + 61 + /* 62 + * Move all memory banks starting at 'from' to a new place at 'to', 63 + * adjust nr_banks accordingly. 64 + * Both 'from' and 'to' must be inside the sysmem.bank. 65 + * 66 + * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank). 
67 + */ 68 + static int __init move_banks(struct meminfo *to, struct meminfo *from) 69 + { 70 + unsigned n = sysmem.nr_banks - (from - sysmem.bank); 71 + 72 + if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX) 73 + return -ENOMEM; 74 + if (to != from) 75 + memmove(to, from, n * sizeof(struct meminfo)); 76 + sysmem.nr_banks += to - from; 77 + return 0; 78 + } 79 + 80 + /* 81 + * Add new bank to sysmem. Resulting sysmem is the union of bytes of the 82 + * original sysmem and the new bank. 83 + * 84 + * Returns: 0 (success), < 0 (error) 85 + */ 86 + int __init add_sysmem_bank(unsigned long start, unsigned long end) 87 + { 88 + unsigned i; 89 + struct meminfo *it = NULL; 90 + unsigned long sz; 91 + unsigned long bank_sz = 0; 92 + 93 + if (start == end || 94 + (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) { 95 + pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n", 96 + start, end - start); 97 + return -EINVAL; 98 + } 99 + 100 + start = PAGE_ALIGN(start); 101 + end &= PAGE_MASK; 102 + sz = end - start; 103 + 104 + it = find_bank(start); 105 + 106 + if (it) 107 + bank_sz = it->end - it->start; 108 + 109 + if (it && bank_sz >= start - it->start) { 110 + if (end - it->start > bank_sz) 111 + it->end = end; 112 + else 113 + return 0; 114 + } else { 115 + if (!it) 116 + it = sysmem.bank; 117 + else 118 + ++it; 119 + 120 + if (it - sysmem.bank < sysmem.nr_banks && 121 + it->start - start <= sz) { 122 + it->start = start; 123 + if (it->end - it->start < sz) 124 + it->end = end; 125 + else 126 + return 0; 127 + } else { 128 + if (move_banks(it + 1, it) < 0) { 129 + pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n", 130 + start, end - start); 131 + return -EINVAL; 132 + } 133 + it->start = start; 134 + it->end = end; 135 + return 0; 136 + } 137 + } 138 + sz = it->end - it->start; 139 + for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i) 140 + if (sysmem.bank[i].start - it->start <= sz) { 141 + if (sz < sysmem.bank[i].end - it->start) 
142 + it->end = sysmem.bank[i].end; 143 + } else { 144 + break; 145 + } 146 + 147 + move_banks(it + 1, sysmem.bank + i); 148 + return 0; 149 + } 32 150 33 151 /* 34 152 * mem_reserve(start, end, must_exist) 35 153 * 36 154 * Reserve some memory from the memory pool. 155 + * If must_exist is set and a part of the region being reserved does not exist 156 + * memory map is not altered. 37 157 * 38 158 * Parameters: 39 159 * start Start of region, ··· 163 39 * must_exist Must exist in memory pool. 164 40 * 165 41 * Returns: 166 - * 0 (memory area couldn't be mapped) 167 - * -1 (success) 42 + * 0 (success) 43 + * < 0 (error) 168 44 */ 169 45 170 46 int __init mem_reserve(unsigned long start, unsigned long end, int must_exist) 171 47 { 172 - int i; 173 - 174 - if (start == end) 175 - return 0; 48 + struct meminfo *it; 49 + struct meminfo *rm = NULL; 50 + unsigned long sz; 51 + unsigned long bank_sz = 0; 176 52 177 53 start = start & PAGE_MASK; 178 54 end = PAGE_ALIGN(end); 55 + sz = end - start; 56 + if (!sz) 57 + return -EINVAL; 179 58 180 - for (i = 0; i < sysmem.nr_banks; i++) 181 - if (start < sysmem.bank[i].end 182 - && end >= sysmem.bank[i].start) 183 - break; 59 + it = find_bank(start); 184 60 185 - if (i == sysmem.nr_banks) { 186 - if (must_exist) 187 - printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) " 188 - "not in any region!\n", start, end); 189 - return 0; 61 + if (it) 62 + bank_sz = it->end - it->start; 63 + 64 + if ((!it || end - it->start > bank_sz) && must_exist) { 65 + pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n", 66 + start, end); 67 + return -EINVAL; 190 68 } 191 69 192 - if (start > sysmem.bank[i].start) { 193 - if (end < sysmem.bank[i].end) { 194 - /* split entry */ 195 - if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) 196 - panic("meminfo overflow\n"); 197 - sysmem.bank[sysmem.nr_banks].start = end; 198 - sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end; 199 - sysmem.nr_banks++; 70 + if (it && start - it->start < bank_sz) { 71 + if 
(start == it->start) { 72 + if (end - it->start < bank_sz) { 73 + it->start = end; 74 + return 0; 75 + } else { 76 + rm = it; 77 + } 78 + } else { 79 + it->end = start; 80 + if (end - it->start < bank_sz) 81 + return add_sysmem_bank(end, 82 + it->start + bank_sz); 83 + ++it; 200 84 } 201 - sysmem.bank[i].end = start; 202 - 203 - } else if (end < sysmem.bank[i].end) { 204 - sysmem.bank[i].start = end; 205 - 206 - } else { 207 - /* remove entry */ 208 - sysmem.nr_banks--; 209 - sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start; 210 - sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end; 211 85 } 212 - return -1; 86 + 87 + if (!it) 88 + it = sysmem.bank; 89 + 90 + for (; it < sysmem.bank + sysmem.nr_banks; ++it) { 91 + if (it->end - start <= sz) { 92 + if (!rm) 93 + rm = it; 94 + } else { 95 + if (it->start - start < sz) 96 + it->start = end; 97 + break; 98 + } 99 + } 100 + 101 + if (rm) 102 + move_banks(rm, it); 103 + 104 + return 0; 213 105 } 214 106 215 107 ··· 239 99 unsigned long bootmap_start, bootmap_size; 240 100 int i; 241 101 102 + sysmem_dump(); 242 103 max_low_pfn = max_pfn = 0; 243 104 min_low_pfn = ~0; 244 105 ··· 297 156 298 157 void __init zones_init(void) 299 158 { 300 - unsigned long zones_size[MAX_NR_ZONES]; 301 - int i; 302 - 303 159 /* All pages are DMA-able, so we put them all in the DMA zone. 
*/ 304 - 305 - zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET; 306 - for (i = 1; i < MAX_NR_ZONES; i++) 307 - zones_size[i] = 0; 308 - 160 + unsigned long zones_size[MAX_NR_ZONES] = { 161 + [ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET, 309 162 #ifdef CONFIG_HIGHMEM 310 - zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn; 163 + [ZONE_HIGHMEM] = max_pfn - max_low_pfn, 311 164 #endif 312 - 165 + }; 313 166 free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL); 314 167 } 315 168 ··· 313 178 314 179 void __init mem_init(void) 315 180 { 316 - max_mapnr = max_low_pfn - ARCH_PFN_OFFSET; 317 - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 318 - 319 181 #ifdef CONFIG_HIGHMEM 320 - #error HIGHGMEM not implemented in init.c 182 + unsigned long tmp; 183 + 184 + reset_all_zones_managed_pages(); 185 + for (tmp = max_low_pfn; tmp < max_pfn; tmp++) 186 + free_highmem_page(pfn_to_page(tmp)); 321 187 #endif 188 + 189 + max_mapnr = max_pfn - ARCH_PFN_OFFSET; 190 + high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); 322 191 323 192 free_all_bootmem(); 324 193 325 194 mem_init_print_info(NULL); 195 + pr_info("virtual kernel memory layout:\n" 196 + #ifdef CONFIG_HIGHMEM 197 + " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n" 198 + " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" 199 + #endif 200 + " vmalloc : 0x%08x - 0x%08x (%5u MB)\n" 201 + " lowmem : 0x%08x - 0x%08lx (%5lu MB)\n", 202 + #ifdef CONFIG_HIGHMEM 203 + PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE, 204 + (LAST_PKMAP*PAGE_SIZE) >> 10, 205 + FIXADDR_START, FIXADDR_TOP, 206 + (FIXADDR_TOP - FIXADDR_START) >> 10, 207 + #endif 208 + VMALLOC_START, VMALLOC_END, 209 + (VMALLOC_END - VMALLOC_START) >> 20, 210 + PAGE_OFFSET, PAGE_OFFSET + 211 + (max_low_pfn - min_low_pfn) * PAGE_SIZE, 212 + ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20); 326 213 } 327 214 328 215 #ifdef CONFIG_BLK_DEV_INITRD ··· 361 204 { 362 205 free_initmem_default(-1); 363 206 } 207 + 208 + static void __init parse_memmap_one(char *p) 209 + { 210 + char 
*oldp; 211 + unsigned long start_at, mem_size; 212 + 213 + if (!p) 214 + return; 215 + 216 + oldp = p; 217 + mem_size = memparse(p, &p); 218 + if (p == oldp) 219 + return; 220 + 221 + switch (*p) { 222 + case '@': 223 + start_at = memparse(p + 1, &p); 224 + add_sysmem_bank(start_at, start_at + mem_size); 225 + break; 226 + 227 + case '$': 228 + start_at = memparse(p + 1, &p); 229 + mem_reserve(start_at, start_at + mem_size, 0); 230 + break; 231 + 232 + case 0: 233 + mem_reserve(mem_size, 0, 0); 234 + break; 235 + 236 + default: 237 + pr_warn("Unrecognized memmap syntax: %s\n", p); 238 + break; 239 + } 240 + } 241 + 242 + static int __init parse_memmap_opt(char *str) 243 + { 244 + while (str) { 245 + char *k = strchr(str, ','); 246 + 247 + if (k) 248 + *k++ = 0; 249 + 250 + parse_memmap_one(str); 251 + str = k; 252 + } 253 + 254 + return 0; 255 + } 256 + early_param("memmap", parse_memmap_opt);
+36
arch/xtensa/mm/mmu.c
··· 3 3 * 4 4 * Extracted from init.c 5 5 */ 6 + #include <linux/bootmem.h> 6 7 #include <linux/percpu.h> 7 8 #include <linux/init.h> 8 9 #include <linux/string.h> ··· 17 16 #include <asm/initialize_mmu.h> 18 17 #include <asm/io.h> 19 18 19 + #if defined(CONFIG_HIGHMEM) 20 + static void * __init init_pmd(unsigned long vaddr) 21 + { 22 + pgd_t *pgd = pgd_offset_k(vaddr); 23 + pmd_t *pmd = pmd_offset(pgd, vaddr); 24 + 25 + if (pmd_none(*pmd)) { 26 + unsigned i; 27 + pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE); 28 + 29 + for (i = 0; i < 1024; i++) 30 + pte_clear(NULL, 0, pte + i); 31 + 32 + set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK)); 33 + BUG_ON(pte != pte_offset_kernel(pmd, 0)); 34 + pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n", 35 + __func__, vaddr, pmd, pte); 36 + return pte; 37 + } else { 38 + return pte_offset_kernel(pmd, 0); 39 + } 40 + } 41 + 42 + static void __init fixedrange_init(void) 43 + { 44 + BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE); 45 + init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK); 46 + } 47 + #endif 48 + 20 49 void __init paging_init(void) 21 50 { 22 51 memset(swapper_pg_dir, 0, PAGE_SIZE); 52 + #ifdef CONFIG_HIGHMEM 53 + fixedrange_init(); 54 + pkmap_page_table = init_pmd(PKMAP_BASE); 55 + kmap_init(); 56 + #endif 23 57 } 24 58 25 59 /*
+15
arch/xtensa/mm/tlb.c
··· 149 149 local_irq_restore(flags); 150 150 } 151 151 152 + void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) 153 + { 154 + if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET && 155 + end - start < _TLB_ENTRIES << PAGE_SHIFT) { 156 + start &= PAGE_MASK; 157 + while (start < end) { 158 + invalidate_itlb_mapping(start); 159 + invalidate_dtlb_mapping(start); 160 + start += PAGE_SIZE; 161 + } 162 + } else { 163 + local_flush_tlb_all(); 164 + } 165 + } 166 + 152 167 #ifdef CONFIG_DEBUG_TLB_SANITY 153 168 154 169 static unsigned get_pte_for_vaddr(unsigned vaddr)
+2 -1
arch/xtensa/platforms/iss/Makefile
··· 4 4 # "prom monitor" library routines under Linux. 5 5 # 6 6 7 - obj-y = console.o setup.o 7 + obj-y = setup.o 8 + obj-$(CONFIG_TTY) += console.o 8 9 obj-$(CONFIG_NET) += network.o 9 10 obj-$(CONFIG_BLK_DEV_SIMDISK) += simdisk.o
+1 -11
arch/xtensa/platforms/xt2000/setup.c
··· 92 92 93 93 /* early initialization */ 94 94 95 - extern sysmem_info_t __initdata sysmem; 96 - 97 - void platform_init(bp_tag_t* first) 95 + void __init platform_init(bp_tag_t *first) 98 96 { 99 - /* Set default memory block if not provided by the bootloader. */ 100 - 101 - if (sysmem.nr_banks == 0) { 102 - sysmem.nr_banks = 1; 103 - sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START; 104 - sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START 105 - + PLATFORM_DEFAULT_MEM_SIZE; 106 - } 107 97 } 108 98 109 99 /* Heartbeat. Let the LED blink. */
+1 -1
crypto/crypto_user.c
··· 466 466 type -= CRYPTO_MSG_BASE; 467 467 link = &crypto_dispatch[type]; 468 468 469 - if (!capable(CAP_NET_ADMIN)) 469 + if (!netlink_capable(skb, CAP_NET_ADMIN)) 470 470 return -EPERM; 471 471 472 472 if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
+4 -3
drivers/acpi/acpi_processor.c
··· 170 170 acpi_status status; 171 171 int ret; 172 172 173 + if (pr->apic_id == -1) 174 + return -ENODEV; 175 + 173 176 status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); 174 177 if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) 175 178 return -ENODEV; ··· 263 260 } 264 261 265 262 apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id); 266 - if (apic_id < 0) { 263 + if (apic_id < 0) 267 264 acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n"); 268 - return -ENODEV; 269 - } 270 265 pr->apic_id = apic_id; 271 266 272 267 cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
+39 -7
drivers/acpi/device_pm.c
··· 900 900 */ 901 901 int acpi_subsys_prepare(struct device *dev) 902 902 { 903 - /* 904 - * Devices having power.ignore_children set may still be necessary for 905 - * suspending their children in the next phase of device suspend. 906 - */ 907 - if (dev->power.ignore_children) 908 - pm_runtime_resume(dev); 903 + struct acpi_device *adev = ACPI_COMPANION(dev); 904 + u32 sys_target; 905 + int ret, state; 909 906 910 - return pm_generic_prepare(dev); 907 + ret = pm_generic_prepare(dev); 908 + if (ret < 0) 909 + return ret; 910 + 911 + if (!adev || !pm_runtime_suspended(dev) 912 + || device_may_wakeup(dev) != !!adev->wakeup.prepare_count) 913 + return 0; 914 + 915 + sys_target = acpi_target_system_state(); 916 + if (sys_target == ACPI_STATE_S0) 917 + return 1; 918 + 919 + if (adev->power.flags.dsw_present) 920 + return 0; 921 + 922 + ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state); 923 + return !ret && state == adev->power.state; 911 924 } 912 925 EXPORT_SYMBOL_GPL(acpi_subsys_prepare); 926 + 927 + /** 928 + * acpi_subsys_complete - Finalize device's resume during system resume. 929 + * @dev: Device to handle. 930 + */ 931 + void acpi_subsys_complete(struct device *dev) 932 + { 933 + /* 934 + * If the device had been runtime-suspended before the system went into 935 + * the sleep state it is going out of and it has never been resumed till 936 + * now, resume it in case the firmware powered it up. 937 + */ 938 + if (dev->power.direct_complete) 939 + pm_request_resume(dev); 940 + } 941 + EXPORT_SYMBOL_GPL(acpi_subsys_complete); 913 942 914 943 /** 915 944 * acpi_subsys_suspend - Run the device driver's suspend callback. ··· 952 923 pm_runtime_resume(dev); 953 924 return pm_generic_suspend(dev); 954 925 } 926 + EXPORT_SYMBOL_GPL(acpi_subsys_suspend); 955 927 956 928 /** 957 929 * acpi_subsys_suspend_late - Suspend device using ACPI. 
··· 998 968 pm_runtime_resume(dev); 999 969 return pm_generic_freeze(dev); 1000 970 } 971 + EXPORT_SYMBOL_GPL(acpi_subsys_freeze); 1001 972 1002 973 #endif /* CONFIG_PM_SLEEP */ 1003 974 ··· 1010 979 #endif 1011 980 #ifdef CONFIG_PM_SLEEP 1012 981 .prepare = acpi_subsys_prepare, 982 + .complete = acpi_subsys_complete, 1013 983 .suspend = acpi_subsys_suspend, 1014 984 .suspend_late = acpi_subsys_suspend_late, 1015 985 .resume_early = acpi_subsys_resume_early,
+12 -9
drivers/acpi/ec.c
··· 206 206 spin_unlock_irqrestore(&ec->lock, flags); 207 207 } 208 208 209 - static int acpi_ec_sync_query(struct acpi_ec *ec); 209 + static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); 210 210 211 211 static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) 212 212 { 213 213 if (state & ACPI_EC_FLAG_SCI) { 214 214 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) 215 - return acpi_ec_sync_query(ec); 215 + return acpi_ec_sync_query(ec, NULL); 216 216 } 217 217 return 0; 218 218 } ··· 443 443 444 444 EXPORT_SYMBOL(ec_get_handle); 445 445 446 - static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data); 447 - 448 446 /* 449 - * Clears stale _Q events that might have accumulated in the EC. 447 + * Process _Q events that might have accumulated in the EC. 450 448 * Run with locked ec mutex. 451 449 */ 452 450 static void acpi_ec_clear(struct acpi_ec *ec) ··· 453 455 u8 value = 0; 454 456 455 457 for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { 456 - status = acpi_ec_query_unlocked(ec, &value); 458 + status = acpi_ec_sync_query(ec, &value); 457 459 if (status || !value) 458 460 break; 459 461 } ··· 580 582 kfree(handler); 581 583 } 582 584 583 - static int acpi_ec_sync_query(struct acpi_ec *ec) 585 + static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data) 584 586 { 585 587 u8 value = 0; 586 588 int status; 587 589 struct acpi_ec_query_handler *handler, *copy; 588 - if ((status = acpi_ec_query_unlocked(ec, &value))) 590 + 591 + status = acpi_ec_query_unlocked(ec, &value); 592 + if (data) 593 + *data = value; 594 + if (status) 589 595 return status; 596 + 590 597 list_for_each_entry(handler, &ec->list, node) { 591 598 if (value == handler->query_bit) { 592 599 /* have custom handler for this bit */ ··· 615 612 if (!ec) 616 613 return; 617 614 mutex_lock(&ec->mutex); 618 - acpi_ec_sync_query(ec); 615 + acpi_ec_sync_query(ec, NULL); 619 616 mutex_unlock(&ec->mutex); 620 617 } 621 618
+4
drivers/acpi/scan.c
··· 1551 1551 */ 1552 1552 if (acpi_has_method(device->handle, "_PSC")) 1553 1553 device->power.flags.explicit_get = 1; 1554 + 1554 1555 if (acpi_has_method(device->handle, "_IRC")) 1555 1556 device->power.flags.inrush_current = 1; 1557 + 1558 + if (acpi_has_method(device->handle, "_DSW")) 1559 + device->power.flags.dsw_present = 1; 1556 1560 1557 1561 /* 1558 1562 * Enumerate supported power management states
+19
drivers/acpi/sleep.c
··· 89 89 { 90 90 return acpi_target_sleep_state; 91 91 } 92 + EXPORT_SYMBOL_GPL(acpi_target_system_state); 92 93 93 94 static bool pwr_btn_event_pending; 94 95 ··· 612 611 .recover = acpi_pm_finish, 613 612 }; 614 613 614 + static int acpi_freeze_begin(void) 615 + { 616 + acpi_scan_lock_acquire(); 617 + return 0; 618 + } 619 + 620 + static void acpi_freeze_end(void) 621 + { 622 + acpi_scan_lock_release(); 623 + } 624 + 625 + static const struct platform_freeze_ops acpi_freeze_ops = { 626 + .begin = acpi_freeze_begin, 627 + .end = acpi_freeze_end, 628 + }; 629 + 615 630 static void acpi_sleep_suspend_setup(void) 616 631 { 617 632 int i; ··· 638 621 639 622 suspend_set_ops(old_suspend_ordering ? 640 623 &acpi_suspend_ops_old : &acpi_suspend_ops); 624 + freeze_set_ops(&acpi_freeze_ops); 641 625 } 626 + 642 627 #else /* !CONFIG_SUSPEND */ 643 628 static inline void acpi_sleep_suspend_setup(void) {} 644 629 #endif /* !CONFIG_SUSPEND */
+17
drivers/base/dd.c
··· 52 52 static LIST_HEAD(deferred_probe_pending_list); 53 53 static LIST_HEAD(deferred_probe_active_list); 54 54 static struct workqueue_struct *deferred_wq; 55 + static atomic_t deferred_trigger_count = ATOMIC_INIT(0); 55 56 56 57 /** 57 58 * deferred_probe_work_func() - Retry probing devices in the active list. ··· 136 135 * This functions moves all devices from the pending list to the active 137 136 * list and schedules the deferred probe workqueue to process them. It 138 137 * should be called anytime a driver is successfully bound to a device. 138 + * 139 + * Note, there is a race condition in multi-threaded probe. In the case where 140 + * more than one device is probing at the same time, it is possible for one 141 + * probe to complete successfully while another is about to defer. If the second 142 + * depends on the first, then it will get put on the pending list after the 143 + * trigger event has already occured and will be stuck there. 144 + * 145 + * The atomic 'deferred_trigger_count' is used to determine if a successful 146 + * trigger has occurred in the midst of probing a driver. If the trigger count 147 + * changes in the midst of a probe, then deferred processing should be triggered 148 + * again. 
139 149 */ 140 150 static void driver_deferred_probe_trigger(void) 141 151 { ··· 159 147 * into the active list so they can be retried by the workqueue 160 148 */ 161 149 mutex_lock(&deferred_probe_mutex); 150 + atomic_inc(&deferred_trigger_count); 162 151 list_splice_tail_init(&deferred_probe_pending_list, 163 152 &deferred_probe_active_list); 164 153 mutex_unlock(&deferred_probe_mutex); ··· 278 265 static int really_probe(struct device *dev, struct device_driver *drv) 279 266 { 280 267 int ret = 0; 268 + int local_trigger_count = atomic_read(&deferred_trigger_count); 281 269 282 270 atomic_inc(&probe_count); 283 271 pr_debug("bus: '%s': %s: probing driver %s with device %s\n", ··· 324 310 /* Driver requested deferred probing */ 325 311 dev_info(dev, "Driver %s requests probe deferral\n", drv->name); 326 312 driver_deferred_probe_add(dev); 313 + /* Did a trigger occur while probing? Need to re-trigger if yes */ 314 + if (local_trigger_count != atomic_read(&deferred_trigger_count)) 315 + driver_deferred_probe_trigger(); 327 316 } else if (ret != -ENODEV && ret != -ENXIO) { 328 317 /* driver matched but the probe failed */ 329 318 printk(KERN_WARNING
+6 -1
drivers/base/platform.c
··· 13 13 #include <linux/string.h> 14 14 #include <linux/platform_device.h> 15 15 #include <linux/of_device.h> 16 + #include <linux/of_irq.h> 16 17 #include <linux/module.h> 17 18 #include <linux/init.h> 18 19 #include <linux/dma-mapping.h> ··· 88 87 return -ENXIO; 89 88 return dev->archdata.irqs[num]; 90 89 #else 91 - struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); 90 + struct resource *r; 91 + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) 92 + return of_irq_get(dev->dev.of_node, num); 93 + 94 + r = platform_get_resource(dev, IORESOURCE_IRQ, num); 92 95 93 96 return r ? r->start : -ENXIO; 94 97 #endif
+7 -4
drivers/block/floppy.c
··· 3067 3067 int ret; 3068 3068 3069 3069 while (ptr) { 3070 - ret = copy_to_user(param, ptr, sizeof(*ptr)); 3070 + struct floppy_raw_cmd cmd = *ptr; 3071 + cmd.next = NULL; 3072 + cmd.kernel_data = NULL; 3073 + ret = copy_to_user(param, &cmd, sizeof(cmd)); 3071 3074 if (ret) 3072 3075 return -EFAULT; 3073 3076 param += sizeof(struct floppy_raw_cmd); ··· 3124 3121 return -ENOMEM; 3125 3122 *rcmd = ptr; 3126 3123 ret = copy_from_user(ptr, param, sizeof(*ptr)); 3127 - if (ret) 3128 - return -EFAULT; 3129 3124 ptr->next = NULL; 3130 3125 ptr->buffer_length = 0; 3126 + ptr->kernel_data = NULL; 3127 + if (ret) 3128 + return -EFAULT; 3131 3129 param += sizeof(struct floppy_raw_cmd); 3132 3130 if (ptr->cmd_count > 33) 3133 3131 /* the command may now also take up the space ··· 3144 3140 for (i = 0; i < 16; i++) 3145 3141 ptr->reply[i] = 0; 3146 3142 ptr->resultcode = 0; 3147 - ptr->kernel_data = NULL; 3148 3143 3149 3144 if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) { 3150 3145 if (ptr->length <= 0)
+2
drivers/bluetooth/ath3k.c
··· 82 82 { USB_DEVICE(0x04CA, 0x3004) }, 83 83 { USB_DEVICE(0x04CA, 0x3005) }, 84 84 { USB_DEVICE(0x04CA, 0x3006) }, 85 + { USB_DEVICE(0x04CA, 0x3007) }, 85 86 { USB_DEVICE(0x04CA, 0x3008) }, 86 87 { USB_DEVICE(0x04CA, 0x300b) }, 87 88 { USB_DEVICE(0x0930, 0x0219) }, ··· 132 131 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, 133 132 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 134 133 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, 134 + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, 135 135 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 136 136 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, 137 137 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+2 -3
drivers/bluetooth/btusb.c
··· 152 152 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, 153 153 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 154 154 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, 155 + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, 155 156 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 156 157 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, 157 158 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, ··· 1486 1485 if (id->driver_info & BTUSB_BCM92035) 1487 1486 hdev->setup = btusb_setup_bcm92035; 1488 1487 1489 - if (id->driver_info & BTUSB_INTEL) { 1490 - usb_enable_autosuspend(data->udev); 1488 + if (id->driver_info & BTUSB_INTEL) 1491 1489 hdev->setup = btusb_setup_intel; 1492 - } 1493 1490 1494 1491 /* Interface numbers are hardcoded in the specification */ 1495 1492 data->isoc = usb_ifnum_to_if(data->udev, 1);
+1
drivers/char/agp/frontend.c
··· 730 730 731 731 agp_copy_info(agp_bridge, &kerninfo); 732 732 733 + memset(&userinfo, 0, sizeof(userinfo)); 733 734 userinfo.version.major = kerninfo.version.major; 734 735 userinfo.version.minor = kerninfo.version.minor; 735 736 userinfo.bridge_id = kerninfo.device->vendor |
+2
drivers/clk/versatile/clk-vexpress-osc.c
··· 100 100 struct clk *clk; 101 101 u32 range[2]; 102 102 103 + vexpress_sysreg_of_early_init(); 104 + 103 105 osc = kzalloc(sizeof(*osc), GFP_KERNEL); 104 106 if (!osc) 105 107 return;
+5 -1
drivers/clocksource/arm_arch_timer.c
··· 66 66 static struct clock_event_device __percpu *arch_timer_evt; 67 67 68 68 static bool arch_timer_use_virtual = true; 69 + static bool arch_timer_c3stop; 69 70 static bool arch_timer_mem_use_virtual; 70 71 71 72 /* ··· 264 263 clk->features = CLOCK_EVT_FEAT_ONESHOT; 265 264 266 265 if (type == ARCH_CP15_TIMER) { 267 - clk->features |= CLOCK_EVT_FEAT_C3STOP; 266 + if (arch_timer_c3stop) 267 + clk->features |= CLOCK_EVT_FEAT_C3STOP; 268 268 clk->name = "arch_sys_timer"; 269 269 clk->rating = 450; 270 270 clk->cpumask = cpumask_of(smp_processor_id()); ··· 666 664 return; 667 665 } 668 666 } 667 + 668 + arch_timer_c3stop = !of_property_read_bool(np, "always-on"); 669 669 670 670 arch_timer_register(); 671 671 arch_timer_common_init();
+6 -1
drivers/clocksource/zevio-timer.c
··· 212 212 return ret; 213 213 } 214 214 215 - CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add); 215 + static void __init zevio_timer_init(struct device_node *node) 216 + { 217 + BUG_ON(zevio_timer_add(node)); 218 + } 219 + 220 + CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
+1 -1
drivers/connector/cn_proc.c
··· 369 369 return; 370 370 371 371 /* Can only change if privileged. */ 372 - if (!capable(CAP_NET_ADMIN)) { 372 + if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) { 373 373 err = EPERM; 374 374 goto out; 375 375 }
+24 -12
drivers/cpufreq/longhaul.c
··· 242 242 * Sets a new clock ratio. 243 243 */ 244 244 245 - static void longhaul_setstate(struct cpufreq_policy *policy, 245 + static int longhaul_setstate(struct cpufreq_policy *policy, 246 246 unsigned int table_index) 247 247 { 248 248 unsigned int mults_index; ··· 258 258 /* Safety precautions */ 259 259 mult = mults[mults_index & 0x1f]; 260 260 if (mult == -1) 261 - return; 261 + return -EINVAL; 262 + 262 263 speed = calc_speed(mult); 263 264 if ((speed > highest_speed) || (speed < lowest_speed)) 264 - return; 265 + return -EINVAL; 266 + 265 267 /* Voltage transition before frequency transition? */ 266 268 if (can_scale_voltage && longhaul_index < table_index) 267 269 dir = 1; 268 270 269 271 freqs.old = calc_speed(longhaul_get_cpu_mult()); 270 272 freqs.new = speed; 271 - 272 - cpufreq_freq_transition_begin(policy, &freqs); 273 273 274 274 pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", 275 275 fsb, mult/10, mult%10, print_speed(speed/1000)); ··· 385 385 goto retry_loop; 386 386 } 387 387 } 388 - /* Report true CPU frequency */ 389 - cpufreq_freq_transition_end(policy, &freqs, 0); 390 388 391 - if (!bm_timeout) 389 + if (!bm_timeout) { 392 390 printk(KERN_INFO PFX "Warning: Timeout while waiting for " 393 391 "idle PCI bus.\n"); 392 + return -EBUSY; 393 + } 394 + 395 + return 0; 394 396 } 395 397 396 398 /* ··· 633 631 unsigned int i; 634 632 unsigned int dir = 0; 635 633 u8 vid, current_vid; 634 + int retval = 0; 636 635 637 636 if (!can_scale_voltage) 638 - longhaul_setstate(policy, table_index); 637 + retval = longhaul_setstate(policy, table_index); 639 638 else { 640 639 /* On test system voltage transitions exceeding single 641 640 * step up or down were turning motherboard off. 
Both ··· 651 648 while (i != table_index) { 652 649 vid = (longhaul_table[i].driver_data >> 8) & 0x1f; 653 650 if (vid != current_vid) { 654 - longhaul_setstate(policy, i); 651 + retval = longhaul_setstate(policy, i); 655 652 current_vid = vid; 656 653 msleep(200); 657 654 } ··· 660 657 else 661 658 i--; 662 659 } 663 - longhaul_setstate(policy, table_index); 660 + retval = longhaul_setstate(policy, table_index); 664 661 } 662 + 665 663 longhaul_index = table_index; 666 - return 0; 664 + return retval; 667 665 } 668 666 669 667 ··· 972 968 973 969 for (i = 0; i < numscales; i++) { 974 970 if (mults[i] == maxmult) { 971 + struct cpufreq_freqs freqs; 972 + 973 + freqs.old = policy->cur; 974 + freqs.new = longhaul_table[i].frequency; 975 + freqs.flags = 0; 976 + 977 + cpufreq_freq_transition_begin(policy, &freqs); 975 978 longhaul_setstate(policy, i); 979 + cpufreq_freq_transition_end(policy, &freqs, 0); 976 980 break; 977 981 } 978 982 }
+13 -10
drivers/cpufreq/powernow-k6.c
··· 138 138 static int powernow_k6_target(struct cpufreq_policy *policy, 139 139 unsigned int best_i) 140 140 { 141 - struct cpufreq_freqs freqs; 142 141 143 142 if (clock_ratio[best_i].driver_data > max_multiplier) { 144 143 printk(KERN_ERR PFX "invalid target frequency\n"); 145 144 return -EINVAL; 146 145 } 147 146 148 - freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); 149 - freqs.new = busfreq * clock_ratio[best_i].driver_data; 150 - 151 - cpufreq_freq_transition_begin(policy, &freqs); 152 - 153 147 powernow_k6_set_cpu_multiplier(best_i); 154 - 155 - cpufreq_freq_transition_end(policy, &freqs, 0); 156 148 157 149 return 0; 158 150 } ··· 219 227 static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) 220 228 { 221 229 unsigned int i; 222 - for (i = 0; i < 8; i++) { 223 - if (i == max_multiplier) 230 + 231 + for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { 232 + if (clock_ratio[i].driver_data == max_multiplier) { 233 + struct cpufreq_freqs freqs; 234 + 235 + freqs.old = policy->cur; 236 + freqs.new = clock_ratio[i].frequency; 237 + freqs.flags = 0; 238 + 239 + cpufreq_freq_transition_begin(policy, &freqs); 224 240 powernow_k6_target(policy, i); 241 + cpufreq_freq_transition_end(policy, &freqs, 0); 242 + break; 243 + } 225 244 } 226 245 return 0; 227 246 }
-4
drivers/cpufreq/powernow-k7.c
··· 269 269 270 270 freqs.new = powernow_table[index].frequency; 271 271 272 - cpufreq_freq_transition_begin(policy, &freqs); 273 - 274 272 /* Now do the magic poking into the MSRs. */ 275 273 276 274 if (have_a0 == 1) /* A0 errata 5 */ ··· 287 289 288 290 if (have_a0 == 1) 289 291 local_irq_enable(); 290 - 291 - cpufreq_freq_transition_end(policy, &freqs, 0); 292 292 293 293 return 0; 294 294 }
+4 -1
drivers/cpufreq/ppc-corenet-cpufreq.c
··· 138 138 struct cpufreq_frequency_table *table; 139 139 struct cpu_data *data; 140 140 unsigned int cpu = policy->cpu; 141 + u64 transition_latency_hz; 141 142 142 143 np = of_get_cpu_node(cpu, NULL); 143 144 if (!np) ··· 206 205 for_each_cpu(i, per_cpu(cpu_mask, cpu)) 207 206 per_cpu(cpu_data, i) = data; 208 207 208 + transition_latency_hz = 12ULL * NSEC_PER_SEC; 209 209 policy->cpuinfo.transition_latency = 210 - (12ULL * NSEC_PER_SEC) / fsl_get_sys_freq(); 210 + do_div(transition_latency_hz, fsl_get_sys_freq()); 211 + 211 212 of_node_put(np); 212 213 213 214 return 0;
+1
drivers/gpu/drm/exynos/exynos_drm_crtc.c
··· 145 145 146 146 plane->crtc = crtc; 147 147 plane->fb = crtc->primary->fb; 148 + drm_framebuffer_reference(plane->fb); 148 149 149 150 return 0; 150 151 }
+1 -1
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
··· 263 263 buffer->sgt = sgt; 264 264 exynos_gem_obj->base.import_attach = attach; 265 265 266 - DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr, 266 + DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr, 267 267 buffer->size); 268 268 269 269 return &exynos_gem_obj->base;
+2 -2
drivers/gpu/drm/exynos/exynos_drm_dsi.c
··· 1426 1426 1427 1427 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1428 1428 dsi->reg_base = devm_ioremap_resource(&pdev->dev, res); 1429 - if (!dsi->reg_base) { 1429 + if (IS_ERR(dsi->reg_base)) { 1430 1430 dev_err(&pdev->dev, "failed to remap io region\n"); 1431 - return -EADDRNOTAVAIL; 1431 + return PTR_ERR(dsi->reg_base); 1432 1432 } 1433 1433 1434 1434 dsi->phy = devm_phy_get(&pdev->dev, "dsim");
+1 -1
drivers/gpu/drm/exynos/exynos_drm_vidi.c
··· 220 220 221 221 win_data->enabled = true; 222 222 223 - DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr); 223 + DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr); 224 224 225 225 if (ctx->vblank_on) 226 226 schedule_work(&ctx->work);
+3
drivers/gpu/drm/i915/i915_drv.h
··· 1954 1954 #define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) 1955 1955 #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 1956 1956 ((dev)->pdev->device & 0x00F0) == 0x0020) 1957 + /* ULX machines are also considered ULT. */ 1958 + #define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \ 1959 + (dev)->pdev->device == 0x0A1E) 1957 1960 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 1958 1961 1959 1962 /*
+25 -7
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 34 34 35 35 bool intel_enable_ppgtt(struct drm_device *dev, bool full) 36 36 { 37 - if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) 37 + if (i915.enable_ppgtt == 0) 38 38 return false; 39 39 40 40 if (i915.enable_ppgtt == 1 && full) 41 41 return false; 42 42 43 + return true; 44 + } 45 + 46 + static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) 47 + { 48 + if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) 49 + return 0; 50 + 51 + if (enable_ppgtt == 1) 52 + return 1; 53 + 54 + if (enable_ppgtt == 2 && HAS_PPGTT(dev)) 55 + return 2; 56 + 43 57 #ifdef CONFIG_INTEL_IOMMU 44 58 /* Disable ppgtt on SNB if VT-d is on. */ 45 59 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { 46 60 DRM_INFO("Disabling PPGTT because VT-d is on\n"); 47 - return false; 61 + return 0; 48 62 } 49 63 #endif 50 64 51 - /* Full ppgtt disabled by default for now due to issues. */ 52 - if (full) 53 - return false; /* HAS_PPGTT(dev) */ 54 - else 55 - return HAS_ALIASING_PPGTT(dev); 65 + return HAS_ALIASING_PPGTT(dev) ? 1 : 0; 56 66 } 57 67 58 68 #define GEN6_PPGTT_PD_ENTRIES 512 ··· 2041 2031 gtt->base.total >> 20); 2042 2032 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); 2043 2033 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); 2034 + /* 2035 + * i915.enable_ppgtt is read-only, so do an early pass to validate the 2036 + * user's requested state against the hardware/driver capabilities. We 2037 + * do this now so that we can print out any log messages once rather 2038 + * than every time we check intel_enable_ppgtt(). 2039 + */ 2040 + i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt); 2041 + DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); 2044 2042 2045 2043 return 0; 2046 2044 }
+14 -4
drivers/gpu/drm/i915/i915_irq.c
··· 1362 1362 spin_lock(&dev_priv->irq_lock); 1363 1363 for (i = 1; i < HPD_NUM_PINS; i++) { 1364 1364 1365 - WARN_ONCE(hpd[i] & hotplug_trigger && 1366 - dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED, 1367 - "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", 1368 - hotplug_trigger, i, hpd[i]); 1365 + if (hpd[i] & hotplug_trigger && 1366 + dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { 1367 + /* 1368 + * On GMCH platforms the interrupt mask bits only 1369 + * prevent irq generation, not the setting of the 1370 + * hotplug bits itself. So only WARN about unexpected 1371 + * interrupts on saner platforms. 1372 + */ 1373 + WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), 1374 + "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", 1375 + hotplug_trigger, i, hpd[i]); 1376 + 1377 + continue; 1378 + } 1369 1379 1370 1380 if (!(hpd[i] & hotplug_trigger) || 1371 1381 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
+1
drivers/gpu/drm/i915/i915_reg.h
··· 827 827 # define MI_FLUSH_ENABLE (1 << 12) 828 828 # define ASYNC_FLIP_PERF_DISABLE (1 << 14) 829 829 # define MODE_IDLE (1 << 9) 830 + # define STOP_RING (1 << 8) 830 831 831 832 #define GEN6_GT_MODE 0x20d0 832 833 #define GEN7_GT_MODE 0x7008
+32 -17
drivers/gpu/drm/i915/intel_display.c
··· 9654 9654 PIPE_CONF_CHECK_I(pipe_src_w); 9655 9655 PIPE_CONF_CHECK_I(pipe_src_h); 9656 9656 9657 - PIPE_CONF_CHECK_I(gmch_pfit.control); 9658 - /* pfit ratios are autocomputed by the hw on gen4+ */ 9659 - if (INTEL_INFO(dev)->gen < 4) 9660 - PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); 9661 - PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); 9657 + /* 9658 + * FIXME: BIOS likes to set up a cloned config with lvds+external 9659 + * screen. Since we don't yet re-compute the pipe config when moving 9660 + * just the lvds port away to another pipe the sw tracking won't match. 9661 + * 9662 + * Proper atomic modesets with recomputed global state will fix this. 9663 + * Until then just don't check gmch state for inherited modes. 9664 + */ 9665 + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) { 9666 + PIPE_CONF_CHECK_I(gmch_pfit.control); 9667 + /* pfit ratios are autocomputed by the hw on gen4+ */ 9668 + if (INTEL_INFO(dev)->gen < 4) 9669 + PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); 9670 + PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); 9671 + } 9672 + 9662 9673 PIPE_CONF_CHECK_I(pch_pfit.enabled); 9663 9674 if (current_config->pch_pfit.enabled) { 9664 9675 PIPE_CONF_CHECK_I(pch_pfit.pos); ··· 11395 11384 } 11396 11385 } 11397 11386 11398 - static void 11399 - intel_connector_break_all_links(struct intel_connector *connector) 11400 - { 11401 - connector->base.dpms = DRM_MODE_DPMS_OFF; 11402 - connector->base.encoder = NULL; 11403 - connector->encoder->connectors_active = false; 11404 - connector->encoder->base.crtc = NULL; 11405 - } 11406 - 11407 11387 static void intel_enable_pipe_a(struct drm_device *dev) 11408 11388 { 11409 11389 struct intel_connector *connector; ··· 11476 11474 if (connector->encoder->base.crtc != &crtc->base) 11477 11475 continue; 11478 11476 11479 - intel_connector_break_all_links(connector); 11477 + connector->base.dpms = DRM_MODE_DPMS_OFF; 11478 + connector->base.encoder = NULL; 11480 11479 } 11480 + /* multiple connectors may have the same 
encoder: 11481 + * handle them and break crtc link separately */ 11482 + list_for_each_entry(connector, &dev->mode_config.connector_list, 11483 + base.head) 11484 + if (connector->encoder->base.crtc == &crtc->base) { 11485 + connector->encoder->base.crtc = NULL; 11486 + connector->encoder->connectors_active = false; 11487 + } 11481 11488 11482 11489 WARN_ON(crtc->active); 11483 11490 crtc->base.enabled = false; ··· 11568 11557 drm_get_encoder_name(&encoder->base)); 11569 11558 encoder->disable(encoder); 11570 11559 } 11560 + encoder->base.crtc = NULL; 11561 + encoder->connectors_active = false; 11571 11562 11572 11563 /* Inconsistent output/port/pipe state happens presumably due to 11573 11564 * a bug in one of the get_hw_state functions. Or someplace else ··· 11580 11567 base.head) { 11581 11568 if (connector->encoder != encoder) 11582 11569 continue; 11583 - 11584 - intel_connector_break_all_links(connector); 11570 + connector->base.dpms = DRM_MODE_DPMS_OFF; 11571 + connector->base.encoder = NULL; 11585 11572 } 11586 11573 } 11587 11574 /* Enabled encoders without active connectors will be fixed in ··· 11628 11615 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 11629 11616 base.head) { 11630 11617 memset(&crtc->config, 0, sizeof(crtc->config)); 11618 + 11619 + crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE; 11631 11620 11632 11621 crtc->active = dev_priv->display.get_pipe_config(crtc, 11633 11622 &crtc->config);
+12 -2
drivers/gpu/drm/i915/intel_dp.c
··· 105 105 case DP_LINK_BW_2_7: 106 106 break; 107 107 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ 108 - if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) && 108 + if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || 109 + INTEL_INFO(dev)->gen >= 8) && 109 110 intel_dp->dpcd[DP_DPCD_REV] >= 0x12) 110 111 max_link_bw = DP_LINK_BW_5_4; 111 112 else ··· 3620 3619 { 3621 3620 struct drm_connector *connector = &intel_connector->base; 3622 3621 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3623 - struct drm_device *dev = intel_dig_port->base.base.dev; 3622 + struct intel_encoder *intel_encoder = &intel_dig_port->base; 3623 + struct drm_device *dev = intel_encoder->base.dev; 3624 3624 struct drm_i915_private *dev_priv = dev->dev_private; 3625 3625 struct drm_display_mode *fixed_mode = NULL; 3626 3626 bool has_dpcd; ··· 3630 3628 3631 3629 if (!is_edp(intel_dp)) 3632 3630 return true; 3631 + 3632 + /* The VDD bit needs a power domain reference, so if the bit is already 3633 + * enabled when we boot, grab this reference. */ 3634 + if (edp_have_panel_vdd(intel_dp)) { 3635 + enum intel_display_power_domain power_domain; 3636 + power_domain = intel_display_port_power_domain(intel_encoder); 3637 + intel_display_power_get(dev_priv, power_domain); 3638 + } 3633 3639 3634 3640 /* Cache DPCD and EDID for edp. */ 3635 3641 intel_edp_panel_vdd_on(intel_dp);
+2 -1
drivers/gpu/drm/i915/intel_drv.h
··· 236 236 * tracked with quirk flags so that fastboot and state checker can act 237 237 * accordingly. 238 238 */ 239 - #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ 239 + #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ 240 + #define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */ 240 241 unsigned long quirks; 241 242 242 243 /* User requested mode, only valid as a starting point to
+10
drivers/gpu/drm/i915/intel_fbdev.c
··· 132 132 133 133 mutex_lock(&dev->struct_mutex); 134 134 135 + if (intel_fb && 136 + (sizes->fb_width > intel_fb->base.width || 137 + sizes->fb_height > intel_fb->base.height)) { 138 + DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d)," 139 + " releasing it\n", 140 + intel_fb->base.width, intel_fb->base.height, 141 + sizes->fb_width, sizes->fb_height); 142 + drm_framebuffer_unreference(&intel_fb->base); 143 + intel_fb = ifbdev->fb = NULL; 144 + } 135 145 if (!intel_fb || WARN_ON(!intel_fb->obj)) { 136 146 DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); 137 147 ret = intelfb_alloc(helper, sizes);
+5 -4
drivers/gpu/drm/i915/intel_hdmi.c
··· 821 821 } 822 822 } 823 823 824 - static int hdmi_portclock_limit(struct intel_hdmi *hdmi) 824 + static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) 825 825 { 826 826 struct drm_device *dev = intel_hdmi_to_dev(hdmi); 827 827 828 - if (!hdmi->has_hdmi_sink || IS_G4X(dev)) 828 + if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev)) 829 829 return 165000; 830 830 else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) 831 831 return 300000; ··· 837 837 intel_hdmi_mode_valid(struct drm_connector *connector, 838 838 struct drm_display_mode *mode) 839 839 { 840 - if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector))) 840 + if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector), 841 + true)) 841 842 return MODE_CLOCK_HIGH; 842 843 if (mode->clock < 20000) 843 844 return MODE_CLOCK_LOW; ··· 880 879 struct drm_device *dev = encoder->base.dev; 881 880 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 882 881 int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2; 883 - int portclock_limit = hdmi_portclock_limit(intel_hdmi); 882 + int portclock_limit = hdmi_portclock_limit(intel_hdmi, false); 884 883 int desired_bpp; 885 884 886 885 if (intel_hdmi->color_range_auto) {
+34 -20
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 437 437 I915_WRITE(HWS_PGA, addr); 438 438 } 439 439 440 + static bool stop_ring(struct intel_ring_buffer *ring) 441 + { 442 + struct drm_i915_private *dev_priv = to_i915(ring->dev); 443 + 444 + if (!IS_GEN2(ring->dev)) { 445 + I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); 446 + if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { 447 + DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); 448 + return false; 449 + } 450 + } 451 + 452 + I915_WRITE_CTL(ring, 0); 453 + I915_WRITE_HEAD(ring, 0); 454 + ring->write_tail(ring, 0); 455 + 456 + if (!IS_GEN2(ring->dev)) { 457 + (void)I915_READ_CTL(ring); 458 + I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); 459 + } 460 + 461 + return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0; 462 + } 463 + 440 464 static int init_ring_common(struct intel_ring_buffer *ring) 441 465 { 442 466 struct drm_device *dev = ring->dev; 443 467 struct drm_i915_private *dev_priv = dev->dev_private; 444 468 struct drm_i915_gem_object *obj = ring->obj; 445 469 int ret = 0; 446 - u32 head; 447 470 448 471 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 449 472 450 - /* Stop the ring if it's running. 
*/ 451 - I915_WRITE_CTL(ring, 0); 452 - I915_WRITE_HEAD(ring, 0); 453 - ring->write_tail(ring, 0); 454 - if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) 455 - DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); 456 - 457 - if (I915_NEED_GFX_HWS(dev)) 458 - intel_ring_setup_status_page(ring); 459 - else 460 - ring_setup_phys_status_page(ring); 461 - 462 - head = I915_READ_HEAD(ring) & HEAD_ADDR; 463 - 464 - /* G45 ring initialization fails to reset head to zero */ 465 - if (head != 0) { 473 + if (!stop_ring(ring)) { 474 + /* G45 ring initialization often fails to reset head to zero */ 466 475 DRM_DEBUG_KMS("%s head not reset to zero " 467 476 "ctl %08x head %08x tail %08x start %08x\n", 468 477 ring->name, ··· 480 471 I915_READ_TAIL(ring), 481 472 I915_READ_START(ring)); 482 473 483 - I915_WRITE_HEAD(ring, 0); 484 - 485 - if (I915_READ_HEAD(ring) & HEAD_ADDR) { 474 + if (!stop_ring(ring)) { 486 475 DRM_ERROR("failed to set %s head to zero " 487 476 "ctl %08x head %08x tail %08x start %08x\n", 488 477 ring->name, ··· 488 481 I915_READ_HEAD(ring), 489 482 I915_READ_TAIL(ring), 490 483 I915_READ_START(ring)); 484 + ret = -EIO; 485 + goto out; 491 486 } 492 487 } 488 + 489 + if (I915_NEED_GFX_HWS(dev)) 490 + intel_ring_setup_status_page(ring); 491 + else 492 + ring_setup_phys_status_page(ring); 493 493 494 494 /* Initialize the ring. This must happen _after_ we've cleared the ring 495 495 * registers with the above sequence (the readback of the HEAD registers
+1
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 34 34 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) 35 35 36 36 #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) 37 + #define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val) 37 38 38 39 enum intel_ring_hangcheck_action { 39 40 HANGCHECK_IDLE = 0,
+3 -6
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
··· 510 510 MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN); 511 511 } else { 512 512 /* disable cursor: */ 513 - mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0); 514 - mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma), 515 - MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB)); 513 + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 514 + mdp4_kms->blank_cursor_iova); 516 515 } 517 516 518 517 /* and drop the iova ref + obj rev when done scanning out: */ ··· 573 574 574 575 if (old_bo) { 575 576 /* drop our previous reference: */ 576 - msm_gem_put_iova(old_bo, mdp4_kms->id); 577 - drm_gem_object_unreference_unlocked(old_bo); 577 + drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo); 578 578 } 579 579 580 - crtc_flush(crtc); 581 580 request_pending(crtc, PENDING_CURSOR); 582 581 583 582 return 0;
+2 -2
drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
··· 70 70 71 71 VERB("status=%08x", status); 72 72 73 + mdp_dispatch_irqs(mdp_kms, status); 74 + 73 75 for (id = 0; id < priv->num_crtcs; id++) 74 76 if (status & mdp4_crtc_vblank(priv->crtcs[id])) 75 77 drm_handle_vblank(dev, id); 76 - 77 - mdp_dispatch_irqs(mdp_kms, status); 78 78 79 79 return IRQ_HANDLED; 80 80 }
+21
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
··· 144 144 static void mdp4_destroy(struct msm_kms *kms) 145 145 { 146 146 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); 147 + if (mdp4_kms->blank_cursor_iova) 148 + msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id); 149 + if (mdp4_kms->blank_cursor_bo) 150 + drm_gem_object_unreference(mdp4_kms->blank_cursor_bo); 147 151 kfree(mdp4_kms); 148 152 } 149 153 ··· 373 369 ret = modeset_init(mdp4_kms); 374 370 if (ret) { 375 371 dev_err(dev->dev, "modeset_init failed: %d\n", ret); 372 + goto fail; 373 + } 374 + 375 + mutex_lock(&dev->struct_mutex); 376 + mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC); 377 + mutex_unlock(&dev->struct_mutex); 378 + if (IS_ERR(mdp4_kms->blank_cursor_bo)) { 379 + ret = PTR_ERR(mdp4_kms->blank_cursor_bo); 380 + dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret); 381 + mdp4_kms->blank_cursor_bo = NULL; 382 + goto fail; 383 + } 384 + 385 + ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id, 386 + &mdp4_kms->blank_cursor_iova); 387 + if (ret) { 388 + dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret); 376 389 goto fail; 377 390 } 378 391
+4
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
··· 44 44 struct clk *lut_clk; 45 45 46 46 struct mdp_irq error_handler; 47 + 48 + /* empty/blank cursor bo to use when cursor is "disabled" */ 49 + struct drm_gem_object *blank_cursor_bo; 50 + uint32_t blank_cursor_iova; 47 51 }; 48 52 #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) 49 53
+2 -2
drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
··· 71 71 72 72 VERB("status=%08x", status); 73 73 74 + mdp_dispatch_irqs(mdp_kms, status); 75 + 74 76 for (id = 0; id < priv->num_crtcs; id++) 75 77 if (status & mdp5_crtc_vblank(priv->crtcs[id])) 76 78 drm_handle_vblank(dev, id); 77 - 78 - mdp_dispatch_irqs(mdp_kms, status); 79 79 } 80 80 81 81 irqreturn_t mdp5_irq(struct msm_kms *kms)
+1 -4
drivers/gpu/drm/msm/msm_fbdev.c
··· 62 62 dma_addr_t paddr; 63 63 int ret, size; 64 64 65 - /* only doing ARGB32 since this is what is needed to alpha-blend 66 - * with video overlays: 67 - */ 68 65 sizes->surface_bpp = 32; 69 - sizes->surface_depth = 32; 66 + sizes->surface_depth = 24; 70 67 71 68 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, 72 69 sizes->surface_height, sizes->surface_bpp,
+3 -1
drivers/gpu/drm/msm/msm_gem.c
··· 118 118 119 119 if (iommu_present(&platform_bus_type)) 120 120 drm_gem_put_pages(obj, msm_obj->pages, true, false); 121 - else 121 + else { 122 122 drm_mm_remove_node(msm_obj->vram_node); 123 + drm_free_large(msm_obj->pages); 124 + } 123 125 124 126 msm_obj->pages = NULL; 125 127 }
+3 -1
drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
··· 863 863 { 864 864 mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 865 865 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 866 - mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); 866 + mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW); 867 867 868 868 mmio_list(0x40800c, 0x00000000, 8, 1); 869 869 mmio_list(0x408010, 0x80000000, 0, 0); ··· 876 876 mmio_list(0x408008, 0x80000030, 0, 0); 877 877 mmio_list(0x418e24, 0x00000000, 8, 0); 878 878 mmio_list(0x418e28, 0x80000030, 0, 0); 879 + 880 + mmio_list(0x4064c8, 0x018002c0, 0, 0); 879 881 880 882 mmio_list(0x418810, 0x80000000, 12, 2); 881 883 mmio_list(0x419848, 0x10000000, 12, 2);
+6 -4
drivers/gpu/drm/nouveau/core/subdev/bios/base.c
··· 168 168 */ 169 169 i = 16; 170 170 do { 171 - if ((nv_rd32(bios, 0x300000) & 0xffff) == 0xaa55) 171 + u32 data = le32_to_cpu(nv_rd32(bios, 0x300000)) & 0xffff; 172 + if (data == 0xaa55) 172 173 break; 173 174 } while (i--); 174 175 ··· 177 176 goto out; 178 177 179 178 /* read entire bios image to system memory */ 180 - bios->size = ((nv_rd32(bios, 0x300000) >> 16) & 0xff) * 512; 179 + bios->size = (le32_to_cpu(nv_rd32(bios, 0x300000)) >> 16) & 0xff; 180 + bios->size = bios->size * 512; 181 181 if (!bios->size) 182 182 goto out; 183 183 184 184 bios->data = kmalloc(bios->size, GFP_KERNEL); 185 185 if (bios->data) { 186 - for (i = 0; i < bios->size; i+=4) 187 - nv_wo32(bios, i, nv_rd32(bios, 0x300000 + i)); 186 + for (i = 0; i < bios->size; i += 4) 187 + ((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i); 188 188 } 189 189 190 190 /* check the PCI record header */
-3
drivers/gpu/drm/nouveau/nouveau_acpi.c
··· 389 389 acpi_status status; 390 390 acpi_handle dhandle, rom_handle; 391 391 392 - if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) 393 - return false; 394 - 395 392 dhandle = ACPI_HANDLE(&pdev->dev); 396 393 if (!dhandle) 397 394 return false;
+1 -1
drivers/gpu/drm/nouveau/nouveau_display.c
··· 764 764 } 765 765 766 766 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); 767 - mutex_unlock(&chan->cli->mutex); 768 767 if (ret) 769 768 goto fail_unreserve; 769 + mutex_unlock(&chan->cli->mutex); 770 770 771 771 /* Update the crtc struct and cleanup */ 772 772 crtc->primary->fb = fb;
+37 -17
drivers/gpu/drm/radeon/atombios_crtc.c
··· 1177 1177 1178 1178 /* Set NUM_BANKS. */ 1179 1179 if (rdev->family >= CHIP_TAHITI) { 1180 - unsigned tileb, index, num_banks, tile_split_bytes; 1180 + unsigned index, num_banks; 1181 1181 1182 - /* Calculate the macrotile mode index. */ 1183 - tile_split_bytes = 64 << tile_split; 1184 - tileb = 8 * 8 * target_fb->bits_per_pixel / 8; 1185 - tileb = min(tile_split_bytes, tileb); 1182 + if (rdev->family >= CHIP_BONAIRE) { 1183 + unsigned tileb, tile_split_bytes; 1186 1184 1187 - for (index = 0; tileb > 64; index++) { 1188 - tileb >>= 1; 1189 - } 1185 + /* Calculate the macrotile mode index. */ 1186 + tile_split_bytes = 64 << tile_split; 1187 + tileb = 8 * 8 * target_fb->bits_per_pixel / 8; 1188 + tileb = min(tile_split_bytes, tileb); 1190 1189 1191 - if (index >= 16) { 1192 - DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", 1193 - target_fb->bits_per_pixel, tile_split); 1194 - return -EINVAL; 1195 - } 1190 + for (index = 0; tileb > 64; index++) 1191 + tileb >>= 1; 1196 1192 1197 - if (rdev->family >= CHIP_BONAIRE) 1193 + if (index >= 16) { 1194 + DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", 1195 + target_fb->bits_per_pixel, tile_split); 1196 + return -EINVAL; 1197 + } 1198 + 1198 1199 num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; 1199 - else 1200 + } else { 1201 + switch (target_fb->bits_per_pixel) { 1202 + case 8: 1203 + index = 10; 1204 + break; 1205 + case 16: 1206 + index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP; 1207 + break; 1208 + default: 1209 + case 32: 1210 + index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP; 1211 + break; 1212 + } 1213 + 1200 1214 num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; 1215 + } 1216 + 1201 1217 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); 1202 1218 } else { 1203 1219 /* NI and older. 
*/ ··· 1736 1720 } 1737 1721 /* otherwise, pick one of the plls */ 1738 1722 if ((rdev->family == CHIP_KAVERI) || 1739 - (rdev->family == CHIP_KABINI)) { 1740 - /* KB/KV has PPLL1 and PPLL2 */ 1723 + (rdev->family == CHIP_KABINI) || 1724 + (rdev->family == CHIP_MULLINS)) { 1725 + /* KB/KV/ML has PPLL1 and PPLL2 */ 1741 1726 pll_in_use = radeon_get_pll_use_mask(crtc); 1742 1727 if (!(pll_in_use & (1 << ATOM_PPLL2))) 1743 1728 return ATOM_PPLL2; ··· 1901 1884 if (radeon_encoder->active_device & 1902 1885 (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) 1903 1886 is_tvcv = true; 1887 + 1888 + if (!radeon_crtc->adjusted_clock) 1889 + return -EINVAL; 1904 1890 1905 1891 atombios_crtc_set_pll(crtc, adjusted_mode); 1906 1892
+25 -19
drivers/gpu/drm/radeon/atombios_dp.c
··· 366 366 if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 367 367 return; 368 368 369 - if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3)) 369 + if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3) 370 370 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", 371 371 buf[0], buf[1], buf[2]); 372 372 373 - if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3)) 373 + if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3) 374 374 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 375 375 buf[0], buf[1], buf[2]); 376 376 } ··· 419 419 420 420 if (dp_bridge != ENCODER_OBJECT_ID_NONE) { 421 421 /* DP bridge chips */ 422 - drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, 423 - DP_EDP_CONFIGURATION_CAP, &tmp); 424 - if (tmp & 1) 425 - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 426 - else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || 427 - (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) 428 - panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; 429 - else 430 - panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; 422 + if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, 423 + DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { 424 + if (tmp & 1) 425 + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 426 + else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || 427 + (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) 428 + panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; 429 + else 430 + panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; 431 + } 431 432 } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 432 433 /* eDP */ 433 - drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, 434 - DP_EDP_CONFIGURATION_CAP, &tmp); 435 - if (tmp & 1) 436 - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 434 + if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, 435 + DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { 436 + if (tmp & 1) 437 + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 438 + } 437 439 } 438 440 439 441 
return panel_mode; ··· 811 809 else 812 810 dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; 813 811 814 - drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp); 815 - if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) 816 - dp_info.tp3_supported = true; 817 - else 812 + if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp) 813 + == 1) { 814 + if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) 815 + dp_info.tp3_supported = true; 816 + else 817 + dp_info.tp3_supported = false; 818 + } else { 818 819 dp_info.tp3_supported = false; 820 + } 819 821 820 822 memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); 821 823 dp_info.rdev = rdev;
+148
drivers/gpu/drm/radeon/cik.c
··· 63 63 MODULE_FIRMWARE("radeon/KABINI_mec.bin"); 64 64 MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); 65 65 MODULE_FIRMWARE("radeon/KABINI_sdma.bin"); 66 + MODULE_FIRMWARE("radeon/MULLINS_pfp.bin"); 67 + MODULE_FIRMWARE("radeon/MULLINS_me.bin"); 68 + MODULE_FIRMWARE("radeon/MULLINS_ce.bin"); 69 + MODULE_FIRMWARE("radeon/MULLINS_mec.bin"); 70 + MODULE_FIRMWARE("radeon/MULLINS_rlc.bin"); 71 + MODULE_FIRMWARE("radeon/MULLINS_sdma.bin"); 66 72 67 73 extern int r600_ih_ring_alloc(struct radeon_device *rdev); 68 74 extern void r600_ih_ring_fini(struct radeon_device *rdev); ··· 1479 1473 0xd80c, 0xff000ff0, 0x00000100 1480 1474 }; 1481 1475 1476 + static const u32 godavari_golden_registers[] = 1477 + { 1478 + 0x55e4, 0xff607fff, 0xfc000100, 1479 + 0x6ed8, 0x00010101, 0x00010000, 1480 + 0x9830, 0xffffffff, 0x00000000, 1481 + 0x98302, 0xf00fffff, 0x00000400, 1482 + 0x6130, 0xffffffff, 0x00010000, 1483 + 0x5bb0, 0x000000f0, 0x00000070, 1484 + 0x5bc0, 0xf0311fff, 0x80300000, 1485 + 0x98f8, 0x73773777, 0x12010001, 1486 + 0x98fc, 0xffffffff, 0x00000010, 1487 + 0x8030, 0x00001f0f, 0x0000100a, 1488 + 0x2f48, 0x73773777, 0x12010001, 1489 + 0x2408, 0x000fffff, 0x000c007f, 1490 + 0x8a14, 0xf000003f, 0x00000007, 1491 + 0x8b24, 0xffffffff, 0x00ff0fff, 1492 + 0x30a04, 0x0000ff0f, 0x00000000, 1493 + 0x28a4c, 0x07ffffff, 0x06000000, 1494 + 0x4d8, 0x00000fff, 0x00000100, 1495 + 0xd014, 0x00010000, 0x00810001, 1496 + 0xd814, 0x00010000, 0x00810001, 1497 + 0x3e78, 0x00000001, 0x00000002, 1498 + 0xc768, 0x00000008, 0x00000008, 1499 + 0xc770, 0x00000f00, 0x00000800, 1500 + 0xc774, 0x00000f00, 0x00000800, 1501 + 0xc798, 0x00ffffff, 0x00ff7fbf, 1502 + 0xc79c, 0x00ffffff, 0x00ff7faf, 1503 + 0x8c00, 0x000000ff, 0x00000001, 1504 + 0x214f8, 0x01ff01ff, 0x00000002, 1505 + 0x21498, 0x007ff800, 0x00200000, 1506 + 0x2015c, 0xffffffff, 0x00000f40, 1507 + 0x88c4, 0x001f3ae3, 0x00000082, 1508 + 0x88d4, 0x0000001f, 0x00000010, 1509 + 0x30934, 0xffffffff, 0x00000000 1510 + }; 1511 + 1512 + 1482 1513 static 
void cik_init_golden_registers(struct radeon_device *rdev) 1483 1514 { 1484 1515 switch (rdev->family) { ··· 1540 1497 radeon_program_register_sequence(rdev, 1541 1498 kalindi_golden_registers, 1542 1499 (const u32)ARRAY_SIZE(kalindi_golden_registers)); 1500 + radeon_program_register_sequence(rdev, 1501 + kalindi_golden_common_registers, 1502 + (const u32)ARRAY_SIZE(kalindi_golden_common_registers)); 1503 + radeon_program_register_sequence(rdev, 1504 + kalindi_golden_spm_registers, 1505 + (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); 1506 + break; 1507 + case CHIP_MULLINS: 1508 + radeon_program_register_sequence(rdev, 1509 + kalindi_mgcg_cgcg_init, 1510 + (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init)); 1511 + radeon_program_register_sequence(rdev, 1512 + godavari_golden_registers, 1513 + (const u32)ARRAY_SIZE(godavari_golden_registers)); 1543 1514 radeon_program_register_sequence(rdev, 1544 1515 kalindi_golden_common_registers, 1545 1516 (const u32)ARRAY_SIZE(kalindi_golden_common_registers)); ··· 1889 1832 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1890 1833 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1891 1834 rlc_req_size = KB_RLC_UCODE_SIZE * 4; 1835 + sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1836 + break; 1837 + case CHIP_MULLINS: 1838 + chip_name = "MULLINS"; 1839 + pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1840 + me_req_size = CIK_ME_UCODE_SIZE * 4; 1841 + ce_req_size = CIK_CE_UCODE_SIZE * 4; 1842 + mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1843 + rlc_req_size = ML_RLC_UCODE_SIZE * 4; 1892 1844 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1893 1845 break; 1894 1846 default: BUG(); ··· 3338 3272 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 3339 3273 break; 3340 3274 case CHIP_KABINI: 3275 + case CHIP_MULLINS: 3341 3276 default: 3342 3277 rdev->config.cik.max_shader_engines = 1; 3343 3278 rdev->config.cik.max_tile_pipes = 2; ··· 3769 3702 r = radeon_fence_emit(rdev, fence, ring->idx); 3770 3703 if (r) { 3771 3704 radeon_ring_unlock_undo(rdev, ring); 3705 + 
radeon_semaphore_free(rdev, &sem, NULL); 3772 3706 return r; 3773 3707 } 3774 3708 ··· 5868 5800 case CHIP_KABINI: 5869 5801 size = KB_RLC_UCODE_SIZE; 5870 5802 break; 5803 + case CHIP_MULLINS: 5804 + size = ML_RLC_UCODE_SIZE; 5805 + break; 5871 5806 } 5872 5807 5873 5808 cik_rlc_stop(rdev); ··· 6619 6548 buffer[count++] = cpu_to_le32(0x00000000); 6620 6549 break; 6621 6550 case CHIP_KABINI: 6551 + case CHIP_MULLINS: 6622 6552 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ 6623 6553 buffer[count++] = cpu_to_le32(0x00000000); 6624 6554 break; ··· 6764 6692 if (rdev->num_crtc >= 6) { 6765 6693 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 6766 6694 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 6695 + } 6696 + /* pflip */ 6697 + if (rdev->num_crtc >= 2) { 6698 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 6699 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 6700 + } 6701 + if (rdev->num_crtc >= 4) { 6702 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 6703 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 6704 + } 6705 + if (rdev->num_crtc >= 6) { 6706 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 6707 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 6767 6708 } 6768 6709 6769 6710 /* dac hotplug */ ··· 7134 7049 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); 7135 7050 } 7136 7051 7052 + if (rdev->num_crtc >= 2) { 7053 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 7054 + GRPH_PFLIP_INT_MASK); 7055 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 7056 + GRPH_PFLIP_INT_MASK); 7057 + } 7058 + if (rdev->num_crtc >= 4) { 7059 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 7060 + GRPH_PFLIP_INT_MASK); 7061 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 7062 + GRPH_PFLIP_INT_MASK); 7063 + } 7064 + if (rdev->num_crtc >= 6) { 7065 + WREG32(GRPH_INT_CONTROL + 
EVERGREEN_CRTC4_REGISTER_OFFSET, 7066 + GRPH_PFLIP_INT_MASK); 7067 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 7068 + GRPH_PFLIP_INT_MASK); 7069 + } 7070 + 7137 7071 WREG32(DC_HPD1_INT_CONTROL, hpd1); 7138 7072 WREG32(DC_HPD2_INT_CONTROL, hpd2); 7139 7073 WREG32(DC_HPD3_INT_CONTROL, hpd3); ··· 7189 7085 rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); 7190 7086 rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6); 7191 7087 7088 + rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS + 7089 + EVERGREEN_CRTC0_REGISTER_OFFSET); 7090 + rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS + 7091 + EVERGREEN_CRTC1_REGISTER_OFFSET); 7092 + if (rdev->num_crtc >= 4) { 7093 + rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS + 7094 + EVERGREEN_CRTC2_REGISTER_OFFSET); 7095 + rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS + 7096 + EVERGREEN_CRTC3_REGISTER_OFFSET); 7097 + } 7098 + if (rdev->num_crtc >= 6) { 7099 + rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS + 7100 + EVERGREEN_CRTC4_REGISTER_OFFSET); 7101 + rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS + 7102 + EVERGREEN_CRTC5_REGISTER_OFFSET); 7103 + } 7104 + 7105 + if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED) 7106 + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, 7107 + GRPH_PFLIP_INT_CLEAR); 7108 + if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED) 7109 + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, 7110 + GRPH_PFLIP_INT_CLEAR); 7192 7111 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) 7193 7112 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); 7194 7113 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) ··· 7222 7095 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); 7223 7096 7224 7097 if (rdev->num_crtc >= 4) { 7098 + if (rdev->irq.stat_regs.cik.d3grph_int 
& GRPH_PFLIP_INT_OCCURRED) 7099 + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, 7100 + GRPH_PFLIP_INT_CLEAR); 7101 + if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED) 7102 + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, 7103 + GRPH_PFLIP_INT_CLEAR); 7225 7104 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) 7226 7105 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); 7227 7106 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) ··· 7239 7106 } 7240 7107 7241 7108 if (rdev->num_crtc >= 6) { 7109 + if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED) 7110 + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, 7111 + GRPH_PFLIP_INT_CLEAR); 7112 + if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED) 7113 + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, 7114 + GRPH_PFLIP_INT_CLEAR); 7242 7115 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) 7243 7116 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); 7244 7117 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) ··· 7595 7456 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 7596 7457 break; 7597 7458 } 7459 + break; 7460 + case 8: /* D1 page flip */ 7461 + case 10: /* D2 page flip */ 7462 + case 12: /* D3 page flip */ 7463 + case 14: /* D4 page flip */ 7464 + case 16: /* D5 page flip */ 7465 + case 18: /* D6 page flip */ 7466 + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); 7467 + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); 7598 7468 break; 7599 7469 case 42: /* HPD hotplug */ 7600 7470 switch (src_data) {
+1
drivers/gpu/drm/radeon/cik_sdma.c
··· 562 562 r = radeon_fence_emit(rdev, fence, ring->idx); 563 563 if (r) { 564 564 radeon_ring_unlock_undo(rdev, ring); 565 + radeon_semaphore_free(rdev, &sem, NULL); 565 566 return r; 566 567 } 567 568
+9
drivers/gpu/drm/radeon/cikd.h
··· 888 888 # define DC_HPD6_RX_INTERRUPT (1 << 18) 889 889 #define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780 890 890 891 + /* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */ 892 + #define GRPH_INT_STATUS 0x6858 893 + # define GRPH_PFLIP_INT_OCCURRED (1 << 0) 894 + # define GRPH_PFLIP_INT_CLEAR (1 << 8) 895 + /* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */ 896 + #define GRPH_INT_CONTROL 0x685c 897 + # define GRPH_PFLIP_INT_MASK (1 << 0) 898 + # define GRPH_PFLIP_INT_TYPE (1 << 8) 899 + 891 900 #define DAC_AUTODETECT_INT_CONTROL 0x67c8 892 901 893 902 #define DC_HPD1_INT_STATUS 0x601c
+21 -7
drivers/gpu/drm/radeon/evergreen.c
··· 4371 4371 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 4372 4372 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; 4373 4373 u32 grbm_int_cntl = 0; 4374 - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; 4375 4374 u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; 4376 4375 u32 dma_cntl, dma_cntl1 = 0; 4377 4376 u32 thermal_int = 0; ··· 4553 4554 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); 4554 4555 } 4555 4556 4556 - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); 4557 - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); 4557 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 4558 + GRPH_PFLIP_INT_MASK); 4559 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 4560 + GRPH_PFLIP_INT_MASK); 4558 4561 if (rdev->num_crtc >= 4) { 4559 - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); 4560 - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); 4562 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 4563 + GRPH_PFLIP_INT_MASK); 4564 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 4565 + GRPH_PFLIP_INT_MASK); 4561 4566 } 4562 4567 if (rdev->num_crtc >= 6) { 4563 - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); 4564 - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); 4568 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 4569 + GRPH_PFLIP_INT_MASK); 4570 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 4571 + GRPH_PFLIP_INT_MASK); 4565 4572 } 4566 4573 4567 4574 WREG32(DC_HPD1_INT_CONTROL, hpd1); ··· 4955 4950 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4956 4951 break; 4957 4952 } 4953 + break; 4954 + case 8: /* D1 page flip */ 4955 + case 10: /* D2 page flip */ 4956 + case 12: /* D3 page flip */ 4957 + case 14: /* D4 page flip */ 4958 + case 16: /* D5 page flip */ 4959 + case 18: /* D6 page flip */ 4960 + 
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); 4961 + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); 4958 4962 break; 4959 4963 case 42: /* HPD hotplug */ 4960 4964 switch (src_data) {
+1
drivers/gpu/drm/radeon/evergreen_dma.c
··· 151 151 r = radeon_fence_emit(rdev, fence, ring->idx); 152 152 if (r) { 153 153 radeon_ring_unlock_undo(rdev, ring); 154 + radeon_semaphore_free(rdev, &sem, NULL); 154 155 return r; 155 156 } 156 157
+106 -29
drivers/gpu/drm/radeon/kv_dpm.c
··· 546 546 return 0; 547 547 } 548 548 549 + static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev, 550 + struct sumo_vid_mapping_table *vid_mapping_table, 551 + u32 vid_2bit) 552 + { 553 + struct radeon_clock_voltage_dependency_table *vddc_sclk_table = 554 + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 555 + u32 i; 556 + 557 + if (vddc_sclk_table && vddc_sclk_table->count) { 558 + if (vid_2bit < vddc_sclk_table->count) 559 + return vddc_sclk_table->entries[vid_2bit].v; 560 + else 561 + return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; 562 + } else { 563 + for (i = 0; i < vid_mapping_table->num_entries; i++) { 564 + if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) 565 + return vid_mapping_table->entries[i].vid_7bit; 566 + } 567 + return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; 568 + } 569 + } 570 + 571 + static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev, 572 + struct sumo_vid_mapping_table *vid_mapping_table, 573 + u32 vid_7bit) 574 + { 575 + struct radeon_clock_voltage_dependency_table *vddc_sclk_table = 576 + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 577 + u32 i; 578 + 579 + if (vddc_sclk_table && vddc_sclk_table->count) { 580 + for (i = 0; i < vddc_sclk_table->count; i++) { 581 + if (vddc_sclk_table->entries[i].v == vid_7bit) 582 + return i; 583 + } 584 + return vddc_sclk_table->count - 1; 585 + } else { 586 + for (i = 0; i < vid_mapping_table->num_entries; i++) { 587 + if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) 588 + return vid_mapping_table->entries[i].vid_2bit; 589 + } 590 + 591 + return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; 592 + } 593 + } 594 + 549 595 static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev, 550 596 u16 voltage) 551 597 { ··· 602 556 u32 vid_2bit) 603 557 { 604 558 struct kv_power_info *pi = kv_get_pi(rdev); 605 - u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev, 606 - 
&pi->sys_info.vid_mapping_table, 607 - vid_2bit); 559 + u32 vid_8bit = kv_convert_vid2_to_vid7(rdev, 560 + &pi->sys_info.vid_mapping_table, 561 + vid_2bit); 608 562 609 563 return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit); 610 564 } ··· 685 639 686 640 static int kv_unforce_levels(struct radeon_device *rdev) 687 641 { 688 - if (rdev->family == CHIP_KABINI) 642 + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) 689 643 return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); 690 644 else 691 645 return kv_set_enabled_levels(rdev); ··· 1408 1362 struct radeon_uvd_clock_voltage_dependency_table *table = 1409 1363 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 1410 1364 int ret; 1365 + u32 mask; 1411 1366 1412 1367 if (!gate) { 1413 - if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state) 1368 + if (table->count) 1414 1369 pi->uvd_boot_level = table->count - 1; 1415 1370 else 1416 1371 pi->uvd_boot_level = 0; 1372 + 1373 + if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { 1374 + mask = 1 << pi->uvd_boot_level; 1375 + } else { 1376 + mask = 0x1f; 1377 + } 1417 1378 1418 1379 ret = kv_copy_bytes_to_smc(rdev, 1419 1380 pi->dpm_table_start + ··· 1430 1377 if (ret) 1431 1378 return ret; 1432 1379 1433 - if (!pi->caps_uvd_dpm || 1434 - pi->caps_stable_p_state) 1435 - kv_send_msg_to_smc_with_parameter(rdev, 1436 - PPSMC_MSG_UVDDPM_SetEnabledMask, 1437 - (1 << pi->uvd_boot_level)); 1380 + kv_send_msg_to_smc_with_parameter(rdev, 1381 + PPSMC_MSG_UVDDPM_SetEnabledMask, 1382 + mask); 1438 1383 } 1439 1384 1440 1385 return kv_enable_uvd_dpm(rdev, !gate); ··· 1668 1617 if (pi->acp_power_gated == gate) 1669 1618 return; 1670 1619 1671 - if (rdev->family == CHIP_KABINI) 1620 + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) 1672 1621 return; 1673 1622 1674 1623 pi->acp_power_gated = gate; ··· 1837 1786 } 1838 1787 } 1839 1788 1840 - if (rdev->family == CHIP_KABINI) { 1789 + if (rdev->family == CHIP_KABINI 
|| rdev->family == CHIP_MULLINS) { 1841 1790 if (pi->enable_dpm) { 1842 1791 kv_set_valid_clock_range(rdev, new_ps); 1843 1792 kv_update_dfs_bypass_settings(rdev, new_ps); ··· 1863 1812 return ret; 1864 1813 } 1865 1814 kv_update_sclk_t(rdev); 1815 + if (rdev->family == CHIP_MULLINS) 1816 + kv_enable_nb_dpm(rdev); 1866 1817 } 1867 1818 } else { 1868 1819 if (pi->enable_dpm) { ··· 1915 1862 { 1916 1863 struct kv_power_info *pi = kv_get_pi(rdev); 1917 1864 1918 - if (rdev->family == CHIP_KABINI) { 1865 + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { 1919 1866 kv_force_lowest_valid(rdev); 1920 1867 kv_init_graphics_levels(rdev); 1921 1868 kv_program_bootup_state(rdev); ··· 1954 1901 static void kv_patch_voltage_values(struct radeon_device *rdev) 1955 1902 { 1956 1903 int i; 1957 - struct radeon_uvd_clock_voltage_dependency_table *table = 1904 + struct radeon_uvd_clock_voltage_dependency_table *uvd_table = 1958 1905 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 1906 + struct radeon_vce_clock_voltage_dependency_table *vce_table = 1907 + &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1908 + struct radeon_clock_voltage_dependency_table *samu_table = 1909 + &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1910 + struct radeon_clock_voltage_dependency_table *acp_table = 1911 + &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1959 1912 1960 - if (table->count) { 1961 - for (i = 0; i < table->count; i++) 1962 - table->entries[i].v = 1913 + if (uvd_table->count) { 1914 + for (i = 0; i < uvd_table->count; i++) 1915 + uvd_table->entries[i].v = 1963 1916 kv_convert_8bit_index_to_voltage(rdev, 1964 - table->entries[i].v); 1917 + uvd_table->entries[i].v); 1918 + } 1919 + 1920 + if (vce_table->count) { 1921 + for (i = 0; i < vce_table->count; i++) 1922 + vce_table->entries[i].v = 1923 + kv_convert_8bit_index_to_voltage(rdev, 1924 + vce_table->entries[i].v); 1925 + } 1926 + 1927 + if (samu_table->count) { 1928 + 
for (i = 0; i < samu_table->count; i++) 1929 + samu_table->entries[i].v = 1930 + kv_convert_8bit_index_to_voltage(rdev, 1931 + samu_table->entries[i].v); 1932 + } 1933 + 1934 + if (acp_table->count) { 1935 + for (i = 0; i < acp_table->count; i++) 1936 + acp_table->entries[i].v = 1937 + kv_convert_8bit_index_to_voltage(rdev, 1938 + acp_table->entries[i].v); 1965 1939 } 1966 1940 1967 1941 } ··· 2021 1941 break; 2022 1942 } 2023 1943 2024 - if (rdev->family == CHIP_KABINI) 1944 + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) 2025 1945 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); 2026 1946 else 2027 1947 return kv_set_enabled_level(rdev, i); ··· 2041 1961 break; 2042 1962 } 2043 1963 2044 - if (rdev->family == CHIP_KABINI) 1964 + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) 2045 1965 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); 2046 1966 else 2047 1967 return kv_set_enabled_level(rdev, i); ··· 2198 2118 else 2199 2119 pi->battery_state = false; 2200 2120 2201 - if (rdev->family == CHIP_KABINI) { 2121 + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { 2202 2122 ps->dpm0_pg_nb_ps_lo = 0x1; 2203 2123 ps->dpm0_pg_nb_ps_hi = 0x0; 2204 2124 ps->dpmx_nb_ps_lo = 0x1; ··· 2259 2179 if (pi->lowest_valid > pi->highest_valid) 2260 2180 return -EINVAL; 2261 2181 2262 - if (rdev->family == CHIP_KABINI) { 2182 + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { 2263 2183 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2264 2184 pi->graphics_level[i].GnbSlow = 1; 2265 2185 pi->graphics_level[i].ForceNbPs1 = 0; ··· 2333 2253 break; 2334 2254 2335 2255 kv_set_divider_value(rdev, i, table->entries[i].clk); 2336 - vid_2bit = sumo_convert_vid7_to_vid2(rdev, 2337 - &pi->sys_info.vid_mapping_table, 2338 - table->entries[i].v); 2256 + vid_2bit = kv_convert_vid7_to_vid2(rdev, 2257 + &pi->sys_info.vid_mapping_table, 2258 + table->entries[i].v); 
2339 2259 kv_set_vid(rdev, i, vid_2bit); 2340 2260 kv_set_at(rdev, i, pi->at[i]); 2341 2261 kv_dpm_power_level_enabled_for_throttle(rdev, i, true); ··· 2404 2324 struct kv_power_info *pi = kv_get_pi(rdev); 2405 2325 u32 nbdpmconfig1; 2406 2326 2407 - if (rdev->family == CHIP_KABINI) 2327 + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) 2408 2328 return; 2409 2329 2410 2330 if (pi->sys_info.nb_dpm_enable) { ··· 2710 2630 pi->at[i] = TRINITY_AT_DFLT; 2711 2631 2712 2632 pi->sram_end = SMC_RAM_END; 2713 - 2714 - if (rdev->family == CHIP_KABINI) 2715 - pi->high_voltage_t = 4001; 2716 2633 2717 2634 pi->enable_nb_dpm = true; 2718 2635
+11 -3
drivers/gpu/drm/radeon/r600.c
··· 2839 2839 r = radeon_fence_emit(rdev, fence, ring->idx); 2840 2840 if (r) { 2841 2841 radeon_ring_unlock_undo(rdev, ring); 2842 + radeon_semaphore_free(rdev, &sem, NULL); 2842 2843 return r; 2843 2844 } 2844 2845 ··· 3506 3505 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; 3507 3506 u32 grbm_int_cntl = 0; 3508 3507 u32 hdmi0, hdmi1; 3509 - u32 d1grph = 0, d2grph = 0; 3510 3508 u32 dma_cntl; 3511 3509 u32 thermal_int = 0; 3512 3510 ··· 3614 3614 WREG32(CP_INT_CNTL, cp_int_cntl); 3615 3615 WREG32(DMA_CNTL, dma_cntl); 3616 3616 WREG32(DxMODE_INT_MASK, mode_int); 3617 - WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); 3618 - WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); 3617 + WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); 3618 + WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); 3619 3619 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 3620 3620 if (ASIC_IS_DCE3(rdev)) { 3621 3621 WREG32(DC_HPD1_INT_CONTROL, hpd1); ··· 3917 3917 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3918 3918 break; 3919 3919 } 3920 + break; 3921 + case 9: /* D1 pflip */ 3922 + DRM_DEBUG("IH: D1 flip\n"); 3923 + radeon_crtc_handle_flip(rdev, 0); 3924 + break; 3925 + case 11: /* D2 pflip */ 3926 + DRM_DEBUG("IH: D2 flip\n"); 3927 + radeon_crtc_handle_flip(rdev, 1); 3920 3928 break; 3921 3929 case 19: /* HPD/DAC hotplug */ 3922 3930 switch (src_data) {
+1
drivers/gpu/drm/radeon/r600_dma.c
··· 489 489 r = radeon_fence_emit(rdev, fence, ring->idx); 490 490 if (r) { 491 491 radeon_ring_unlock_undo(rdev, ring); 492 + radeon_semaphore_free(rdev, &sem, NULL); 492 493 return r; 493 494 } 494 495
+6
drivers/gpu/drm/radeon/radeon.h
··· 730 730 u32 disp_int_cont4; 731 731 u32 disp_int_cont5; 732 732 u32 disp_int_cont6; 733 + u32 d1grph_int; 734 + u32 d2grph_int; 735 + u32 d3grph_int; 736 + u32 d4grph_int; 737 + u32 d5grph_int; 738 + u32 d6grph_int; 733 739 }; 734 740 735 741 union radeon_irq_stat_regs {
+1
drivers/gpu/drm/radeon/radeon_asic.c
··· 2516 2516 break; 2517 2517 case CHIP_KAVERI: 2518 2518 case CHIP_KABINI: 2519 + case CHIP_MULLINS: 2519 2520 rdev->asic = &kv_asic; 2520 2521 /* set num crtcs */ 2521 2522 if (rdev->family == CHIP_KAVERI) {
+1
drivers/gpu/drm/radeon/radeon_device.c
··· 99 99 "KAVERI", 100 100 "KABINI", 101 101 "HAWAII", 102 + "MULLINS", 102 103 "LAST", 103 104 }; 104 105
+17 -3
drivers/gpu/drm/radeon/radeon_display.c
··· 284 284 u32 update_pending; 285 285 int vpos, hpos; 286 286 287 + /* can happen during initialization */ 288 + if (radeon_crtc == NULL) 289 + return; 290 + 287 291 spin_lock_irqsave(&rdev->ddev->event_lock, flags); 288 292 work = radeon_crtc->unpin_work; 289 293 if (work == NULL || ··· 830 826 831 827 /* make sure nominator is large enough */ 832 828 if (*nom < nom_min) { 833 - tmp = (nom_min + *nom - 1) / *nom; 829 + tmp = DIV_ROUND_UP(nom_min, *nom); 834 830 *nom *= tmp; 835 831 *den *= tmp; 836 832 } 837 833 838 834 /* make sure the denominator is large enough */ 839 835 if (*den < den_min) { 840 - tmp = (den_min + *den - 1) / *den; 836 + tmp = DIV_ROUND_UP(den_min, *den); 841 837 *nom *= tmp; 842 838 *den *= tmp; 843 839 } ··· 862 858 unsigned *fb_div, unsigned *ref_div) 863 859 { 864 860 /* limit reference * post divider to a maximum */ 865 - ref_div_max = min(210 / post_div, ref_div_max); 861 + ref_div_max = min(128 / post_div, ref_div_max); 866 862 867 863 /* get matching reference and feedback divider */ 868 864 *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); ··· 996 992 /* reduce the numbers to a simpler ratio once more */ 997 993 /* this also makes sure that the reference divider is large enough */ 998 994 avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min); 995 + 996 + /* avoid high jitter with small fractional dividers */ 997 + if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { 998 + fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60); 999 + if (fb_div < fb_div_min) { 1000 + unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); 1001 + fb_div *= tmp; 1002 + ref_div *= tmp; 1003 + } 1004 + } 999 1005 1000 1006 /* and finally save the result */ 1001 1007 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+1
drivers/gpu/drm/radeon/radeon_family.h
··· 97 97 CHIP_KAVERI, 98 98 CHIP_KABINI, 99 99 CHIP_HAWAII, 100 + CHIP_MULLINS, 100 101 CHIP_LAST, 101 102 }; 102 103
+1
drivers/gpu/drm/radeon/radeon_pm.c
··· 1300 1300 case CHIP_KABINI: 1301 1301 case CHIP_KAVERI: 1302 1302 case CHIP_HAWAII: 1303 + case CHIP_MULLINS: 1303 1304 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1304 1305 if (!rdev->rlc_fw) 1305 1306 rdev->pm.pm_method = PM_METHOD_PROFILE;
+1
drivers/gpu/drm/radeon/radeon_ucode.h
··· 52 52 #define BONAIRE_RLC_UCODE_SIZE 2048 53 53 #define KB_RLC_UCODE_SIZE 2560 54 54 #define KV_RLC_UCODE_SIZE 2560 55 + #define ML_RLC_UCODE_SIZE 2560 55 56 56 57 /* MC */ 57 58 #define BTC_MC_UCODE_SIZE 6024
+5
drivers/gpu/drm/radeon/radeon_uvd.c
··· 99 99 case CHIP_KABINI: 100 100 case CHIP_KAVERI: 101 101 case CHIP_HAWAII: 102 + case CHIP_MULLINS: 102 103 fw_name = FIRMWARE_BONAIRE; 103 104 break; 104 105 ··· 466 465 cmd = radeon_get_ib_value(p, p->idx) >> 1; 467 466 468 467 if (cmd < 0x4) { 468 + if (end <= start) { 469 + DRM_ERROR("invalid reloc offset %X!\n", offset); 470 + return -EINVAL; 471 + } 469 472 if ((end - start) < buf_sizes[cmd]) { 470 473 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, 471 474 (unsigned)(end - start), buf_sizes[cmd]);
+1
drivers/gpu/drm/radeon/radeon_vce.c
··· 66 66 case CHIP_BONAIRE: 67 67 case CHIP_KAVERI: 68 68 case CHIP_KABINI: 69 + case CHIP_MULLINS: 69 70 fw_name = FIRMWARE_BONAIRE; 70 71 break; 71 72
+1
drivers/gpu/drm/radeon/rv770_dma.c
··· 86 86 r = radeon_fence_emit(rdev, fence, ring->idx); 87 87 if (r) { 88 88 radeon_ring_unlock_undo(rdev, ring); 89 + radeon_semaphore_free(rdev, &sem, NULL); 89 90 return r; 90 91 } 91 92
+21 -7
drivers/gpu/drm/radeon/si.c
··· 5780 5780 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 5781 5781 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; 5782 5782 u32 grbm_int_cntl = 0; 5783 - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; 5784 5783 u32 dma_cntl, dma_cntl1; 5785 5784 u32 thermal_int = 0; 5786 5785 ··· 5918 5919 } 5919 5920 5920 5921 if (rdev->num_crtc >= 2) { 5921 - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); 5922 - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); 5922 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 5923 + GRPH_PFLIP_INT_MASK); 5924 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 5925 + GRPH_PFLIP_INT_MASK); 5923 5926 } 5924 5927 if (rdev->num_crtc >= 4) { 5925 - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); 5926 - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); 5928 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 5929 + GRPH_PFLIP_INT_MASK); 5930 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 5931 + GRPH_PFLIP_INT_MASK); 5927 5932 } 5928 5933 if (rdev->num_crtc >= 6) { 5929 - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); 5930 - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); 5934 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 5935 + GRPH_PFLIP_INT_MASK); 5936 + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 5937 + GRPH_PFLIP_INT_MASK); 5931 5938 } 5932 5939 5933 5940 if (!ASIC_IS_NODCE(rdev)) { ··· 6296 6291 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6297 6292 break; 6298 6293 } 6294 + break; 6295 + case 8: /* D1 page flip */ 6296 + case 10: /* D2 page flip */ 6297 + case 12: /* D3 page flip */ 6298 + case 14: /* D4 page flip */ 6299 + case 16: /* D5 page flip */ 6300 + case 18: /* D6 page flip */ 6301 + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); 6302 + 
radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); 6299 6303 break; 6300 6304 case 42: /* HPD hotplug */ 6301 6305 switch (src_data) {
+1
drivers/gpu/drm/radeon/si_dma.c
··· 213 213 r = radeon_fence_emit(rdev, fence, ring->idx); 214 214 if (r) { 215 215 radeon_ring_unlock_undo(rdev, ring); 216 + radeon_semaphore_free(rdev, &sem, NULL); 216 217 return r; 217 218 } 218 219
+8 -2
drivers/gpu/drm/radeon/uvd_v1_0.c
··· 83 83 int r; 84 84 85 85 /* raise clocks while booting up the VCPU */ 86 - radeon_set_uvd_clocks(rdev, 53300, 40000); 86 + if (rdev->family < CHIP_RV740) 87 + radeon_set_uvd_clocks(rdev, 10000, 10000); 88 + else 89 + radeon_set_uvd_clocks(rdev, 53300, 40000); 87 90 88 91 r = uvd_v1_0_start(rdev); 89 92 if (r) ··· 410 407 struct radeon_fence *fence = NULL; 411 408 int r; 412 409 413 - r = radeon_set_uvd_clocks(rdev, 53300, 40000); 410 + if (rdev->family < CHIP_RV740) 411 + r = radeon_set_uvd_clocks(rdev, 10000, 10000); 412 + else 413 + r = radeon_set_uvd_clocks(rdev, 53300, 40000); 414 414 if (r) { 415 415 DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); 416 416 return r;
+1 -1
drivers/gpu/drm/tegra/dc.c
··· 312 312 struct drm_device *drm = crtc->dev; 313 313 struct drm_plane *plane; 314 314 315 - list_for_each_entry(plane, &drm->mode_config.plane_list, head) { 315 + drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) { 316 316 if (plane->crtc == crtc) { 317 317 tegra_plane_disable(plane); 318 318 plane->crtc = NULL;
+22
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 1214 1214 SVGA3dCmdSurfaceDMA dma; 1215 1215 } *cmd; 1216 1216 int ret; 1217 + SVGA3dCmdSurfaceDMASuffix *suffix; 1218 + uint32_t bo_size; 1217 1219 1218 1220 cmd = container_of(header, struct vmw_dma_cmd, header); 1221 + suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma + 1222 + header->size - sizeof(*suffix)); 1223 + 1224 + /* Make sure device and verifier stays in sync. */ 1225 + if (unlikely(suffix->suffixSize != sizeof(*suffix))) { 1226 + DRM_ERROR("Invalid DMA suffix size.\n"); 1227 + return -EINVAL; 1228 + } 1229 + 1219 1230 ret = vmw_translate_guest_ptr(dev_priv, sw_context, 1220 1231 &cmd->dma.guest.ptr, 1221 1232 &vmw_bo); 1222 1233 if (unlikely(ret != 0)) 1223 1234 return ret; 1235 + 1236 + /* Make sure DMA doesn't cross BO boundaries. */ 1237 + bo_size = vmw_bo->base.num_pages * PAGE_SIZE; 1238 + if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) { 1239 + DRM_ERROR("Invalid DMA offset.\n"); 1240 + return -EINVAL; 1241 + } 1242 + 1243 + bo_size -= cmd->dma.guest.ptr.offset; 1244 + if (unlikely(suffix->maximumOffset > bo_size)) 1245 + suffix->maximumOffset = bo_size; 1224 1246 1225 1247 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1226 1248 user_surface_converter, &cmd->dma.host.sid,
+3 -2
drivers/hid/hid-core.c
··· 1253 1253 1254 1254 static int hid_report_len(struct hid_report *report) 1255 1255 { 1256 - return ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7; 1256 + /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ 1257 + return ((report->size - 1) >> 3) + 1 + (report->id > 0); 1257 1258 } 1258 1259 1259 1260 /* ··· 1267 1266 * of implement() working on 8 byte chunks 1268 1267 */ 1269 1268 1270 - int len = hid_report_len(report); 1269 + int len = hid_report_len(report) + 7; 1271 1270 1272 1271 return kmalloc(len, flags); 1273 1272 }
+7
drivers/hid/hid-ids.h
··· 301 301 302 302 #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 303 303 304 + #define USB_VENDOR_ID_ELITEGROUP 0x03fc 305 + #define USB_DEVICE_ID_ELITEGROUP_05D8 0x05d8 306 + 304 307 #define USB_VENDOR_ID_ELO 0x04E7 305 308 #define USB_DEVICE_ID_ELO_TS2515 0x0022 306 309 #define USB_DEVICE_ID_ELO_TS2700 0x0020 ··· 837 834 #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 838 835 #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 839 836 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 837 + #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 838 + 839 + #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 840 + #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855 840 841 841 842 #define USB_VENDOR_ID_THINGM 0x27b8 842 843 #define USB_DEVICE_ID_BLINK1 0x01ed
+5
drivers/hid/hid-multitouch.c
··· 1155 1155 MT_USB_DEVICE(USB_VENDOR_ID_DWAV, 1156 1156 USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, 1157 1157 1158 + /* Elitegroup panel */ 1159 + { .driver_data = MT_CLS_SERIAL, 1160 + MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP, 1161 + USB_DEVICE_ID_ELITEGROUP_05D8) }, 1162 + 1158 1163 /* Flatfrog Panels */ 1159 1164 { .driver_data = MT_CLS_FLATFROG, 1160 1165 MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
+3
drivers/hid/hid-sensor-hub.c
··· 708 708 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0, 709 709 USB_DEVICE_ID_STM_HID_SENSOR), 710 710 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, 711 + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_TEXAS_INSTRUMENTS, 712 + USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA), 713 + .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, 711 714 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID, 712 715 HID_ANY_ID) }, 713 716 { }
+1
drivers/hid/usbhid/hid-quirks.c
··· 119 119 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS }, 120 120 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS }, 121 121 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS }, 122 + { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS }, 122 123 123 124 { 0, 0 } 124 125 };
+2 -2
drivers/hwmon/coretemp.c
··· 365 365 if (cpu_has_tjmax(c)) 366 366 dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); 367 367 } else { 368 - val = (eax >> 16) & 0x7f; 368 + val = (eax >> 16) & 0xff; 369 369 /* 370 370 * If the TjMax is not plausible, an assumption 371 371 * will be used 372 372 */ 373 - if (val >= 85) { 373 + if (val) { 374 374 dev_dbg(dev, "TjMax is %d degrees C\n", val); 375 375 return val * 1000; 376 376 }
+2 -2
drivers/iio/adc/Kconfig
··· 106 106 Say yes here to build support for Atmel AT91 ADC. 107 107 108 108 config EXYNOS_ADC 109 - bool "Exynos ADC driver support" 109 + tristate "Exynos ADC driver support" 110 110 depends on OF 111 111 help 112 112 Core support for the ADC block found in the Samsung EXYNOS series ··· 114 114 this resource. 115 115 116 116 config LP8788_ADC 117 - bool "LP8788 ADC driver" 117 + tristate "LP8788 ADC driver" 118 118 depends on MFD_LP8788 119 119 help 120 120 Say yes here to build support for TI LP8788 ADC.
+3 -3
drivers/iio/adc/exynos_adc.c
··· 344 344 345 345 exynos_adc_hw_init(info); 346 346 347 - ret = of_platform_populate(np, exynos_adc_match, NULL, &pdev->dev); 347 + ret = of_platform_populate(np, exynos_adc_match, NULL, &indio_dev->dev); 348 348 if (ret < 0) { 349 349 dev_err(&pdev->dev, "failed adding child nodes\n"); 350 350 goto err_of_populate; ··· 353 353 return 0; 354 354 355 355 err_of_populate: 356 - device_for_each_child(&pdev->dev, NULL, 356 + device_for_each_child(&indio_dev->dev, NULL, 357 357 exynos_adc_remove_devices); 358 358 regulator_disable(info->vdd); 359 359 clk_disable_unprepare(info->clk); ··· 369 369 struct iio_dev *indio_dev = platform_get_drvdata(pdev); 370 370 struct exynos_adc *info = iio_priv(indio_dev); 371 371 372 - device_for_each_child(&pdev->dev, NULL, 372 + device_for_each_child(&indio_dev->dev, NULL, 373 373 exynos_adc_remove_devices); 374 374 regulator_disable(info->vdd); 375 375 clk_disable_unprepare(info->clk);
+5 -2
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
··· 660 660 { 661 661 struct inv_mpu6050_state *st; 662 662 struct iio_dev *indio_dev; 663 + struct inv_mpu6050_platform_data *pdata; 663 664 int result; 664 665 665 666 if (!i2c_check_functionality(client->adapter, ··· 673 672 674 673 st = iio_priv(indio_dev); 675 674 st->client = client; 676 - st->plat_data = *(struct inv_mpu6050_platform_data 677 - *)dev_get_platdata(&client->dev); 675 + pdata = (struct inv_mpu6050_platform_data 676 + *)dev_get_platdata(&client->dev); 677 + if (pdata) 678 + st->plat_data = *pdata; 678 679 /* power is turned on inside check chip type*/ 679 680 result = inv_check_and_setup_chip(st, id); 680 681 if (result)
+3 -3
drivers/infiniband/hw/cxgb4/Kconfig
··· 1 1 config INFINIBAND_CXGB4 2 - tristate "Chelsio T4 RDMA Driver" 2 + tristate "Chelsio T4/T5 RDMA Driver" 3 3 depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n) 4 4 select GENERIC_ALLOCATOR 5 5 ---help--- 6 - This is an iWARP/RDMA driver for the Chelsio T4 1GbE and 7 - 10GbE adapters. 6 + This is an iWARP/RDMA driver for the Chelsio T4 and T5 7 + 1GbE, 10GbE adapters and T5 40GbE adapter. 8 8 9 9 For general information about Chelsio and our products, visit 10 10 our website at <http://www.chelsio.com>.
+28 -11
drivers/infiniband/hw/cxgb4/cm.c
··· 587 587 opt2 |= SACK_EN(1); 588 588 if (wscale && enable_tcp_window_scaling) 589 589 opt2 |= WND_SCALE_EN(1); 590 + if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 591 + opt2 |= T5_OPT_2_VALID; 592 + opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 593 + } 590 594 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 591 595 592 596 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { ··· 1000 996 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 1001 997 { 1002 998 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1003 - state_set(&ep->com, ABORTING); 999 + __state_set(&ep->com, ABORTING); 1004 1000 set_bit(ABORT_CONN, &ep->com.history); 1005 1001 return send_abort(ep, skb, gfp); 1006 1002 } ··· 1158 1154 return credits; 1159 1155 } 1160 1156 1161 - static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) 1157 + static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) 1162 1158 { 1163 1159 struct mpa_message *mpa; 1164 1160 struct mpa_v2_conn_params *mpa_v2_params; ··· 1168 1164 struct c4iw_qp_attributes attrs; 1169 1165 enum c4iw_qp_attr_mask mask; 1170 1166 int err; 1167 + int disconnect = 0; 1171 1168 1172 1169 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1173 1170 ··· 1178 1173 * will abort the connection. 1179 1174 */ 1180 1175 if (stop_ep_timer(ep)) 1181 - return; 1176 + return 0; 1182 1177 1183 1178 /* 1184 1179 * If we get more than the supported amount of private data ··· 1200 1195 * if we don't even have the mpa message, then bail. 1201 1196 */ 1202 1197 if (ep->mpa_pkt_len < sizeof(*mpa)) 1203 - return; 1198 + return 0; 1204 1199 mpa = (struct mpa_message *) ep->mpa_pkt; 1205 1200 1206 1201 /* Validate MPA header. */ ··· 1240 1235 * We'll continue process when more data arrives. 
1241 1236 */ 1242 1237 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1243 - return; 1238 + return 0; 1244 1239 1245 1240 if (mpa->flags & MPA_REJECT) { 1246 1241 err = -ECONNREFUSED; ··· 1342 1337 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1343 1338 attrs.ecode = MPA_NOMATCH_RTR; 1344 1339 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1340 + attrs.send_term = 1; 1345 1341 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1346 - C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1342 + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1347 1343 err = -ENOMEM; 1344 + disconnect = 1; 1348 1345 goto out; 1349 1346 } 1350 1347 ··· 1362 1355 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1363 1356 attrs.ecode = MPA_INSUFF_IRD; 1364 1357 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1358 + attrs.send_term = 1; 1365 1359 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1366 - C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1360 + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1367 1361 err = -ENOMEM; 1362 + disconnect = 1; 1368 1363 goto out; 1369 1364 } 1370 1365 goto out; ··· 1375 1366 send_abort(ep, skb, GFP_KERNEL); 1376 1367 out: 1377 1368 connect_reply_upcall(ep, err); 1378 - return; 1369 + return disconnect; 1379 1370 } 1380 1371 1381 1372 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) ··· 1533 1524 unsigned int tid = GET_TID(hdr); 1534 1525 struct tid_info *t = dev->rdev.lldi.tids; 1535 1526 __u8 status = hdr->status; 1527 + int disconnect = 0; 1536 1528 1537 1529 ep = lookup_tid(t, tid); 1538 1530 if (!ep) ··· 1549 1539 switch (ep->com.state) { 1550 1540 case MPA_REQ_SENT: 1551 1541 ep->rcv_seq += dlen; 1552 - process_mpa_reply(ep, skb); 1542 + disconnect = process_mpa_reply(ep, skb); 1553 1543 break; 1554 1544 case MPA_REQ_WAIT: 1555 1545 ep->rcv_seq += dlen; ··· 1565 1555 ep->com.state, ep->hwtid, status); 1566 1556 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1567 1557 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1568 - C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1558 + C4IW_QP_ATTR_NEXT_STATE, &attrs, 
1); 1559 + disconnect = 1; 1569 1560 break; 1570 1561 } 1571 1562 default: 1572 1563 break; 1573 1564 } 1574 1565 mutex_unlock(&ep->com.mutex); 1566 + if (disconnect) 1567 + c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 1575 1568 return 0; 1576 1569 } 1577 1570 ··· 2021 2008 G_IP_HDR_LEN(hlen); 2022 2009 if (tcph->ece && tcph->cwr) 2023 2010 opt2 |= CCTRL_ECN(1); 2011 + } 2012 + if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 2013 + opt2 |= T5_OPT_2_VALID; 2014 + opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 2024 2015 } 2025 2016 2026 2017 rpl = cplhdr(skb); ··· 3499 3482 __func__, ep, ep->hwtid, ep->com.state); 3500 3483 abort = 0; 3501 3484 } 3502 - mutex_unlock(&ep->com.mutex); 3503 3485 if (abort) 3504 3486 abort_connection(ep, NULL, GFP_KERNEL); 3487 + mutex_unlock(&ep->com.mutex); 3505 3488 c4iw_put_ep(&ep->com); 3506 3489 } 3507 3490
+1
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
··· 435 435 u8 ecode; 436 436 u16 sq_db_inc; 437 437 u16 rq_db_inc; 438 + u8 send_term; 438 439 }; 439 440 440 441 struct c4iw_qp {
+9 -4
drivers/infiniband/hw/cxgb4/qp.c
··· 1388 1388 qhp->attr.layer_etype = attrs->layer_etype; 1389 1389 qhp->attr.ecode = attrs->ecode; 1390 1390 ep = qhp->ep; 1391 - disconnect = 1; 1392 - c4iw_get_ep(&qhp->ep->com); 1393 - if (!internal) 1391 + if (!internal) { 1392 + c4iw_get_ep(&qhp->ep->com); 1394 1393 terminate = 1; 1395 - else { 1394 + disconnect = 1; 1395 + } else { 1396 + terminate = qhp->attr.send_term; 1396 1397 ret = rdma_fini(rhp, qhp, ep); 1397 1398 if (ret) 1398 1399 goto err; ··· 1777 1776 /* 1778 1777 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for 1779 1778 * ringing the queue db when we're in DB_FULL mode. 1779 + * Only allow this on T4 devices. 1780 1780 */ 1781 1781 attrs.sq_db_inc = attr->sq_psn; 1782 1782 attrs.rq_db_inc = attr->rq_psn; 1783 1783 mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0; 1784 1784 mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0; 1785 + if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) && 1786 + (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB))) 1787 + return -EINVAL; 1785 1788 1786 1789 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); 1787 1790 }
+14
drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
··· 836 836 #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) 837 837 #define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) 838 838 839 + enum { /* TCP congestion control algorithms */ 840 + CONG_ALG_RENO, 841 + CONG_ALG_TAHOE, 842 + CONG_ALG_NEWRENO, 843 + CONG_ALG_HIGHSPEED 844 + }; 845 + 846 + #define S_CONG_CNTRL 14 847 + #define M_CONG_CNTRL 0x3 848 + #define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL) 849 + #define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL) 850 + 851 + #define T5_OPT_2_VALID (1 << 31) 852 + 839 853 #endif /* _T4FW_RI_API_H_ */
+28 -1
drivers/input/keyboard/atkbd.c
··· 243 243 static void *atkbd_platform_fixup_data; 244 244 static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int); 245 245 246 + /* 247 + * Certain keyboards to not like ATKBD_CMD_RESET_DIS and stop responding 248 + * to many commands until full reset (ATKBD_CMD_RESET_BAT) is performed. 249 + */ 250 + static bool atkbd_skip_deactivate; 251 + 246 252 static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, 247 253 ssize_t (*handler)(struct atkbd *, char *)); 248 254 static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count, ··· 774 768 * Make sure nothing is coming from the keyboard and disturbs our 775 769 * internal state. 776 770 */ 777 - atkbd_deactivate(atkbd); 771 + if (!atkbd_skip_deactivate) 772 + atkbd_deactivate(atkbd); 778 773 779 774 return 0; 780 775 } ··· 1645 1638 return 1; 1646 1639 } 1647 1640 1641 + static int __init atkbd_deactivate_fixup(const struct dmi_system_id *id) 1642 + { 1643 + atkbd_skip_deactivate = true; 1644 + return 1; 1645 + } 1646 + 1648 1647 static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = { 1649 1648 { 1650 1649 .matches = { ··· 1787 1774 }, 1788 1775 .callback = atkbd_setup_scancode_fixup, 1789 1776 .driver_data = atkbd_oqo_01plus_scancode_fixup, 1777 + }, 1778 + { 1779 + .matches = { 1780 + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), 1781 + DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"), 1782 + }, 1783 + .callback = atkbd_deactivate_fixup, 1784 + }, 1785 + { 1786 + .matches = { 1787 + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), 1788 + DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"), 1789 + }, 1790 + .callback = atkbd_deactivate_fixup, 1790 1791 }, 1791 1792 { } 1792 1793 };
+7
drivers/input/keyboard/tca8418_keypad.c
··· 392 392 { } 393 393 }; 394 394 MODULE_DEVICE_TABLE(of, tca8418_dt_ids); 395 + 396 + /* 397 + * The device tree based i2c loader looks for 398 + * "i2c:" + second_component_of(property("compatible")) 399 + * and therefore we need an alias to be found. 400 + */ 401 + MODULE_ALIAS("i2c:tca8418"); 395 402 #endif 396 403 397 404 static struct i2c_driver tca8418_keypad_driver = {
+3 -1
drivers/input/misc/bma150.c
··· 70 70 #define BMA150_CFG_5_REG 0x11 71 71 72 72 #define BMA150_CHIP_ID 2 73 + #define BMA180_CHIP_ID 3 73 74 #define BMA150_CHIP_ID_REG BMA150_DATA_0_REG 74 75 75 76 #define BMA150_ACC_X_LSB_REG BMA150_DATA_2_REG ··· 540 539 } 541 540 542 541 chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG); 543 - if (chip_id != BMA150_CHIP_ID) { 542 + if (chip_id != BMA150_CHIP_ID && chip_id != BMA180_CHIP_ID) { 544 543 dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id); 545 544 return -EINVAL; 546 545 } ··· 644 643 645 644 static const struct i2c_device_id bma150_id[] = { 646 645 { "bma150", 0 }, 646 + { "bma180", 0 }, 647 647 { "smb380", 0 }, 648 648 { "bma023", 0 }, 649 649 { }
+25 -1
drivers/input/mouse/elantech.c
··· 11 11 */ 12 12 13 13 #include <linux/delay.h> 14 + #include <linux/dmi.h> 14 15 #include <linux/slab.h> 15 16 #include <linux/module.h> 16 17 #include <linux/input.h> ··· 832 831 break; 833 832 834 833 case 3: 835 - etd->reg_10 = 0x0b; 834 + if (etd->set_hw_resolution) 835 + etd->reg_10 = 0x0b; 836 + else 837 + etd->reg_10 = 0x03; 838 + 836 839 if (elantech_write_reg(psmouse, 0x10, etd->reg_10)) 837 840 rc = -1; 838 841 ··· 1336 1331 } 1337 1332 1338 1333 /* 1334 + * Some hw_version 3 models go into error state when we try to set bit 3 of r10 1335 + */ 1336 + static const struct dmi_system_id no_hw_res_dmi_table[] = { 1337 + #if defined(CONFIG_DMI) && defined(CONFIG_X86) 1338 + { 1339 + /* Gigabyte U2442 */ 1340 + .matches = { 1341 + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), 1342 + DMI_MATCH(DMI_PRODUCT_NAME, "U2442"), 1343 + }, 1344 + }, 1345 + #endif 1346 + { } 1347 + }; 1348 + 1349 + /* 1339 1350 * determine hardware version and set some properties according to it. 1340 1351 */ 1341 1352 static int elantech_set_properties(struct elantech_data *etd) ··· 1410 1389 * value of this hardware flag. 1411 1390 */ 1412 1391 etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000); 1392 + 1393 + /* Enable real hardware resolution on hw_version 3 ? */ 1394 + etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table); 1413 1395 1414 1396 return 0; 1415 1397 }
+1
drivers/input/mouse/elantech.h
··· 130 130 bool jumpy_cursor; 131 131 bool reports_pressure; 132 132 bool crc_enabled; 133 + bool set_hw_resolution; 133 134 unsigned char hw_version; 134 135 unsigned int fw_version; 135 136 unsigned int single_finger_reports;
+8
drivers/input/mouse/synaptics.c
··· 1566 1566 .driver_data = (int []){1232, 5710, 1156, 4696}, 1567 1567 }, 1568 1568 { 1569 + /* Lenovo ThinkPad Edge E431 */ 1570 + .matches = { 1571 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1572 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"), 1573 + }, 1574 + .driver_data = (int []){1024, 5022, 2508, 4832}, 1575 + }, 1576 + { 1569 1577 /* Lenovo ThinkPad T431s */ 1570 1578 .matches = { 1571 1579 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+20 -34
drivers/irqchip/irq-armada-370-xp.c
··· 41 41 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) 42 42 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) 43 43 #define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) 44 + #define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF 44 45 45 46 #define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) 46 47 #define ARMADA_375_PPI_CAUSE (0x10) ··· 133 132 struct msi_desc *desc) 134 133 { 135 134 struct msi_msg msg; 136 - irq_hw_number_t hwirq; 137 - int virq; 135 + int virq, hwirq; 138 136 139 137 hwirq = armada_370_xp_alloc_msi(); 140 138 if (hwirq < 0) ··· 159 159 unsigned int irq) 160 160 { 161 161 struct irq_data *d = irq_get_irq_data(irq); 162 + unsigned long hwirq = d->hwirq; 163 + 162 164 irq_dispose_mapping(irq); 163 - armada_370_xp_free_msi(d->hwirq); 165 + armada_370_xp_free_msi(hwirq); 166 + } 167 + 168 + static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev, 169 + int nvec, int type) 170 + { 171 + /* We support MSI, but not MSI-X */ 172 + if (type == PCI_CAP_ID_MSI) 173 + return 0; 174 + return -EINVAL; 164 175 } 165 176 166 177 static struct irq_chip armada_370_xp_msi_irq_chip = { ··· 212 201 213 202 msi_chip->setup_irq = armada_370_xp_setup_msi_irq; 214 203 msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; 204 + msi_chip->check_device = armada_370_xp_check_msi_device; 215 205 msi_chip->of_node = node; 216 206 217 207 armada_370_xp_msi_domain = ··· 256 244 static int armada_xp_set_affinity(struct irq_data *d, 257 245 const struct cpumask *mask_val, bool force) 258 246 { 259 - unsigned long reg; 260 - unsigned long new_mask = 0; 261 - unsigned long online_mask = 0; 262 - unsigned long count = 0; 263 247 irq_hw_number_t hwirq = irqd_to_hwirq(d); 248 + unsigned long reg, mask; 264 249 int cpu; 265 250 266 - for_each_cpu(cpu, mask_val) { 267 - new_mask |= 1 << cpu_logical_map(cpu); 268 - count++; 269 - } 270 - 271 - /* 272 - * Forbid mutlicore interrupt affinity 273 - * This is required since the MPIC HW doesn't limit 274 - * several 
CPUs from acknowledging the same interrupt. 275 - */ 276 - if (count > 1) 277 - return -EINVAL; 278 - 279 - for_each_cpu(cpu, cpu_online_mask) 280 - online_mask |= 1 << cpu_logical_map(cpu); 251 + /* Select a single core from the affinity mask which is online */ 252 + cpu = cpumask_any_and(mask_val, cpu_online_mask); 253 + mask = 1UL << cpu_logical_map(cpu); 281 254 282 255 raw_spin_lock(&irq_controller_lock); 283 - 284 256 reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); 285 - reg = (reg & (~online_mask)) | new_mask; 257 + reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask; 286 258 writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); 287 - 288 259 raw_spin_unlock(&irq_controller_lock); 289 260 290 261 return 0; ··· 489 494 490 495 #ifdef CONFIG_SMP 491 496 armada_xp_mpic_smp_cpu_init(); 492 - 493 - /* 494 - * Set the default affinity from all CPUs to the boot cpu. 495 - * This is required since the MPIC doesn't limit several CPUs 496 - * from acknowledging the same interrupt. 497 - */ 498 - cpumask_clear(irq_default_affinity); 499 - cpumask_set_cpu(smp_processor_id(), irq_default_affinity); 500 - 501 497 #endif 502 498 503 499 armada_370_xp_msi_init(node, main_int_res.start);
+1 -1
drivers/irqchip/irq-crossbar.c
··· 107 107 int i, size, max, reserved = 0, entry; 108 108 const __be32 *irqsr; 109 109 110 - cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL); 110 + cb = kzalloc(sizeof(*cb), GFP_KERNEL); 111 111 112 112 if (!cb) 113 113 return -ENOMEM;
+1 -1
drivers/isdn/hisax/icc.c
··· 425 425 if (cs->debug & L1_DEB_MONITOR) 426 426 debugl1(cs, "ICC %02x -> MOX1", cs->dc.icc.mon_tx[cs->dc.icc.mon_txp - 1]); 427 427 } 428 - AfterMOX1: 428 + AfterMOX1: ; 429 429 #endif 430 430 } 431 431 }
+1
drivers/md/dm-cache-target.c
··· 2488 2488 2489 2489 } else { 2490 2490 inc_hit_counter(cache, bio); 2491 + pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 2491 2492 2492 2493 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && 2493 2494 !is_dirty(cache, lookup_result.cblock))
+71 -6
drivers/md/dm-thin.c
··· 232 232 struct bio_list deferred_bio_list; 233 233 struct bio_list retry_on_resume_list; 234 234 struct rb_root sort_bio_list; /* sorted list of deferred bios */ 235 + 236 + /* 237 + * Ensures the thin is not destroyed until the worker has finished 238 + * iterating the active_thins list. 239 + */ 240 + atomic_t refcount; 241 + struct completion can_destroy; 235 242 }; 236 243 237 244 /*----------------------------------------------------------------*/ ··· 1493 1486 blk_finish_plug(&plug); 1494 1487 } 1495 1488 1489 + static void thin_get(struct thin_c *tc); 1490 + static void thin_put(struct thin_c *tc); 1491 + 1492 + /* 1493 + * We can't hold rcu_read_lock() around code that can block. So we 1494 + * find a thin with the rcu lock held; bump a refcount; then drop 1495 + * the lock. 1496 + */ 1497 + static struct thin_c *get_first_thin(struct pool *pool) 1498 + { 1499 + struct thin_c *tc = NULL; 1500 + 1501 + rcu_read_lock(); 1502 + if (!list_empty(&pool->active_thins)) { 1503 + tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); 1504 + thin_get(tc); 1505 + } 1506 + rcu_read_unlock(); 1507 + 1508 + return tc; 1509 + } 1510 + 1511 + static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) 1512 + { 1513 + struct thin_c *old_tc = tc; 1514 + 1515 + rcu_read_lock(); 1516 + list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { 1517 + thin_get(tc); 1518 + thin_put(old_tc); 1519 + rcu_read_unlock(); 1520 + return tc; 1521 + } 1522 + thin_put(old_tc); 1523 + rcu_read_unlock(); 1524 + 1525 + return NULL; 1526 + } 1527 + 1496 1528 static void process_deferred_bios(struct pool *pool) 1497 1529 { 1498 1530 unsigned long flags; ··· 1539 1493 struct bio_list bios; 1540 1494 struct thin_c *tc; 1541 1495 1542 - rcu_read_lock(); 1543 - list_for_each_entry_rcu(tc, &pool->active_thins, list) 1496 + tc = get_first_thin(pool); 1497 + while (tc) { 1544 1498 process_thin_deferred_bios(tc); 1545 - rcu_read_unlock(); 1499 + tc = 
get_next_thin(pool, tc); 1500 + } 1546 1501 1547 1502 /* 1548 1503 * If there are any deferred flush bios, we must commit ··· 1625 1578 { 1626 1579 struct noflush_work w; 1627 1580 1628 - INIT_WORK(&w.worker, fn); 1581 + INIT_WORK_ONSTACK(&w.worker, fn); 1629 1582 w.tc = tc; 1630 1583 atomic_set(&w.complete, 0); 1631 1584 init_waitqueue_head(&w.wait); ··· 3108 3061 /*---------------------------------------------------------------- 3109 3062 * Thin target methods 3110 3063 *--------------------------------------------------------------*/ 3064 + static void thin_get(struct thin_c *tc) 3065 + { 3066 + atomic_inc(&tc->refcount); 3067 + } 3068 + 3069 + static void thin_put(struct thin_c *tc) 3070 + { 3071 + if (atomic_dec_and_test(&tc->refcount)) 3072 + complete(&tc->can_destroy); 3073 + } 3074 + 3111 3075 static void thin_dtr(struct dm_target *ti) 3112 3076 { 3113 3077 struct thin_c *tc = ti->private; 3114 3078 unsigned long flags; 3079 + 3080 + thin_put(tc); 3081 + wait_for_completion(&tc->can_destroy); 3115 3082 3116 3083 spin_lock_irqsave(&tc->pool->lock, flags); 3117 3084 list_del_rcu(&tc->list); ··· 3162 3101 struct thin_c *tc; 3163 3102 struct dm_dev *pool_dev, *origin_dev; 3164 3103 struct mapped_device *pool_md; 3104 + unsigned long flags; 3165 3105 3166 3106 mutex_lock(&dm_thin_pool_table.mutex); 3167 3107 ··· 3253 3191 3254 3192 mutex_unlock(&dm_thin_pool_table.mutex); 3255 3193 3256 - spin_lock(&tc->pool->lock); 3194 + atomic_set(&tc->refcount, 1); 3195 + init_completion(&tc->can_destroy); 3196 + 3197 + spin_lock_irqsave(&tc->pool->lock, flags); 3257 3198 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); 3258 - spin_unlock(&tc->pool->lock); 3199 + spin_unlock_irqrestore(&tc->pool->lock, flags); 3259 3200 /* 3260 3201 * This synchronize_rcu() call is needed here otherwise we risk a 3261 3202 * wake_worker() call finding no bios to process (because the newly
+9 -6
drivers/md/dm-verity.c
··· 330 330 return r; 331 331 } 332 332 } 333 - 334 333 todo = 1 << v->data_dev_block_bits; 335 - while (io->iter.bi_size) { 334 + do { 336 335 u8 *page; 336 + unsigned len; 337 337 struct bio_vec bv = bio_iter_iovec(bio, io->iter); 338 338 339 339 page = kmap_atomic(bv.bv_page); 340 - r = crypto_shash_update(desc, page + bv.bv_offset, 341 - bv.bv_len); 340 + len = bv.bv_len; 341 + if (likely(len >= todo)) 342 + len = todo; 343 + r = crypto_shash_update(desc, page + bv.bv_offset, len); 342 344 kunmap_atomic(page); 343 345 344 346 if (r < 0) { ··· 348 346 return r; 349 347 } 350 348 351 - bio_advance_iter(bio, &io->iter, bv.bv_len); 352 - } 349 + bio_advance_iter(bio, &io->iter, len); 350 + todo -= len; 351 + } while (todo); 353 352 354 353 if (!v->version) { 355 354 r = crypto_shash_update(desc, v->salt, v->salt_size);
+41 -91
drivers/mfd/rtsx_pcr.c
··· 338 338 int num_sg, bool read, int timeout) 339 339 { 340 340 struct completion trans_done; 341 - int err = 0, count; 341 + u8 dir; 342 + int err = 0, i, count; 342 343 long timeleft; 343 344 unsigned long flags; 345 + struct scatterlist *sg; 346 + enum dma_data_direction dma_dir; 347 + u32 val; 348 + dma_addr_t addr; 349 + unsigned int len; 344 350 345 - count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read); 351 + dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg); 352 + 353 + /* don't transfer data during abort processing */ 354 + if (pcr->remove_pci) 355 + return -EINVAL; 356 + 357 + if ((sglist == NULL) || (num_sg <= 0)) 358 + return -EINVAL; 359 + 360 + if (read) { 361 + dir = DEVICE_TO_HOST; 362 + dma_dir = DMA_FROM_DEVICE; 363 + } else { 364 + dir = HOST_TO_DEVICE; 365 + dma_dir = DMA_TO_DEVICE; 366 + } 367 + 368 + count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir); 346 369 if (count < 1) { 347 370 dev_err(&(pcr->pci->dev), "scatterlist map failed\n"); 348 371 return -EINVAL; 349 372 } 350 373 dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count); 351 374 375 + val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE; 376 + pcr->sgi = 0; 377 + for_each_sg(sglist, sg, count, i) { 378 + addr = sg_dma_address(sg); 379 + len = sg_dma_len(sg); 380 + rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1); 381 + } 352 382 353 383 spin_lock_irqsave(&pcr->lock, flags); 354 384 355 385 pcr->done = &trans_done; 356 386 pcr->trans_result = TRANS_NOT_READY; 357 387 init_completion(&trans_done); 388 + rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr); 389 + rtsx_pci_writel(pcr, RTSX_HDBCTLR, val); 358 390 359 391 spin_unlock_irqrestore(&pcr->lock, flags); 360 - 361 - rtsx_pci_dma_transfer(pcr, sglist, count, read); 362 392 363 393 timeleft = wait_for_completion_interruptible_timeout( 364 394 &trans_done, msecs_to_jiffies(timeout)); ··· 413 383 pcr->done = NULL; 414 384 spin_unlock_irqrestore(&pcr->lock, flags); 415 385 416 - 
rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read); 386 + dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir); 417 387 418 388 if ((err < 0) && (err != -ENODEV)) 419 389 rtsx_pci_stop_cmd(pcr); ··· 424 394 return err; 425 395 } 426 396 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data); 427 - 428 - int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, 429 - int num_sg, bool read) 430 - { 431 - enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 432 - 433 - if (pcr->remove_pci) 434 - return -EINVAL; 435 - 436 - if ((sglist == NULL) || num_sg < 1) 437 - return -EINVAL; 438 - 439 - return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir); 440 - } 441 - EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg); 442 - 443 - int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, 444 - int num_sg, bool read) 445 - { 446 - enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 447 - 448 - if (pcr->remove_pci) 449 - return -EINVAL; 450 - 451 - if (sglist == NULL || num_sg < 1) 452 - return -EINVAL; 453 - 454 - dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir); 455 - return num_sg; 456 - } 457 - EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg); 458 - 459 - int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist, 460 - int sg_count, bool read) 461 - { 462 - struct scatterlist *sg; 463 - dma_addr_t addr; 464 - unsigned int len; 465 - int i; 466 - u32 val; 467 - u8 dir = read ? 
DEVICE_TO_HOST : HOST_TO_DEVICE; 468 - unsigned long flags; 469 - 470 - if (pcr->remove_pci) 471 - return -EINVAL; 472 - 473 - if ((sglist == NULL) || (sg_count < 1)) 474 - return -EINVAL; 475 - 476 - val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE; 477 - pcr->sgi = 0; 478 - for_each_sg(sglist, sg, sg_count, i) { 479 - addr = sg_dma_address(sg); 480 - len = sg_dma_len(sg); 481 - rtsx_pci_add_sg_tbl(pcr, addr, len, i == sg_count - 1); 482 - } 483 - 484 - spin_lock_irqsave(&pcr->lock, flags); 485 - 486 - rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr); 487 - rtsx_pci_writel(pcr, RTSX_HDBCTLR, val); 488 - 489 - spin_unlock_irqrestore(&pcr->lock, flags); 490 - 491 - return 0; 492 - } 493 - EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer); 494 397 495 398 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len) 496 399 { ··· 836 873 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR); 837 874 /* Clear interrupt flag */ 838 875 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg); 839 - dev_dbg(&pcr->pci->dev, "=========== BIPR 0x%8x ==========\n", int_reg); 840 - 841 876 if ((int_reg & pcr->bier) == 0) { 842 877 spin_unlock(&pcr->lock); 843 878 return IRQ_NONE; ··· 866 905 } 867 906 868 907 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) { 869 - if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) 908 + if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) { 870 909 pcr->trans_result = TRANS_RESULT_FAIL; 871 - else if (int_reg & TRANS_OK_INT) 910 + if (pcr->done) 911 + complete(pcr->done); 912 + } else if (int_reg & TRANS_OK_INT) { 872 913 pcr->trans_result = TRANS_RESULT_OK; 873 - 874 - if (pcr->done) 875 - complete(pcr->done); 876 - 877 - if (int_reg & SD_EXIST) { 878 - struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD]; 879 - if (slot && slot->done_transfer) 880 - slot->done_transfer(slot->p_dev); 881 - } 882 - 883 - if (int_reg & MS_EXIST) { 884 - struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD]; 885 - if (slot && slot->done_transfer) 886 - slot->done_transfer(slot->p_dev); 914 + if 
(pcr->done) 915 + complete(pcr->done); 887 916 } 888 917 } 889 - 890 918 891 919 if (pcr->card_inserted || pcr->card_removed) 892 920 schedule_delayed_work(&pcr->carddet_work,
+66 -348
drivers/mmc/host/rtsx_pci_sdmmc.c
··· 31 31 #include <linux/mfd/rtsx_pci.h> 32 32 #include <asm/unaligned.h> 33 33 34 - struct realtek_next { 35 - unsigned int sg_count; 36 - s32 cookie; 37 - }; 38 - 39 34 struct realtek_pci_sdmmc { 40 35 struct platform_device *pdev; 41 36 struct rtsx_pcr *pcr; 42 37 struct mmc_host *mmc; 43 38 struct mmc_request *mrq; 44 - struct mmc_command *cmd; 45 - struct mmc_data *data; 46 39 47 - spinlock_t lock; 48 - struct timer_list timer; 49 - struct tasklet_struct cmd_tasklet; 50 - struct tasklet_struct data_tasklet; 51 - struct tasklet_struct finish_tasklet; 40 + struct mutex host_mutex; 52 41 53 - u8 rsp_type; 54 - u8 rsp_len; 55 - int sg_count; 56 42 u8 ssc_depth; 57 43 unsigned int clock; 58 44 bool vpclk; ··· 48 62 int power_state; 49 63 #define SDMMC_POWER_ON 1 50 64 #define SDMMC_POWER_OFF 0 51 - 52 - struct realtek_next next_data; 53 65 }; 54 - 55 - static int sd_start_multi_rw(struct realtek_pci_sdmmc *host, 56 - struct mmc_request *mrq); 57 66 58 67 static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host) 59 68 { ··· 85 104 #else 86 105 #define sd_print_debug_regs(host) 87 106 #endif /* DEBUG */ 88 - 89 - static void sd_isr_done_transfer(struct platform_device *pdev) 90 - { 91 - struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); 92 - 93 - spin_lock(&host->lock); 94 - if (host->cmd) 95 - tasklet_schedule(&host->cmd_tasklet); 96 - if (host->data) 97 - tasklet_schedule(&host->data_tasklet); 98 - spin_unlock(&host->lock); 99 - } 100 - 101 - static void sd_request_timeout(unsigned long host_addr) 102 - { 103 - struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr; 104 - unsigned long flags; 105 - 106 - spin_lock_irqsave(&host->lock, flags); 107 - 108 - if (!host->mrq) { 109 - dev_err(sdmmc_dev(host), "error: no request exist\n"); 110 - goto out; 111 - } 112 - 113 - if (host->cmd) 114 - host->cmd->error = -ETIMEDOUT; 115 - if (host->data) 116 - host->data->error = -ETIMEDOUT; 117 - 118 - dev_dbg(sdmmc_dev(host), "timeout for 
request\n"); 119 - 120 - out: 121 - tasklet_schedule(&host->finish_tasklet); 122 - spin_unlock_irqrestore(&host->lock, flags); 123 - } 124 - 125 - static void sd_finish_request(unsigned long host_addr) 126 - { 127 - struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr; 128 - struct rtsx_pcr *pcr = host->pcr; 129 - struct mmc_request *mrq; 130 - struct mmc_command *cmd; 131 - struct mmc_data *data; 132 - unsigned long flags; 133 - bool any_error; 134 - 135 - spin_lock_irqsave(&host->lock, flags); 136 - 137 - del_timer(&host->timer); 138 - mrq = host->mrq; 139 - if (!mrq) { 140 - dev_err(sdmmc_dev(host), "error: no request need finish\n"); 141 - goto out; 142 - } 143 - 144 - cmd = mrq->cmd; 145 - data = mrq->data; 146 - 147 - any_error = (mrq->sbc && mrq->sbc->error) || 148 - (mrq->stop && mrq->stop->error) || 149 - (cmd && cmd->error) || (data && data->error); 150 - 151 - if (any_error) { 152 - rtsx_pci_stop_cmd(pcr); 153 - sd_clear_error(host); 154 - } 155 - 156 - if (data) { 157 - if (any_error) 158 - data->bytes_xfered = 0; 159 - else 160 - data->bytes_xfered = data->blocks * data->blksz; 161 - 162 - if (!data->host_cookie) 163 - rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, 164 - data->flags & MMC_DATA_READ); 165 - 166 - } 167 - 168 - host->mrq = NULL; 169 - host->cmd = NULL; 170 - host->data = NULL; 171 - 172 - out: 173 - spin_unlock_irqrestore(&host->lock, flags); 174 - mutex_unlock(&pcr->pcr_mutex); 175 - mmc_request_done(host->mmc, mrq); 176 - } 177 107 178 108 static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt, 179 109 u8 *buf, int buf_len, int timeout) ··· 203 311 return 0; 204 312 } 205 313 206 - static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd) 314 + static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host, 315 + struct mmc_command *cmd) 207 316 { 208 317 struct rtsx_pcr *pcr = host->pcr; 209 318 u8 cmd_idx = (u8)cmd->opcode; ··· 212 319 int err = 0; 213 320 int 
timeout = 100; 214 321 int i; 322 + u8 *ptr; 323 + int stat_idx = 0; 215 324 u8 rsp_type; 216 325 int rsp_len = 5; 217 - unsigned long flags; 218 - 219 - if (host->cmd) 220 - dev_err(sdmmc_dev(host), "error: cmd already exist\n"); 221 - 222 - host->cmd = cmd; 326 + bool clock_toggled = false; 223 327 224 328 dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", 225 329 __func__, cmd_idx, arg); ··· 251 361 err = -EINVAL; 252 362 goto out; 253 363 } 254 - host->rsp_type = rsp_type; 255 - host->rsp_len = rsp_len; 256 364 257 365 if (rsp_type == SD_RSP_TYPE_R1b) 258 366 timeout = 3000; ··· 260 372 0xFF, SD_CLK_TOGGLE_EN); 261 373 if (err < 0) 262 374 goto out; 375 + 376 + clock_toggled = true; 263 377 } 264 378 265 379 rtsx_pci_init_cmd(pcr); ··· 285 395 /* Read data from ping-pong buffer */ 286 396 for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++) 287 397 rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); 398 + stat_idx = 16; 288 399 } else if (rsp_type != SD_RSP_TYPE_R0) { 289 400 /* Read data from SD_CMDx registers */ 290 401 for (i = SD_CMD0; i <= SD_CMD4; i++) 291 402 rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); 403 + stat_idx = 5; 292 404 } 293 405 294 406 rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0); 295 407 296 - mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout)); 297 - 298 - spin_lock_irqsave(&pcr->lock, flags); 299 - pcr->trans_result = TRANS_NOT_READY; 300 - rtsx_pci_send_cmd_no_wait(pcr); 301 - spin_unlock_irqrestore(&pcr->lock, flags); 302 - 303 - return; 304 - 305 - out: 306 - cmd->error = err; 307 - tasklet_schedule(&host->finish_tasklet); 308 - } 309 - 310 - static void sd_get_rsp(unsigned long host_addr) 311 - { 312 - struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr; 313 - struct rtsx_pcr *pcr = host->pcr; 314 - struct mmc_command *cmd; 315 - int i, err = 0, stat_idx; 316 - u8 *ptr, rsp_type; 317 - unsigned long flags; 318 - 319 - spin_lock_irqsave(&host->lock, flags); 320 - 321 - cmd = host->cmd; 322 
- host->cmd = NULL; 323 - 324 - if (!cmd) { 325 - dev_err(sdmmc_dev(host), "error: cmd not exist\n"); 408 + err = rtsx_pci_send_cmd(pcr, timeout); 409 + if (err < 0) { 410 + sd_print_debug_regs(host); 411 + sd_clear_error(host); 412 + dev_dbg(sdmmc_dev(host), 413 + "rtsx_pci_send_cmd error (err = %d)\n", err); 326 414 goto out; 327 415 } 328 - 329 - spin_lock(&pcr->lock); 330 - if (pcr->trans_result == TRANS_NO_DEVICE) 331 - err = -ENODEV; 332 - else if (pcr->trans_result != TRANS_RESULT_OK) 333 - err = -EINVAL; 334 - spin_unlock(&pcr->lock); 335 - 336 - if (err < 0) 337 - goto out; 338 - 339 - rsp_type = host->rsp_type; 340 - stat_idx = host->rsp_len; 341 416 342 417 if (rsp_type == SD_RSP_TYPE_R0) { 343 418 err = 0; ··· 340 485 cmd->resp[0]); 341 486 } 342 487 343 - if (cmd == host->mrq->sbc) { 344 - sd_send_cmd(host, host->mrq->cmd); 345 - spin_unlock_irqrestore(&host->lock, flags); 346 - return; 347 - } 348 - 349 - if (cmd == host->mrq->stop) 350 - goto out; 351 - 352 - if (cmd->data) { 353 - sd_start_multi_rw(host, host->mrq); 354 - spin_unlock_irqrestore(&host->lock, flags); 355 - return; 356 - } 357 - 358 488 out: 359 489 cmd->error = err; 360 490 361 - tasklet_schedule(&host->finish_tasklet); 362 - spin_unlock_irqrestore(&host->lock, flags); 491 + if (err && clock_toggled) 492 + rtsx_pci_write_register(pcr, SD_BUS_STAT, 493 + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); 363 494 } 364 495 365 - static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host, 366 - struct mmc_data *data, struct realtek_next *next) 367 - { 368 - struct rtsx_pcr *pcr = host->pcr; 369 - int read = data->flags & MMC_DATA_READ; 370 - int sg_count = 0; 371 - 372 - if (!next && data->host_cookie && 373 - data->host_cookie != host->next_data.cookie) { 374 - dev_err(sdmmc_dev(host), 375 - "error: invalid cookie data[%d] host[%d]\n", 376 - data->host_cookie, host->next_data.cookie); 377 - data->host_cookie = 0; 378 - } 379 - 380 - if (next || (!next && data->host_cookie != 
host->next_data.cookie)) 381 - sg_count = rtsx_pci_dma_map_sg(pcr, 382 - data->sg, data->sg_len, read); 383 - else 384 - sg_count = host->next_data.sg_count; 385 - 386 - if (next) { 387 - next->sg_count = sg_count; 388 - if (++next->cookie < 0) 389 - next->cookie = 1; 390 - data->host_cookie = next->cookie; 391 - } 392 - 393 - return sg_count; 394 - } 395 - 396 - static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, 397 - bool is_first_req) 398 - { 399 - struct realtek_pci_sdmmc *host = mmc_priv(mmc); 400 - struct mmc_data *data = mrq->data; 401 - 402 - if (data->host_cookie) { 403 - dev_err(sdmmc_dev(host), 404 - "error: descard already cookie data[%d]\n", 405 - data->host_cookie); 406 - data->host_cookie = 0; 407 - } 408 - 409 - dev_dbg(sdmmc_dev(host), "dma sg prepared: %d\n", 410 - sd_pre_dma_transfer(host, data, &host->next_data)); 411 - } 412 - 413 - static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 414 - int err) 415 - { 416 - struct realtek_pci_sdmmc *host = mmc_priv(mmc); 417 - struct rtsx_pcr *pcr = host->pcr; 418 - struct mmc_data *data = mrq->data; 419 - int read = data->flags & MMC_DATA_READ; 420 - 421 - rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, read); 422 - data->host_cookie = 0; 423 - } 424 - 425 - static int sd_start_multi_rw(struct realtek_pci_sdmmc *host, 426 - struct mmc_request *mrq) 496 + static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) 427 497 { 428 498 struct rtsx_pcr *pcr = host->pcr; 429 499 struct mmc_host *mmc = host->mmc; 430 500 struct mmc_card *card = mmc->card; 431 501 struct mmc_data *data = mrq->data; 432 502 int uhs = mmc_card_uhs(card); 433 - int read = data->flags & MMC_DATA_READ; 503 + int read = (data->flags & MMC_DATA_READ) ? 
1 : 0; 434 504 u8 cfg2, trans_mode; 435 505 int err; 436 506 size_t data_len = data->blksz * data->blocks; 437 - 438 - if (host->data) 439 - dev_err(sdmmc_dev(host), "error: data already exist\n"); 440 - 441 - host->data = data; 442 507 443 508 if (read) { 444 509 cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | ··· 410 635 rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, 411 636 SD_TRANSFER_END, SD_TRANSFER_END); 412 637 413 - mod_timer(&host->timer, jiffies + 10 * HZ); 414 638 rtsx_pci_send_cmd_no_wait(pcr); 415 639 416 - err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, read); 640 + err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000); 417 641 if (err < 0) { 418 - data->error = err; 419 - tasklet_schedule(&host->finish_tasklet); 642 + sd_clear_error(host); 643 + return err; 420 644 } 645 + 421 646 return 0; 422 - } 423 - 424 - static void sd_finish_multi_rw(unsigned long host_addr) 425 - { 426 - struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr; 427 - struct rtsx_pcr *pcr = host->pcr; 428 - struct mmc_data *data; 429 - int err = 0; 430 - unsigned long flags; 431 - 432 - spin_lock_irqsave(&host->lock, flags); 433 - 434 - if (!host->data) { 435 - dev_err(sdmmc_dev(host), "error: no data exist\n"); 436 - goto out; 437 - } 438 - 439 - data = host->data; 440 - host->data = NULL; 441 - 442 - if (pcr->trans_result == TRANS_NO_DEVICE) 443 - err = -ENODEV; 444 - else if (pcr->trans_result != TRANS_RESULT_OK) 445 - err = -EINVAL; 446 - 447 - if (err < 0) { 448 - data->error = err; 449 - goto out; 450 - } 451 - 452 - if (!host->mrq->sbc && data->stop) { 453 - sd_send_cmd(host, data->stop); 454 - spin_unlock_irqrestore(&host->lock, flags); 455 - return; 456 - } 457 - 458 - out: 459 - tasklet_schedule(&host->finish_tasklet); 460 - spin_unlock_irqrestore(&host->lock, flags); 461 647 } 462 648 463 649 static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) ··· 637 901 return 0; 638 902 } 639 903 640 - static inline 
bool sd_use_muti_rw(struct mmc_command *cmd) 641 - { 642 - return mmc_op_multi(cmd->opcode) || 643 - (cmd->opcode == MMC_READ_SINGLE_BLOCK) || 644 - (cmd->opcode == MMC_WRITE_BLOCK); 645 - } 646 - 647 904 static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 648 905 { 649 906 struct realtek_pci_sdmmc *host = mmc_priv(mmc); ··· 645 916 struct mmc_data *data = mrq->data; 646 917 unsigned int data_size = 0; 647 918 int err; 648 - unsigned long flags; 649 - 650 - mutex_lock(&pcr->pcr_mutex); 651 - spin_lock_irqsave(&host->lock, flags); 652 - 653 - if (host->mrq) 654 - dev_err(sdmmc_dev(host), "error: request already exist\n"); 655 - host->mrq = mrq; 656 919 657 920 if (host->eject) { 658 921 cmd->error = -ENOMEDIUM; ··· 657 936 goto finish; 658 937 } 659 938 939 + mutex_lock(&pcr->pcr_mutex); 940 + 660 941 rtsx_pci_start_run(pcr); 661 942 662 943 rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth, ··· 667 944 rtsx_pci_write_register(pcr, CARD_SHARE_MODE, 668 945 CARD_SHARE_MASK, CARD_SHARE_48_SD); 669 946 947 + mutex_lock(&host->host_mutex); 948 + host->mrq = mrq; 949 + mutex_unlock(&host->host_mutex); 950 + 670 951 if (mrq->data) 671 952 data_size = data->blocks * data->blksz; 672 953 673 - if (sd_use_muti_rw(cmd)) 674 - host->sg_count = sd_pre_dma_transfer(host, data, NULL); 954 + if (!data_size || mmc_op_multi(cmd->opcode) || 955 + (cmd->opcode == MMC_READ_SINGLE_BLOCK) || 956 + (cmd->opcode == MMC_WRITE_BLOCK)) { 957 + sd_send_cmd_get_rsp(host, cmd); 675 958 676 - if (!data_size || sd_use_muti_rw(cmd)) { 677 - if (mrq->sbc) 678 - sd_send_cmd(host, mrq->sbc); 679 - else 680 - sd_send_cmd(host, cmd); 681 - spin_unlock_irqrestore(&host->lock, flags); 959 + if (!cmd->error && data_size) { 960 + sd_rw_multi(host, mrq); 961 + 962 + if (mmc_op_multi(cmd->opcode) && mrq->stop) 963 + sd_send_cmd_get_rsp(host, mrq->stop); 964 + } 682 965 } else { 683 - spin_unlock_irqrestore(&host->lock, flags); 684 966 sd_normal_rw(host, mrq); 685 - 
tasklet_schedule(&host->finish_tasklet); 686 967 } 687 - return; 968 + 969 + if (mrq->data) { 970 + if (cmd->error || data->error) 971 + data->bytes_xfered = 0; 972 + else 973 + data->bytes_xfered = data->blocks * data->blksz; 974 + } 975 + 976 + mutex_unlock(&pcr->pcr_mutex); 688 977 689 978 finish: 690 - tasklet_schedule(&host->finish_tasklet); 691 - spin_unlock_irqrestore(&host->lock, flags); 979 + if (cmd->error) 980 + dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error); 981 + 982 + mutex_lock(&host->host_mutex); 983 + host->mrq = NULL; 984 + mutex_unlock(&host->host_mutex); 985 + 986 + mmc_request_done(mmc, mrq); 692 987 } 693 988 694 989 static int sd_set_bus_width(struct realtek_pci_sdmmc *host, ··· 1141 1400 } 1142 1401 1143 1402 static const struct mmc_host_ops realtek_pci_sdmmc_ops = { 1144 - .pre_req = sdmmc_pre_req, 1145 - .post_req = sdmmc_post_req, 1146 1403 .request = sdmmc_request, 1147 1404 .set_ios = sdmmc_set_ios, 1148 1405 .get_ro = sdmmc_get_ro, ··· 1204 1465 struct realtek_pci_sdmmc *host; 1205 1466 struct rtsx_pcr *pcr; 1206 1467 struct pcr_handle *handle = pdev->dev.platform_data; 1207 - unsigned long host_addr; 1208 1468 1209 1469 if (!handle) 1210 1470 return -ENXIO; ··· 1227 1489 pcr->slots[RTSX_SD_CARD].p_dev = pdev; 1228 1490 pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event; 1229 1491 1230 - host_addr = (unsigned long)host; 1231 - host->next_data.cookie = 1; 1232 - setup_timer(&host->timer, sd_request_timeout, host_addr); 1233 - tasklet_init(&host->cmd_tasklet, sd_get_rsp, host_addr); 1234 - tasklet_init(&host->data_tasklet, sd_finish_multi_rw, host_addr); 1235 - tasklet_init(&host->finish_tasklet, sd_finish_request, host_addr); 1236 - spin_lock_init(&host->lock); 1492 + mutex_init(&host->host_mutex); 1237 1493 1238 - pcr->slots[RTSX_SD_CARD].done_transfer = sd_isr_done_transfer; 1239 1494 realtek_init_host(host); 1240 1495 1241 1496 mmc_add_host(mmc); ··· 1241 1510 struct realtek_pci_sdmmc *host = 
platform_get_drvdata(pdev); 1242 1511 struct rtsx_pcr *pcr; 1243 1512 struct mmc_host *mmc; 1244 - struct mmc_request *mrq; 1245 - unsigned long flags; 1246 1513 1247 1514 if (!host) 1248 1515 return 0; ··· 1248 1519 pcr = host->pcr; 1249 1520 pcr->slots[RTSX_SD_CARD].p_dev = NULL; 1250 1521 pcr->slots[RTSX_SD_CARD].card_event = NULL; 1251 - pcr->slots[RTSX_SD_CARD].done_transfer = NULL; 1252 1522 mmc = host->mmc; 1253 - mrq = host->mrq; 1254 1523 1255 - spin_lock_irqsave(&host->lock, flags); 1524 + mutex_lock(&host->host_mutex); 1256 1525 if (host->mrq) { 1257 1526 dev_dbg(&(pdev->dev), 1258 1527 "%s: Controller removed during transfer\n", 1259 1528 mmc_hostname(mmc)); 1260 1529 1261 - if (mrq->sbc) 1262 - mrq->sbc->error = -ENOMEDIUM; 1263 - if (mrq->cmd) 1264 - mrq->cmd->error = -ENOMEDIUM; 1265 - if (mrq->stop) 1266 - mrq->stop->error = -ENOMEDIUM; 1267 - if (mrq->data) 1268 - mrq->data->error = -ENOMEDIUM; 1530 + rtsx_pci_complete_unfinished_transfer(pcr); 1269 1531 1270 - tasklet_schedule(&host->finish_tasklet); 1532 + host->mrq->cmd->error = -ENOMEDIUM; 1533 + if (host->mrq->stop) 1534 + host->mrq->stop->error = -ENOMEDIUM; 1535 + mmc_request_done(mmc, host->mrq); 1271 1536 } 1272 - spin_unlock_irqrestore(&host->lock, flags); 1273 - 1274 - del_timer_sync(&host->timer); 1275 - tasklet_kill(&host->cmd_tasklet); 1276 - tasklet_kill(&host->data_tasklet); 1277 - tasklet_kill(&host->finish_tasklet); 1537 + mutex_unlock(&host->host_mutex); 1278 1538 1279 1539 mmc_remove_host(mmc); 1280 1540 host->eject = true;
+6
drivers/mtd/nand/davinci_nand.c
··· 523 523 #if defined(CONFIG_OF) 524 524 static const struct of_device_id davinci_nand_of_match[] = { 525 525 {.compatible = "ti,davinci-nand", }, 526 + {.compatible = "ti,keystone-nand", }, 526 527 {}, 527 528 }; 528 529 MODULE_DEVICE_TABLE(of, davinci_nand_of_match); ··· 582 581 of_property_read_bool(pdev->dev.of_node, 583 582 "ti,davinci-nand-use-bbt")) 584 583 pdata->bbt_options = NAND_BBT_USE_FLASH; 584 + 585 + if (of_device_is_compatible(pdev->dev.of_node, 586 + "ti,keystone-nand")) { 587 + pdata->options |= NAND_NO_SUBPAGE_WRITE; 588 + } 585 589 } 586 590 587 591 return dev_get_platdata(&pdev->dev);
+1 -1
drivers/mtd/ubi/block.c
··· 431 431 * Create one workqueue per volume (per registered block device). 432 432 * Rembember workqueues are cheap, they're not threads. 433 433 */ 434 - dev->wq = alloc_workqueue(gd->disk_name, 0, 0); 434 + dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name); 435 435 if (!dev->wq) 436 436 goto out_free_queue; 437 437 INIT_WORK(&dev->work, ubiblock_do_work);
+6
drivers/mtd/ubi/wl.c
··· 671 671 672 672 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); 673 673 self_check_in_wl_tree(ubi, e, &ubi->free); 674 + ubi->free_count--; 675 + ubi_assert(ubi->free_count >= 0); 674 676 rb_erase(&e->u.rb, &ubi->free); 675 677 676 678 return e; ··· 685 683 spin_lock(&ubi->wl_lock); 686 684 peb = __wl_get_peb(ubi); 687 685 spin_unlock(&ubi->wl_lock); 686 + 687 + if (peb < 0) 688 + return peb; 688 689 689 690 err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset, 690 691 ubi->peb_size - ubi->vid_hdr_aloffset); ··· 1073 1068 1074 1069 /* Give the unused PEB back */ 1075 1070 wl_tree_add(e2, &ubi->free); 1071 + ubi->free_count++; 1076 1072 goto out_cancel; 1077 1073 } 1078 1074 self_check_in_wl_tree(ubi, e1, &ubi->used);
+1 -1
drivers/net/bonding/bond_sysfs.c
··· 534 534 { 535 535 struct bonding *bond = to_bond(d); 536 536 537 - return sprintf(buf, "%d\n", bond->params.min_links); 537 + return sprintf(buf, "%u\n", bond->params.min_links); 538 538 } 539 539 540 540 static ssize_t bonding_store_min_links(struct device *d,
+7
drivers/net/can/c_can/Kconfig
··· 14 14 SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com) 15 15 boards like am335x, dm814x, dm813x and dm811x. 16 16 17 + config CAN_C_CAN_STRICT_FRAME_ORDERING 18 + bool "Force a strict RX CAN frame order (may cause frame loss)" 19 + ---help--- 20 + The RX split buffer prevents packet reordering but can cause packet 21 + loss. Only enable this option when you accept to lose CAN frames 22 + in favour of getting the received CAN frames in the correct order. 23 + 17 24 config CAN_C_CAN_PCI 18 25 tristate "Generic PCI Bus based C_CAN/D_CAN driver" 19 26 depends on PCI
+309 -357
drivers/net/can/c_can/c_can.c
··· 60 60 #define CONTROL_IE BIT(1) 61 61 #define CONTROL_INIT BIT(0) 62 62 63 + #define CONTROL_IRQMSK (CONTROL_EIE | CONTROL_IE | CONTROL_SIE) 64 + 63 65 /* test register */ 64 66 #define TEST_RX BIT(7) 65 67 #define TEST_TX1 BIT(6) ··· 110 108 #define IF_COMM_CONTROL BIT(4) 111 109 #define IF_COMM_CLR_INT_PND BIT(3) 112 110 #define IF_COMM_TXRQST BIT(2) 111 + #define IF_COMM_CLR_NEWDAT IF_COMM_TXRQST 113 112 #define IF_COMM_DATAA BIT(1) 114 113 #define IF_COMM_DATAB BIT(0) 115 - #define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \ 116 - IF_COMM_CONTROL | IF_COMM_TXRQST | \ 117 - IF_COMM_DATAA | IF_COMM_DATAB) 114 + 115 + /* TX buffer setup */ 116 + #define IF_COMM_TX (IF_COMM_ARB | IF_COMM_CONTROL | \ 117 + IF_COMM_TXRQST | \ 118 + IF_COMM_DATAA | IF_COMM_DATAB) 118 119 119 120 /* For the low buffers we clear the interrupt bit, but keep newdat */ 120 121 #define IF_COMM_RCV_LOW (IF_COMM_MASK | IF_COMM_ARB | \ ··· 125 120 IF_COMM_DATAA | IF_COMM_DATAB) 126 121 127 122 /* For the high buffers we clear the interrupt bit and newdat */ 128 - #define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_TXRQST) 123 + #define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT) 124 + 125 + 126 + /* Receive setup of message objects */ 127 + #define IF_COMM_RCV_SETUP (IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL) 128 + 129 + /* Invalidation of message objects */ 130 + #define IF_COMM_INVAL (IF_COMM_ARB | IF_COMM_CONTROL) 129 131 130 132 /* IFx arbitration */ 131 - #define IF_ARB_MSGVAL BIT(15) 132 - #define IF_ARB_MSGXTD BIT(14) 133 - #define IF_ARB_TRANSMIT BIT(13) 133 + #define IF_ARB_MSGVAL BIT(31) 134 + #define IF_ARB_MSGXTD BIT(30) 135 + #define IF_ARB_TRANSMIT BIT(29) 134 136 135 137 /* IFx message control */ 136 138 #define IF_MCONT_NEWDAT BIT(15) ··· 151 139 #define IF_MCONT_EOB BIT(7) 152 140 #define IF_MCONT_DLC_MASK 0xf 153 141 142 + #define IF_MCONT_RCV (IF_MCONT_RXIE | IF_MCONT_UMASK) 143 + #define IF_MCONT_RCV_EOB (IF_MCONT_RCV | IF_MCONT_EOB) 144 + 145 + #define 
IF_MCONT_TX (IF_MCONT_TXIE | IF_MCONT_EOB) 146 + 154 147 /* 155 148 * Use IF1 for RX and IF2 for TX 156 149 */ 157 150 #define IF_RX 0 158 151 #define IF_TX 1 159 - 160 - /* status interrupt */ 161 - #define STATUS_INTERRUPT 0x8000 162 - 163 - /* global interrupt masks */ 164 - #define ENABLE_ALL_INTERRUPTS 1 165 - #define DISABLE_ALL_INTERRUPTS 0 166 152 167 153 /* minimum timeout for checking BUSY status */ 168 154 #define MIN_TIMEOUT_VALUE 6 ··· 181 171 LEC_BIT0_ERROR, 182 172 LEC_CRC_ERROR, 183 173 LEC_UNUSED, 174 + LEC_MASK = LEC_UNUSED, 184 175 }; 185 176 186 177 /* ··· 237 226 priv->raminit(priv, enable); 238 227 } 239 228 240 - static inline int get_tx_next_msg_obj(const struct c_can_priv *priv) 229 + static void c_can_irq_control(struct c_can_priv *priv, bool enable) 241 230 { 242 - return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) + 243 - C_CAN_MSG_OBJ_TX_FIRST; 244 - } 245 - 246 - static inline int get_tx_echo_msg_obj(int txecho) 247 - { 248 - return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST; 249 - } 250 - 251 - static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index) 252 - { 253 - u32 val = priv->read_reg(priv, index); 254 - val |= ((u32) priv->read_reg(priv, index + 1)) << 16; 255 - return val; 256 - } 257 - 258 - static void c_can_enable_all_interrupts(struct c_can_priv *priv, 259 - int enable) 260 - { 261 - unsigned int cntrl_save = priv->read_reg(priv, 262 - C_CAN_CTRL_REG); 231 + u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK; 263 232 264 233 if (enable) 265 - cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE); 266 - else 267 - cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE); 234 + ctrl |= CONTROL_IRQMSK; 268 235 269 - priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save); 236 + priv->write_reg(priv, C_CAN_CTRL_REG, ctrl); 270 237 } 271 238 272 - static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface) 239 + static void c_can_obj_update(struct net_device *dev, int iface, u32 
cmd, u32 obj) 273 240 { 274 - int count = MIN_TIMEOUT_VALUE; 241 + struct c_can_priv *priv = netdev_priv(dev); 242 + int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface); 275 243 276 - while (count && priv->read_reg(priv, 277 - C_CAN_IFACE(COMREQ_REG, iface)) & 278 - IF_COMR_BUSY) { 279 - count--; 244 + priv->write_reg(priv, reg + 1, cmd); 245 + priv->write_reg(priv, reg, obj); 246 + 247 + for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) { 248 + if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY)) 249 + return; 280 250 udelay(1); 281 251 } 252 + netdev_err(dev, "Updating object timed out\n"); 282 253 283 - if (!count) 284 - return 1; 285 - 286 - return 0; 287 254 } 288 255 289 - static inline void c_can_object_get(struct net_device *dev, 290 - int iface, int objno, int mask) 256 + static inline void c_can_object_get(struct net_device *dev, int iface, 257 + u32 obj, u32 cmd) 258 + { 259 + c_can_obj_update(dev, iface, cmd, obj); 260 + } 261 + 262 + static inline void c_can_object_put(struct net_device *dev, int iface, 263 + u32 obj, u32 cmd) 264 + { 265 + c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj); 266 + } 267 + 268 + /* 269 + * Note: According to documentation clearing TXIE while MSGVAL is set 270 + * is not allowed, but works nicely on C/DCAN. And that lowers the I/O 271 + * load significantly. 272 + */ 273 + static void c_can_inval_tx_object(struct net_device *dev, int iface, int obj) 291 274 { 292 275 struct c_can_priv *priv = netdev_priv(dev); 293 276 294 - /* 295 - * As per specs, after writting the message object number in the 296 - * IF command request register the transfer b/w interface 297 - * register and message RAM must be complete in 6 CAN-CLK 298 - * period. 
299 - */ 300 - priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface), 301 - IFX_WRITE_LOW_16BIT(mask)); 302 - priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface), 303 - IFX_WRITE_LOW_16BIT(objno)); 304 - 305 - if (c_can_msg_obj_is_busy(priv, iface)) 306 - netdev_err(dev, "timed out in object get\n"); 277 + priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0); 278 + c_can_object_put(dev, iface, obj, IF_COMM_INVAL); 307 279 } 308 280 309 - static inline void c_can_object_put(struct net_device *dev, 310 - int iface, int objno, int mask) 281 + static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj) 311 282 { 312 283 struct c_can_priv *priv = netdev_priv(dev); 313 284 314 - /* 315 - * As per specs, after writting the message object number in the 316 - * IF command request register the transfer b/w interface 317 - * register and message RAM must be complete in 6 CAN-CLK 318 - * period. 319 - */ 320 - priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface), 321 - (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask))); 322 - priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface), 323 - IFX_WRITE_LOW_16BIT(objno)); 324 - 325 - if (c_can_msg_obj_is_busy(priv, iface)) 326 - netdev_err(dev, "timed out in object put\n"); 285 + priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0); 286 + priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0); 287 + c_can_inval_tx_object(dev, iface, obj); 327 288 } 328 289 329 - static void c_can_write_msg_object(struct net_device *dev, 330 - int iface, struct can_frame *frame, int objno) 290 + static void c_can_setup_tx_object(struct net_device *dev, int iface, 291 + struct can_frame *frame, int idx) 331 292 { 293 + struct c_can_priv *priv = netdev_priv(dev); 294 + u16 ctrl = IF_MCONT_TX | frame->can_dlc; 295 + bool rtr = frame->can_id & CAN_RTR_FLAG; 296 + u32 arb = IF_ARB_MSGVAL; 332 297 int i; 333 - u16 flags = 0; 334 - unsigned int id; 335 - struct c_can_priv *priv = netdev_priv(dev); 336 - 337 - if (!(frame->can_id & 
CAN_RTR_FLAG)) 338 - flags |= IF_ARB_TRANSMIT; 339 298 340 299 if (frame->can_id & CAN_EFF_FLAG) { 341 - id = frame->can_id & CAN_EFF_MASK; 342 - flags |= IF_ARB_MSGXTD; 343 - } else 344 - id = ((frame->can_id & CAN_SFF_MASK) << 18); 300 + arb |= frame->can_id & CAN_EFF_MASK; 301 + arb |= IF_ARB_MSGXTD; 302 + } else { 303 + arb |= (frame->can_id & CAN_SFF_MASK) << 18; 304 + } 345 305 346 - flags |= IF_ARB_MSGVAL; 306 + if (!rtr) 307 + arb |= IF_ARB_TRANSMIT; 347 308 348 - priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 349 - IFX_WRITE_LOW_16BIT(id)); 350 - priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags | 351 - IFX_WRITE_HIGH_16BIT(id)); 309 + /* 310 + * If we change the DIR bit, we need to invalidate the buffer 311 + * first, i.e. clear the MSGVAL flag in the arbiter. 312 + */ 313 + if (rtr != (bool)test_bit(idx, &priv->tx_dir)) { 314 + u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST; 315 + 316 + c_can_inval_msg_object(dev, iface, obj); 317 + change_bit(idx, &priv->tx_dir); 318 + } 319 + 320 + priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb); 321 + priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16); 322 + 323 + priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl); 352 324 353 325 for (i = 0; i < frame->can_dlc; i += 2) { 354 326 priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2, 355 327 frame->data[i] | (frame->data[i + 1] << 8)); 356 328 } 357 - 358 - /* enable interrupt for this message object */ 359 - priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 360 - IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB | 361 - frame->can_dlc); 362 - c_can_object_put(dev, iface, objno, IF_COMM_ALL); 363 329 } 364 330 365 331 static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev, 366 - int iface, 367 - int ctrl_mask) 332 + int iface) 368 333 { 369 334 int i; 370 - struct c_can_priv *priv = netdev_priv(dev); 371 335 372 - for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) { 373 - 
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 374 - ctrl_mask & ~IF_MCONT_NEWDAT); 375 - c_can_object_put(dev, iface, i, IF_COMM_CONTROL); 376 - } 336 + for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) 337 + c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT); 377 338 } 378 339 379 340 static int c_can_handle_lost_msg_obj(struct net_device *dev, ··· 360 377 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl); 361 378 c_can_object_put(dev, iface, objno, IF_COMM_CONTROL); 362 379 380 + stats->rx_errors++; 381 + stats->rx_over_errors++; 382 + 363 383 /* create an error msg */ 364 384 skb = alloc_can_err_skb(dev, &frame); 365 385 if (unlikely(!skb)) ··· 370 384 371 385 frame->can_id |= CAN_ERR_CRTL; 372 386 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 373 - stats->rx_errors++; 374 - stats->rx_over_errors++; 375 387 376 388 netif_receive_skb(skb); 377 389 return 1; 378 390 } 379 391 380 - static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl) 392 + static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl) 381 393 { 382 - u16 flags, data; 383 - int i; 384 - unsigned int val; 385 - struct c_can_priv *priv = netdev_priv(dev); 386 394 struct net_device_stats *stats = &dev->stats; 387 - struct sk_buff *skb; 395 + struct c_can_priv *priv = netdev_priv(dev); 388 396 struct can_frame *frame; 397 + struct sk_buff *skb; 398 + u32 arb, data; 389 399 390 400 skb = alloc_can_skb(dev, &frame); 391 401 if (!skb) { ··· 391 409 392 410 frame->can_dlc = get_can_dlc(ctrl & 0x0F); 393 411 394 - flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)); 395 - val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) | 396 - (flags << 16); 412 + arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)); 413 + arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16; 397 414 398 - if (flags & IF_ARB_MSGXTD) 399 - frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG; 415 + if (arb & IF_ARB_MSGXTD) 416 + 
frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG; 400 417 else 401 - frame->can_id = (val >> 18) & CAN_SFF_MASK; 418 + frame->can_id = (arb >> 18) & CAN_SFF_MASK; 402 419 403 - if (flags & IF_ARB_TRANSMIT) 420 + if (arb & IF_ARB_TRANSMIT) { 404 421 frame->can_id |= CAN_RTR_FLAG; 405 - else { 406 - for (i = 0; i < frame->can_dlc; i += 2) { 407 - data = priv->read_reg(priv, 408 - C_CAN_IFACE(DATA1_REG, iface) + i / 2); 422 + } else { 423 + int i, dreg = C_CAN_IFACE(DATA1_REG, iface); 424 + 425 + for (i = 0; i < frame->can_dlc; i += 2, dreg ++) { 426 + data = priv->read_reg(priv, dreg); 409 427 frame->data[i] = data; 410 428 frame->data[i + 1] = data >> 8; 411 429 } 412 430 } 413 431 414 - netif_receive_skb(skb); 415 - 416 432 stats->rx_packets++; 417 433 stats->rx_bytes += frame->can_dlc; 434 + 435 + netif_receive_skb(skb); 418 436 return 0; 419 437 } 420 438 421 439 static void c_can_setup_receive_object(struct net_device *dev, int iface, 422 - int objno, unsigned int mask, 423 - unsigned int id, unsigned int mcont) 440 + u32 obj, u32 mask, u32 id, u32 mcont) 424 441 { 425 442 struct c_can_priv *priv = netdev_priv(dev); 426 443 427 - priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), 428 - IFX_WRITE_LOW_16BIT(mask)); 444 + mask |= BIT(29); 445 + priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask); 446 + priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16); 429 447 430 - /* According to C_CAN documentation, the reserved bit 431 - * in IFx_MASK2 register is fixed 1 432 - */ 433 - priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), 434 - IFX_WRITE_HIGH_16BIT(mask) | BIT(13)); 435 - 436 - priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 437 - IFX_WRITE_LOW_16BIT(id)); 438 - priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 439 - (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id))); 448 + id |= IF_ARB_MSGVAL; 449 + priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id); 450 + priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16); 440 451 441 
452 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont); 442 - c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST); 443 - 444 - netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno, 445 - c_can_read_reg32(priv, C_CAN_MSGVAL1_REG)); 446 - } 447 - 448 - static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno) 449 - { 450 - struct c_can_priv *priv = netdev_priv(dev); 451 - 452 - priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0); 453 - priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0); 454 - priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0); 455 - 456 - c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL); 457 - 458 - netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno, 459 - c_can_read_reg32(priv, C_CAN_MSGVAL1_REG)); 460 - } 461 - 462 - static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno) 463 - { 464 - int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG); 465 - 466 - /* 467 - * as transmission request register's bit n-1 corresponds to 468 - * message object n, we need to handle the same properly. 
469 - */ 470 - if (val & (1 << (objno - 1))) 471 - return 1; 472 - 473 - return 0; 453 + c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP); 474 454 } 475 455 476 456 static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, 477 - struct net_device *dev) 457 + struct net_device *dev) 478 458 { 479 - u32 msg_obj_no; 480 - struct c_can_priv *priv = netdev_priv(dev); 481 459 struct can_frame *frame = (struct can_frame *)skb->data; 460 + struct c_can_priv *priv = netdev_priv(dev); 461 + u32 idx, obj; 482 462 483 463 if (can_dropped_invalid_skb(dev, skb)) 484 464 return NETDEV_TX_OK; 485 - 486 - spin_lock_bh(&priv->xmit_lock); 487 - msg_obj_no = get_tx_next_msg_obj(priv); 488 - 489 - /* prepare message object for transmission */ 490 - c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no); 491 - priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc; 492 - can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); 493 - 494 465 /* 495 - * we have to stop the queue in case of a wrap around or 496 - * if the next TX message object is still in use 466 + * This is not a FIFO. C/D_CAN sends out the buffers 467 + * prioritized. The lowest buffer number wins. 497 468 */ 498 - priv->tx_next++; 499 - if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) || 500 - (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0) 469 + idx = fls(atomic_read(&priv->tx_active)); 470 + obj = idx + C_CAN_MSG_OBJ_TX_FIRST; 471 + 472 + /* If this is the last buffer, stop the xmit queue */ 473 + if (idx == C_CAN_MSG_OBJ_TX_NUM - 1) 501 474 netif_stop_queue(dev); 502 - spin_unlock_bh(&priv->xmit_lock); 475 + /* 476 + * Store the message in the interface so we can call 477 + * can_put_echo_skb(). We must do this before we enable 478 + * transmit as we might race against do_tx(). 
479 + */ 480 + c_can_setup_tx_object(dev, IF_TX, frame, idx); 481 + priv->dlc[idx] = frame->can_dlc; 482 + can_put_echo_skb(skb, dev, idx); 483 + 484 + /* Update the active bits */ 485 + atomic_add((1 << idx), &priv->tx_active); 486 + /* Start transmission */ 487 + c_can_object_put(dev, IF_TX, obj, IF_COMM_TX); 503 488 504 489 return NETDEV_TX_OK; 505 490 } ··· 543 594 544 595 /* setup receive message objects */ 545 596 for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++) 546 - c_can_setup_receive_object(dev, IF_RX, i, 0, 0, 547 - (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB); 597 + c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV); 548 598 549 599 c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0, 550 - IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK); 600 + IF_MCONT_RCV_EOB); 551 601 } 552 602 553 603 /* ··· 560 612 struct c_can_priv *priv = netdev_priv(dev); 561 613 562 614 /* enable automatic retransmission */ 563 - priv->write_reg(priv, C_CAN_CTRL_REG, 564 - CONTROL_ENABLE_AR); 615 + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR); 565 616 566 617 if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) && 567 618 (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) { 568 619 /* loopback + silent mode : useful for hot self-test */ 569 - priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE | 570 - CONTROL_SIE | CONTROL_IE | CONTROL_TEST); 571 - priv->write_reg(priv, C_CAN_TEST_REG, 572 - TEST_LBACK | TEST_SILENT); 620 + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST); 621 + priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT); 573 622 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { 574 623 /* loopback mode : useful for self-test function */ 575 - priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE | 576 - CONTROL_SIE | CONTROL_IE | CONTROL_TEST); 624 + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST); 577 625 priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK); 578 626 } else if (priv->can.ctrlmode & 
CAN_CTRLMODE_LISTENONLY) { 579 627 /* silent mode : bus-monitoring mode */ 580 - priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE | 581 - CONTROL_SIE | CONTROL_IE | CONTROL_TEST); 628 + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST); 582 629 priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT); 583 - } else 584 - /* normal mode*/ 585 - priv->write_reg(priv, C_CAN_CTRL_REG, 586 - CONTROL_EIE | CONTROL_SIE | CONTROL_IE); 630 + } 587 631 588 632 /* configure message objects */ 589 633 c_can_configure_msg_objects(dev); 590 634 591 635 /* set a `lec` value so that we can check for updates later */ 592 636 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); 637 + 638 + /* Clear all internal status */ 639 + atomic_set(&priv->tx_active, 0); 640 + priv->rxmasked = 0; 641 + priv->tx_dir = 0; 593 642 594 643 /* set bittiming params */ 595 644 return c_can_set_bittiming(dev); ··· 602 657 if (err) 603 658 return err; 604 659 660 + /* Setup the command for new messages */ 661 + priv->comm_rcv_high = priv->type != BOSCH_D_CAN ? 
662 + IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH; 663 + 605 664 priv->can.state = CAN_STATE_ERROR_ACTIVE; 606 - 607 - /* reset tx helper pointers */ 608 - priv->tx_next = priv->tx_echo = 0; 609 - 610 - /* enable status change, error and module interrupts */ 611 - c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS); 612 665 613 666 return 0; 614 667 } ··· 615 672 { 616 673 struct c_can_priv *priv = netdev_priv(dev); 617 674 618 - /* disable all interrupts */ 619 - c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); 620 - 621 - /* set the state as STOPPED */ 675 + c_can_irq_control(priv, false); 622 676 priv->can.state = CAN_STATE_STOPPED; 623 677 } 624 678 625 679 static int c_can_set_mode(struct net_device *dev, enum can_mode mode) 626 680 { 681 + struct c_can_priv *priv = netdev_priv(dev); 627 682 int err; 628 683 629 684 switch (mode) { ··· 630 689 if (err) 631 690 return err; 632 691 netif_wake_queue(dev); 692 + c_can_irq_control(priv, true); 633 693 break; 634 694 default: 635 695 return -EOPNOTSUPP; ··· 666 724 return err; 667 725 } 668 726 669 - /* 670 - * priv->tx_echo holds the number of the oldest can_frame put for 671 - * transmission into the hardware, but not yet ACKed by the CAN tx 672 - * complete IRQ. 673 - * 674 - * We iterate from priv->tx_echo to priv->tx_next and check if the 675 - * packet has been transmitted, echo it back to the CAN framework. 676 - * If we discover a not yet transmitted packet, stop looking for more. 
677 - */ 678 727 static void c_can_do_tx(struct net_device *dev) 679 728 { 680 729 struct c_can_priv *priv = netdev_priv(dev); 681 730 struct net_device_stats *stats = &dev->stats; 682 - u32 val, obj, pkts = 0, bytes = 0; 731 + u32 idx, obj, pkts = 0, bytes = 0, pend, clr; 683 732 684 - spin_lock_bh(&priv->xmit_lock); 733 + clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG); 685 734 686 - for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { 687 - obj = get_tx_echo_msg_obj(priv->tx_echo); 688 - val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG); 689 - 690 - if (val & (1 << (obj - 1))) 691 - break; 692 - 693 - can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST); 694 - bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST]; 735 + while ((idx = ffs(pend))) { 736 + idx--; 737 + pend &= ~(1 << idx); 738 + obj = idx + C_CAN_MSG_OBJ_TX_FIRST; 739 + c_can_inval_tx_object(dev, IF_RX, obj); 740 + can_get_echo_skb(dev, idx); 741 + bytes += priv->dlc[idx]; 695 742 pkts++; 696 - c_can_inval_msg_object(dev, IF_TX, obj); 697 743 } 698 744 699 - /* restart queue if wrap-up or if queue stalled on last pkt */ 700 - if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) || 701 - ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0)) 702 - netif_wake_queue(dev); 745 + /* Clear the bits in the tx_active mask */ 746 + atomic_sub(clr, &priv->tx_active); 703 747 704 - spin_unlock_bh(&priv->xmit_lock); 748 + if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1))) 749 + netif_wake_queue(dev); 705 750 706 751 if (pkts) { 707 752 stats->tx_bytes += bytes; ··· 729 800 return pend & ~((1 << lasts) - 1); 730 801 } 731 802 803 + static inline void c_can_rx_object_get(struct net_device *dev, 804 + struct c_can_priv *priv, u32 obj) 805 + { 806 + #ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING 807 + if (obj < C_CAN_MSG_RX_LOW_LAST) 808 + c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW); 809 + else 810 + #endif 811 + c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); 812 + } 813 + 814 + static inline void 
c_can_rx_finalize(struct net_device *dev, 815 + struct c_can_priv *priv, u32 obj) 816 + { 817 + #ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING 818 + if (obj < C_CAN_MSG_RX_LOW_LAST) 819 + priv->rxmasked |= BIT(obj - 1); 820 + else if (obj == C_CAN_MSG_RX_LOW_LAST) { 821 + priv->rxmasked = 0; 822 + /* activate all lower message objects */ 823 + c_can_activate_all_lower_rx_msg_obj(dev, IF_RX); 824 + } 825 + #endif 826 + if (priv->type != BOSCH_D_CAN) 827 + c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT); 828 + } 829 + 732 830 static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, 733 831 u32 pend, int quota) 734 832 { 735 - u32 pkts = 0, ctrl, obj, mcmd; 833 + u32 pkts = 0, ctrl, obj; 736 834 737 835 while ((obj = ffs(pend)) && quota > 0) { 738 836 pend &= ~BIT(obj - 1); 739 837 740 - mcmd = obj < C_CAN_MSG_RX_LOW_LAST ? 741 - IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH; 742 - 743 - c_can_object_get(dev, IF_RX, obj, mcmd); 838 + c_can_rx_object_get(dev, priv, obj); 744 839 ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX)); 745 840 746 841 if (ctrl & IF_MCONT_MSGLST) { ··· 786 833 /* read the data from the message object */ 787 834 c_can_read_msg_object(dev, IF_RX, ctrl); 788 835 789 - if (obj == C_CAN_MSG_RX_LOW_LAST) 790 - /* activate all lower message objects */ 791 - c_can_activate_all_lower_rx_msg_obj(dev, IF_RX, ctrl); 836 + c_can_rx_finalize(dev, priv, obj); 792 837 793 838 pkts++; 794 839 quota--; 795 840 } 796 841 797 842 return pkts; 843 + } 844 + 845 + static inline u32 c_can_get_pending(struct c_can_priv *priv) 846 + { 847 + u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); 848 + 849 + #ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING 850 + pend &= ~priv->rxmasked; 851 + #endif 852 + return pend; 798 853 } 799 854 800 855 /* ··· 813 852 * INTPND are set for this message object indicating that a new message 814 853 * has arrived. 
To work-around this issue, we keep two groups of message 815 854 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. 855 + * 856 + * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y 816 857 * 817 858 * To ensure in-order frame reception we use the following 818 859 * approach while re-activating a message object to receive further ··· 828 865 * - if the current message object number is greater than 829 866 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of 830 867 * only this message object. 868 + * 869 + * This can cause packet loss! 870 + * 871 + * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n 872 + * 873 + * We clear the newdat bit right away. 874 + * 875 + * This can result in packet reordering when the readout is slow. 831 876 */ 832 877 static int c_can_do_rx_poll(struct net_device *dev, int quota) 833 878 { ··· 851 880 852 881 while (quota > 0) { 853 882 if (!pend) { 854 - pend = priv->read_reg(priv, C_CAN_INTPND1_REG); 883 + pend = c_can_get_pending(priv); 855 884 if (!pend) 856 885 break; 857 886 /* ··· 876 905 return pkts; 877 906 } 878 907 879 - static inline int c_can_has_and_handle_berr(struct c_can_priv *priv) 880 - { 881 - return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && 882 - (priv->current_status & LEC_UNUSED); 883 - } 884 - 885 908 static int c_can_handle_state_change(struct net_device *dev, 886 909 enum c_can_bus_error_types error_type) 887 910 { ··· 886 921 struct can_frame *cf; 887 922 struct sk_buff *skb; 888 923 struct can_berr_counter bec; 924 + 925 + switch (error_type) { 926 + case C_CAN_ERROR_WARNING: 927 + /* error warning state */ 928 + priv->can.can_stats.error_warning++; 929 + priv->can.state = CAN_STATE_ERROR_WARNING; 930 + break; 931 + case C_CAN_ERROR_PASSIVE: 932 + /* error passive state */ 933 + priv->can.can_stats.error_passive++; 934 + priv->can.state = CAN_STATE_ERROR_PASSIVE; 935 + break; 936 + case C_CAN_BUS_OFF: 937 + /* bus-off state */ 938 + priv->can.state = CAN_STATE_BUS_OFF; 939 + 
can_bus_off(dev); 940 + break; 941 + default: 942 + break; 943 + } 889 944 890 945 /* propagate the error condition to the CAN stack */ 891 946 skb = alloc_can_err_skb(dev, &cf); ··· 920 935 switch (error_type) { 921 936 case C_CAN_ERROR_WARNING: 922 937 /* error warning state */ 923 - priv->can.can_stats.error_warning++; 924 - priv->can.state = CAN_STATE_ERROR_WARNING; 925 938 cf->can_id |= CAN_ERR_CRTL; 926 939 cf->data[1] = (bec.txerr > bec.rxerr) ? 927 940 CAN_ERR_CRTL_TX_WARNING : ··· 930 947 break; 931 948 case C_CAN_ERROR_PASSIVE: 932 949 /* error passive state */ 933 - priv->can.can_stats.error_passive++; 934 - priv->can.state = CAN_STATE_ERROR_PASSIVE; 935 950 cf->can_id |= CAN_ERR_CRTL; 936 951 if (rx_err_passive) 937 952 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; ··· 941 960 break; 942 961 case C_CAN_BUS_OFF: 943 962 /* bus-off state */ 944 - priv->can.state = CAN_STATE_BUS_OFF; 945 963 cf->can_id |= CAN_ERR_BUSOFF; 946 - /* 947 - * disable all interrupts in bus-off mode to ensure that 948 - * the CPU is not hogged down 949 - */ 950 - c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); 951 964 can_bus_off(dev); 952 965 break; 953 966 default: 954 967 break; 955 968 } 956 969 957 - netif_receive_skb(skb); 958 970 stats->rx_packets++; 959 971 stats->rx_bytes += cf->can_dlc; 972 + netif_receive_skb(skb); 960 973 961 974 return 1; 962 975 } ··· 971 996 if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR) 972 997 return 0; 973 998 999 + if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) 1000 + return 0; 1001 + 1002 + /* common for all type of bus errors */ 1003 + priv->can.can_stats.bus_error++; 1004 + stats->rx_errors++; 1005 + 974 1006 /* propagate the error condition to the CAN stack */ 975 1007 skb = alloc_can_err_skb(dev, &cf); 976 1008 if (unlikely(!skb)) ··· 987 1005 * check for 'last error code' which tells us the 988 1006 * type of the last error to occur on the CAN bus 989 1007 */ 990 - 991 - /* common for all type of bus errors */ 992 
- priv->can.can_stats.bus_error++; 993 - stats->rx_errors++; 994 1008 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 995 1009 cf->data[2] |= CAN_ERR_PROT_UNSPEC; 996 1010 ··· 1021 1043 break; 1022 1044 } 1023 1045 1024 - /* set a `lec` value so that we can check for updates later */ 1025 - priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); 1026 - 1027 - netif_receive_skb(skb); 1028 1046 stats->rx_packets++; 1029 1047 stats->rx_bytes += cf->can_dlc; 1030 - 1048 + netif_receive_skb(skb); 1031 1049 return 1; 1032 1050 } 1033 1051 1034 1052 static int c_can_poll(struct napi_struct *napi, int quota) 1035 1053 { 1036 - u16 irqstatus; 1037 - int lec_type = 0; 1038 - int work_done = 0; 1039 1054 struct net_device *dev = napi->dev; 1040 1055 struct c_can_priv *priv = netdev_priv(dev); 1056 + u16 curr, last = priv->last_status; 1057 + int work_done = 0; 1041 1058 1042 - irqstatus = priv->irqstatus; 1043 - if (!irqstatus) 1044 - goto end; 1059 + priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); 1060 + /* Ack status on C_CAN. 
D_CAN is self clearing */ 1061 + if (priv->type != BOSCH_D_CAN) 1062 + priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); 1045 1063 1046 - /* status events have the highest priority */ 1047 - if (irqstatus == STATUS_INTERRUPT) { 1048 - priv->current_status = priv->read_reg(priv, 1049 - C_CAN_STS_REG); 1050 - 1051 - /* handle Tx/Rx events */ 1052 - if (priv->current_status & STATUS_TXOK) 1053 - priv->write_reg(priv, C_CAN_STS_REG, 1054 - priv->current_status & ~STATUS_TXOK); 1055 - 1056 - if (priv->current_status & STATUS_RXOK) 1057 - priv->write_reg(priv, C_CAN_STS_REG, 1058 - priv->current_status & ~STATUS_RXOK); 1059 - 1060 - /* handle state changes */ 1061 - if ((priv->current_status & STATUS_EWARN) && 1062 - (!(priv->last_status & STATUS_EWARN))) { 1063 - netdev_dbg(dev, "entered error warning state\n"); 1064 - work_done += c_can_handle_state_change(dev, 1065 - C_CAN_ERROR_WARNING); 1066 - } 1067 - if ((priv->current_status & STATUS_EPASS) && 1068 - (!(priv->last_status & STATUS_EPASS))) { 1069 - netdev_dbg(dev, "entered error passive state\n"); 1070 - work_done += c_can_handle_state_change(dev, 1071 - C_CAN_ERROR_PASSIVE); 1072 - } 1073 - if ((priv->current_status & STATUS_BOFF) && 1074 - (!(priv->last_status & STATUS_BOFF))) { 1075 - netdev_dbg(dev, "entered bus off state\n"); 1076 - work_done += c_can_handle_state_change(dev, 1077 - C_CAN_BUS_OFF); 1078 - } 1079 - 1080 - /* handle bus recovery events */ 1081 - if ((!(priv->current_status & STATUS_BOFF)) && 1082 - (priv->last_status & STATUS_BOFF)) { 1083 - netdev_dbg(dev, "left bus off state\n"); 1084 - priv->can.state = CAN_STATE_ERROR_ACTIVE; 1085 - } 1086 - if ((!(priv->current_status & STATUS_EPASS)) && 1087 - (priv->last_status & STATUS_EPASS)) { 1088 - netdev_dbg(dev, "left error passive state\n"); 1089 - priv->can.state = CAN_STATE_ERROR_ACTIVE; 1090 - } 1091 - 1092 - priv->last_status = priv->current_status; 1093 - 1094 - /* handle lec errors on the bus */ 1095 - lec_type = 
c_can_has_and_handle_berr(priv); 1096 - if (lec_type) 1097 - work_done += c_can_handle_bus_err(dev, lec_type); 1098 - } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) && 1099 - (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) { 1100 - /* handle events corresponding to receive message objects */ 1101 - work_done += c_can_do_rx_poll(dev, (quota - work_done)); 1102 - } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) && 1103 - (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) { 1104 - /* handle events corresponding to transmit message objects */ 1105 - c_can_do_tx(dev); 1064 + /* handle state changes */ 1065 + if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) { 1066 + netdev_dbg(dev, "entered error warning state\n"); 1067 + work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING); 1106 1068 } 1069 + 1070 + if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) { 1071 + netdev_dbg(dev, "entered error passive state\n"); 1072 + work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE); 1073 + } 1074 + 1075 + if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) { 1076 + netdev_dbg(dev, "entered bus off state\n"); 1077 + work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF); 1078 + goto end; 1079 + } 1080 + 1081 + /* handle bus recovery events */ 1082 + if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) { 1083 + netdev_dbg(dev, "left bus off state\n"); 1084 + priv->can.state = CAN_STATE_ERROR_ACTIVE; 1085 + } 1086 + if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) { 1087 + netdev_dbg(dev, "left error passive state\n"); 1088 + priv->can.state = CAN_STATE_ERROR_ACTIVE; 1089 + } 1090 + 1091 + /* handle lec errors on the bus */ 1092 + work_done += c_can_handle_bus_err(dev, curr & LEC_MASK); 1093 + 1094 + /* Handle Tx/Rx events. 
We do this unconditionally */ 1095 + work_done += c_can_do_rx_poll(dev, (quota - work_done)); 1096 + c_can_do_tx(dev); 1107 1097 1108 1098 end: 1109 1099 if (work_done < quota) { 1110 1100 napi_complete(napi); 1111 - /* enable all IRQs */ 1112 - c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS); 1101 + /* enable all IRQs if we are not in bus off state */ 1102 + if (priv->can.state != CAN_STATE_BUS_OFF) 1103 + c_can_irq_control(priv, true); 1113 1104 } 1114 1105 1115 1106 return work_done; ··· 1089 1142 struct net_device *dev = (struct net_device *)dev_id; 1090 1143 struct c_can_priv *priv = netdev_priv(dev); 1091 1144 1092 - priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG); 1093 - if (!priv->irqstatus) 1145 + if (!priv->read_reg(priv, C_CAN_INT_REG)) 1094 1146 return IRQ_NONE; 1095 1147 1096 1148 /* disable all interrupts and schedule the NAPI */ 1097 - c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); 1149 + c_can_irq_control(priv, false); 1098 1150 napi_schedule(&priv->napi); 1099 1151 1100 1152 return IRQ_HANDLED; ··· 1130 1184 can_led_event(dev, CAN_LED_EVENT_OPEN); 1131 1185 1132 1186 napi_enable(&priv->napi); 1187 + /* enable status change, error and module interrupts */ 1188 + c_can_irq_control(priv, true); 1133 1189 netif_start_queue(dev); 1134 1190 1135 1191 return 0; ··· 1174 1226 return NULL; 1175 1227 1176 1228 priv = netdev_priv(dev); 1177 - spin_lock_init(&priv->xmit_lock); 1178 1229 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT); 1179 1230 1180 1231 priv->dev = dev; ··· 1228 1281 u32 val; 1229 1282 unsigned long time_out; 1230 1283 struct c_can_priv *priv = netdev_priv(dev); 1284 + int ret; 1231 1285 1232 1286 if (!(dev->flags & IFF_UP)) 1233 1287 return 0; ··· 1255 1307 if (time_after(jiffies, time_out)) 1256 1308 return -ETIMEDOUT; 1257 1309 1258 - return c_can_start(dev); 1310 + ret = c_can_start(dev); 1311 + if (!ret) 1312 + c_can_irq_control(priv, true); 1313 + 1314 + return ret; 1259 1315 } 1260 1316 
EXPORT_SYMBOL_GPL(c_can_power_up); 1261 1317 #endif
+5 -18
drivers/net/can/c_can/c_can.h
··· 22 22 #ifndef C_CAN_H 23 23 #define C_CAN_H 24 24 25 - /* 26 - * IFx register masks: 27 - * allow easy operation on 16-bit registers when the 28 - * argument is 32-bit instead 29 - */ 30 - #define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF) 31 - #define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16) 32 - 33 25 /* message object split */ 34 26 #define C_CAN_NO_OF_OBJECTS 32 35 27 #define C_CAN_MSG_OBJ_RX_NUM 16 ··· 37 45 38 46 #define C_CAN_MSG_OBJ_RX_SPLIT 9 39 47 #define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1) 40 - 41 - #define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1) 42 48 #define RECEIVE_OBJECT_BITS 0x0000ffff 43 49 44 50 enum reg { ··· 173 183 struct napi_struct napi; 174 184 struct net_device *dev; 175 185 struct device *device; 176 - spinlock_t xmit_lock; 177 - int tx_object; 178 - int current_status; 186 + atomic_t tx_active; 187 + unsigned long tx_dir; 179 188 int last_status; 180 189 u16 (*read_reg) (struct c_can_priv *priv, enum reg index); 181 190 void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val); 182 191 void __iomem *base; 183 192 const u16 *regs; 184 - unsigned long irq_flags; /* for request_irq() */ 185 - unsigned int tx_next; 186 - unsigned int tx_echo; 187 193 void *priv; /* for board-specific data */ 188 - u16 irqstatus; 189 194 enum c_can_dev_id type; 190 195 u32 __iomem *raminit_ctrlreg; 191 - unsigned int instance; 196 + int instance; 192 197 void (*raminit) (const struct c_can_priv *priv, bool enable); 198 + u32 comm_rcv_high; 199 + u32 rxmasked; 193 200 u32 dlc[C_CAN_MSG_OBJ_TX_NUM]; 194 201 }; 195 202
+7 -2
drivers/net/can/c_can/c_can_pci.c
··· 84 84 goto out_disable_device; 85 85 } 86 86 87 - pci_set_master(pdev); 88 - pci_enable_msi(pdev); 87 + ret = pci_enable_msi(pdev); 88 + if (!ret) { 89 + dev_info(&pdev->dev, "MSI enabled\n"); 90 + pci_set_master(pdev); 91 + } 89 92 90 93 addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); 91 94 if (!addr) { ··· 134 131 ret = -EINVAL; 135 132 goto out_free_c_can; 136 133 } 134 + 135 + priv->type = c_can_pci_data->type; 137 136 138 137 /* Configure access to registers */ 139 138 switch (c_can_pci_data->reg_align) {
+1 -1
drivers/net/can/c_can/c_can_platform.c
··· 222 222 223 223 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 224 224 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res); 225 - if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0) 225 + if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0) 226 226 dev_info(&pdev->dev, "control memory is not used for raminit\n"); 227 227 else 228 228 priv->raminit = c_can_hw_raminit;
+1 -1
drivers/net/can/dev.c
··· 256 256 257 257 /* Check if the CAN device has bit-timing parameters */ 258 258 if (!btc) 259 - return -ENOTSUPP; 259 + return -EOPNOTSUPP; 260 260 261 261 /* 262 262 * Depending on the given can_bittiming parameter structure the CAN
+13 -3
drivers/net/can/sja1000/sja1000_isa.c
··· 46 46 static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; 47 47 static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; 48 48 static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; 49 + static spinlock_t indirect_lock[MAXDEV]; /* lock for indirect access mode */ 49 50 50 51 module_param_array(port, ulong, NULL, S_IRUGO); 51 52 MODULE_PARM_DESC(port, "I/O port number"); ··· 102 101 static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv, 103 102 int reg) 104 103 { 105 - unsigned long base = (unsigned long)priv->reg_base; 104 + unsigned long flags, base = (unsigned long)priv->reg_base; 105 + u8 readval; 106 106 107 + spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags); 107 108 outb(reg, base); 108 - return inb(base + 1); 109 + readval = inb(base + 1); 110 + spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags); 111 + 112 + return readval; 109 113 } 110 114 111 115 static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv, 112 116 int reg, u8 val) 113 117 { 114 - unsigned long base = (unsigned long)priv->reg_base; 118 + unsigned long flags, base = (unsigned long)priv->reg_base; 115 119 120 + spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags); 116 121 outb(reg, base); 117 122 outb(val, base + 1); 123 + spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags); 118 124 } 119 125 120 126 static int sja1000_isa_probe(struct platform_device *pdev) ··· 177 169 if (iosize == SJA1000_IOSIZE_INDIRECT) { 178 170 priv->read_reg = sja1000_isa_port_read_reg_indirect; 179 171 priv->write_reg = sja1000_isa_port_write_reg_indirect; 172 + spin_lock_init(&indirect_lock[idx]); 180 173 } else { 181 174 priv->read_reg = sja1000_isa_port_read_reg; 182 175 priv->write_reg = sja1000_isa_port_write_reg; ··· 207 198 208 199 platform_set_drvdata(pdev, dev); 209 200 SET_NETDEV_DEV(dev, &pdev->dev); 201 + dev->dev_id = idx; 210 202 211 203 err = register_sja1000dev(dev); 212 204 if (err) {
+3 -3
drivers/net/can/slcan.c
··· 322 322 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) 323 323 return; 324 324 325 - spin_lock(&sl->lock); 325 + spin_lock_bh(&sl->lock); 326 326 if (sl->xleft <= 0) { 327 327 /* Now serial buffer is almost free & we can start 328 328 * transmission of another packet */ 329 329 sl->dev->stats.tx_packets++; 330 330 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 331 - spin_unlock(&sl->lock); 331 + spin_unlock_bh(&sl->lock); 332 332 netif_wake_queue(sl->dev); 333 333 return; 334 334 } ··· 336 336 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 337 337 sl->xleft -= actual; 338 338 sl->xhead += actual; 339 - spin_unlock(&sl->lock); 339 + spin_unlock_bh(&sl->lock); 340 340 } 341 341 342 342 /* Send a can_frame to a TTY queue. */
+1
drivers/net/ethernet/altera/Kconfig
··· 1 1 config ALTERA_TSE 2 2 tristate "Altera Triple-Speed Ethernet MAC support" 3 + depends on HAS_DMA 3 4 select PHYLIB 4 5 ---help--- 5 6 This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
+6 -2
drivers/net/ethernet/altera/altera_msgdma.c
··· 18 18 #include "altera_utils.h" 19 19 #include "altera_tse.h" 20 20 #include "altera_msgdmahw.h" 21 + #include "altera_msgdma.h" 21 22 22 23 /* No initialization work to do for MSGDMA */ 23 24 int msgdma_initialize(struct altera_tse_private *priv) ··· 27 26 } 28 27 29 28 void msgdma_uninitialize(struct altera_tse_private *priv) 29 + { 30 + } 31 + 32 + void msgdma_start_rxdma(struct altera_tse_private *priv) 30 33 { 31 34 } 32 35 ··· 159 154 160 155 /* Put buffer to the mSGDMA RX FIFO 161 156 */ 162 - int msgdma_add_rx_desc(struct altera_tse_private *priv, 157 + void msgdma_add_rx_desc(struct altera_tse_private *priv, 163 158 struct tse_buffer *rxbuffer) 164 159 { 165 160 struct msgdma_extended_desc *desc = priv->rx_dma_desc; ··· 180 175 iowrite32(0, &desc->burst_seq_num); 181 176 iowrite32(0x00010001, &desc->stride); 182 177 iowrite32(control, &desc->control); 183 - return 1; 184 178 } 185 179 186 180 /* status is returned on upper 16 bits,
+2 -1
drivers/net/ethernet/altera/altera_msgdma.h
··· 25 25 void msgdma_clear_rxirq(struct altera_tse_private *); 26 26 void msgdma_clear_txirq(struct altera_tse_private *); 27 27 u32 msgdma_tx_completions(struct altera_tse_private *); 28 - int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *); 28 + void msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *); 29 29 int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *); 30 30 u32 msgdma_rx_status(struct altera_tse_private *); 31 31 int msgdma_initialize(struct altera_tse_private *); 32 32 void msgdma_uninitialize(struct altera_tse_private *); 33 + void msgdma_start_rxdma(struct altera_tse_private *); 33 34 34 35 #endif /* __ALTERA_MSGDMA_H__ */
+105 -72
drivers/net/ethernet/altera/altera_sgdma.c
··· 20 20 #include "altera_sgdmahw.h" 21 21 #include "altera_sgdma.h" 22 22 23 - static void sgdma_descrip(struct sgdma_descrip *desc, 24 - struct sgdma_descrip *ndesc, 25 - dma_addr_t ndesc_phys, 26 - dma_addr_t raddr, 27 - dma_addr_t waddr, 28 - u16 length, 29 - int generate_eop, 30 - int rfixed, 31 - int wfixed); 23 + static void sgdma_setup_descrip(struct sgdma_descrip *desc, 24 + struct sgdma_descrip *ndesc, 25 + dma_addr_t ndesc_phys, 26 + dma_addr_t raddr, 27 + dma_addr_t waddr, 28 + u16 length, 29 + int generate_eop, 30 + int rfixed, 31 + int wfixed); 32 32 33 33 static int sgdma_async_write(struct altera_tse_private *priv, 34 34 struct sgdma_descrip *desc); ··· 64 64 65 65 int sgdma_initialize(struct altera_tse_private *priv) 66 66 { 67 - priv->txctrlreg = SGDMA_CTRLREG_ILASTD; 67 + priv->txctrlreg = SGDMA_CTRLREG_ILASTD | 68 + SGDMA_CTRLREG_INTEN; 68 69 69 70 priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP | 71 + SGDMA_CTRLREG_INTEN | 70 72 SGDMA_CTRLREG_ILASTD; 73 + 74 + priv->sgdmadesclen = sizeof(struct sgdma_descrip); 71 75 72 76 INIT_LIST_HEAD(&priv->txlisthd); 73 77 INIT_LIST_HEAD(&priv->rxlisthd); ··· 96 92 netdev_err(priv->dev, "error mapping tx descriptor memory\n"); 97 93 return -EINVAL; 98 94 } 95 + 96 + /* Initialize descriptor memory to all 0's, sync memory to cache */ 97 + memset(priv->tx_dma_desc, 0, priv->txdescmem); 98 + memset(priv->rx_dma_desc, 0, priv->rxdescmem); 99 + 100 + dma_sync_single_for_device(priv->device, priv->txdescphys, 101 + priv->txdescmem, DMA_TO_DEVICE); 102 + 103 + dma_sync_single_for_device(priv->device, priv->rxdescphys, 104 + priv->rxdescmem, DMA_TO_DEVICE); 99 105 100 106 return 0; 101 107 } ··· 144 130 iowrite32(0, &prxsgdma->control); 145 131 } 146 132 133 + /* For SGDMA, interrupts remain enabled after initially enabling, 134 + * so no need to provide implementations for abstract enable 135 + * and disable 136 + */ 137 + 147 138 void sgdma_enable_rxirq(struct altera_tse_private *priv) 148 139 { 149 - struct sgdma_csr 
*csr = (struct sgdma_csr *)priv->rx_dma_csr; 150 - priv->rxctrlreg |= SGDMA_CTRLREG_INTEN; 151 - tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN); 152 140 } 153 141 154 142 void sgdma_enable_txirq(struct altera_tse_private *priv) 155 143 { 156 - struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; 157 - priv->txctrlreg |= SGDMA_CTRLREG_INTEN; 158 - tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN); 159 144 } 160 145 161 - /* for SGDMA, RX interrupts remain enabled after enabling */ 162 146 void sgdma_disable_rxirq(struct altera_tse_private *priv) 163 147 { 164 148 } 165 149 166 - /* for SGDMA, TX interrupts remain enabled after enabling */ 167 150 void sgdma_disable_txirq(struct altera_tse_private *priv) 168 151 { 169 152 } ··· 195 184 if (sgdma_txbusy(priv)) 196 185 return 0; 197 186 198 - sgdma_descrip(cdesc, /* current descriptor */ 199 - ndesc, /* next descriptor */ 200 - sgdma_txphysaddr(priv, ndesc), 201 - buffer->dma_addr, /* address of packet to xmit */ 202 - 0, /* write addr 0 for tx dma */ 203 - buffer->len, /* length of packet */ 204 - SGDMA_CONTROL_EOP, /* Generate EOP */ 205 - 0, /* read fixed */ 206 - SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ 187 + sgdma_setup_descrip(cdesc, /* current descriptor */ 188 + ndesc, /* next descriptor */ 189 + sgdma_txphysaddr(priv, ndesc), 190 + buffer->dma_addr, /* address of packet to xmit */ 191 + 0, /* write addr 0 for tx dma */ 192 + buffer->len, /* length of packet */ 193 + SGDMA_CONTROL_EOP, /* Generate EOP */ 194 + 0, /* read fixed */ 195 + SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ 207 196 208 197 pktstx = sgdma_async_write(priv, cdesc); 209 198 ··· 230 219 return ready; 231 220 } 232 221 233 - int sgdma_add_rx_desc(struct altera_tse_private *priv, 234 - struct tse_buffer *rxbuffer) 222 + void sgdma_start_rxdma(struct altera_tse_private *priv) 223 + { 224 + sgdma_async_read(priv); 225 + } 226 + 227 + void sgdma_add_rx_desc(struct altera_tse_private *priv, 228 + struct tse_buffer *rxbuffer) 235 229 { 236 
230 queue_rx(priv, rxbuffer); 237 - return sgdma_async_read(priv); 238 231 } 239 232 240 233 /* status is returned on upper 16 bits, ··· 255 240 unsigned int pktstatus = 0; 256 241 struct tse_buffer *rxbuffer = NULL; 257 242 258 - dma_sync_single_for_cpu(priv->device, 259 - priv->rxdescphys, 260 - priv->rxdescmem, 261 - DMA_BIDIRECTIONAL); 243 + u32 sts = ioread32(&csr->status); 262 244 263 245 desc = &base[0]; 264 - if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) || 265 - (desc->status & SGDMA_STATUS_EOP)) { 246 + if (sts & SGDMA_STSREG_EOP) { 247 + dma_sync_single_for_cpu(priv->device, 248 + priv->rxdescphys, 249 + priv->sgdmadesclen, 250 + DMA_FROM_DEVICE); 251 + 266 252 pktlength = desc->bytes_xferred; 267 253 pktstatus = desc->status & 0x3f; 268 254 rxstatus = pktstatus; 269 255 rxstatus = rxstatus << 16; 270 256 rxstatus |= (pktlength & 0xffff); 271 257 272 - desc->status = 0; 258 + if (rxstatus) { 259 + desc->status = 0; 273 260 274 - rxbuffer = dequeue_rx(priv); 275 - if (rxbuffer == NULL) 261 + rxbuffer = dequeue_rx(priv); 262 + if (rxbuffer == NULL) 263 + netdev_info(priv->dev, 264 + "sgdma rx and rx queue empty!\n"); 265 + 266 + /* Clear control */ 267 + iowrite32(0, &csr->control); 268 + /* clear status */ 269 + iowrite32(0xf, &csr->status); 270 + 271 + /* kick the rx sgdma after reaping this descriptor */ 272 + pktsrx = sgdma_async_read(priv); 273 + 274 + } else { 275 + /* If the SGDMA indicated an end of packet on recv, 276 + * then it's expected that the rxstatus from the 277 + * descriptor is non-zero - meaning a valid packet 278 + * with a nonzero length, or an error has been 279 + * indicated. if not, then all we can do is signal 280 + * an error and return no packet received. 
Most likely 281 + * there is a system design error, or an error in the 282 + * underlying kernel (cache or cache management problem) 283 + */ 276 284 netdev_err(priv->dev, 277 - "sgdma rx and rx queue empty!\n"); 278 - 279 - /* kick the rx sgdma after reaping this descriptor */ 285 + "SGDMA RX Error Info: %x, %x, %x\n", 286 + sts, desc->status, rxstatus); 287 + } 288 + } else if (sts == 0) { 280 289 pktsrx = sgdma_async_read(priv); 281 290 } 282 291 ··· 309 270 310 271 311 272 /* Private functions */ 312 - static void sgdma_descrip(struct sgdma_descrip *desc, 313 - struct sgdma_descrip *ndesc, 314 - dma_addr_t ndesc_phys, 315 - dma_addr_t raddr, 316 - dma_addr_t waddr, 317 - u16 length, 318 - int generate_eop, 319 - int rfixed, 320 - int wfixed) 273 + static void sgdma_setup_descrip(struct sgdma_descrip *desc, 274 + struct sgdma_descrip *ndesc, 275 + dma_addr_t ndesc_phys, 276 + dma_addr_t raddr, 277 + dma_addr_t waddr, 278 + u16 length, 279 + int generate_eop, 280 + int rfixed, 281 + int wfixed) 321 282 { 322 283 /* Clear the next descriptor as not owned by hardware */ 323 284 u32 ctrl = ndesc->control; ··· 358 319 struct sgdma_descrip *cdesc = &descbase[0]; 359 320 struct sgdma_descrip *ndesc = &descbase[1]; 360 321 361 - unsigned int sts = ioread32(&csr->status); 362 322 struct tse_buffer *rxbuffer = NULL; 363 323 364 324 if (!sgdma_rxbusy(priv)) { 365 325 rxbuffer = queue_rx_peekhead(priv); 366 - if (rxbuffer == NULL) 326 + if (rxbuffer == NULL) { 327 + netdev_err(priv->dev, "no rx buffers available\n"); 367 328 return 0; 329 + } 368 330 369 - sgdma_descrip(cdesc, /* current descriptor */ 370 - ndesc, /* next descriptor */ 371 - sgdma_rxphysaddr(priv, ndesc), 372 - 0, /* read addr 0 for rx dma */ 373 - rxbuffer->dma_addr, /* write addr for rx dma */ 374 - 0, /* read 'til EOP */ 375 - 0, /* EOP: NA for rx dma */ 376 - 0, /* read fixed: NA for rx dma */ 377 - 0); /* SOP: NA for rx DMA */ 378 - 379 - /* clear control and status */ 380 - iowrite32(0, 
&csr->control); 381 - 382 - /* If status available, clear those bits */ 383 - if (sts & 0xf) 384 - iowrite32(0xf, &csr->status); 331 + sgdma_setup_descrip(cdesc, /* current descriptor */ 332 + ndesc, /* next descriptor */ 333 + sgdma_rxphysaddr(priv, ndesc), 334 + 0, /* read addr 0 for rx dma */ 335 + rxbuffer->dma_addr, /* write addr for rx dma */ 336 + 0, /* read 'til EOP */ 337 + 0, /* EOP: NA for rx dma */ 338 + 0, /* read fixed: NA for rx dma */ 339 + 0); /* SOP: NA for rx DMA */ 385 340 386 341 dma_sync_single_for_device(priv->device, 387 342 priv->rxdescphys, 388 - priv->rxdescmem, 389 - DMA_BIDIRECTIONAL); 343 + priv->sgdmadesclen, 344 + DMA_TO_DEVICE); 390 345 391 346 iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), 392 347 &csr->next_descrip); ··· 407 374 iowrite32(0x1f, &csr->status); 408 375 409 376 dma_sync_single_for_device(priv->device, priv->txdescphys, 410 - priv->txdescmem, DMA_TO_DEVICE); 377 + priv->sgdmadesclen, DMA_TO_DEVICE); 411 378 412 379 iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)), 413 380 &csr->next_descrip);
+2 -1
drivers/net/ethernet/altera/altera_sgdma.h
··· 26 26 void sgdma_clear_txirq(struct altera_tse_private *); 27 27 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *); 28 28 u32 sgdma_tx_completions(struct altera_tse_private *); 29 - int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *); 29 + void sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *); 30 30 void sgdma_status(struct altera_tse_private *); 31 31 u32 sgdma_rx_status(struct altera_tse_private *); 32 32 int sgdma_initialize(struct altera_tse_private *); 33 33 void sgdma_uninitialize(struct altera_tse_private *); 34 + void sgdma_start_rxdma(struct altera_tse_private *); 34 35 35 36 #endif /* __ALTERA_SGDMA_H__ */
+5 -1
drivers/net/ethernet/altera/altera_tse.h
··· 58 58 /* MAC function configuration default settings */ 59 59 #define ALTERA_TSE_TX_IPG_LENGTH 12 60 60 61 + #define ALTERA_TSE_PAUSE_QUANTA 0xffff 62 + 61 63 #define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1) 62 64 63 65 /* MAC Command_Config Register Bit Definitions ··· 392 390 void (*clear_rxirq)(struct altera_tse_private *); 393 391 int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *); 394 392 u32 (*tx_completions)(struct altera_tse_private *); 395 - int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *); 393 + void (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *); 396 394 u32 (*get_rx_status)(struct altera_tse_private *); 397 395 int (*init_dma)(struct altera_tse_private *); 398 396 void (*uninit_dma)(struct altera_tse_private *); 397 + void (*start_rxdma)(struct altera_tse_private *); 399 398 }; 400 399 401 400 /* This structure is private to each device. ··· 456 453 u32 rxctrlreg; 457 454 dma_addr_t rxdescphys; 458 455 dma_addr_t txdescphys; 456 + size_t sgdmadesclen; 459 457 460 458 struct list_head txlisthd; 461 459 struct list_head rxlisthd;
+7 -1
drivers/net/ethernet/altera/altera_tse_ethtool.c
··· 77 77 struct altera_tse_private *priv = netdev_priv(dev); 78 78 u32 rev = ioread32(&priv->mac_dev->megacore_revision); 79 79 80 - strcpy(info->driver, "Altera TSE MAC IP Driver"); 80 + strcpy(info->driver, "altera_tse"); 81 81 strcpy(info->version, "v8.0"); 82 82 snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d", 83 83 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16); ··· 185 185 * how to do any special formatting of this data. 186 186 * This version number will need to change if and 187 187 * when this register table is changed. 188 + * 189 + * version[31:0] = 1: Dump the first 128 TSE Registers 190 + * Upper bits are all 0 by default 191 + * 192 + * Upper 16-bits will indicate feature presence for 193 + * Ethtool register decoding in future version. 188 194 */ 189 195 190 196 regs->version = 1;
+46 -31
drivers/net/ethernet/altera/altera_tse_main.c
··· 224 224 dev_kfree_skb_any(rxbuffer->skb); 225 225 return -EINVAL; 226 226 } 227 + rxbuffer->dma_addr &= (dma_addr_t)~3; 227 228 rxbuffer->len = len; 228 229 return 0; 229 230 } ··· 426 425 priv->dev->stats.rx_bytes += pktlength; 427 426 428 427 entry = next_entry; 428 + 429 + tse_rx_refill(priv); 429 430 } 430 431 431 - tse_rx_refill(priv); 432 432 return count; 433 433 } 434 434 ··· 521 519 struct net_device *dev = dev_id; 522 520 struct altera_tse_private *priv; 523 521 unsigned long int flags; 524 - 525 522 526 523 if (unlikely(!dev)) { 527 524 pr_err("%s: invalid dev pointer\n", __func__); ··· 869 868 /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit 870 869 * start address 871 870 */ 872 - tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); 871 + tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); 873 872 tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | 874 873 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); 875 874 876 875 /* Set the MAC options */ 877 876 cmd = ioread32(&mac->command_config); 878 - cmd |= MAC_CMDCFG_PAD_EN; /* Padding Removal on Receive */ 877 + cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */ 879 878 cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */ 880 879 cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames 881 880 * with CRC errors ··· 883 882 cmd |= MAC_CMDCFG_CNTL_FRM_ENA; 884 883 cmd &= ~MAC_CMDCFG_TX_ENA; 885 884 cmd &= ~MAC_CMDCFG_RX_ENA; 885 + 886 + /* Default speed and duplex setting, full/100 */ 887 + cmd &= ~MAC_CMDCFG_HD_ENA; 888 + cmd &= ~MAC_CMDCFG_ETH_SPEED; 889 + cmd &= ~MAC_CMDCFG_ENA_10; 890 + 886 891 iowrite32(cmd, &mac->command_config); 892 + 893 + iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta); 887 894 888 895 if (netif_msg_hw(priv)) 889 896 dev_dbg(priv->device, ··· 1094 1085 1095 1086 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); 1096 1087 1097 - /* Start MAC Rx/Tx */ 1098 - spin_lock(&priv->mac_cfg_lock); 1099 - 
tse_set_mac(priv, true); 1100 - spin_unlock(&priv->mac_cfg_lock); 1101 - 1102 1088 if (priv->phydev) 1103 1089 phy_start(priv->phydev); 1104 1090 1105 1091 napi_enable(&priv->napi); 1106 1092 netif_start_queue(dev); 1093 + 1094 + priv->dmaops->start_rxdma(priv); 1095 + 1096 + /* Start MAC Rx/Tx */ 1097 + spin_lock(&priv->mac_cfg_lock); 1098 + tse_set_mac(priv, true); 1099 + spin_unlock(&priv->mac_cfg_lock); 1107 1100 1108 1101 return 0; 1109 1102 ··· 1178 1167 .ndo_validate_addr = eth_validate_addr, 1179 1168 }; 1180 1169 1181 - 1182 1170 static int request_and_map(struct platform_device *pdev, const char *name, 1183 1171 struct resource **res, void __iomem **ptr) 1184 1172 { ··· 1245 1235 /* Get the mapped address to the SGDMA descriptor memory */ 1246 1236 ret = request_and_map(pdev, "s1", &dma_res, &descmap); 1247 1237 if (ret) 1248 - goto out_free; 1238 + goto err_free_netdev; 1249 1239 1250 1240 /* Start of that memory is for transmit descriptors */ 1251 1241 priv->tx_dma_desc = descmap; ··· 1264 1254 if (upper_32_bits(priv->rxdescmem_busaddr)) { 1265 1255 dev_dbg(priv->device, 1266 1256 "SGDMA bus addresses greater than 32-bits\n"); 1267 - goto out_free; 1257 + goto err_free_netdev; 1268 1258 } 1269 1259 if (upper_32_bits(priv->txdescmem_busaddr)) { 1270 1260 dev_dbg(priv->device, 1271 1261 "SGDMA bus addresses greater than 32-bits\n"); 1272 - goto out_free; 1262 + goto err_free_netdev; 1273 1263 } 1274 1264 } else if (priv->dmaops && 1275 1265 priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) { 1276 1266 ret = request_and_map(pdev, "rx_resp", &dma_res, 1277 1267 &priv->rx_dma_resp); 1278 1268 if (ret) 1279 - goto out_free; 1269 + goto err_free_netdev; 1280 1270 1281 1271 ret = request_and_map(pdev, "tx_desc", &dma_res, 1282 1272 &priv->tx_dma_desc); 1283 1273 if (ret) 1284 - goto out_free; 1274 + goto err_free_netdev; 1285 1275 1286 1276 priv->txdescmem = resource_size(dma_res); 1287 1277 priv->txdescmem_busaddr = dma_res->start; ··· 1289 1279 ret = 
request_and_map(pdev, "rx_desc", &dma_res, 1290 1280 &priv->rx_dma_desc); 1291 1281 if (ret) 1292 - goto out_free; 1282 + goto err_free_netdev; 1293 1283 1294 1284 priv->rxdescmem = resource_size(dma_res); 1295 1285 priv->rxdescmem_busaddr = dma_res->start; 1296 1286 1297 1287 } else { 1298 - goto out_free; 1288 + goto err_free_netdev; 1299 1289 } 1300 1290 1301 1291 if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) ··· 1304 1294 else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) 1305 1295 dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); 1306 1296 else 1307 - goto out_free; 1297 + goto err_free_netdev; 1308 1298 1309 1299 /* MAC address space */ 1310 1300 ret = request_and_map(pdev, "control_port", &control_port, 1311 1301 (void __iomem **)&priv->mac_dev); 1312 1302 if (ret) 1313 - goto out_free; 1303 + goto err_free_netdev; 1314 1304 1315 1305 /* xSGDMA Rx Dispatcher address space */ 1316 1306 ret = request_and_map(pdev, "rx_csr", &dma_res, 1317 1307 &priv->rx_dma_csr); 1318 1308 if (ret) 1319 - goto out_free; 1309 + goto err_free_netdev; 1320 1310 1321 1311 1322 1312 /* xSGDMA Tx Dispatcher address space */ 1323 1313 ret = request_and_map(pdev, "tx_csr", &dma_res, 1324 1314 &priv->tx_dma_csr); 1325 1315 if (ret) 1326 - goto out_free; 1316 + goto err_free_netdev; 1327 1317 1328 1318 1329 1319 /* Rx IRQ */ ··· 1331 1321 if (priv->rx_irq == -ENXIO) { 1332 1322 dev_err(&pdev->dev, "cannot obtain Rx IRQ\n"); 1333 1323 ret = -ENXIO; 1334 - goto out_free; 1324 + goto err_free_netdev; 1335 1325 } 1336 1326 1337 1327 /* Tx IRQ */ ··· 1339 1329 if (priv->tx_irq == -ENXIO) { 1340 1330 dev_err(&pdev->dev, "cannot obtain Tx IRQ\n"); 1341 1331 ret = -ENXIO; 1342 - goto out_free; 1332 + goto err_free_netdev; 1343 1333 } 1344 1334 1345 1335 /* get FIFO depths from device tree */ ··· 1347 1337 &priv->rx_fifo_depth)) { 1348 1338 dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n"); 1349 1339 ret = -ENXIO; 1350 - goto out_free; 1340 + goto 
err_free_netdev; 1351 1341 } 1352 1342 1353 1343 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", 1354 1344 &priv->rx_fifo_depth)) { 1355 1345 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); 1356 1346 ret = -ENXIO; 1357 - goto out_free; 1347 + goto err_free_netdev; 1358 1348 } 1359 1349 1360 1350 /* get hash filter settings for this instance */ ··· 1403 1393 ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) { 1404 1394 dev_err(&pdev->dev, "invalid phy-addr specified %d\n", 1405 1395 priv->phy_addr); 1406 - goto out_free; 1396 + goto err_free_netdev; 1407 1397 } 1408 1398 1409 1399 /* Create/attach to MDIO bus */ ··· 1411 1401 atomic_add_return(1, &instance_count)); 1412 1402 1413 1403 if (ret) 1414 - goto out_free; 1404 + goto err_free_netdev; 1415 1405 1416 1406 /* initialize netdev */ 1417 1407 ether_setup(ndev); ··· 1448 1438 ret = register_netdev(ndev); 1449 1439 if (ret) { 1450 1440 dev_err(&pdev->dev, "failed to register TSE net device\n"); 1451 - goto out_free_mdio; 1441 + goto err_register_netdev; 1452 1442 } 1453 1443 1454 1444 platform_set_drvdata(pdev, ndev); ··· 1465 1455 ret = init_phy(ndev); 1466 1456 if (ret != 0) { 1467 1457 netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret); 1468 - goto out_free_mdio; 1458 + goto err_init_phy; 1469 1459 } 1470 1460 return 0; 1471 1461 1472 - out_free_mdio: 1462 + err_init_phy: 1463 + unregister_netdev(ndev); 1464 + err_register_netdev: 1465 + netif_napi_del(&priv->napi); 1473 1466 altera_tse_mdio_destroy(ndev); 1474 - out_free: 1467 + err_free_netdev: 1475 1468 free_netdev(ndev); 1476 1469 return ret; 1477 1470 } ··· 1509 1496 .get_rx_status = sgdma_rx_status, 1510 1497 .init_dma = sgdma_initialize, 1511 1498 .uninit_dma = sgdma_uninitialize, 1499 + .start_rxdma = sgdma_start_rxdma, 1512 1500 }; 1513 1501 1514 1502 struct altera_dmaops altera_dtype_msgdma = { ··· 1528 1514 .get_rx_status = msgdma_rx_status, 1529 1515 .init_dma = msgdma_initialize, 1530 1516 .uninit_dma = 
msgdma_uninitialize, 1517 + .start_rxdma = msgdma_start_rxdma, 1531 1518 }; 1532 1519 1533 1520 static struct of_device_id altera_tse_ids[] = {
+2
drivers/net/ethernet/arc/emac.h
··· 11 11 #include <linux/dma-mapping.h> 12 12 #include <linux/netdevice.h> 13 13 #include <linux/phy.h> 14 + #include <linux/clk.h> 14 15 15 16 /* STATUS and ENABLE Register bit masks */ 16 17 #define TXINT_MASK (1<<0) /* Transmit interrupt */ ··· 132 131 struct mii_bus *bus; 133 132 134 133 void __iomem *regs; 134 + struct clk *clk; 135 135 136 136 struct napi_struct napi; 137 137 struct net_device_stats stats;
+59 -23
drivers/net/ethernet/arc/emac_main.c
··· 574 574 return NETDEV_TX_OK; 575 575 } 576 576 577 + static void arc_emac_set_address_internal(struct net_device *ndev) 578 + { 579 + struct arc_emac_priv *priv = netdev_priv(ndev); 580 + unsigned int addr_low, addr_hi; 581 + 582 + addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]); 583 + addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]); 584 + 585 + arc_reg_set(priv, R_ADDRL, addr_low); 586 + arc_reg_set(priv, R_ADDRH, addr_hi); 587 + } 588 + 577 589 /** 578 590 * arc_emac_set_address - Set the MAC address for this device. 579 591 * @ndev: Pointer to net_device structure. ··· 599 587 */ 600 588 static int arc_emac_set_address(struct net_device *ndev, void *p) 601 589 { 602 - struct arc_emac_priv *priv = netdev_priv(ndev); 603 590 struct sockaddr *addr = p; 604 - unsigned int addr_low, addr_hi; 605 591 606 592 if (netif_running(ndev)) 607 593 return -EBUSY; ··· 609 599 610 600 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 611 601 612 - addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]); 613 - addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]); 614 - 615 - arc_reg_set(priv, R_ADDRL, addr_low); 616 - arc_reg_set(priv, R_ADDRH, addr_hi); 602 + arc_emac_set_address_internal(ndev); 617 603 618 604 return 0; 619 605 } ··· 649 643 return -ENODEV; 650 644 } 651 645 652 - /* Get CPU clock frequency from device tree */ 653 - if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", 654 - &clock_frequency)) { 655 - dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n"); 656 - return -EINVAL; 657 - } 658 - 659 646 /* Get IRQ from device tree */ 660 647 irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 661 648 if (!irq) { ··· 676 677 priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs); 677 678 if (IS_ERR(priv->regs)) { 678 679 err = PTR_ERR(priv->regs); 679 - goto out; 680 + goto out_netdev; 680 681 } 681 682 dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs); 683 + 684 + priv->clk = 
of_clk_get(pdev->dev.of_node, 0); 685 + if (IS_ERR(priv->clk)) { 686 + /* Get CPU clock frequency from device tree */ 687 + if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", 688 + &clock_frequency)) { 689 + dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n"); 690 + err = -EINVAL; 691 + goto out_netdev; 692 + } 693 + } else { 694 + err = clk_prepare_enable(priv->clk); 695 + if (err) { 696 + dev_err(&pdev->dev, "failed to enable clock\n"); 697 + goto out_clkget; 698 + } 699 + 700 + clock_frequency = clk_get_rate(priv->clk); 701 + } 682 702 683 703 id = arc_reg_get(priv, R_ID); 684 704 ··· 705 687 if (!(id == 0x0005fd02 || id == 0x0007fd02)) { 706 688 dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id); 707 689 err = -ENODEV; 708 - goto out; 690 + goto out_clken; 709 691 } 710 692 dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id); 711 693 ··· 720 702 ndev->name, ndev); 721 703 if (err) { 722 704 dev_err(&pdev->dev, "could not allocate IRQ\n"); 723 - goto out; 705 + goto out_clken; 724 706 } 725 707 726 708 /* Get MAC address from device tree */ ··· 731 713 else 732 714 eth_hw_addr_random(ndev); 733 715 716 + arc_emac_set_address_internal(ndev); 734 717 dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr); 735 718 736 719 /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */ ··· 741 722 if (!priv->rxbd) { 742 723 dev_err(&pdev->dev, "failed to allocate data buffers\n"); 743 724 err = -ENOMEM; 744 - goto out; 725 + goto out_clken; 745 726 } 746 727 747 728 priv->txbd = priv->rxbd + RX_BD_NUM; ··· 753 734 err = arc_mdio_probe(pdev, priv); 754 735 if (err) { 755 736 dev_err(&pdev->dev, "failed to probe MII bus\n"); 756 - goto out; 737 + goto out_clken; 757 738 } 758 739 759 740 priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, ··· 761 742 if (!priv->phy_dev) { 762 743 dev_err(&pdev->dev, "of_phy_connect() failed\n"); 763 744 err = -ENODEV; 764 - goto out; 745 + 
goto out_mdio; 765 746 } 766 747 767 748 dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n", ··· 771 752 772 753 err = register_netdev(ndev); 773 754 if (err) { 774 - netif_napi_del(&priv->napi); 775 755 dev_err(&pdev->dev, "failed to register network device\n"); 776 - goto out; 756 + goto out_netif_api; 777 757 } 778 758 779 759 return 0; 780 760 781 - out: 761 + out_netif_api: 762 + netif_napi_del(&priv->napi); 763 + phy_disconnect(priv->phy_dev); 764 + priv->phy_dev = NULL; 765 + out_mdio: 766 + arc_mdio_remove(priv); 767 + out_clken: 768 + if (!IS_ERR(priv->clk)) 769 + clk_disable_unprepare(priv->clk); 770 + out_clkget: 771 + if (!IS_ERR(priv->clk)) 772 + clk_put(priv->clk); 773 + out_netdev: 782 774 free_netdev(ndev); 783 775 return err; 784 776 } ··· 804 774 arc_mdio_remove(priv); 805 775 unregister_netdev(ndev); 806 776 netif_napi_del(&priv->napi); 777 + 778 + if (!IS_ERR(priv->clk)) { 779 + clk_disable_unprepare(priv->clk); 780 + clk_put(priv->clk); 781 + } 782 + 807 783 free_netdev(ndev); 808 784 809 785 return 0;
+2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 13233 13233 iounmap(bp->doorbells); 13234 13234 13235 13235 bnx2x_release_firmware(bp); 13236 + } else { 13237 + bnx2x_vf_pci_dealloc(bp); 13236 13238 } 13237 13239 bnx2x_free_mem_bp(bp); 13238 13240
+44 -14
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
··· 427 427 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && 428 428 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= 429 429 vf_vlan_rules_cnt(vf))) { 430 - BNX2X_ERR("No credits for vlan\n"); 430 + BNX2X_ERR("No credits for vlan [%d >= %d]\n", 431 + atomic_read(&bnx2x_vfq(vf, qid, vlan_count)), 432 + vf_vlan_rules_cnt(vf)); 431 433 return -ENOMEM; 432 434 } 433 435 ··· 612 610 } 613 611 614 612 /* add new mcasts */ 613 + mcast.mcast_list_len = mc_num; 615 614 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); 616 615 if (rc) 617 616 BNX2X_ERR("Faled to add multicasts\n"); ··· 840 837 return 0; 841 838 } 842 839 840 + static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp, 841 + struct bnx2x_virtf *vf, 842 + int new) 843 + { 844 + int num = vf_vlan_rules_cnt(vf); 845 + int diff = new - num; 846 + bool rc = true; 847 + 848 + DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n", 849 + vf->abs_vfid, new, num); 850 + 851 + if (diff > 0) 852 + rc = bp->vlans_pool.get(&bp->vlans_pool, diff); 853 + else if (diff < 0) 854 + rc = bp->vlans_pool.put(&bp->vlans_pool, -diff); 855 + 856 + if (rc) 857 + vf_vlan_rules_cnt(vf) = new; 858 + else 859 + DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n", 860 + vf->abs_vfid); 861 + } 862 + 843 863 /* must be called after the number of PF queues and the number of VFs are 844 864 * both known 845 865 */ ··· 880 854 resc->num_mac_filters = 1; 881 855 882 856 /* divvy up vlan rules */ 857 + bnx2x_iov_re_set_vlan_filters(bp, vf, 0); 883 858 vlan_count = bp->vlans_pool.check(&bp->vlans_pool); 884 859 vlan_count = 1 << ilog2(vlan_count); 885 - resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp); 860 + bnx2x_iov_re_set_vlan_filters(bp, vf, 861 + vlan_count / BNX2X_NR_VIRTFN(bp)); 886 862 887 863 /* no real limitation */ 888 864 resc->num_mc_filters = 0; ··· 1506 1478 bnx2x_iov_static_resc(bp, vf); 1507 1479 1508 1480 /* queues are initialized during VF-ACQUIRE */ 1509 - 
1510 - /* reserve the vf vlan credit */ 1511 - bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf)); 1512 - 1513 1481 vf->filter_state = 0; 1514 1482 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 1515 1483 ··· 1936 1912 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1937 1913 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1938 1914 1915 + /* Save a vlan filter for the Hypervisor */ 1939 1916 return ((req_resc->num_rxqs <= rxq_cnt) && 1940 1917 (req_resc->num_txqs <= txq_cnt) && 1941 1918 (req_resc->num_sbs <= vf_sb_count(vf)) && 1942 1919 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && 1943 - (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf))); 1920 + (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf))); 1944 1921 } 1945 1922 1946 1923 /* CORE VF API */ ··· 1997 1972 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 1998 1973 if (resc->num_mac_filters) 1999 1974 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 2000 - if (resc->num_vlan_filters) 2001 - vf_vlan_rules_cnt(vf) = resc->num_vlan_filters; 1975 + /* Add an additional vlan filter credit for the hypervisor */ 1976 + bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1); 2002 1977 2003 1978 DP(BNX2X_MSG_IOV, 2004 1979 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 2005 1980 vf_sb_count(vf), vf_rxq_count(vf), 2006 1981 vf_txq_count(vf), vf_mac_rules_cnt(vf), 2007 - vf_vlan_rules_cnt(vf)); 1982 + vf_vlan_rules_visible_cnt(vf)); 2008 1983 2009 1984 /* Initialize the queues */ 2010 1985 if (!vf->vfqs) { ··· 2921 2896 return bp->regview + PXP_VF_ADDR_DB_START; 2922 2897 } 2923 2898 2899 + void bnx2x_vf_pci_dealloc(struct bnx2x *bp) 2900 + { 2901 + BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 2902 + sizeof(struct bnx2x_vf_mbx_msg)); 2903 + BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, 2904 + sizeof(union pf_vf_bulletin)); 2905 + } 2906 + 2924 2907 
int bnx2x_vf_pci_alloc(struct bnx2x *bp) 2925 2908 { 2926 2909 mutex_init(&bp->vf2pf_mutex); ··· 2948 2915 return 0; 2949 2916 2950 2917 alloc_mem_err: 2951 - BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 2952 - sizeof(struct bnx2x_vf_mbx_msg)); 2953 - BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, 2954 - sizeof(union pf_vf_bulletin)); 2918 + bnx2x_vf_pci_dealloc(bp); 2955 2919 return -ENOMEM; 2956 2920 } 2957 2921
+4
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
··· 159 159 #define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) 160 160 #define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) 161 161 #define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) 162 + /* Hide a single vlan filter credit for the hypervisor */ 163 + #define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1) 162 164 163 165 u8 sb_count; /* actual number of SBs */ 164 166 u8 igu_base_id; /* base igu status block id */ ··· 504 502 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 505 503 void bnx2x_timer_sriov(struct bnx2x *bp); 506 504 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); 505 + void bnx2x_vf_pci_dealloc(struct bnx2x *bp); 507 506 int bnx2x_vf_pci_alloc(struct bnx2x *bp); 508 507 int bnx2x_enable_sriov(struct bnx2x *bp); 509 508 void bnx2x_disable_sriov(struct bnx2x *bp); ··· 571 568 return NULL; 572 569 } 573 570 571 + static inline void bnx2x_vf_pci_dealloc(struct bnx2 *bp) {return 0; } 574 572 static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } 575 573 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} 576 574 static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
··· 1163 1163 bnx2x_vf_max_queue_cnt(bp, vf); 1164 1164 resc->num_sbs = vf_sb_count(vf); 1165 1165 resc->num_mac_filters = vf_mac_rules_cnt(vf); 1166 - resc->num_vlan_filters = vf_vlan_rules_cnt(vf); 1166 + resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf); 1167 1167 resc->num_mc_filters = 0; 1168 1168 1169 1169 if (status == PFVF_STATUS_SUCCESS) {
+2 -2
drivers/net/ethernet/cadence/Kconfig
··· 4 4 5 5 config NET_CADENCE 6 6 bool "Cadence devices" 7 - depends on HAS_IOMEM && (ARM || AVR32 || COMPILE_TEST) 7 + depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST) 8 8 default y 9 9 ---help--- 10 10 If you have a network (Ethernet) card belonging to this class, say Y. ··· 30 30 31 31 config MACB 32 32 tristate "Cadence MACB/GEM support" 33 - depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || COMPILE_TEST) 33 + depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST) 34 34 select PHYLIB 35 35 ---help--- 36 36 The Cadence MACB ethernet interface is found on many Atmel AT32 and
+17 -18
drivers/net/ethernet/cadence/macb.c
··· 599 599 { 600 600 unsigned int entry; 601 601 struct sk_buff *skb; 602 - struct macb_dma_desc *desc; 603 602 dma_addr_t paddr; 604 603 605 604 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { 606 - u32 addr, ctrl; 607 - 608 605 entry = macb_rx_ring_wrap(bp->rx_prepared_head); 609 - desc = &bp->rx_ring[entry]; 610 606 611 607 /* Make hw descriptor updates visible to CPU */ 612 608 rmb(); 613 609 614 - addr = desc->addr; 615 - ctrl = desc->ctrl; 616 610 bp->rx_prepared_head++; 617 - 618 - if ((addr & MACB_BIT(RX_USED))) 619 - continue; 620 611 621 612 if (bp->rx_skbuff[entry] == NULL) { 622 613 /* allocate sk_buff for this free entry in ring */ ··· 689 698 if (!(addr & MACB_BIT(RX_USED))) 690 699 break; 691 700 692 - desc->addr &= ~MACB_BIT(RX_USED); 693 701 bp->rx_tail++; 694 702 count++; 695 703 ··· 881 891 if (work_done < budget) { 882 892 napi_complete(napi); 883 893 884 - /* 885 - * We've done what we can to clean the buffers. Make sure we 886 - * get notified when new packets arrive. 
887 - */ 888 - macb_writel(bp, IER, MACB_RX_INT_FLAGS); 889 - 890 894 /* Packets received while interrupts were disabled */ 891 895 status = macb_readl(bp, RSR); 892 - if (unlikely(status)) 896 + if (status) { 897 + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 898 + macb_writel(bp, ISR, MACB_BIT(RCOMP)); 893 899 napi_reschedule(napi); 900 + } else { 901 + macb_writel(bp, IER, MACB_RX_INT_FLAGS); 902 + } 894 903 } 895 904 896 905 /* TODO: Handle errors */ ··· 940 951 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 941 952 macb_writel(bp, IDR, MACB_TX_INT_FLAGS); 942 953 schedule_work(&bp->tx_error_task); 954 + 955 + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 956 + macb_writel(bp, ISR, MACB_TX_ERR_FLAGS); 957 + 943 958 break; 944 959 } 945 960 ··· 961 968 bp->hw_stats.gem.rx_overruns++; 962 969 else 963 970 bp->hw_stats.macb.rx_overruns++; 971 + 972 + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 973 + macb_writel(bp, ISR, MACB_BIT(ISR_ROVR)); 964 974 } 965 975 966 976 if (status & MACB_BIT(HRESP)) { ··· 973 977 * (work queue?) 974 978 */ 975 979 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 980 + 981 + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 982 + macb_writel(bp, ISR, MACB_BIT(HRESP)); 976 983 } 977 984 978 985 status = macb_readl(bp, ISR); ··· 1112 1113 1113 1114 desc = &bp->rx_ring[i]; 1114 1115 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 1115 - dma_unmap_single(&bp->pdev->dev, addr, skb->len, 1116 + dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 1116 1117 DMA_FROM_DEVICE); 1117 1118 dev_kfree_skb_any(skb); 1118 1119 skb = NULL;
+7 -6
drivers/net/ethernet/chelsio/Kconfig
··· 67 67 will be called cxgb3. 68 68 69 69 config CHELSIO_T4 70 - tristate "Chelsio Communications T4 Ethernet support" 70 + tristate "Chelsio Communications T4/T5 Ethernet support" 71 71 depends on PCI 72 72 select FW_LOADER 73 73 select MDIO 74 74 ---help--- 75 - This driver supports Chelsio T4-based gigabit and 10Gb Ethernet 76 - adapters. 75 + This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet 76 + adapter and T5 based 40Gb Ethernet adapter. 77 77 78 78 For general information about Chelsio and our products, visit 79 79 our website at <http://www.chelsio.com>. ··· 87 87 will be called cxgb4. 88 88 89 89 config CHELSIO_T4VF 90 - tristate "Chelsio Communications T4 Virtual Function Ethernet support" 90 + tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support" 91 91 depends on PCI 92 92 ---help--- 93 - This driver supports Chelsio T4-based gigabit and 10Gb Ethernet 94 - adapters with PCI-E SR-IOV Virtual Functions. 93 + This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet 94 + adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual 95 + Functions. 95 96 96 97 For general information about Chelsio and our products, visit 97 98 our website at <http://www.chelsio.com>.
+2
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 5870 5870 spd = " 2.5 GT/s"; 5871 5871 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) 5872 5872 spd = " 5 GT/s"; 5873 + else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB) 5874 + spd = " 8 GT/s"; 5873 5875 5874 5876 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) 5875 5877 bufp += sprintf(bufp, "100/");
+113 -110
drivers/net/ethernet/freescale/gianfar.c
··· 121 121 static irqreturn_t gfar_transmit(int irq, void *dev_id); 122 122 static irqreturn_t gfar_interrupt(int irq, void *dev_id); 123 123 static void adjust_link(struct net_device *dev); 124 + static noinline void gfar_update_link_state(struct gfar_private *priv); 124 125 static int init_phy(struct net_device *dev); 125 126 static int gfar_probe(struct platform_device *ofdev); 126 127 static int gfar_remove(struct platform_device *ofdev); ··· 3077 3076 return IRQ_HANDLED; 3078 3077 } 3079 3078 3080 - static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) 3081 - { 3082 - struct phy_device *phydev = priv->phydev; 3083 - u32 val = 0; 3084 - 3085 - if (!phydev->duplex) 3086 - return val; 3087 - 3088 - if (!priv->pause_aneg_en) { 3089 - if (priv->tx_pause_en) 3090 - val |= MACCFG1_TX_FLOW; 3091 - if (priv->rx_pause_en) 3092 - val |= MACCFG1_RX_FLOW; 3093 - } else { 3094 - u16 lcl_adv, rmt_adv; 3095 - u8 flowctrl; 3096 - /* get link partner capabilities */ 3097 - rmt_adv = 0; 3098 - if (phydev->pause) 3099 - rmt_adv = LPA_PAUSE_CAP; 3100 - if (phydev->asym_pause) 3101 - rmt_adv |= LPA_PAUSE_ASYM; 3102 - 3103 - lcl_adv = mii_advertise_flowctrl(phydev->advertising); 3104 - 3105 - flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); 3106 - if (flowctrl & FLOW_CTRL_TX) 3107 - val |= MACCFG1_TX_FLOW; 3108 - if (flowctrl & FLOW_CTRL_RX) 3109 - val |= MACCFG1_RX_FLOW; 3110 - } 3111 - 3112 - return val; 3113 - } 3114 - 3115 3079 /* Called every time the controller might need to be made 3116 3080 * aware of new link state. 
The PHY code conveys this 3117 3081 * information through variables in the phydev structure, and this ··· 3086 3120 static void adjust_link(struct net_device *dev) 3087 3121 { 3088 3122 struct gfar_private *priv = netdev_priv(dev); 3089 - struct gfar __iomem *regs = priv->gfargrp[0].regs; 3090 3123 struct phy_device *phydev = priv->phydev; 3091 - int new_state = 0; 3092 3124 3093 - if (test_bit(GFAR_RESETTING, &priv->state)) 3094 - return; 3095 - 3096 - if (phydev->link) { 3097 - u32 tempval1 = gfar_read(&regs->maccfg1); 3098 - u32 tempval = gfar_read(&regs->maccfg2); 3099 - u32 ecntrl = gfar_read(&regs->ecntrl); 3100 - 3101 - /* Now we make sure that we can be in full duplex mode. 3102 - * If not, we operate in half-duplex mode. 3103 - */ 3104 - if (phydev->duplex != priv->oldduplex) { 3105 - new_state = 1; 3106 - if (!(phydev->duplex)) 3107 - tempval &= ~(MACCFG2_FULL_DUPLEX); 3108 - else 3109 - tempval |= MACCFG2_FULL_DUPLEX; 3110 - 3111 - priv->oldduplex = phydev->duplex; 3112 - } 3113 - 3114 - if (phydev->speed != priv->oldspeed) { 3115 - new_state = 1; 3116 - switch (phydev->speed) { 3117 - case 1000: 3118 - tempval = 3119 - ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); 3120 - 3121 - ecntrl &= ~(ECNTRL_R100); 3122 - break; 3123 - case 100: 3124 - case 10: 3125 - tempval = 3126 - ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); 3127 - 3128 - /* Reduced mode distinguishes 3129 - * between 10 and 100 3130 - */ 3131 - if (phydev->speed == SPEED_100) 3132 - ecntrl |= ECNTRL_R100; 3133 - else 3134 - ecntrl &= ~(ECNTRL_R100); 3135 - break; 3136 - default: 3137 - netif_warn(priv, link, dev, 3138 - "Ack! 
Speed (%d) is not 10/100/1000!\n", 3139 - phydev->speed); 3140 - break; 3141 - } 3142 - 3143 - priv->oldspeed = phydev->speed; 3144 - } 3145 - 3146 - tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 3147 - tempval1 |= gfar_get_flowctrl_cfg(priv); 3148 - 3149 - gfar_write(&regs->maccfg1, tempval1); 3150 - gfar_write(&regs->maccfg2, tempval); 3151 - gfar_write(&regs->ecntrl, ecntrl); 3152 - 3153 - if (!priv->oldlink) { 3154 - new_state = 1; 3155 - priv->oldlink = 1; 3156 - } 3157 - } else if (priv->oldlink) { 3158 - new_state = 1; 3159 - priv->oldlink = 0; 3160 - priv->oldspeed = 0; 3161 - priv->oldduplex = -1; 3162 - } 3163 - 3164 - if (new_state && netif_msg_link(priv)) 3165 - phy_print_status(phydev); 3125 + if (unlikely(phydev->link != priv->oldlink || 3126 + phydev->duplex != priv->oldduplex || 3127 + phydev->speed != priv->oldspeed)) 3128 + gfar_update_link_state(priv); 3166 3129 } 3167 3130 3168 3131 /* Update the hash table based on the current list of multicast ··· 3335 3440 netif_dbg(priv, tx_err, dev, "babbling TX error\n"); 3336 3441 } 3337 3442 return IRQ_HANDLED; 3443 + } 3444 + 3445 + static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) 3446 + { 3447 + struct phy_device *phydev = priv->phydev; 3448 + u32 val = 0; 3449 + 3450 + if (!phydev->duplex) 3451 + return val; 3452 + 3453 + if (!priv->pause_aneg_en) { 3454 + if (priv->tx_pause_en) 3455 + val |= MACCFG1_TX_FLOW; 3456 + if (priv->rx_pause_en) 3457 + val |= MACCFG1_RX_FLOW; 3458 + } else { 3459 + u16 lcl_adv, rmt_adv; 3460 + u8 flowctrl; 3461 + /* get link partner capabilities */ 3462 + rmt_adv = 0; 3463 + if (phydev->pause) 3464 + rmt_adv = LPA_PAUSE_CAP; 3465 + if (phydev->asym_pause) 3466 + rmt_adv |= LPA_PAUSE_ASYM; 3467 + 3468 + lcl_adv = mii_advertise_flowctrl(phydev->advertising); 3469 + 3470 + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); 3471 + if (flowctrl & FLOW_CTRL_TX) 3472 + val |= MACCFG1_TX_FLOW; 3473 + if (flowctrl & FLOW_CTRL_RX) 3474 + val |= MACCFG1_RX_FLOW; 3475 
+ } 3476 + 3477 + return val; 3478 + } 3479 + 3480 + static noinline void gfar_update_link_state(struct gfar_private *priv) 3481 + { 3482 + struct gfar __iomem *regs = priv->gfargrp[0].regs; 3483 + struct phy_device *phydev = priv->phydev; 3484 + 3485 + if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) 3486 + return; 3487 + 3488 + if (phydev->link) { 3489 + u32 tempval1 = gfar_read(&regs->maccfg1); 3490 + u32 tempval = gfar_read(&regs->maccfg2); 3491 + u32 ecntrl = gfar_read(&regs->ecntrl); 3492 + 3493 + if (phydev->duplex != priv->oldduplex) { 3494 + if (!(phydev->duplex)) 3495 + tempval &= ~(MACCFG2_FULL_DUPLEX); 3496 + else 3497 + tempval |= MACCFG2_FULL_DUPLEX; 3498 + 3499 + priv->oldduplex = phydev->duplex; 3500 + } 3501 + 3502 + if (phydev->speed != priv->oldspeed) { 3503 + switch (phydev->speed) { 3504 + case 1000: 3505 + tempval = 3506 + ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); 3507 + 3508 + ecntrl &= ~(ECNTRL_R100); 3509 + break; 3510 + case 100: 3511 + case 10: 3512 + tempval = 3513 + ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); 3514 + 3515 + /* Reduced mode distinguishes 3516 + * between 10 and 100 3517 + */ 3518 + if (phydev->speed == SPEED_100) 3519 + ecntrl |= ECNTRL_R100; 3520 + else 3521 + ecntrl &= ~(ECNTRL_R100); 3522 + break; 3523 + default: 3524 + netif_warn(priv, link, priv->ndev, 3525 + "Ack! 
Speed (%d) is not 10/100/1000!\n", 3526 + phydev->speed); 3527 + break; 3528 + } 3529 + 3530 + priv->oldspeed = phydev->speed; 3531 + } 3532 + 3533 + tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 3534 + tempval1 |= gfar_get_flowctrl_cfg(priv); 3535 + 3536 + gfar_write(&regs->maccfg1, tempval1); 3537 + gfar_write(&regs->maccfg2, tempval); 3538 + gfar_write(&regs->ecntrl, ecntrl); 3539 + 3540 + if (!priv->oldlink) 3541 + priv->oldlink = 1; 3542 + 3543 + } else if (priv->oldlink) { 3544 + priv->oldlink = 0; 3545 + priv->oldspeed = 0; 3546 + priv->oldduplex = -1; 3547 + } 3548 + 3549 + if (netif_msg_link(priv)) 3550 + phy_print_status(phydev); 3338 3551 } 3339 3552 3340 3553 static struct of_device_id gfar_match[] =
+3
drivers/net/ethernet/freescale/gianfar_ethtool.c
··· 533 533 struct gfar __iomem *regs = priv->gfargrp[0].regs; 534 534 u32 oldadv, newadv; 535 535 536 + if (!phydev) 537 + return -ENODEV; 538 + 536 539 if (!(phydev->supported & SUPPORTED_Pause) || 537 540 (!(phydev->supported & SUPPORTED_Asym_Pause) && 538 541 (epause->rx_pause != epause->tx_pause)))
+42 -29
drivers/net/ethernet/intel/e1000e/ich8lan.c
··· 186 186 { 187 187 u16 phy_reg = 0; 188 188 u32 phy_id = 0; 189 - s32 ret_val; 189 + s32 ret_val = 0; 190 190 u16 retry_count; 191 191 u32 mac_reg = 0; 192 192 ··· 217 217 /* In case the PHY needs to be in mdio slow mode, 218 218 * set slow mode and try to get the PHY id again. 219 219 */ 220 - hw->phy.ops.release(hw); 221 - ret_val = e1000_set_mdio_slow_mode_hv(hw); 222 - if (!ret_val) 223 - ret_val = e1000e_get_phy_id(hw); 224 - hw->phy.ops.acquire(hw); 220 + if (hw->mac.type < e1000_pch_lpt) { 221 + hw->phy.ops.release(hw); 222 + ret_val = e1000_set_mdio_slow_mode_hv(hw); 223 + if (!ret_val) 224 + ret_val = e1000e_get_phy_id(hw); 225 + hw->phy.ops.acquire(hw); 226 + } 225 227 226 228 if (ret_val) 227 229 return false; ··· 844 842 } 845 843 } 846 844 845 + if (hw->phy.type == e1000_phy_82579) { 846 + ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, 847 + &data); 848 + if (ret_val) 849 + goto release; 850 + 851 + data &= ~I82579_LPI_100_PLL_SHUT; 852 + ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, 853 + data); 854 + } 855 + 847 856 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ 848 857 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); 849 858 if (ret_val) ··· 1327 1314 return ret_val; 1328 1315 } 1329 1316 1330 - /* When connected at 10Mbps half-duplex, 82579 parts are excessively 1317 + /* When connected at 10Mbps half-duplex, some parts are excessively 1331 1318 * aggressive resulting in many collisions. To avoid this, increase 1332 1319 * the IPG and reduce Rx latency in the PHY. 
1333 1320 */ 1334 - if ((hw->mac.type == e1000_pch2lan) && link) { 1321 + if (((hw->mac.type == e1000_pch2lan) || 1322 + (hw->mac.type == e1000_pch_lpt)) && link) { 1335 1323 u32 reg; 1336 1324 reg = er32(STATUS); 1337 1325 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { 1326 + u16 emi_addr; 1327 + 1338 1328 reg = er32(TIPG); 1339 1329 reg &= ~E1000_TIPG_IPGT_MASK; 1340 1330 reg |= 0xFF; ··· 1348 1332 if (ret_val) 1349 1333 return ret_val; 1350 1334 1351 - ret_val = 1352 - e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0); 1335 + if (hw->mac.type == e1000_pch2lan) 1336 + emi_addr = I82579_RX_CONFIG; 1337 + else 1338 + emi_addr = I217_RX_CONFIG; 1339 + 1340 + ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0); 1353 1341 1354 1342 hw->phy.ops.release(hw); 1355 1343 ··· 2513 2493 * e1000_k1_gig_workaround_lv - K1 Si workaround 2514 2494 * @hw: pointer to the HW structure 2515 2495 * 2516 - * Workaround to set the K1 beacon duration for 82579 parts 2496 + * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps 2497 + * Disable K1 in 1000Mbps and 100Mbps 2517 2498 **/ 2518 2499 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) 2519 2500 { 2520 2501 s32 ret_val = 0; 2521 2502 u16 status_reg = 0; 2522 - u32 mac_reg; 2523 - u16 phy_reg; 2524 2503 2525 2504 if (hw->mac.type != e1000_pch2lan) 2526 2505 return 0; 2527 2506 2528 - /* Set K1 beacon duration based on 1Gbps speed or otherwise */ 2507 + /* Set K1 beacon duration based on 10Mbs speed */ 2529 2508 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); 2530 2509 if (ret_val) 2531 2510 return ret_val; 2532 2511 2533 2512 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) 2534 2513 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { 2535 - mac_reg = er32(FEXTNVM4); 2536 - mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; 2537 - 2538 - ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); 2539 - if (ret_val) 2540 - return ret_val; 2541 - 2542 - if (status_reg & 
HV_M_STATUS_SPEED_1000) { 2514 + if (status_reg & 2515 + (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) { 2543 2516 u16 pm_phy_reg; 2544 2517 2545 - mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; 2546 - phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; 2547 - /* LV 1G Packet drop issue wa */ 2518 + /* LV 1G/100 Packet drop issue wa */ 2548 2519 ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg); 2549 2520 if (ret_val) 2550 2521 return ret_val; 2551 - pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA; 2522 + pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE; 2552 2523 ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg); 2553 2524 if (ret_val) 2554 2525 return ret_val; 2555 2526 } else { 2527 + u32 mac_reg; 2528 + 2529 + mac_reg = er32(FEXTNVM4); 2530 + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; 2556 2531 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; 2557 - phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; 2532 + ew32(FEXTNVM4, mac_reg); 2558 2533 } 2559 - ew32(FEXTNVM4, mac_reg); 2560 - ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); 2561 2534 } 2562 2535 2563 2536 return ret_val;
+3
drivers/net/ethernet/intel/e1000e/ich8lan.h
··· 232 232 #define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ 233 233 #define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ 234 234 #define I82579_RX_CONFIG 0x3412 /* Receive configuration */ 235 + #define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */ 235 236 #define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ 236 237 #define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ 237 238 #define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ 238 239 #define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ 239 240 #define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ 240 241 #define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ 242 + #define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */ 241 243 #define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ 242 244 #define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ 243 245 #define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ 244 246 #define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ 247 + #define I217_RX_CONFIG 0xB20C /* Receive configuration */ 245 248 246 249 #define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ 247 250 #define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */
+3 -3
drivers/net/ethernet/intel/e1000e/netdev.c
··· 1165 1165 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); 1166 1166 adapter->tx_hwtstamp_skb = NULL; 1167 1167 adapter->tx_hwtstamp_timeouts++; 1168 - e_warn("clearing Tx timestamp hang"); 1168 + e_warn("clearing Tx timestamp hang\n"); 1169 1169 } else { 1170 1170 /* reschedule to check later */ 1171 1171 schedule_work(&adapter->tx_hwtstamp_work); ··· 5687 5687 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 5688 5688 { 5689 5689 struct e1000_adapter *adapter = netdev_priv(netdev); 5690 - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5690 + int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN; 5691 5691 5692 5692 /* Jumbo frame support */ 5693 5693 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && ··· 6235 6235 return 0; 6236 6236 } 6237 6237 6238 + #ifdef CONFIG_PM_SLEEP 6238 6239 static int e1000e_pm_thaw(struct device *dev) 6239 6240 { 6240 6241 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); ··· 6256 6255 return 0; 6257 6256 } 6258 6257 6259 - #ifdef CONFIG_PM_SLEEP 6260 6258 static int e1000e_pm_suspend(struct device *dev) 6261 6259 { 6262 6260 struct pci_dev *pdev = to_pci_dev(dev);
+1
drivers/net/ethernet/intel/e1000e/phy.h
··· 164 164 #define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 165 165 #define HV_M_STATUS_SPEED_MASK 0x0300 166 166 #define HV_M_STATUS_SPEED_1000 0x0200 167 + #define HV_M_STATUS_SPEED_100 0x0100 167 168 #define HV_M_STATUS_LINK_UP 0x0040 168 169 169 170 #define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+10 -4
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 2897 2897 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); 2898 2898 2899 2899 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { 2900 - ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 2900 + icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 2901 2901 i40e_ptp_tx_hwtstamp(pf); 2902 - prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK; 2903 2902 } 2904 - 2905 - wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat); 2906 2903 } 2907 2904 2908 2905 /* If a critical error is pending we have no choice but to reset the ··· 4267 4270 err = i40e_vsi_open(vsi); 4268 4271 if (err) 4269 4272 return err; 4273 + 4274 + /* configure global TSO hardware offload settings */ 4275 + wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | 4276 + TCP_FLAG_FIN) >> 16); 4277 + wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | 4278 + TCP_FLAG_FIN | 4279 + TCP_FLAG_CWR) >> 16); 4280 + wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); 4270 4281 4271 4282 #ifdef CONFIG_I40E_VXLAN 4272 4283 vxlan_get_rx_port(netdev); ··· 6717 6712 NETIF_F_HW_VLAN_CTAG_FILTER | 6718 6713 NETIF_F_IPV6_CSUM | 6719 6714 NETIF_F_TSO | 6715 + NETIF_F_TSO_ECN | 6720 6716 NETIF_F_TSO6 | 6721 6717 NETIF_F_RXCSUM | 6722 6718 NETIF_F_NTUPLE |
+1 -1
drivers/net/ethernet/intel/i40e/i40e_nvm.c
··· 160 160 udelay(5); 161 161 } 162 162 if (ret_code == I40E_ERR_TIMEOUT) 163 - hw_dbg(hw, "Done bit in GLNVM_SRCTL not set"); 163 + hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n"); 164 164 return ret_code; 165 165 } 166 166
+2 -2
drivers/net/ethernet/intel/i40e/i40e_ptp.c
··· 239 239 dev_kfree_skb_any(pf->ptp_tx_skb); 240 240 pf->ptp_tx_skb = NULL; 241 241 pf->tx_hwtstamp_timeouts++; 242 - dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang"); 242 + dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n"); 243 243 return; 244 244 } 245 245 ··· 321 321 pf->last_rx_ptp_check = jiffies; 322 322 pf->rx_hwtstamp_cleared++; 323 323 dev_warn(&vsi->back->pdev->dev, 324 - "%s: clearing Rx timestamp hang", 324 + "%s: clearing Rx timestamp hang\n", 325 325 __func__); 326 326 } 327 327 }
+11 -11
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 418 418 } 419 419 break; 420 420 default: 421 - dev_info(&pf->pdev->dev, "Could not specify spec type %d", 421 + dev_info(&pf->pdev->dev, "Could not specify spec type %d\n", 422 422 input->flow_type); 423 423 ret = -EINVAL; 424 424 } ··· 478 478 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; 479 479 } 480 480 } else { 481 - dev_info(&pdev->dev, "FD filter programming error"); 481 + dev_info(&pdev->dev, "FD filter programming error\n"); 482 482 } 483 483 } else if (error == 484 484 (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { ··· 1713 1713 I40E_TX_FLAGS_VLAN_PRIO_SHIFT; 1714 1714 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) { 1715 1715 struct vlan_ethhdr *vhdr; 1716 - if (skb_header_cloned(skb) && 1717 - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 1718 - return -ENOMEM; 1716 + int rc; 1717 + 1718 + rc = skb_cow_head(skb, 0); 1719 + if (rc < 0) 1720 + return rc; 1719 1721 vhdr = (struct vlan_ethhdr *)skb->data; 1720 1722 vhdr->h_vlan_TCI = htons(tx_flags >> 1721 1723 I40E_TX_FLAGS_VLAN_SHIFT); ··· 1745 1743 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) 1746 1744 { 1747 1745 u32 cd_cmd, cd_tso_len, cd_mss; 1746 + struct ipv6hdr *ipv6h; 1748 1747 struct tcphdr *tcph; 1749 1748 struct iphdr *iph; 1750 1749 u32 l4len; 1751 1750 int err; 1752 - struct ipv6hdr *ipv6h; 1753 1751 1754 1752 if (!skb_is_gso(skb)) 1755 1753 return 0; 1756 1754 1757 - if (skb_header_cloned(skb)) { 1758 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1759 - if (err) 1760 - return err; 1761 - } 1755 + err = skb_cow_head(skb, 0); 1756 + if (err < 0) 1757 + return err; 1762 1758 1763 1759 if (protocol == htons(ETH_P_IP)) { 1764 1760 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+1 -1
drivers/net/ethernet/intel/igb/e1000_i210.c
··· 365 365 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); 366 366 if (word_address == address) { 367 367 *data = INVM_DWORD_TO_WORD_DATA(invm_dword); 368 - hw_dbg("Read INVM Word 0x%02x = %x", 368 + hw_dbg("Read INVM Word 0x%02x = %x\n", 369 369 address, *data); 370 370 status = E1000_SUCCESS; 371 371 break;
+6 -7
drivers/net/ethernet/intel/igb/e1000_mac.c
··· 929 929 */ 930 930 if (hw->fc.requested_mode == e1000_fc_full) { 931 931 hw->fc.current_mode = e1000_fc_full; 932 - hw_dbg("Flow Control = FULL.\r\n"); 932 + hw_dbg("Flow Control = FULL.\n"); 933 933 } else { 934 934 hw->fc.current_mode = e1000_fc_rx_pause; 935 - hw_dbg("Flow Control = " 936 - "RX PAUSE frames only.\r\n"); 935 + hw_dbg("Flow Control = RX PAUSE frames only.\n"); 937 936 } 938 937 } 939 938 /* For receiving PAUSE frames ONLY. ··· 947 948 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 948 949 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 949 950 hw->fc.current_mode = e1000_fc_tx_pause; 950 - hw_dbg("Flow Control = TX PAUSE frames only.\r\n"); 951 + hw_dbg("Flow Control = TX PAUSE frames only.\n"); 951 952 } 952 953 /* For transmitting PAUSE frames ONLY. 953 954 * ··· 961 962 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 962 963 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 963 964 hw->fc.current_mode = e1000_fc_rx_pause; 964 - hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); 965 + hw_dbg("Flow Control = RX PAUSE frames only.\n"); 965 966 } 966 967 /* Per the IEEE spec, at this point flow control should be 967 968 * disabled. However, we want to consider that we could ··· 987 988 (hw->fc.requested_mode == e1000_fc_tx_pause) || 988 989 (hw->fc.strict_ieee)) { 989 990 hw->fc.current_mode = e1000_fc_none; 990 - hw_dbg("Flow Control = NONE.\r\n"); 991 + hw_dbg("Flow Control = NONE.\n"); 991 992 } else { 992 993 hw->fc.current_mode = e1000_fc_rx_pause; 993 - hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); 994 + hw_dbg("Flow Control = RX PAUSE frames only.\n"); 994 995 } 995 996 996 997 /* Now we need to do one last check... If we auto-
+3 -1
drivers/net/ethernet/intel/igb/igb_main.c
··· 5193 5193 5194 5194 rcu_read_lock(); 5195 5195 for (i = 0; i < adapter->num_rx_queues; i++) { 5196 - u32 rqdpc = rd32(E1000_RQDPC(i)); 5197 5196 struct igb_ring *ring = adapter->rx_ring[i]; 5197 + u32 rqdpc = rd32(E1000_RQDPC(i)); 5198 + if (hw->mac.type >= e1000_i210) 5199 + wr32(E1000_RQDPC(i), 0); 5198 5200 5199 5201 if (rqdpc) { 5200 5202 ring->rx_stats.drops += rqdpc;
+2 -2
drivers/net/ethernet/intel/igb/igb_ptp.c
··· 389 389 adapter->ptp_tx_skb = NULL; 390 390 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); 391 391 adapter->tx_hwtstamp_timeouts++; 392 - dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang"); 392 + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); 393 393 return; 394 394 } 395 395 ··· 451 451 rd32(E1000_RXSTMPH); 452 452 adapter->last_rx_ptp_check = jiffies; 453 453 adapter->rx_hwtstamp_cleared++; 454 - dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang"); 454 + dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n"); 455 455 } 456 456 } 457 457
+2 -19
drivers/net/ethernet/intel/ixgbe/ixgbe.h
··· 256 256 struct ixgbe_tx_buffer *tx_buffer_info; 257 257 struct ixgbe_rx_buffer *rx_buffer_info; 258 258 }; 259 - unsigned long last_rx_timestamp; 260 259 unsigned long state; 261 260 u8 __iomem *tail; 262 261 dma_addr_t dma; /* phys. address of descriptor ring */ ··· 769 770 unsigned long ptp_tx_start; 770 771 unsigned long last_overflow_check; 771 772 unsigned long last_rx_ptp_check; 773 + unsigned long last_rx_timestamp; 772 774 spinlock_t tmreg_lock; 773 775 struct cyclecounter cc; 774 776 struct timecounter tc; ··· 944 944 void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); 945 945 void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); 946 946 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); 947 - void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, 948 - struct sk_buff *skb); 949 - static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, 950 - union ixgbe_adv_rx_desc *rx_desc, 951 - struct sk_buff *skb) 952 - { 953 - if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) 954 - return; 955 - 956 - __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); 957 - 958 - /* 959 - * Update the last_rx_timestamp timer in order to enable watchdog check 960 - * for error case of latched timestamp on a dropped packet. 961 - */ 962 - rx_ring->last_rx_timestamp = jiffies; 963 - } 964 - 947 + void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb); 965 948 int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); 966 949 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); 967 950 void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
··· 1195 1195 */ 1196 1196 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; 1197 1197 1198 - hw_dbg(hw, "Detected EEPROM page size = %d words.", 1198 + hw_dbg(hw, "Detected EEPROM page size = %d words.\n", 1199 1199 hw->eeprom.word_page_size); 1200 1200 out: 1201 1201 return status;
+2 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 1664 1664 1665 1665 ixgbe_rx_checksum(rx_ring, rx_desc, skb); 1666 1666 1667 - ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); 1667 + if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) 1668 + ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); 1668 1669 1669 1670 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1670 1671 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
+3 -3
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
··· 536 536 537 537 if (time_out == max_time_out) { 538 538 status = IXGBE_ERR_LINK_SETUP; 539 - hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out"); 539 + hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n"); 540 540 } 541 541 542 542 return status; ··· 745 745 746 746 if (time_out == max_time_out) { 747 747 status = IXGBE_ERR_LINK_SETUP; 748 - hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out"); 748 + hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n"); 749 749 } 750 750 751 751 return status; ··· 1175 1175 status = 0; 1176 1176 } else { 1177 1177 if (hw->allow_unsupported_sfp) { 1178 - e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules."); 1178 + e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); 1179 1179 status = 0; 1180 1180 } else { 1181 1181 hw_dbg(hw,
+13 -27
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
··· 435 435 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) 436 436 { 437 437 struct ixgbe_hw *hw = &adapter->hw; 438 - struct ixgbe_ring *rx_ring; 439 438 u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 440 439 unsigned long rx_event; 441 - int n; 442 440 443 441 /* if we don't have a valid timestamp in the registers, just update the 444 442 * timeout counter and exit ··· 448 450 449 451 /* determine the most recent watchdog or rx_timestamp event */ 450 452 rx_event = adapter->last_rx_ptp_check; 451 - for (n = 0; n < adapter->num_rx_queues; n++) { 452 - rx_ring = adapter->rx_ring[n]; 453 - if (time_after(rx_ring->last_rx_timestamp, rx_event)) 454 - rx_event = rx_ring->last_rx_timestamp; 455 - } 453 + if (time_after(adapter->last_rx_timestamp, rx_event)) 454 + rx_event = adapter->last_rx_timestamp; 456 455 457 456 /* only need to read the high RXSTMP register to clear the lock */ 458 457 if (time_is_before_jiffies(rx_event + 5*HZ)) { 459 458 IXGBE_READ_REG(hw, IXGBE_RXSTMPH); 460 459 adapter->last_rx_ptp_check = jiffies; 461 460 462 - e_warn(drv, "clearing RX Timestamp hang"); 461 + e_warn(drv, "clearing RX Timestamp hang\n"); 463 462 } 464 463 } 465 464 ··· 512 517 dev_kfree_skb_any(adapter->ptp_tx_skb); 513 518 adapter->ptp_tx_skb = NULL; 514 519 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); 515 - e_warn(drv, "clearing Tx Timestamp hang"); 520 + e_warn(drv, "clearing Tx Timestamp hang\n"); 516 521 return; 517 522 } 518 523 ··· 525 530 } 526 531 527 532 /** 528 - * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp 529 - * @q_vector: structure containing interrupt and ring information 533 + * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp 534 + * @adapter: pointer to adapter struct 530 535 * @skb: particular skb to send timestamp with 531 536 * 532 537 * if the timestamp is valid, we convert it into the timecounter ns 533 538 * value, then store that result into the shhwtstamps structure 
which 534 539 * is passed up the network stack 535 540 */ 536 - void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, 537 - struct sk_buff *skb) 541 + void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb) 538 542 { 539 - struct ixgbe_adapter *adapter; 540 - struct ixgbe_hw *hw; 543 + struct ixgbe_hw *hw = &adapter->hw; 541 544 struct skb_shared_hwtstamps *shhwtstamps; 542 545 u64 regval = 0, ns; 543 546 u32 tsyncrxctl; 544 547 unsigned long flags; 545 548 546 - /* we cannot process timestamps on a ring without a q_vector */ 547 - if (!q_vector || !q_vector->adapter) 548 - return; 549 - 550 - adapter = q_vector->adapter; 551 - hw = &adapter->hw; 552 - 553 - /* 554 - * Read the tsyncrxctl register afterwards in order to prevent taking an 555 - * I/O hit on every packet. 556 - */ 557 549 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 558 550 if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) 559 551 return; ··· 548 566 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 549 567 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; 550 568 551 - 552 569 spin_lock_irqsave(&adapter->tmreg_lock, flags); 553 570 ns = timecounter_cyc2time(&adapter->tc, regval); 554 571 spin_unlock_irqrestore(&adapter->tmreg_lock, flags); 555 572 556 573 shhwtstamps = skb_hwtstamps(skb); 557 574 shhwtstamps->hwtstamp = ns_to_ktime(ns); 575 + 576 + /* Update the last_rx_timestamp timer in order to enable watchdog check 577 + * for error case of latched timestamp on a dropped packet. 578 + */ 579 + adapter->last_rx_timestamp = jiffies; 558 580 } 559 581 560 582 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+4 -1
drivers/net/ethernet/marvell/mvmdio.c
··· 232 232 clk_prepare_enable(dev->clk); 233 233 234 234 dev->err_interrupt = platform_get_irq(pdev, 0); 235 - if (dev->err_interrupt != -ENXIO) { 235 + if (dev->err_interrupt > 0) { 236 236 ret = devm_request_irq(&pdev->dev, dev->err_interrupt, 237 237 orion_mdio_err_irq, 238 238 IRQF_SHARED, pdev->name, dev); ··· 241 241 242 242 writel(MVMDIO_ERR_INT_SMI_DONE, 243 243 dev->regs + MVMDIO_ERR_INT_MASK); 244 + 245 + } else if (dev->err_interrupt == -EPROBE_DEFER) { 246 + return -EPROBE_DEFER; 244 247 } 245 248 246 249 mutex_init(&dev->lock);
+4 -3
drivers/net/ethernet/mellanox/mlx4/main.c
··· 754 754 has_eth_port = true; 755 755 } 756 756 757 - if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 758 - request_module_nowait(IB_DRV_NAME); 759 757 if (has_eth_port) 760 758 request_module_nowait(EN_DRV_NAME); 759 + if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 760 + request_module_nowait(IB_DRV_NAME); 761 761 } 762 762 763 763 /* ··· 2440 2440 * No return code for this call, just warn the user in case of PCI 2441 2441 * express device capabilities are under-satisfied by the bus. 2442 2442 */ 2443 - mlx4_check_pcie_caps(dev); 2443 + if (!mlx4_is_slave(dev)) 2444 + mlx4_check_pcie_caps(dev); 2444 2445 2445 2446 /* In master functions, the communication channel must be initialized 2446 2447 * after obtaining its address from fw */
+20 -15
drivers/net/ethernet/mellanox/mlx4/port.c
··· 1106 1106 } 1107 1107 1108 1108 if (found_ix >= 0) { 1109 + /* Calculate a slave_gid which is the slave number in the gid 1110 + * table and not a globally unique slave number. 1111 + */ 1109 1112 if (found_ix < MLX4_ROCE_PF_GIDS) 1110 1113 slave_gid = 0; 1111 1114 else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) * ··· 1121 1118 ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) / 1122 1119 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1; 1123 1120 1121 + /* Calculate the globally unique slave id */ 1124 1122 if (slave_gid) { 1125 1123 struct mlx4_active_ports exclusive_ports; 1126 1124 struct mlx4_active_ports actv_ports; 1127 1125 struct mlx4_slaves_pport slaves_pport_actv; 1128 1126 unsigned max_port_p_one; 1129 - int num_slaves_before = 1; 1127 + int num_vfs_before = 0; 1128 + int candidate_slave_gid; 1130 1129 1130 + /* Calculate how many VFs are on the previous port, if exists */ 1131 1131 for (i = 1; i < port; i++) { 1132 1132 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); 1133 - set_bit(i, exclusive_ports.ports); 1133 + set_bit(i - 1, exclusive_ports.ports); 1134 1134 slaves_pport_actv = 1135 1135 mlx4_phys_to_slaves_pport_actv( 1136 1136 dev, &exclusive_ports); 1137 - num_slaves_before += bitmap_weight( 1137 + num_vfs_before += bitmap_weight( 1138 1138 slaves_pport_actv.slaves, 1139 1139 dev->num_vfs + 1); 1140 1140 } 1141 1141 1142 - if (slave_gid < num_slaves_before) { 1143 - bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); 1144 - set_bit(port - 1, exclusive_ports.ports); 1145 - slaves_pport_actv = 1146 - mlx4_phys_to_slaves_pport_actv( 1147 - dev, &exclusive_ports); 1148 - slave_gid += bitmap_weight( 1149 - slaves_pport_actv.slaves, 1150 - dev->num_vfs + 1) - 1151 - num_slaves_before; 1152 - } 1153 - actv_ports = mlx4_get_active_ports(dev, slave_gid); 1142 + /* candidate_slave_gid isn't necessarily the correct slave, but 1143 + * it has the same number of ports and is assigned to the same 1144 + * ports as the real slave 
we're looking for. On dual port VF, 1145 + * slave_gid = [single port VFs on port <port>] + 1146 + * [offset of the current slave from the first dual port VF] + 1147 + * 1 (for the PF). 1148 + */ 1149 + candidate_slave_gid = slave_gid + num_vfs_before; 1150 + 1151 + actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid); 1154 1152 max_port_p_one = find_first_bit( 1155 1153 actv_ports.ports, dev->caps.num_ports) + 1156 1154 bitmap_weight(actv_ports.ports, 1157 1155 dev->caps.num_ports) + 1; 1158 1156 1157 + /* Calculate the real slave number */ 1159 1158 for (i = 1; i < max_port_p_one; i++) { 1160 1159 if (i == port) 1161 1160 continue;
+23
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 3733 3733 } 3734 3734 } 3735 3735 3736 + static int mlx4_adjust_port(struct mlx4_dev *dev, int slave, 3737 + u8 *gid, enum mlx4_protocol prot) 3738 + { 3739 + int real_port; 3740 + 3741 + if (prot != MLX4_PROT_ETH) 3742 + return 0; 3743 + 3744 + if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 || 3745 + dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 3746 + real_port = mlx4_slave_convert_port(dev, slave, gid[5]); 3747 + if (real_port < 0) 3748 + return -EINVAL; 3749 + gid[5] = real_port; 3750 + } 3751 + 3752 + return 0; 3753 + } 3754 + 3736 3755 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, 3737 3756 struct mlx4_vhcr *vhcr, 3738 3757 struct mlx4_cmd_mailbox *inbox, ··· 3787 3768 if (err) 3788 3769 goto ex_detach; 3789 3770 } else { 3771 + err = mlx4_adjust_port(dev, slave, gid, prot); 3772 + if (err) 3773 + goto ex_put; 3774 + 3790 3775 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id); 3791 3776 if (err) 3792 3777 goto ex_put;
+9
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
··· 2374 2374 qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd); 2375 2375 } 2376 2376 2377 + /* Reset firmware API lock */ 2378 + static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter) 2379 + { 2380 + qlcnic_api_lock(adapter); 2381 + qlcnic_api_unlock(adapter); 2382 + } 2383 + 2384 + 2377 2385 static int 2378 2386 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2379 2387 { ··· 2484 2476 if (qlcnic_82xx_check(adapter)) { 2485 2477 qlcnic_check_vf(adapter, ent); 2486 2478 adapter->portnum = adapter->ahw->pci_func; 2479 + qlcnic_reset_api_lock(adapter); 2487 2480 err = qlcnic_start_firmware(adapter); 2488 2481 if (err) { 2489 2482 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
+8 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
··· 1370 1370 1371 1371 rsp = qlcnic_sriov_alloc_bc_trans(&trans); 1372 1372 if (rsp) 1373 - return rsp; 1373 + goto free_cmd; 1374 1374 1375 1375 rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND); 1376 1376 if (rsp) ··· 1425 1425 1426 1426 cleanup_transaction: 1427 1427 qlcnic_sriov_cleanup_transaction(trans); 1428 + 1429 + free_cmd: 1430 + if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) { 1431 + qlcnic_free_mbx_args(cmd); 1432 + kfree(cmd); 1433 + } 1434 + 1428 1435 return rsp; 1429 1436 } 1430 1437
+2
drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
··· 358 358 /* Enable disable checksum offload operations */ 359 359 void (*enable_rx_csum)(void __iomem *ioaddr); 360 360 void (*disable_rx_csum)(void __iomem *ioaddr); 361 + void (*enable_rxqueue)(void __iomem *ioaddr, int queue_num); 362 + void (*disable_rxqueue)(void __iomem *ioaddr, int queue_num); 361 363 }; 362 364 363 365 const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
+22
drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
··· 165 165 writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG); 166 166 } 167 167 168 + static void sxgbe_core_enable_rxqueue(void __iomem *ioaddr, int queue_num) 169 + { 170 + u32 reg_val; 171 + 172 + reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG); 173 + reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num); 174 + reg_val |= SXGBE_CORE_RXQ_ENABLE; 175 + writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG); 176 + } 177 + 178 + static void sxgbe_core_disable_rxqueue(void __iomem *ioaddr, int queue_num) 179 + { 180 + u32 reg_val; 181 + 182 + reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG); 183 + reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num); 184 + reg_val |= SXGBE_CORE_RXQ_DISABLE; 185 + writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG); 186 + } 187 + 168 188 static void sxgbe_set_eee_mode(void __iomem *ioaddr) 169 189 { 170 190 u32 ctrl; ··· 274 254 .set_eee_pls = sxgbe_set_eee_pls, 275 255 .enable_rx_csum = sxgbe_enable_rx_csum, 276 256 .disable_rx_csum = sxgbe_disable_rx_csum, 257 + .enable_rxqueue = sxgbe_core_enable_rxqueue, 258 + .disable_rxqueue = sxgbe_core_disable_rxqueue, 277 259 }; 278 260 279 261 const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
+9 -2
drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
··· 45 45 p->tdes23.tx_rd_des23.first_desc = is_fd; 46 46 p->tdes23.tx_rd_des23.buf1_size = buf1_len; 47 47 48 - p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len; 48 + p->tdes23.tx_rd_des23.tx_pkt_len.pkt_len.total_pkt_len = pkt_len; 49 49 50 50 if (cksum) 51 - p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full; 51 + p->tdes23.tx_rd_des23.cksum_ctl = cic_full; 52 52 } 53 53 54 54 /* Set VLAN control information */ ··· 231 231 static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p) 232 232 { 233 233 p->rdes23.rx_rd_des23.own_bit = 1; 234 + } 235 + 236 + /* Set Interrupt on completion bit */ 237 + static void sxgbe_set_rx_int_on_com(struct sxgbe_rx_norm_desc *p) 238 + { 239 + p->rdes23.rx_rd_des23.int_on_com = 1; 234 240 } 235 241 236 242 /* Get the receive frame size */ ··· 504 498 .init_rx_desc = sxgbe_init_rx_desc, 505 499 .get_rx_owner = sxgbe_get_rx_owner, 506 500 .set_rx_owner = sxgbe_set_rx_owner, 501 + .set_rx_int_on_com = sxgbe_set_rx_int_on_com, 507 502 .get_rx_frame_len = sxgbe_get_rx_frame_len, 508 503 .get_rx_fd_status = sxgbe_get_rx_fd_status, 509 504 .get_rx_ld_status = sxgbe_get_rx_ld_status,
+20 -22
drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
··· 39 39 u32 int_on_com:1; 40 40 /* TDES3 */ 41 41 union { 42 - u32 tcp_payload_len:18; 42 + u16 tcp_payload_len; 43 43 struct { 44 44 u32 total_pkt_len:15; 45 45 u32 reserved1:1; 46 - u32 cksum_ctl:2; 47 - } cksum_pktlen; 46 + } pkt_len; 48 47 } tx_pkt_len; 49 48 50 - u32 tse_bit:1; 51 - u32 tcp_hdr_len:4; 52 - u32 sa_insert_ctl:3; 53 - u32 crc_pad_ctl:2; 54 - u32 last_desc:1; 55 - u32 first_desc:1; 56 - u32 ctxt_bit:1; 57 - u32 own_bit:1; 49 + u16 cksum_ctl:2; 50 + u16 tse_bit:1; 51 + u16 tcp_hdr_len:4; 52 + u16 sa_insert_ctl:3; 53 + u16 crc_pad_ctl:2; 54 + u16 last_desc:1; 55 + u16 first_desc:1; 56 + u16 ctxt_bit:1; 57 + u16 own_bit:1; 58 58 } tx_rd_des23; 59 59 60 60 /* tx write back Desc 2,3 */ ··· 70 70 71 71 struct sxgbe_rx_norm_desc { 72 72 union { 73 - u32 rdes0; /* buf1 address */ 74 - struct { 73 + u64 rdes01; /* buf1 address */ 74 + union { 75 75 u32 out_vlan_tag:16; 76 76 u32 in_vlan_tag:16; 77 - } wb_rx_des0; 78 - } rd_wb_des0; 79 - 80 - union { 81 - u32 rdes1; /* buf2 address or buf1[63:32] */ 82 - u32 rss_hash; /* Write-back RX */ 83 - } rd_wb_des1; 77 + u32 rss_hash; 78 + } rx_wb_des01; 79 + } rdes01; 84 80 85 81 union { 86 82 /* RX Read format Desc 2,3 */ 87 83 struct{ 88 84 /* RDES2 */ 89 - u32 buf2_addr; 85 + u64 buf2_addr:62; 90 86 /* RDES3 */ 91 - u32 buf2_hi_addr:30; 92 87 u32 int_on_com:1; 93 88 u32 own_bit:1; 94 89 } rx_rd_des23; ··· 257 262 258 263 /* Set own bit */ 259 264 void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p); 265 + 266 + /* Set Interrupt on completion bit */ 267 + void (*set_rx_int_on_com)(struct sxgbe_rx_norm_desc *p); 260 268 261 269 /* Get the receive frame size */ 262 270 int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
-13
drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
··· 23 23 /* DMA core initialization */ 24 24 static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map) 25 25 { 26 - int retry_count = 10; 27 26 u32 reg_val; 28 - 29 - /* reset the DMA */ 30 - writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG); 31 - while (retry_count--) { 32 - if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) & 33 - SXGBE_DMA_SOFT_RESET)) 34 - break; 35 - mdelay(10); 36 - } 37 - 38 - if (retry_count < 0) 39 - return -EBUSY; 40 27 41 28 reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG); 42 29
+31
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
··· 1076 1076 1077 1077 /* Initialize the MAC Core */ 1078 1078 priv->hw->mac->core_init(priv->ioaddr); 1079 + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { 1080 + priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num); 1081 + } 1079 1082 1080 1083 /* Request the IRQ lines */ 1081 1084 ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt, ··· 1456 1453 /* Added memory barrier for RX descriptor modification */ 1457 1454 wmb(); 1458 1455 priv->hw->desc->set_rx_owner(p); 1456 + priv->hw->desc->set_rx_int_on_com(p); 1459 1457 /* Added memory barrier for RX descriptor modification */ 1460 1458 wmb(); 1461 1459 } ··· 2074 2070 return 0; 2075 2071 } 2076 2072 2073 + static int sxgbe_sw_reset(void __iomem *addr) 2074 + { 2075 + int retry_count = 10; 2076 + 2077 + writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG); 2078 + while (retry_count--) { 2079 + if (!(readl(addr + SXGBE_DMA_MODE_REG) & 2080 + SXGBE_DMA_SOFT_RESET)) 2081 + break; 2082 + mdelay(10); 2083 + } 2084 + 2085 + if (retry_count < 0) 2086 + return -EBUSY; 2087 + 2088 + return 0; 2089 + } 2090 + 2077 2091 /** 2078 2092 * sxgbe_drv_probe 2079 2093 * @device: device pointer ··· 2123 2101 sxgbe_set_ethtool_ops(ndev); 2124 2102 priv->plat = plat_dat; 2125 2103 priv->ioaddr = addr; 2104 + 2105 + ret = sxgbe_sw_reset(priv->ioaddr); 2106 + if (ret) 2107 + goto error_free_netdev; 2126 2108 2127 2109 /* Verify driver arguments */ 2128 2110 sxgbe_verify_args(); ··· 2244 2218 int sxgbe_drv_remove(struct net_device *ndev) 2245 2219 { 2246 2220 struct sxgbe_priv_data *priv = netdev_priv(ndev); 2221 + u8 queue_num; 2247 2222 2248 2223 netdev_info(ndev, "%s: removing driver\n", __func__); 2224 + 2225 + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { 2226 + priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num); 2227 + } 2249 2228 2250 2229 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); 2251 2230 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
+12 -2
drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
··· 27 27 #define SXGBE_SMA_PREAD_CMD 0x02 /* post read increament address */ 28 28 #define SXGBE_SMA_READ_CMD 0x03 /* read command */ 29 29 #define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */ 30 - #define SXGBE_MII_BUSY 0x00800000 /* mii busy */ 30 + #define SXGBE_MII_BUSY 0x00400000 /* mii busy */ 31 31 32 32 static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data) 33 33 { ··· 147 147 struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data; 148 148 int err, phy_addr; 149 149 int *irqlist; 150 + bool phy_found = false; 150 151 bool act; 151 152 152 153 /* allocate the new mdio bus */ ··· 163 162 irqlist = priv->mii_irq; 164 163 165 164 /* assign mii bus fields */ 166 - mdio_bus->name = "samsxgbe"; 165 + mdio_bus->name = "sxgbe"; 167 166 mdio_bus->read = &sxgbe_mdio_read; 168 167 mdio_bus->write = &sxgbe_mdio_write; 169 168 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x", ··· 217 216 netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", 218 217 phy->phy_id, phy_addr, irq_str, 219 218 dev_name(&phy->dev), act ? " active" : ""); 219 + phy_found = true; 220 220 } 221 + } 222 + 223 + if (!phy_found) { 224 + netdev_err(ndev, "PHY not found\n"); 225 + goto phyfound_err; 221 226 } 222 227 223 228 priv->mii = mdio_bus; 224 229 225 230 return 0; 226 231 232 + phyfound_err: 233 + err = -ENODEV; 234 + mdiobus_unregister(mdio_bus); 227 235 mdiobus_err: 228 236 mdiobus_free(mdio_bus); 229 237 return err;
+4
drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
··· 52 52 #define SXGBE_CORE_RX_CTL2_REG 0x00A8 53 53 #define SXGBE_CORE_RX_CTL3_REG 0x00AC 54 54 55 + #define SXGBE_CORE_RXQ_ENABLE_MASK 0x0003 56 + #define SXGBE_CORE_RXQ_ENABLE 0x0002 57 + #define SXGBE_CORE_RXQ_DISABLE 0x0000 58 + 55 59 /* Interrupt Registers */ 56 60 #define SXGBE_CORE_INT_STATUS_REG 0x00B0 57 61 #define SXGBE_CORE_INT_ENABLE_REG 0x00B4
+13 -12
drivers/net/ethernet/smsc/smc91x.c
··· 147 147 */ 148 148 #define MII_DELAY 1 149 149 150 - #if SMC_DEBUG > 0 151 - #define DBG(n, dev, args...) \ 152 - do { \ 153 - if (SMC_DEBUG >= (n)) \ 154 - netdev_dbg(dev, args); \ 150 + #define DBG(n, dev, fmt, ...) \ 151 + do { \ 152 + if (SMC_DEBUG >= (n)) \ 153 + netdev_dbg(dev, fmt, ##__VA_ARGS__); \ 155 154 } while (0) 156 155 157 - #define PRINTK(dev, args...) netdev_info(dev, args) 158 - #else 159 - #define DBG(n, dev, args...) do { } while (0) 160 - #define PRINTK(dev, args...) netdev_dbg(dev, args) 161 - #endif 156 + #define PRINTK(dev, fmt, ...) \ 157 + do { \ 158 + if (SMC_DEBUG > 0) \ 159 + netdev_info(dev, fmt, ##__VA_ARGS__); \ 160 + else \ 161 + netdev_dbg(dev, fmt, ##__VA_ARGS__); \ 162 + } while (0) 162 163 163 164 #if SMC_DEBUG > 3 164 165 static void PRINT_PKT(u_char *buf, int length) ··· 192 191 pr_cont("\n"); 193 192 } 194 193 #else 195 - #define PRINT_PKT(x...) do { } while (0) 194 + static inline void PRINT_PKT(u_char *buf, int length) { } 196 195 #endif 197 196 198 197 ··· 1782 1781 int timeout = 20; 1783 1782 unsigned long cookie; 1784 1783 1785 - DBG(2, dev, "%s: %s\n", CARDNAME, __func__); 1784 + DBG(2, lp->dev, "%s: %s\n", CARDNAME, __func__); 1786 1785 1787 1786 cookie = probe_irq_on(); 1788 1787
+4
drivers/net/hyperv/netvsc_drv.c
··· 382 382 if (skb_is_gso(skb)) 383 383 goto do_lso; 384 384 385 + if ((skb->ip_summed == CHECKSUM_NONE) || 386 + (skb->ip_summed == CHECKSUM_UNNECESSARY)) 387 + goto do_send; 388 + 385 389 rndis_msg_size += NDIS_CSUM_PPI_SIZE; 386 390 ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, 387 391 TCPIP_CHKSUM_PKTINFO);
-3
drivers/net/macvlan.c
··· 263 263 const struct macvlan_dev *vlan = netdev_priv(dev); 264 264 const struct macvlan_port *port = vlan->port; 265 265 const struct macvlan_dev *dest; 266 - __u8 ip_summed = skb->ip_summed; 267 266 268 267 if (vlan->mode == MACVLAN_MODE_BRIDGE) { 269 268 const struct ethhdr *eth = (void *)skb->data; 270 - skb->ip_summed = CHECKSUM_UNNECESSARY; 271 269 272 270 /* send to other bridge ports directly */ 273 271 if (is_multicast_ether_addr(eth->h_dest)) { ··· 283 285 } 284 286 285 287 xmit_world: 286 - skb->ip_summed = ip_summed; 287 288 skb->dev = vlan->lowerdev; 288 289 return dev_queue_xmit(skb); 289 290 }
+9
drivers/net/macvtap.c
··· 322 322 segs = nskb; 323 323 } 324 324 } else { 325 + /* If we receive a partial checksum and the tap side 326 + * doesn't support checksum offload, compute the checksum. 327 + * Note: it doesn't matter which checksum feature to 328 + * check, we either support them all or none. 329 + */ 330 + if (skb->ip_summed == CHECKSUM_PARTIAL && 331 + !(features & NETIF_F_ALL_CSUM) && 332 + skb_checksum_help(skb)) 333 + goto drop; 325 334 skb_queue_tail(&q->sk.sk_receive_queue, skb); 326 335 } 327 336
+3 -3
drivers/net/phy/micrel.c
··· 246 246 if (val1 != -1) 247 247 newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0); 248 248 249 - if (val2 != -1) 249 + if (val2 != -2) 250 250 newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4); 251 251 252 - if (val3 != -1) 252 + if (val3 != -3) 253 253 newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8); 254 254 255 - if (val4 != -1) 255 + if (val4 != -4) 256 256 newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12); 257 257 258 258 return kszphy_extended_write(phydev, reg, newval);
+11
drivers/net/phy/phy.c
··· 765 765 break; 766 766 767 767 if (phydev->link) { 768 + if (AUTONEG_ENABLE == phydev->autoneg) { 769 + err = phy_aneg_done(phydev); 770 + if (err < 0) 771 + break; 772 + 773 + if (!err) { 774 + phydev->state = PHY_AN; 775 + phydev->link_timeout = PHY_AN_TIMEOUT; 776 + break; 777 + } 778 + } 768 779 phydev->state = PHY_RUNNING; 769 780 netif_carrier_on(phydev->attached_dev); 770 781 phydev->adjust_link(phydev->attached_dev);
+3 -3
drivers/net/slip/slip.c
··· 429 429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) 430 430 return; 431 431 432 - spin_lock(&sl->lock); 432 + spin_lock_bh(&sl->lock); 433 433 if (sl->xleft <= 0) { 434 434 /* Now serial buffer is almost free & we can start 435 435 * transmission of another packet */ 436 436 sl->dev->stats.tx_packets++; 437 437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 438 - spin_unlock(&sl->lock); 438 + spin_unlock_bh(&sl->lock); 439 439 sl_unlock(sl); 440 440 return; 441 441 } ··· 443 443 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 444 444 sl->xleft -= actual; 445 445 sl->xhead += actual; 446 - spin_unlock(&sl->lock); 446 + spin_unlock_bh(&sl->lock); 447 447 } 448 448 449 449 static void sl_tx_timeout(struct net_device *dev)
+2
drivers/net/team/team.c
··· 2834 2834 case NETDEV_UP: 2835 2835 if (netif_carrier_ok(dev)) 2836 2836 team_port_change_check(port, true); 2837 + break; 2837 2838 case NETDEV_DOWN: 2838 2839 team_port_change_check(port, false); 2840 + break; 2839 2841 case NETDEV_CHANGE: 2840 2842 if (netif_running(port->dev)) 2841 2843 team_port_change_check(port,
+1 -1
drivers/net/usb/cdc_ncm.c
··· 785 785 skb_out->len > CDC_NCM_MIN_TX_PKT) 786 786 memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, 787 787 ctx->tx_max - skb_out->len); 788 - else if ((skb_out->len % dev->maxpacket) == 0) 788 + else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) 789 789 *skb_put(skb_out, 1) = 0; /* force short packet */ 790 790 791 791 /* set final frame length */
+28
drivers/net/usb/qmi_wwan.c
··· 669 669 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 670 670 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 671 671 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 672 + {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ 673 + {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ 674 + {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ 675 + {QMI_FIXED_INTF(0x16d8, 0x6280, 0)}, /* CMOTech CHU-628 */ 676 + {QMI_FIXED_INTF(0x16d8, 0x7001, 0)}, /* CMOTech CHU-720S */ 677 + {QMI_FIXED_INTF(0x16d8, 0x7002, 0)}, /* CMOTech 7002 */ 678 + {QMI_FIXED_INTF(0x16d8, 0x7003, 4)}, /* CMOTech CHU-629K */ 679 + {QMI_FIXED_INTF(0x16d8, 0x7004, 3)}, /* CMOTech 7004 */ 680 + {QMI_FIXED_INTF(0x16d8, 0x7006, 5)}, /* CMOTech CGU-629 */ 681 + {QMI_FIXED_INTF(0x16d8, 0x700a, 4)}, /* CMOTech CHU-629S */ 682 + {QMI_FIXED_INTF(0x16d8, 0x7211, 0)}, /* CMOTech CHU-720I */ 683 + {QMI_FIXED_INTF(0x16d8, 0x7212, 0)}, /* CMOTech 7212 */ 684 + {QMI_FIXED_INTF(0x16d8, 0x7213, 0)}, /* CMOTech 7213 */ 685 + {QMI_FIXED_INTF(0x16d8, 0x7251, 1)}, /* CMOTech 7251 */ 686 + {QMI_FIXED_INTF(0x16d8, 0x7252, 1)}, /* CMOTech 7252 */ 687 + {QMI_FIXED_INTF(0x16d8, 0x7253, 1)}, /* CMOTech 7253 */ 672 688 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, 673 689 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, 674 690 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, ··· 746 730 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 747 731 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 748 732 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ 733 + {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */ 734 + {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */ 735 + {QMI_FIXED_INTF(0x1199, 0x68c0, 11)}, /* Sierra Wireless MC73xx */ 749 736 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 737 + {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ 738 + {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra 
Wireless MC7305/MC7355 */ 750 739 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ 751 740 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 741 + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 752 742 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 753 743 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 754 744 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 755 745 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 756 746 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ 747 + {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ 757 748 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 758 749 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ 750 + {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ 751 + {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ 752 + {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 753 + {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ 754 + {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 759 755 760 756 /* 4. Gobi 1000 devices */ 761 757 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+1 -1
drivers/net/virtio_net.c
··· 1285 1285 if (channels->rx_count || channels->tx_count || channels->other_count) 1286 1286 return -EINVAL; 1287 1287 1288 - if (queue_pairs > vi->max_queue_pairs) 1288 + if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) 1289 1289 return -EINVAL; 1290 1290 1291 1291 get_online_cpus();
+21 -17
drivers/net/vxlan.c
··· 389 389 + nla_total_size(sizeof(struct nda_cacheinfo)); 390 390 } 391 391 392 - static void vxlan_fdb_notify(struct vxlan_dev *vxlan, 393 - struct vxlan_fdb *fdb, int type) 392 + static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, 393 + struct vxlan_rdst *rd, int type) 394 394 { 395 395 struct net *net = dev_net(vxlan->dev); 396 396 struct sk_buff *skb; ··· 400 400 if (skb == NULL) 401 401 goto errout; 402 402 403 - err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, 404 - first_remote_rtnl(fdb)); 403 + err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd); 405 404 if (err < 0) { 406 405 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */ 407 406 WARN_ON(err == -EMSGSIZE); ··· 426 427 .remote_vni = VXLAN_N_VID, 427 428 }; 428 429 429 - INIT_LIST_HEAD(&f.remotes); 430 - list_add_rcu(&remote.list, &f.remotes); 431 - 432 - vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH); 430 + vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); 433 431 } 434 432 435 433 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) ··· 434 438 struct vxlan_fdb f = { 435 439 .state = NUD_STALE, 436 440 }; 441 + struct vxlan_rdst remote = { }; 437 442 438 - INIT_LIST_HEAD(&f.remotes); 439 443 memcpy(f.eth_addr, eth_addr, ETH_ALEN); 440 444 441 - vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH); 445 + vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); 442 446 } 443 447 444 448 /* Hash Ethernet address */ ··· 529 533 530 534 /* Add/update destinations for multicast */ 531 535 static int vxlan_fdb_append(struct vxlan_fdb *f, 532 - union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex) 536 + union vxlan_addr *ip, __be16 port, __u32 vni, 537 + __u32 ifindex, struct vxlan_rdst **rdp) 533 538 { 534 539 struct vxlan_rdst *rd; 535 540 ··· 548 551 549 552 list_add_tail_rcu(&rd->list, &f->remotes); 550 553 554 + *rdp = rd; 551 555 return 1; 552 556 } 553 557 ··· 688 690 __be16 port, __u32 vni, __u32 ifindex, 689 691 __u8 ndm_flags) 690 692 { 693 + struct 
vxlan_rdst *rd = NULL; 691 694 struct vxlan_fdb *f; 692 695 int notify = 0; 693 696 ··· 725 726 if ((flags & NLM_F_APPEND) && 726 727 (is_multicast_ether_addr(f->eth_addr) || 727 728 is_zero_ether_addr(f->eth_addr))) { 728 - int rc = vxlan_fdb_append(f, ip, port, vni, ifindex); 729 + int rc = vxlan_fdb_append(f, ip, port, vni, ifindex, 730 + &rd); 729 731 730 732 if (rc < 0) 731 733 return rc; ··· 756 756 INIT_LIST_HEAD(&f->remotes); 757 757 memcpy(f->eth_addr, mac, ETH_ALEN); 758 758 759 - vxlan_fdb_append(f, ip, port, vni, ifindex); 759 + vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 760 760 761 761 ++vxlan->addrcnt; 762 762 hlist_add_head_rcu(&f->hlist, 763 763 vxlan_fdb_head(vxlan, mac)); 764 764 } 765 765 766 - if (notify) 767 - vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH); 766 + if (notify) { 767 + if (rd == NULL) 768 + rd = first_remote_rtnl(f); 769 + vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH); 770 + } 768 771 769 772 return 0; 770 773 } ··· 788 785 "delete %pM\n", f->eth_addr); 789 786 790 787 --vxlan->addrcnt; 791 - vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH); 788 + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); 792 789 793 790 hlist_del_rcu(&f->hlist); 794 791 call_rcu(&f->rcu, vxlan_fdb_free); ··· 922 919 */ 923 920 if (rd && !list_is_singular(&f->remotes)) { 924 921 list_del_rcu(&rd->list); 922 + vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); 925 923 kfree_rcu(rd, rcu); 926 924 goto out; 927 925 } ··· 997 993 998 994 rdst->remote_ip = *src_ip; 999 995 f->updated = jiffies; 1000 - vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH); 996 + vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH); 1001 997 } else { 1002 998 /* learned new entry */ 1003 999 spin_lock(&vxlan->hash_lock);
-4
drivers/net/wireless/ath/ath9k/ahb.c
··· 86 86 int irq; 87 87 int ret = 0; 88 88 struct ath_hw *ah; 89 - struct ath_common *common; 90 89 char hw_name[64]; 91 90 92 91 if (!dev_get_platdata(&pdev->dev)) { ··· 145 146 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", 146 147 hw_name, (unsigned long)mem, irq); 147 148 148 - common = ath9k_hw_common(sc->sc_ah); 149 - /* Will be cleared in ath9k_start() */ 150 - set_bit(ATH_OP_INVALID, &common->op_flags); 151 149 return 0; 152 150 153 151 err_irq:
+6
drivers/net/wireless/ath/ath9k/ani.c
··· 155 155 ATH9K_ANI_RSSI_THR_LOW, 156 156 ATH9K_ANI_RSSI_THR_HIGH); 157 157 158 + if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_OFDM_DEF_LEVEL) 159 + immunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL; 160 + 158 161 if (!scan) 159 162 aniState->ofdmNoiseImmunityLevel = immunityLevel; 160 163 ··· 237 234 aniState->cckNoiseImmunityLevel, immunityLevel, 238 235 BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW, 239 236 ATH9K_ANI_RSSI_THR_HIGH); 237 + 238 + if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_CCK_DEF_LEVEL) 239 + immunityLevel = ATH9K_ANI_CCK_DEF_LEVEL; 240 240 241 241 if (ah->opmode == NL80211_IFTYPE_STATION && 242 242 BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW &&
-1
drivers/net/wireless/ath/ath9k/ath9k.h
··· 251 251 252 252 s8 bar_index; 253 253 bool sched; 254 - bool paused; 255 254 bool active; 256 255 }; 257 256
+2 -3
drivers/net/wireless/ath/ath9k/debug_sta.c
··· 72 72 ath_txq_lock(sc, txq); 73 73 if (tid->active) { 74 74 len += scnprintf(buf + len, size - len, 75 - "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n", 75 + "%3d%11d%10d%10d%10d%10d%9d%6d\n", 76 76 tid->tidno, 77 77 tid->seq_start, 78 78 tid->seq_next, ··· 80 80 tid->baw_head, 81 81 tid->baw_tail, 82 82 tid->bar_index, 83 - tid->sched, 84 - tid->paused); 83 + tid->sched); 85 84 } 86 85 ath_txq_unlock(sc, txq); 87 86 }
+3
drivers/net/wireless/ath/ath9k/init.c
··· 783 783 common = ath9k_hw_common(ah); 784 784 ath9k_set_hw_capab(sc, hw); 785 785 786 + /* Will be cleared in ath9k_start() */ 787 + set_bit(ATH_OP_INVALID, &common->op_flags); 788 + 786 789 /* Initialize regulatory */ 787 790 error = ath_regd_init(&common->regulatory, sc->hw->wiphy, 788 791 ath9k_reg_notifier);
-5
drivers/net/wireless/ath/ath9k/pci.c
··· 784 784 { 785 785 struct ath_softc *sc; 786 786 struct ieee80211_hw *hw; 787 - struct ath_common *common; 788 787 u8 csz; 789 788 u32 val; 790 789 int ret = 0; ··· 875 876 ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name)); 876 877 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", 877 878 hw_name, (unsigned long)sc->mem, pdev->irq); 878 - 879 - /* Will be cleared in ath9k_start() */ 880 - common = ath9k_hw_common(sc->sc_ah); 881 - set_bit(ATH_OP_INVALID, &common->op_flags); 882 879 883 880 return 0; 884 881
+6 -3
drivers/net/wireless/ath/ath9k/recv.c
··· 975 975 u64 tsf = 0; 976 976 unsigned long flags; 977 977 dma_addr_t new_buf_addr; 978 + unsigned int budget = 512; 978 979 979 980 if (edma) 980 981 dma_type = DMA_BIDIRECTIONAL; ··· 1114 1113 } 1115 1114 requeue: 1116 1115 list_add_tail(&bf->list, &sc->rx.rxbuf); 1117 - if (flush) 1118 - continue; 1119 1116 1120 1117 if (edma) { 1121 1118 ath_rx_edma_buf_link(sc, qtype); 1122 1119 } else { 1123 1120 ath_rx_buf_relink(sc, bf); 1124 - ath9k_hw_rxena(ah); 1121 + if (!flush) 1122 + ath9k_hw_rxena(ah); 1125 1123 } 1124 + 1125 + if (!budget--) 1126 + break; 1126 1127 } while (1); 1127 1128 1128 1129 if (!(ah->imask & ATH9K_INT_RXEOL)) {
+1 -13
drivers/net/wireless/ath/ath9k/xmit.c
··· 107 107 { 108 108 struct ath_atx_ac *ac = tid->ac; 109 109 110 - if (tid->paused) 111 - return; 112 - 113 110 if (tid->sched) 114 111 return; 115 112 ··· 1404 1407 ath_tx_tid_change_state(sc, txtid); 1405 1408 1406 1409 txtid->active = true; 1407 - txtid->paused = true; 1408 1410 *ssn = txtid->seq_start = txtid->seq_next; 1409 1411 txtid->bar_index = -1; 1410 1412 ··· 1423 1427 1424 1428 ath_txq_lock(sc, txq); 1425 1429 txtid->active = false; 1426 - txtid->paused = false; 1427 1430 ath_tx_flush_tid(sc, txtid); 1428 1431 ath_tx_tid_change_state(sc, txtid); 1429 1432 ath_txq_unlock_complete(sc, txq); ··· 1482 1487 ath_txq_lock(sc, txq); 1483 1488 ac->clear_ps_filter = true; 1484 1489 1485 - if (!tid->paused && ath_tid_has_buffered(tid)) { 1490 + if (ath_tid_has_buffered(tid)) { 1486 1491 ath_tx_queue_tid(txq, tid); 1487 1492 ath_txq_schedule(sc, txq); 1488 1493 } ··· 1505 1510 ath_txq_lock(sc, txq); 1506 1511 1507 1512 tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; 1508 - tid->paused = false; 1509 1513 1510 1514 if (ath_tid_has_buffered(tid)) { 1511 1515 ath_tx_queue_tid(txq, tid); ··· 1538 1544 continue; 1539 1545 1540 1546 tid = ATH_AN_2_TID(an, i); 1541 - if (tid->paused) 1542 - continue; 1543 1547 1544 1548 ath_txq_lock(sc, tid->ac->txq); 1545 1549 while (nframes > 0) { ··· 1835 1843 list); 1836 1844 list_del(&tid->list); 1837 1845 tid->sched = false; 1838 - 1839 - if (tid->paused) 1840 - continue; 1841 1846 1842 1847 if (ath_tx_sched_aggr(sc, txq, tid, &stop)) 1843 1848 sent = true; ··· 2687 2698 tid->baw_size = WME_MAX_BA; 2688 2699 tid->baw_head = tid->baw_tail = 0; 2689 2700 tid->sched = false; 2690 - tid->paused = false; 2691 2701 tid->active = false; 2692 2702 __skb_queue_head_init(&tid->buf_q); 2693 2703 __skb_queue_head_init(&tid->retry_q);
+3 -2
drivers/net/wireless/brcm80211/brcmfmac/chip.c
··· 303 303 304 304 ci = core->chip; 305 305 306 - /* if core is already in reset, just return */ 306 + /* if core is already in reset, skip reset */ 307 307 regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL); 308 308 if ((regdata & BCMA_RESET_CTL_RESET) != 0) 309 - return; 309 + goto in_reset_configure; 310 310 311 311 /* configure reset */ 312 312 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL, ··· 322 322 SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) != 323 323 BCMA_RESET_CTL_RESET, 300); 324 324 325 + in_reset_configure: 325 326 /* in-reset configure */ 326 327 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL, 327 328 reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+12 -10
drivers/net/wireless/rt2x00/rt2x00mac.c
··· 621 621 bss_conf->bssid); 622 622 623 623 /* 624 - * Update the beacon. This is only required on USB devices. PCI 625 - * devices fetch beacons periodically. 626 - */ 627 - if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev)) 628 - rt2x00queue_update_beacon(rt2x00dev, vif); 629 - 630 - /* 631 624 * Start/stop beaconing. 632 625 */ 633 626 if (changes & BSS_CHANGED_BEACON_ENABLED) { 634 627 if (!bss_conf->enable_beacon && intf->enable_beacon) { 635 - rt2x00queue_clear_beacon(rt2x00dev, vif); 636 628 rt2x00dev->intf_beaconing--; 637 629 intf->enable_beacon = false; 630 + /* 631 + * Clear beacon in the H/W for this vif. This is needed 632 + * to disable beaconing on this particular interface 633 + * and keep it running on other interfaces. 634 + */ 635 + rt2x00queue_clear_beacon(rt2x00dev, vif); 638 636 639 637 if (rt2x00dev->intf_beaconing == 0) { 640 638 /* ··· 643 645 rt2x00queue_stop_queue(rt2x00dev->bcn); 644 646 mutex_unlock(&intf->beacon_skb_mutex); 645 647 } 646 - 647 - 648 648 } else if (bss_conf->enable_beacon && !intf->enable_beacon) { 649 649 rt2x00dev->intf_beaconing++; 650 650 intf->enable_beacon = true; 651 + /* 652 + * Upload beacon to the H/W. This is only required on 653 + * USB devices. PCI devices fetch beacons periodically. 654 + */ 655 + if (rt2x00_is_usb(rt2x00dev)) 656 + rt2x00queue_update_beacon(rt2x00dev, vif); 651 657 652 658 if (rt2x00dev->intf_beaconing == 1) { 653 659 /*
+1 -1
drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
··· 293 293 u8 *psaddr; 294 294 __le16 fc; 295 295 u16 type, ufc; 296 - bool match_bssid, packet_toself, packet_beacon, addr; 296 + bool match_bssid, packet_toself, packet_beacon = false, addr; 297 297 298 298 tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; 299 299
+1 -1
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
··· 1001 1001 err = _rtl92cu_init_mac(hw); 1002 1002 if (err) { 1003 1003 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n"); 1004 - return err; 1004 + goto exit; 1005 1005 } 1006 1006 err = rtl92c_download_fw(hw); 1007 1007 if (err) {
+6
drivers/net/wireless/rtlwifi/rtl8192se/trx.c
··· 49 49 if (ieee80211_is_nullfunc(fc)) 50 50 return QSLT_HIGH; 51 51 52 + /* Kernel commit 1bf4bbb4024dcdab changed EAPOL packets to use 53 + * queue V0 at priority 7; however, the RTL8192SE appears to have 54 + * that queue at priority 6 55 + */ 56 + if (skb->priority == 7) 57 + return QSLT_VO; 52 58 return skb->priority; 53 59 } 54 60
+27 -1
drivers/of/irq.c
··· 364 364 365 365 memset(r, 0, sizeof(*r)); 366 366 /* 367 - * Get optional "interrupts-names" property to add a name 367 + * Get optional "interrupt-names" property to add a name 368 368 * to the resource. 369 369 */ 370 370 of_property_read_string_index(dev, "interrupt-names", index, ··· 378 378 return irq; 379 379 } 380 380 EXPORT_SYMBOL_GPL(of_irq_to_resource); 381 + 382 + /** 383 + * of_irq_get - Decode a node's IRQ and return it as a Linux irq number 384 + * @dev: pointer to device tree node 385 + * @index: zero-based index of the irq 386 + * 387 + * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain 388 + * is not yet created. 389 + * 390 + */ 391 + int of_irq_get(struct device_node *dev, int index) 392 + { 393 + int rc; 394 + struct of_phandle_args oirq; 395 + struct irq_domain *domain; 396 + 397 + rc = of_irq_parse_one(dev, index, &oirq); 398 + if (rc) 399 + return rc; 400 + 401 + domain = irq_find_host(oirq.np); 402 + if (!domain) 403 + return -EPROBE_DEFER; 404 + 405 + return irq_create_of_mapping(&oirq); 406 + } 381 407 382 408 /** 383 409 * of_irq_count - Count the number of IRQs a node uses
+3 -1
drivers/of/platform.c
··· 168 168 rc = of_address_to_resource(np, i, res); 169 169 WARN_ON(rc); 170 170 } 171 - WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq); 171 + if (of_irq_to_resource_table(np, res, num_irq) != num_irq) 172 + pr_debug("not all legacy IRQ resources mapped for %s\n", 173 + np->name); 172 174 } 173 175 174 176 dev->dev.of_node = of_node_get(np);
+32
drivers/of/selftest.c
··· 10 10 #include <linux/module.h> 11 11 #include <linux/of.h> 12 12 #include <linux/of_irq.h> 13 + #include <linux/of_platform.h> 13 14 #include <linux/list.h> 14 15 #include <linux/mutex.h> 15 16 #include <linux/slab.h> ··· 428 427 } 429 428 } 430 429 430 + static void __init of_selftest_platform_populate(void) 431 + { 432 + int irq; 433 + struct device_node *np; 434 + struct platform_device *pdev; 435 + 436 + np = of_find_node_by_path("/testcase-data"); 437 + of_platform_populate(np, of_default_bus_match_table, NULL, NULL); 438 + 439 + /* Test that a missing irq domain returns -EPROBE_DEFER */ 440 + np = of_find_node_by_path("/testcase-data/testcase-device1"); 441 + pdev = of_find_device_by_node(np); 442 + if (!pdev) 443 + selftest(0, "device 1 creation failed\n"); 444 + irq = platform_get_irq(pdev, 0); 445 + if (irq != -EPROBE_DEFER) 446 + selftest(0, "device deferred probe failed - %d\n", irq); 447 + 448 + /* Test that a parsing failure does not return -EPROBE_DEFER */ 449 + np = of_find_node_by_path("/testcase-data/testcase-device2"); 450 + pdev = of_find_device_by_node(np); 451 + if (!pdev) 452 + selftest(0, "device 2 creation failed\n"); 453 + irq = platform_get_irq(pdev, 0); 454 + if (irq >= 0 || irq == -EPROBE_DEFER) 455 + selftest(0, "device parsing error failed - %d\n", irq); 456 + 457 + selftest(1, "passed"); 458 + } 459 + 431 460 static int __init of_selftest(void) 432 461 { 433 462 struct device_node *np; ··· 476 445 of_selftest_parse_interrupts(); 477 446 of_selftest_parse_interrupts_extended(); 478 447 of_selftest_match_node(); 448 + of_selftest_platform_populate(); 479 449 pr_info("end of selftest - %i passed, %i failed\n", 480 450 selftest_results.passed, selftest_results.failed); 481 451 return 0;
+13
drivers/of/testcase-data/tests-interrupts.dtsi
··· 54 54 <&test_intmap1 1 2>; 55 55 }; 56 56 }; 57 + 58 + testcase-device1 { 59 + compatible = "testcase-device"; 60 + interrupt-parent = <&test_intc0>; 61 + interrupts = <1>; 62 + }; 63 + 64 + testcase-device2 { 65 + compatible = "testcase-device"; 66 + interrupt-parent = <&test_intc2>; 67 + interrupts = <1>; /* invalid specifier - too short */ 68 + }; 57 69 }; 70 + 58 71 };
+11 -6
drivers/pinctrl/pinctrl-as3722.c
··· 64 64 }; 65 65 66 66 struct as3722_gpio_pin_control { 67 - bool enable_gpio_invert; 68 67 unsigned mode_prop; 69 68 int io_function; 70 69 }; ··· 319 320 return mode; 320 321 } 321 322 322 - if (as_pci->gpio_control[offset].enable_gpio_invert) 323 - mode |= AS3722_GPIO_INV; 324 - 325 - return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode); 323 + return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset), 324 + AS3722_GPIO_MODE_MASK, mode); 326 325 } 327 326 328 327 static const struct pinmux_ops as3722_pinmux_ops = { ··· 493 496 { 494 497 struct as3722_pctrl_info *as_pci = to_as_pci(chip); 495 498 struct as3722 *as3722 = as_pci->as3722; 496 - int en_invert = as_pci->gpio_control[offset].enable_gpio_invert; 499 + int en_invert; 497 500 u32 val; 498 501 int ret; 502 + 503 + ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val); 504 + if (ret < 0) { 505 + dev_err(as_pci->dev, 506 + "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret); 507 + return; 508 + } 509 + en_invert = !!(val & AS3722_GPIO_INV); 499 510 500 511 if (value) 501 512 val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset);
+13
drivers/pinctrl/pinctrl-single.c
··· 810 810 static int pcs_add_pin(struct pcs_device *pcs, unsigned offset, 811 811 unsigned pin_pos) 812 812 { 813 + struct pcs_soc_data *pcs_soc = &pcs->socdata; 813 814 struct pinctrl_pin_desc *pin; 814 815 struct pcs_name *pn; 815 816 int i; ··· 820 819 dev_err(pcs->dev, "too many pins, max %i\n", 821 820 pcs->desc.npins); 822 821 return -ENOMEM; 822 + } 823 + 824 + if (pcs_soc->irq_enable_mask) { 825 + unsigned val; 826 + 827 + val = pcs->read(pcs->base + offset); 828 + if (val & pcs_soc->irq_enable_mask) { 829 + dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n", 830 + (unsigned long)pcs->res->start + offset, val); 831 + val &= ~pcs_soc->irq_enable_mask; 832 + pcs->write(val, pcs->base + offset); 833 + } 823 834 } 824 835 825 836 pin = &pcs->pins.pa[i];
+1 -2
drivers/pinctrl/pinctrl-tb10x.c
··· 629 629 */ 630 630 for (i = 0; i < state->pinfuncgrpcnt; i++) { 631 631 const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i]; 632 - unsigned int port = pfg->port; 633 632 unsigned int mode = pfg->mode; 634 - int j; 633 + int j, port = pfg->port; 635 634 636 635 /* 637 636 * Skip pin groups which are always mapped and don't need
+1 -2
drivers/pinctrl/sh-pfc/pfc-r8a7790.c
··· 4794 4794 FN_MSIOF0_SCK_B, 0, 4795 4795 /* IP5_23_21 [3] */ 4796 4796 FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4, 4797 - FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, 4798 - FN_IERX_C, 0, 4797 + FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, FN_IERX_C, 4799 4798 /* IP5_20_18 [3] */ 4800 4799 FN_WE0_N, FN_IECLK, FN_CAN_CLK, 4801 4800 FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0,
+1 -1
drivers/pinctrl/sh-pfc/pfc-r8a7791.c
··· 5288 5288 /* SEL_SCIF3 [2] */ 5289 5289 FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3, 5290 5290 /* SEL_IEB [2] */ 5291 - FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 5291 + FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0, 5292 5292 /* SEL_MMC [1] */ 5293 5293 FN_SEL_MMC_0, FN_SEL_MMC_1, 5294 5294 /* SEL_SCIF5 [1] */
+26 -18
drivers/pnp/pnpacpi/core.c
··· 83 83 { 84 84 struct acpi_device *acpi_dev; 85 85 acpi_handle handle; 86 - struct acpi_buffer buffer; 87 - int ret; 86 + int ret = 0; 88 87 89 88 pnp_dbg(&dev->dev, "set resources\n"); 90 89 ··· 96 97 if (WARN_ON_ONCE(acpi_dev != dev->data)) 97 98 dev->data = acpi_dev; 98 99 99 - ret = pnpacpi_build_resource_template(dev, &buffer); 100 - if (ret) 101 - return ret; 102 - ret = pnpacpi_encode_resources(dev, &buffer); 103 - if (ret) { 100 + if (acpi_has_method(handle, METHOD_NAME__SRS)) { 101 + struct acpi_buffer buffer; 102 + 103 + ret = pnpacpi_build_resource_template(dev, &buffer); 104 + if (ret) 105 + return ret; 106 + 107 + ret = pnpacpi_encode_resources(dev, &buffer); 108 + if (!ret) { 109 + acpi_status status; 110 + 111 + status = acpi_set_current_resources(handle, &buffer); 112 + if (ACPI_FAILURE(status)) 113 + ret = -EIO; 114 + } 104 115 kfree(buffer.pointer); 105 - return ret; 106 116 } 107 - if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer))) 108 - ret = -EINVAL; 109 - else if (acpi_bus_power_manageable(handle)) 117 + if (!ret && acpi_bus_power_manageable(handle)) 110 118 ret = acpi_bus_set_power(handle, ACPI_STATE_D0); 111 - kfree(buffer.pointer); 119 + 112 120 return ret; 113 121 } 114 122 ··· 123 117 { 124 118 struct acpi_device *acpi_dev; 125 119 acpi_handle handle; 126 - int ret; 120 + acpi_status status; 127 121 128 122 dev_dbg(&dev->dev, "disable resources\n"); 129 123 ··· 134 128 } 135 129 136 130 /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ 137 - ret = 0; 138 131 if (acpi_bus_power_manageable(handle)) 139 132 acpi_bus_set_power(handle, ACPI_STATE_D3_COLD); 140 - /* continue even if acpi_bus_set_power() fails */ 141 - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL))) 142 - ret = -ENODEV; 143 - return ret; 133 + 134 + /* continue even if acpi_bus_set_power() fails */ 135 + status = acpi_evaluate_object(handle, "_DIS", NULL, NULL); 136 + if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) 137 + return -ENODEV; 138 + 139 
+ return 0; 144 140 } 145 141 146 142 #ifdef CONFIG_ACPI_SLEEP
+1 -1
drivers/pnp/pnpbios/bioscalls.c
··· 37 37 * kernel begins at offset 3GB... 38 38 */ 39 39 40 - asmlinkage void pnp_bios_callfunc(void); 40 + asmlinkage __visible void pnp_bios_callfunc(void); 41 41 42 42 __asm__(".text \n" 43 43 __ALIGN_STR "\n"
+2 -2
drivers/pnp/quirks.c
··· 335 335 } 336 336 #endif 337 337 338 - #ifdef CONFIG_X86 338 + #ifdef CONFIG_PCI 339 339 /* Device IDs of parts that have 32KB MCH space */ 340 340 static const unsigned int mch_quirk_devices[] = { 341 341 0x0154, /* Ivy Bridge */ ··· 440 440 #ifdef CONFIG_AMD_NB 441 441 {"PNP0c01", quirk_amd_mmconfig_area}, 442 442 #endif 443 - #ifdef CONFIG_X86 443 + #ifdef CONFIG_PCI 444 444 {"PNP0c02", quirk_intel_mch}, 445 445 #endif 446 446 {""}
+2 -2
drivers/rtc/rtc-pcf8523.c
··· 206 206 tm->tm_hour = bcd2bin(regs[2] & 0x3f); 207 207 tm->tm_mday = bcd2bin(regs[3] & 0x3f); 208 208 tm->tm_wday = regs[4] & 0x7; 209 - tm->tm_mon = bcd2bin(regs[5] & 0x1f); 209 + tm->tm_mon = bcd2bin(regs[5] & 0x1f) - 1; 210 210 tm->tm_year = bcd2bin(regs[6]) + 100; 211 211 212 212 return rtc_valid_tm(tm); ··· 229 229 regs[3] = bin2bcd(tm->tm_hour); 230 230 regs[4] = bin2bcd(tm->tm_mday); 231 231 regs[5] = tm->tm_wday; 232 - regs[6] = bin2bcd(tm->tm_mon); 232 + regs[6] = bin2bcd(tm->tm_mon + 1); 233 233 regs[7] = bin2bcd(tm->tm_year - 100); 234 234 235 235 msg.addr = client->addr;
+17 -5
drivers/s390/cio/chsc.c
··· 541 541 542 542 static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) 543 543 { 544 - do { 544 + static int ntsm_unsupported; 545 + 546 + while (true) { 545 547 memset(sei, 0, sizeof(*sei)); 546 548 sei->request.length = 0x0010; 547 549 sei->request.code = 0x000e; 548 - sei->ntsm = ntsm; 550 + if (!ntsm_unsupported) 551 + sei->ntsm = ntsm; 549 552 550 553 if (chsc(sei)) 551 554 break; 552 555 553 556 if (sei->response.code != 0x0001) { 554 - CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", 555 - sei->response.code); 557 + CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n", 558 + sei->response.code, sei->ntsm); 559 + 560 + if (sei->response.code == 3 && sei->ntsm) { 561 + /* Fallback for old firmware. */ 562 + ntsm_unsupported = 1; 563 + continue; 564 + } 556 565 break; 557 566 } 558 567 ··· 577 568 CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); 578 569 break; 579 570 } 580 - } while (sei->u.nt0_area.flags & 0x80); 571 + 572 + if (!(sei->u.nt0_area.flags & 0x80)) 573 + break; 574 + } 581 575 } 582 576 583 577 /*
-1
drivers/scsi/mpt2sas/mpt2sas_scsih.c
··· 8293 8293 8294 8294 mpt2sas_base_free_resources(ioc); 8295 8295 pci_save_state(pdev); 8296 - pci_disable_device(pdev); 8297 8296 pci_set_power_state(pdev, device_state); 8298 8297 return 0; 8299 8298 }
+1 -1
drivers/scsi/scsi_netlink.c
··· 77 77 goto next_msg; 78 78 } 79 79 80 - if (!capable(CAP_SYS_ADMIN)) { 80 + if (!netlink_capable(skb, CAP_SYS_ADMIN)) { 81 81 err = -EPERM; 82 82 goto next_msg; 83 83 }
+5 -1
drivers/scsi/virtio_scsi.c
··· 750 750 751 751 vscsi->affinity_hint_set = true; 752 752 } else { 753 - for (i = 0; i < vscsi->num_queues; i++) 753 + for (i = 0; i < vscsi->num_queues; i++) { 754 + if (!vscsi->req_vqs[i].vq) 755 + continue; 756 + 754 757 virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); 758 + } 755 759 756 760 vscsi->affinity_hint_set = false; 757 761 }
+2 -1
drivers/staging/iio/resolver/ad2s1200.c
··· 107 107 int pn, ret = 0; 108 108 unsigned short *pins = spi->dev.platform_data; 109 109 110 - for (pn = 0; pn < AD2S1200_PN; pn++) 110 + for (pn = 0; pn < AD2S1200_PN; pn++) { 111 111 ret = devm_gpio_request_one(&spi->dev, pins[pn], GPIOF_DIR_OUT, 112 112 DRV_NAME); 113 113 if (ret) { ··· 115 115 pins[pn]); 116 116 return ret; 117 117 } 118 + } 118 119 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); 119 120 if (!indio_dev) 120 121 return -ENOMEM;
+1 -1
drivers/tty/hvc/hvc_console.c
··· 190 190 return hvc_driver; 191 191 } 192 192 193 - static int __init hvc_console_setup(struct console *co, char *options) 193 + static int hvc_console_setup(struct console *co, char *options) 194 194 { 195 195 if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES) 196 196 return -ENODEV;
+4
drivers/tty/n_tty.c
··· 2353 2353 if (tty->ops->flush_chars) 2354 2354 tty->ops->flush_chars(tty); 2355 2355 } else { 2356 + struct n_tty_data *ldata = tty->disc_data; 2357 + 2356 2358 while (nr > 0) { 2359 + mutex_lock(&ldata->output_lock); 2357 2360 c = tty->ops->write(tty, b, nr); 2361 + mutex_unlock(&ldata->output_lock); 2358 2362 if (c < 0) { 2359 2363 retval = c; 2360 2364 goto break_out;
+1 -1
drivers/tty/serial/8250/8250_core.c
··· 555 555 */ 556 556 if ((p->port.type == PORT_XR17V35X) || 557 557 (p->port.type == PORT_XR17D15X)) { 558 - serial_out(p, UART_EXAR_SLEEP, 0xff); 558 + serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0); 559 559 return; 560 560 } 561 561
+14 -15
drivers/tty/tty_buffer.c
··· 255 255 if (change || left < size) { 256 256 /* This is the slow path - looking for new buffers to use */ 257 257 if ((n = tty_buffer_alloc(port, size)) != NULL) { 258 - unsigned long iflags; 259 - 260 258 n->flags = flags; 261 259 buf->tail = n; 262 - 263 - spin_lock_irqsave(&buf->flush_lock, iflags); 264 260 b->commit = b->used; 261 + /* paired w/ barrier in flush_to_ldisc(); ensures the 262 + * latest commit value can be read before the head is 263 + * advanced to the next buffer 264 + */ 265 + smp_wmb(); 265 266 b->next = n; 266 - spin_unlock_irqrestore(&buf->flush_lock, iflags); 267 - 268 267 } else if (change) 269 268 size = 0; 270 269 else ··· 447 448 mutex_lock(&buf->lock); 448 449 449 450 while (1) { 450 - unsigned long flags; 451 451 struct tty_buffer *head = buf->head; 452 + struct tty_buffer *next; 452 453 int count; 453 454 454 455 /* Ldisc or user is trying to gain exclusive access */ 455 456 if (atomic_read(&buf->priority)) 456 457 break; 457 458 458 - spin_lock_irqsave(&buf->flush_lock, flags); 459 + next = head->next; 460 + /* paired w/ barrier in __tty_buffer_request_room(); 461 + * ensures commit value read is not stale if the head 462 + * is advancing to the next buffer 463 + */ 464 + smp_rmb(); 459 465 count = head->commit - head->read; 460 466 if (!count) { 461 - if (head->next == NULL) { 462 - spin_unlock_irqrestore(&buf->flush_lock, flags); 467 + if (next == NULL) 463 468 break; 464 - } 465 - buf->head = head->next; 466 - spin_unlock_irqrestore(&buf->flush_lock, flags); 469 + buf->head = next; 467 470 tty_buffer_free(port, head); 468 471 continue; 469 472 } 470 - spin_unlock_irqrestore(&buf->flush_lock, flags); 471 473 472 474 count = receive_buf(tty, head, count); 473 475 if (!count) ··· 523 523 struct tty_bufhead *buf = &port->buf; 524 524 525 525 mutex_init(&buf->lock); 526 - spin_lock_init(&buf->flush_lock); 527 526 tty_buffer_reset(&buf->sentinel, 0); 528 527 buf->head = &buf->sentinel; 529 528 buf->tail = &buf->sentinel;
-10
drivers/usb/gadget/at91_udc.c
··· 1709 1709 return -ENODEV; 1710 1710 } 1711 1711 1712 - if (pdev->num_resources != 2) { 1713 - DBG("invalid num_resources\n"); 1714 - return -ENODEV; 1715 - } 1716 - if ((pdev->resource[0].flags != IORESOURCE_MEM) 1717 - || (pdev->resource[1].flags != IORESOURCE_IRQ)) { 1718 - DBG("invalid resource type\n"); 1719 - return -ENODEV; 1720 - } 1721 - 1722 1712 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1723 1713 if (!res) 1724 1714 return -ENXIO;
+2 -1
drivers/usb/host/ehci-fsl.c
··· 248 248 break; 249 249 } 250 250 251 - if (pdata->have_sysif_regs && pdata->controller_ver && 251 + if (pdata->have_sysif_regs && 252 + pdata->controller_ver > FSL_USB_VER_1_6 && 252 253 (phy_mode == FSL_USB2_PHY_ULPI)) { 253 254 /* check PHY_CLK_VALID to get phy clk valid */ 254 255 if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
+18
drivers/usb/host/ohci-hub.c
··· 90 90 dl_done_list (ohci); 91 91 finish_unlinks (ohci, ohci_frame_no(ohci)); 92 92 93 + /* 94 + * Some controllers don't handle "global" suspend properly if 95 + * there are unsuspended ports. For these controllers, put all 96 + * the enabled ports into suspend before suspending the root hub. 97 + */ 98 + if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) { 99 + __hc32 __iomem *portstat = ohci->regs->roothub.portstatus; 100 + int i; 101 + unsigned temp; 102 + 103 + for (i = 0; i < ohci->num_ports; (++i, ++portstat)) { 104 + temp = ohci_readl(ohci, portstat); 105 + if ((temp & (RH_PS_PES | RH_PS_PSS)) == 106 + RH_PS_PES) 107 + ohci_writel(ohci, RH_PS_PSS, portstat); 108 + } 109 + } 110 + 93 111 /* maybe resume can wake root hub */ 94 112 if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) { 95 113 ohci->hc_control |= OHCI_CTRL_RWE;
+1
drivers/usb/host/ohci-pci.c
··· 160 160 ohci_dbg(ohci, "enabled AMD prefetch quirk\n"); 161 161 } 162 162 163 + ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND; 163 164 return 0; 164 165 } 165 166
+2
drivers/usb/host/ohci.h
··· 405 405 #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ 406 406 #define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/ 407 407 #define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ 408 + #define OHCI_QUIRK_GLOBAL_SUSPEND 0x800 /* must suspend ports */ 409 + 408 410 // there are also chip quirks/bugs in init logic 409 411 410 412 struct work_struct nec_work; /* Worker for NEC quirk */
+5 -4
drivers/usb/phy/phy-fsm-usb.c
··· 303 303 otg_set_state(fsm, OTG_STATE_A_WAIT_VRISE); 304 304 break; 305 305 case OTG_STATE_A_WAIT_VRISE: 306 - if (fsm->id || fsm->a_bus_drop || fsm->a_vbus_vld || 307 - fsm->a_wait_vrise_tmout) { 306 + if (fsm->a_vbus_vld) 308 307 otg_set_state(fsm, OTG_STATE_A_WAIT_BCON); 309 - } 308 + else if (fsm->id || fsm->a_bus_drop || 309 + fsm->a_wait_vrise_tmout) 310 + otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL); 310 311 break; 311 312 case OTG_STATE_A_WAIT_BCON: 312 313 if (!fsm->a_vbus_vld) 313 314 otg_set_state(fsm, OTG_STATE_A_VBUS_ERR); 314 315 else if (fsm->b_conn) 315 316 otg_set_state(fsm, OTG_STATE_A_HOST); 316 - else if (fsm->id | fsm->a_bus_drop | fsm->a_wait_bcon_tmout) 317 + else if (fsm->id || fsm->a_bus_drop || fsm->a_wait_bcon_tmout) 317 318 otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL); 318 319 break; 319 320 case OTG_STATE_A_HOST:
+15
drivers/usb/serial/qcserial.c
··· 151 151 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */ 152 152 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */ 153 153 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */ 154 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */ 155 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */ 156 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */ 157 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 0)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Device Management */ 158 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 2)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card NMEA */ 159 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Modem */ 160 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 0)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Device Management */ 161 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 2)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card NMEA */ 162 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 3)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Modem */ 163 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 0)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */ 164 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 2)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */ 165 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 3)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Modem */ 166 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 0)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Device Management */ 167 + 
{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 2)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card NMEA */ 168 + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 3)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Modem */ 154 169 155 170 { } /* Terminating entry */ 156 171 };
+1 -1
drivers/usb/storage/shuttle_usbat.c
··· 1851 1851 us->transport_name = "Shuttle USBAT"; 1852 1852 us->transport = usbat_flash_transport; 1853 1853 us->transport_reset = usb_stor_CB_reset; 1854 - us->max_lun = 1; 1854 + us->max_lun = 0; 1855 1855 1856 1856 result = usb_stor_probe2(us); 1857 1857 return result;
+14
drivers/usb/storage/unusual_devs.h
··· 234 234 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 235 235 US_FL_MAX_SECTORS_64 ), 236 236 237 + /* Reported by Daniele Forsi <dforsi@gmail.com> */ 238 + UNUSUAL_DEV( 0x0421, 0x04b9, 0x0350, 0x0350, 239 + "Nokia", 240 + "5300", 241 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 242 + US_FL_MAX_SECTORS_64 ), 243 + 244 + /* Patch submitted by Victor A. Santos <victoraur.santos@gmail.com> */ 245 + UNUSUAL_DEV( 0x0421, 0x05af, 0x0742, 0x0742, 246 + "Nokia", 247 + "305", 248 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 249 + US_FL_MAX_SECTORS_64), 250 + 237 251 /* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */ 238 252 UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110, 239 253 "Nokia",
-2
fs/affs/super.c
··· 340 340 &blocksize,&sbi->s_prefix, 341 341 sbi->s_volume, &mount_flags)) { 342 342 printk(KERN_ERR "AFFS: Error parsing options\n"); 343 - kfree(sbi->s_prefix); 344 - kfree(sbi); 345 343 return -EINVAL; 346 344 } 347 345 /* N.B. after this point s_prefix must be released */
+34 -8
fs/aio.c
··· 112 112 113 113 struct work_struct free_work; 114 114 115 + /* 116 + * signals when all in-flight requests are done 117 + */ 118 + struct completion *requests_done; 119 + 115 120 struct { 116 121 /* 117 122 * This counts the number of available slots in the ringbuffer, ··· 513 508 { 514 509 struct kioctx *ctx = container_of(ref, struct kioctx, reqs); 515 510 511 + /* At this point we know that there are no any in-flight requests */ 512 + if (ctx->requests_done) 513 + complete(ctx->requests_done); 514 + 516 515 INIT_WORK(&ctx->free_work, free_ioctx); 517 516 schedule_work(&ctx->free_work); 518 517 } ··· 727 718 * when the processes owning a context have all exited to encourage 728 719 * the rapid destruction of the kioctx. 729 720 */ 730 - static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx) 721 + static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, 722 + struct completion *requests_done) 731 723 { 732 724 if (!atomic_xchg(&ctx->dead, 1)) { 733 725 struct kioctx_table *table; ··· 757 747 if (ctx->mmap_size) 758 748 vm_munmap(ctx->mmap_base, ctx->mmap_size); 759 749 750 + ctx->requests_done = requests_done; 760 751 percpu_ref_kill(&ctx->users); 752 + } else { 753 + if (requests_done) 754 + complete(requests_done); 761 755 } 762 756 } 763 757 ··· 823 809 */ 824 810 ctx->mmap_size = 0; 825 811 826 - kill_ioctx(mm, ctx); 812 + kill_ioctx(mm, ctx, NULL); 827 813 } 828 814 } 829 815 ··· 1199 1185 if (!IS_ERR(ioctx)) { 1200 1186 ret = put_user(ioctx->user_id, ctxp); 1201 1187 if (ret) 1202 - kill_ioctx(current->mm, ioctx); 1188 + kill_ioctx(current->mm, ioctx, NULL); 1203 1189 percpu_ref_put(&ioctx->users); 1204 1190 } 1205 1191 ··· 1217 1203 { 1218 1204 struct kioctx *ioctx = lookup_ioctx(ctx); 1219 1205 if (likely(NULL != ioctx)) { 1220 - kill_ioctx(current->mm, ioctx); 1206 + struct completion requests_done = 1207 + COMPLETION_INITIALIZER_ONSTACK(requests_done); 1208 + 1209 + /* Pass requests_done to kill_ioctx() where it can be set 1210 + * 
in a thread-safe way. If we try to set it here then we have 1211 + * a race condition if two io_destroy() called simultaneously. 1212 + */ 1213 + kill_ioctx(current->mm, ioctx, &requests_done); 1221 1214 percpu_ref_put(&ioctx->users); 1215 + 1216 + /* Wait until all IO for the context are done. Otherwise kernel 1217 + * keep using user-space buffers even if user thinks the context 1218 + * is destroyed. 1219 + */ 1220 + wait_for_completion(&requests_done); 1221 + 1222 1222 return 0; 1223 1223 } 1224 1224 pr_debug("EINVAL: io_destroy: invalid context id\n"); ··· 1327 1299 &iovec, compat) 1328 1300 : aio_setup_single_vector(req, rw, buf, &nr_segs, 1329 1301 iovec); 1330 - if (ret) 1331 - return ret; 1332 - 1333 - ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); 1302 + if (!ret) 1303 + ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); 1334 1304 if (ret < 0) { 1335 1305 if (iovec != &inline_vec) 1336 1306 kfree(iovec);
+2 -2
fs/autofs4/root.c
··· 179 179 spin_lock(&active->d_lock); 180 180 181 181 /* Already gone? */ 182 - if (!d_count(active)) 182 + if ((int) d_count(active) <= 0) 183 183 goto next; 184 184 185 185 qstr = &active->d_name; ··· 230 230 231 231 spin_lock(&expiring->d_lock); 232 232 233 - /* Bad luck, we've already been dentry_iput */ 233 + /* We've already been dentry_iput or unlinked */ 234 234 if (!expiring->d_inode) 235 235 goto next; 236 236
+1 -1
fs/ceph/caps.c
··· 3261 3261 rel->seq = cpu_to_le32(cap->seq); 3262 3262 rel->issue_seq = cpu_to_le32(cap->issue_seq), 3263 3263 rel->mseq = cpu_to_le32(cap->mseq); 3264 - rel->caps = cpu_to_le32(cap->issued); 3264 + rel->caps = cpu_to_le32(cap->implemented); 3265 3265 rel->wanted = cpu_to_le32(cap->mds_wanted); 3266 3266 rel->dname_len = 0; 3267 3267 rel->dname_seq = 0;
+18 -15
fs/ceph/dir.c
··· 141 141 142 142 /* start at beginning? */ 143 143 if (ctx->pos == 2 || last == NULL || 144 - ctx->pos < ceph_dentry(last)->offset) { 144 + fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) { 145 145 if (list_empty(&parent->d_subdirs)) 146 146 goto out_unlock; 147 147 p = parent->d_subdirs.prev; ··· 182 182 spin_unlock(&dentry->d_lock); 183 183 spin_unlock(&parent->d_lock); 184 184 185 + /* make sure a dentry wasn't dropped while we didn't have parent lock */ 186 + if (!ceph_dir_is_complete(dir)) { 187 + dout(" lost dir complete on %p; falling back to mds\n", dir); 188 + dput(dentry); 189 + err = -EAGAIN; 190 + goto out; 191 + } 192 + 185 193 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos, 186 194 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); 187 - ctx->pos = di->offset; 188 195 if (!dir_emit(ctx, dentry->d_name.name, 189 196 dentry->d_name.len, 190 197 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino), ··· 205 198 return 0; 206 199 } 207 200 201 + ctx->pos = di->offset + 1; 202 + 208 203 if (last) 209 204 dput(last); 210 205 last = dentry; 211 - 212 - ctx->pos++; 213 - 214 - /* make sure a dentry wasn't dropped while we didn't have parent lock */ 215 - if (!ceph_dir_is_complete(dir)) { 216 - dout(" lost dir complete on %p; falling back to mds\n", dir); 217 - err = -EAGAIN; 218 - goto out; 219 - } 220 206 221 207 spin_lock(&parent->d_lock); 222 208 p = p->prev; /* advance to next dentry */ ··· 296 296 err = __dcache_readdir(file, ctx, shared_gen); 297 297 if (err != -EAGAIN) 298 298 return err; 299 + frag = fpos_frag(ctx->pos); 300 + off = fpos_off(ctx->pos); 299 301 } else { 300 302 spin_unlock(&ci->i_ceph_lock); 301 303 } ··· 448 446 if (atomic_read(&ci->i_release_count) == fi->dir_release_count) { 449 447 dout(" marking %p complete\n", inode); 450 448 __ceph_dir_set_complete(ci, fi->dir_release_count); 451 - ci->i_max_offset = ctx->pos; 452 449 } 453 450 spin_unlock(&ci->i_ceph_lock); 454 451 ··· 936 935 * to do it 
here. 937 936 */ 938 937 939 - /* d_move screws up d_subdirs order */ 940 - ceph_dir_clear_complete(new_dir); 941 - 942 938 d_move(old_dentry, new_dentry); 943 939 944 940 /* ensure target dentry is invalidated, despite 945 941 rehashing bug in vfs_rename_dir */ 946 942 ceph_invalidate_dentry_lease(new_dentry); 943 + 944 + /* d_move screws up sibling dentries' offsets */ 945 + ceph_dir_clear_complete(old_dir); 946 + ceph_dir_clear_complete(new_dir); 947 + 947 948 } 948 949 ceph_mdsc_put_request(req); 949 950 return err;
+16 -55
fs/ceph/inode.c
··· 744 744 !__ceph_dir_is_complete(ci)) { 745 745 dout(" marking %p complete (empty)\n", inode); 746 746 __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count)); 747 - ci->i_max_offset = 2; 748 747 } 749 748 no_change: 750 749 /* only update max_size on auth cap */ ··· 889 890 } 890 891 891 892 /* 892 - * Set dentry's directory position based on the current dir's max, and 893 - * order it in d_subdirs, so that dcache_readdir behaves. 894 - * 895 - * Always called under directory's i_mutex. 896 - */ 897 - static void ceph_set_dentry_offset(struct dentry *dn) 898 - { 899 - struct dentry *dir = dn->d_parent; 900 - struct inode *inode = dir->d_inode; 901 - struct ceph_inode_info *ci; 902 - struct ceph_dentry_info *di; 903 - 904 - BUG_ON(!inode); 905 - 906 - ci = ceph_inode(inode); 907 - di = ceph_dentry(dn); 908 - 909 - spin_lock(&ci->i_ceph_lock); 910 - if (!__ceph_dir_is_complete(ci)) { 911 - spin_unlock(&ci->i_ceph_lock); 912 - return; 913 - } 914 - di->offset = ceph_inode(inode)->i_max_offset++; 915 - spin_unlock(&ci->i_ceph_lock); 916 - 917 - spin_lock(&dir->d_lock); 918 - spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED); 919 - list_move(&dn->d_u.d_child, &dir->d_subdirs); 920 - dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset, 921 - dn->d_u.d_child.prev, dn->d_u.d_child.next); 922 - spin_unlock(&dn->d_lock); 923 - spin_unlock(&dir->d_lock); 924 - } 925 - 926 - /* 927 893 * splice a dentry to an inode. 928 894 * caller must hold directory i_mutex for this to be safe. 929 895 * ··· 897 933 * the caller) if we fail. 
898 934 */ 899 935 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in, 900 - bool *prehash, bool set_offset) 936 + bool *prehash) 901 937 { 902 938 struct dentry *realdn; 903 939 ··· 929 965 } 930 966 if ((!prehash || *prehash) && d_unhashed(dn)) 931 967 d_rehash(dn); 932 - if (set_offset) 933 - ceph_set_dentry_offset(dn); 934 968 out: 935 969 return dn; 936 970 } ··· 949 987 { 950 988 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 951 989 struct inode *in = NULL; 952 - struct ceph_mds_reply_inode *ininfo; 953 990 struct ceph_vino vino; 954 991 struct ceph_fs_client *fsc = ceph_sb_to_client(sb); 955 992 int err = 0; ··· 1122 1161 1123 1162 /* rename? */ 1124 1163 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) { 1164 + struct inode *olddir = req->r_old_dentry_dir; 1165 + BUG_ON(!olddir); 1166 + 1125 1167 dout(" src %p '%.*s' dst %p '%.*s'\n", 1126 1168 req->r_old_dentry, 1127 1169 req->r_old_dentry->d_name.len, ··· 1144 1180 rehashing bug in vfs_rename_dir */ 1145 1181 ceph_invalidate_dentry_lease(dn); 1146 1182 1147 - /* 1148 - * d_move() puts the renamed dentry at the end of 1149 - * d_subdirs. We need to assign it an appropriate 1150 - * directory offset so we can behave when dir is 1151 - * complete. 
1152 - */ 1153 - ceph_set_dentry_offset(req->r_old_dentry); 1183 + /* d_move screws up sibling dentries' offsets */ 1184 + ceph_dir_clear_complete(dir); 1185 + ceph_dir_clear_complete(olddir); 1186 + 1154 1187 dout("dn %p gets new offset %lld\n", req->r_old_dentry, 1155 1188 ceph_dentry(req->r_old_dentry)->offset); 1156 1189 ··· 1174 1213 1175 1214 /* attach proper inode */ 1176 1215 if (!dn->d_inode) { 1216 + ceph_dir_clear_complete(dir); 1177 1217 ihold(in); 1178 - dn = splice_dentry(dn, in, &have_lease, true); 1218 + dn = splice_dentry(dn, in, &have_lease); 1179 1219 if (IS_ERR(dn)) { 1180 1220 err = PTR_ERR(dn); 1181 1221 goto done; ··· 1197 1235 (req->r_op == CEPH_MDS_OP_LOOKUPSNAP || 1198 1236 req->r_op == CEPH_MDS_OP_MKSNAP)) { 1199 1237 struct dentry *dn = req->r_dentry; 1238 + struct inode *dir = req->r_locked_dir; 1200 1239 1201 1240 /* fill out a snapdir LOOKUPSNAP dentry */ 1202 1241 BUG_ON(!dn); 1203 - BUG_ON(!req->r_locked_dir); 1204 - BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR); 1205 - ininfo = rinfo->targeti.in; 1206 - vino.ino = le64_to_cpu(ininfo->ino); 1207 - vino.snap = le64_to_cpu(ininfo->snapid); 1242 + BUG_ON(!dir); 1243 + BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR); 1208 1244 dout(" linking snapped dir %p to dn %p\n", in, dn); 1245 + ceph_dir_clear_complete(dir); 1209 1246 ihold(in); 1210 - dn = splice_dentry(dn, in, NULL, true); 1247 + dn = splice_dentry(dn, in, NULL); 1211 1248 if (IS_ERR(dn)) { 1212 1249 err = PTR_ERR(dn); 1213 1250 goto done; ··· 1368 1407 } 1369 1408 1370 1409 if (!dn->d_inode) { 1371 - dn = splice_dentry(dn, in, NULL, false); 1410 + dn = splice_dentry(dn, in, NULL); 1372 1411 if (IS_ERR(dn)) { 1373 1412 err = PTR_ERR(dn); 1374 1413 dn = NULL;
+3
fs/ceph/ioctl.c
··· 109 109 return PTR_ERR(req); 110 110 req->r_inode = inode; 111 111 ihold(inode); 112 + req->r_num_caps = 1; 113 + 112 114 req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL; 113 115 114 116 req->r_args.setlayout.layout.fl_stripe_unit = ··· 155 153 return PTR_ERR(req); 156 154 req->r_inode = inode; 157 155 ihold(inode); 156 + req->r_num_caps = 1; 158 157 159 158 req->r_args.setlayout.layout.fl_stripe_unit = 160 159 cpu_to_le32(l.stripe_unit);
+1
fs/ceph/locks.c
··· 45 45 return PTR_ERR(req); 46 46 req->r_inode = inode; 47 47 ihold(inode); 48 + req->r_num_caps = 1; 48 49 49 50 /* mds requires start and length rather than start and end */ 50 51 if (LLONG_MAX == fl->fl_end)
-1
fs/ceph/super.h
··· 266 266 struct timespec i_rctime; 267 267 u64 i_rbytes, i_rfiles, i_rsubdirs; 268 268 u64 i_files, i_subdirs; 269 - u64 i_max_offset; /* largest readdir offset, set with complete dir */ 270 269 271 270 struct rb_root i_fragtree; 272 271 struct mutex i_fragtree_mutex;
+102 -216
fs/dcache.c
··· 246 246 kmem_cache_free(dentry_cache, dentry); 247 247 } 248 248 249 - /* 250 - * no locks, please. 251 - */ 252 - static void d_free(struct dentry *dentry) 249 + static void dentry_free(struct dentry *dentry) 253 250 { 254 - BUG_ON((int)dentry->d_lockref.count > 0); 255 - this_cpu_dec(nr_dentry); 256 - if (dentry->d_op && dentry->d_op->d_release) 257 - dentry->d_op->d_release(dentry); 258 - 259 251 /* if dentry was never visible to RCU, immediate free is OK */ 260 252 if (!(dentry->d_flags & DCACHE_RCUACCESS)) 261 253 __d_free(&dentry->d_u.d_rcu); ··· 395 403 d_lru_add(dentry); 396 404 } 397 405 398 - /* 399 - * Remove a dentry with references from the LRU. 400 - * 401 - * If we are on the shrink list, then we can get to try_prune_one_dentry() and 402 - * lose our last reference through the parent walk. In this case, we need to 403 - * remove ourselves from the shrink list, not the LRU. 404 - */ 405 - static void dentry_lru_del(struct dentry *dentry) 406 - { 407 - if (dentry->d_flags & DCACHE_LRU_LIST) { 408 - if (dentry->d_flags & DCACHE_SHRINK_LIST) 409 - return d_shrink_del(dentry); 410 - d_lru_del(dentry); 411 - } 412 - } 413 - 414 - /** 415 - * d_kill - kill dentry and return parent 416 - * @dentry: dentry to kill 417 - * @parent: parent dentry 418 - * 419 - * The dentry must already be unhashed and removed from the LRU. 420 - * 421 - * If this is the root of the dentry tree, return NULL. 422 - * 423 - * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by 424 - * d_kill. 
425 - */ 426 - static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) 427 - __releases(dentry->d_lock) 428 - __releases(parent->d_lock) 429 - __releases(dentry->d_inode->i_lock) 430 - { 431 - list_del(&dentry->d_u.d_child); 432 - /* 433 - * Inform d_walk() that we are no longer attached to the 434 - * dentry tree 435 - */ 436 - dentry->d_flags |= DCACHE_DENTRY_KILLED; 437 - if (parent) 438 - spin_unlock(&parent->d_lock); 439 - dentry_iput(dentry); 440 - /* 441 - * dentry_iput drops the locks, at which point nobody (except 442 - * transient RCU lookups) can reach this dentry. 443 - */ 444 - d_free(dentry); 445 - return parent; 446 - } 447 - 448 406 /** 449 407 * d_drop - drop a dentry 450 408 * @dentry: dentry to drop ··· 452 510 __releases(dentry->d_lock) 453 511 { 454 512 struct inode *inode; 455 - struct dentry *parent; 513 + struct dentry *parent = NULL; 514 + bool can_free = true; 515 + 516 + if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) { 517 + can_free = dentry->d_flags & DCACHE_MAY_FREE; 518 + spin_unlock(&dentry->d_lock); 519 + goto out; 520 + } 456 521 457 522 inode = dentry->d_inode; 458 523 if (inode && !spin_trylock(&inode->i_lock)) { ··· 470 521 } 471 522 return dentry; /* try again with same dentry */ 472 523 } 473 - if (IS_ROOT(dentry)) 474 - parent = NULL; 475 - else 524 + if (!IS_ROOT(dentry)) 476 525 parent = dentry->d_parent; 477 526 if (parent && !spin_trylock(&parent->d_lock)) { 478 527 if (inode) ··· 490 543 if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry)) 491 544 dentry->d_op->d_prune(dentry); 492 545 493 - dentry_lru_del(dentry); 546 + if (dentry->d_flags & DCACHE_LRU_LIST) { 547 + if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) 548 + d_lru_del(dentry); 549 + } 494 550 /* if it was on the hash then remove it */ 495 551 __d_drop(dentry); 496 - return d_kill(dentry, parent); 552 + list_del(&dentry->d_u.d_child); 553 + /* 554 + * Inform d_walk() that we are no longer attached to the 555 + * dentry tree 
556 + */ 557 + dentry->d_flags |= DCACHE_DENTRY_KILLED; 558 + if (parent) 559 + spin_unlock(&parent->d_lock); 560 + dentry_iput(dentry); 561 + /* 562 + * dentry_iput drops the locks, at which point nobody (except 563 + * transient RCU lookups) can reach this dentry. 564 + */ 565 + BUG_ON((int)dentry->d_lockref.count > 0); 566 + this_cpu_dec(nr_dentry); 567 + if (dentry->d_op && dentry->d_op->d_release) 568 + dentry->d_op->d_release(dentry); 569 + 570 + spin_lock(&dentry->d_lock); 571 + if (dentry->d_flags & DCACHE_SHRINK_LIST) { 572 + dentry->d_flags |= DCACHE_MAY_FREE; 573 + can_free = false; 574 + } 575 + spin_unlock(&dentry->d_lock); 576 + out: 577 + if (likely(can_free)) 578 + dentry_free(dentry); 579 + return parent; 497 580 } 498 581 499 582 /* ··· 792 815 } 793 816 EXPORT_SYMBOL(d_prune_aliases); 794 817 795 - /* 796 - * Try to throw away a dentry - free the inode, dput the parent. 797 - * Requires dentry->d_lock is held, and dentry->d_count == 0. 798 - * Releases dentry->d_lock. 799 - * 800 - * This may fail if locks cannot be acquired no problem, just try again. 801 - */ 802 - static struct dentry * try_prune_one_dentry(struct dentry *dentry) 803 - __releases(dentry->d_lock) 804 - { 805 - struct dentry *parent; 806 - 807 - parent = dentry_kill(dentry, 0); 808 - /* 809 - * If dentry_kill returns NULL, we have nothing more to do. 810 - * if it returns the same dentry, trylocks failed. In either 811 - * case, just loop again. 812 - * 813 - * Otherwise, we need to prune ancestors too. This is necessary 814 - * to prevent quadratic behavior of shrink_dcache_parent(), but 815 - * is also expected to be beneficial in reducing dentry cache 816 - * fragmentation. 817 - */ 818 - if (!parent) 819 - return NULL; 820 - if (parent == dentry) 821 - return dentry; 822 - 823 - /* Prune ancestors. 
*/ 824 - dentry = parent; 825 - while (dentry) { 826 - if (lockref_put_or_lock(&dentry->d_lockref)) 827 - return NULL; 828 - dentry = dentry_kill(dentry, 1); 829 - } 830 - return NULL; 831 - } 832 - 833 818 static void shrink_dentry_list(struct list_head *list) 834 819 { 835 - struct dentry *dentry; 820 + struct dentry *dentry, *parent; 836 821 837 - rcu_read_lock(); 838 - for (;;) { 839 - dentry = list_entry_rcu(list->prev, struct dentry, d_lru); 840 - if (&dentry->d_lru == list) 841 - break; /* empty */ 842 - 843 - /* 844 - * Get the dentry lock, and re-verify that the dentry is 845 - * this on the shrinking list. If it is, we know that 846 - * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set. 847 - */ 822 + while (!list_empty(list)) { 823 + dentry = list_entry(list->prev, struct dentry, d_lru); 848 824 spin_lock(&dentry->d_lock); 849 - if (dentry != list_entry(list->prev, struct dentry, d_lru)) { 850 - spin_unlock(&dentry->d_lock); 851 - continue; 852 - } 853 - 854 825 /* 855 826 * The dispose list is isolated and dentries are not accounted 856 827 * to the LRU here, so we can simply remove it from the list ··· 810 885 * We found an inuse dentry which was not removed from 811 886 * the LRU because of laziness during lookup. Do not free it. 812 887 */ 813 - if (dentry->d_lockref.count) { 888 + if ((int)dentry->d_lockref.count > 0) { 814 889 spin_unlock(&dentry->d_lock); 815 890 continue; 816 891 } 817 - rcu_read_unlock(); 818 892 893 + parent = dentry_kill(dentry, 0); 819 894 /* 820 - * If 'try_to_prune()' returns a dentry, it will 821 - * be the same one we passed in, and d_lock will 822 - * have been held the whole time, so it will not 823 - * have been added to any other lists. We failed 824 - * to get the inode lock. 825 - * 826 - * We just add it back to the shrink list. 895 + * If dentry_kill returns NULL, we have nothing more to do. 
827 896 */ 828 - dentry = try_prune_one_dentry(dentry); 897 + if (!parent) 898 + continue; 829 899 830 - rcu_read_lock(); 831 - if (dentry) { 900 + if (unlikely(parent == dentry)) { 901 + /* 902 + * trylocks have failed and d_lock has been held the 903 + * whole time, so it could not have been added to any 904 + * other lists. Just add it back to the shrink list. 905 + */ 832 906 d_shrink_add(dentry, list); 833 907 spin_unlock(&dentry->d_lock); 908 + continue; 834 909 } 910 + /* 911 + * We need to prune ancestors too. This is necessary to prevent 912 + * quadratic behavior of shrink_dcache_parent(), but is also 913 + * expected to be beneficial in reducing dentry cache 914 + * fragmentation. 915 + */ 916 + dentry = parent; 917 + while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) 918 + dentry = dentry_kill(dentry, 1); 835 919 } 836 - rcu_read_unlock(); 837 920 } 838 921 839 922 static enum lru_status ··· 1194 1261 if (data->start == dentry) 1195 1262 goto out; 1196 1263 1197 - /* 1198 - * move only zero ref count dentries to the dispose list. 1199 - * 1200 - * Those which are presently on the shrink list, being processed 1201 - * by shrink_dentry_list(), shouldn't be moved. Otherwise the 1202 - * loop in shrink_dcache_parent() might not make any progress 1203 - * and loop forever. 1204 - */ 1205 - if (dentry->d_lockref.count) { 1206 - dentry_lru_del(dentry); 1207 - } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { 1208 - /* 1209 - * We can't use d_lru_shrink_move() because we 1210 - * need to get the global LRU lock and do the 1211 - * LRU accounting. 
1212 - */ 1213 - d_lru_del(dentry); 1214 - d_shrink_add(dentry, &data->dispose); 1264 + if (dentry->d_flags & DCACHE_SHRINK_LIST) { 1215 1265 data->found++; 1216 - ret = D_WALK_NORETRY; 1266 + } else { 1267 + if (dentry->d_flags & DCACHE_LRU_LIST) 1268 + d_lru_del(dentry); 1269 + if (!dentry->d_lockref.count) { 1270 + d_shrink_add(dentry, &data->dispose); 1271 + data->found++; 1272 + } 1217 1273 } 1218 1274 /* 1219 1275 * We can return to the caller if we have found some (this 1220 1276 * ensures forward progress). We'll be coming back to find 1221 1277 * the rest. 1222 1278 */ 1223 - if (data->found && need_resched()) 1224 - ret = D_WALK_QUIT; 1279 + if (!list_empty(&data->dispose)) 1280 + ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY; 1225 1281 out: 1226 1282 return ret; 1227 1283 } ··· 1240 1318 } 1241 1319 EXPORT_SYMBOL(shrink_dcache_parent); 1242 1320 1243 - static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry) 1321 + static enum d_walk_ret umount_check(void *_data, struct dentry *dentry) 1244 1322 { 1245 - struct select_data *data = _data; 1246 - enum d_walk_ret ret = D_WALK_CONTINUE; 1323 + /* it has busy descendents; complain about those instead */ 1324 + if (!list_empty(&dentry->d_subdirs)) 1325 + return D_WALK_CONTINUE; 1247 1326 1248 - if (dentry->d_lockref.count) { 1249 - dentry_lru_del(dentry); 1250 - if (likely(!list_empty(&dentry->d_subdirs))) 1251 - goto out; 1252 - if (dentry == data->start && dentry->d_lockref.count == 1) 1253 - goto out; 1254 - printk(KERN_ERR 1255 - "BUG: Dentry %p{i=%lx,n=%s}" 1256 - " still in use (%d)" 1257 - " [unmount of %s %s]\n", 1327 + /* root with refcount 1 is fine */ 1328 + if (dentry == _data && dentry->d_lockref.count == 1) 1329 + return D_WALK_CONTINUE; 1330 + 1331 + printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} " 1332 + " still in use (%d) [unmount of %s %s]\n", 1258 1333 dentry, 1259 1334 dentry->d_inode ? 
1260 1335 dentry->d_inode->i_ino : 0UL, 1261 - dentry->d_name.name, 1336 + dentry, 1262 1337 dentry->d_lockref.count, 1263 1338 dentry->d_sb->s_type->name, 1264 1339 dentry->d_sb->s_id); 1265 - BUG(); 1266 - } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { 1267 - /* 1268 - * We can't use d_lru_shrink_move() because we 1269 - * need to get the global LRU lock and do the 1270 - * LRU accounting. 1271 - */ 1272 - if (dentry->d_flags & DCACHE_LRU_LIST) 1273 - d_lru_del(dentry); 1274 - d_shrink_add(dentry, &data->dispose); 1275 - data->found++; 1276 - ret = D_WALK_NORETRY; 1277 - } 1278 - out: 1279 - if (data->found && need_resched()) 1280 - ret = D_WALK_QUIT; 1281 - return ret; 1340 + WARN_ON(1); 1341 + return D_WALK_CONTINUE; 1342 + } 1343 + 1344 + static void do_one_tree(struct dentry *dentry) 1345 + { 1346 + shrink_dcache_parent(dentry); 1347 + d_walk(dentry, dentry, umount_check, NULL); 1348 + d_drop(dentry); 1349 + dput(dentry); 1282 1350 } 1283 1351 1284 1352 /* ··· 1278 1366 { 1279 1367 struct dentry *dentry; 1280 1368 1281 - if (down_read_trylock(&sb->s_umount)) 1282 - BUG(); 1369 + WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked"); 1283 1370 1284 1371 dentry = sb->s_root; 1285 1372 sb->s_root = NULL; 1286 - for (;;) { 1287 - struct select_data data; 1288 - 1289 - INIT_LIST_HEAD(&data.dispose); 1290 - data.start = dentry; 1291 - data.found = 0; 1292 - 1293 - d_walk(dentry, &data, umount_collect, NULL); 1294 - if (!data.found) 1295 - break; 1296 - 1297 - shrink_dentry_list(&data.dispose); 1298 - cond_resched(); 1299 - } 1300 - d_drop(dentry); 1301 - dput(dentry); 1373 + do_one_tree(dentry); 1302 1374 1303 1375 while (!hlist_bl_empty(&sb->s_anon)) { 1304 - struct select_data data; 1305 - dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash); 1306 - 1307 - INIT_LIST_HEAD(&data.dispose); 1308 - data.start = NULL; 1309 - data.found = 0; 1310 - 1311 - d_walk(dentry, &data, umount_collect, NULL); 1312 - if (data.found) 
1313 - shrink_dentry_list(&data.dispose); 1314 - cond_resched(); 1376 + dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash)); 1377 + do_one_tree(dentry); 1315 1378 } 1316 1379 } 1317 1380 ··· 1534 1647 unsigned add_flags = d_flags_for_inode(inode); 1535 1648 1536 1649 spin_lock(&dentry->d_lock); 1537 - dentry->d_flags &= ~DCACHE_ENTRY_TYPE; 1538 - dentry->d_flags |= add_flags; 1650 + __d_set_type(dentry, add_flags); 1539 1651 if (inode) 1540 1652 hlist_add_head(&dentry->d_alias, &inode->i_dentry); 1541 1653 dentry->d_inode = inode;
+1 -1
fs/fuse/control.c
··· 348 348 return register_filesystem(&fuse_ctl_fs_type); 349 349 } 350 350 351 - void fuse_ctl_cleanup(void) 351 + void __exit fuse_ctl_cleanup(void) 352 352 { 353 353 unregister_filesystem(&fuse_ctl_fs_type); 354 354 }
+100 -46
fs/fuse/dir.c
··· 679 679 return create_new_entry(fc, req, dir, entry, S_IFLNK); 680 680 } 681 681 682 + static inline void fuse_update_ctime(struct inode *inode) 683 + { 684 + if (!IS_NOCMTIME(inode)) { 685 + inode->i_ctime = current_fs_time(inode->i_sb); 686 + mark_inode_dirty_sync(inode); 687 + } 688 + } 689 + 682 690 static int fuse_unlink(struct inode *dir, struct dentry *entry) 683 691 { 684 692 int err; ··· 721 713 fuse_invalidate_attr(inode); 722 714 fuse_invalidate_attr(dir); 723 715 fuse_invalidate_entry_cache(entry); 716 + fuse_update_ctime(inode); 724 717 } else if (err == -EINTR) 725 718 fuse_invalidate_entry(entry); 726 719 return err; ··· 752 743 return err; 753 744 } 754 745 755 - static int fuse_rename(struct inode *olddir, struct dentry *oldent, 756 - struct inode *newdir, struct dentry *newent) 746 + static int fuse_rename_common(struct inode *olddir, struct dentry *oldent, 747 + struct inode *newdir, struct dentry *newent, 748 + unsigned int flags, int opcode, size_t argsize) 757 749 { 758 750 int err; 759 - struct fuse_rename_in inarg; 751 + struct fuse_rename2_in inarg; 760 752 struct fuse_conn *fc = get_fuse_conn(olddir); 761 - struct fuse_req *req = fuse_get_req_nopages(fc); 753 + struct fuse_req *req; 762 754 755 + req = fuse_get_req_nopages(fc); 763 756 if (IS_ERR(req)) 764 757 return PTR_ERR(req); 765 758 766 - memset(&inarg, 0, sizeof(inarg)); 759 + memset(&inarg, 0, argsize); 767 760 inarg.newdir = get_node_id(newdir); 768 - req->in.h.opcode = FUSE_RENAME; 761 + inarg.flags = flags; 762 + req->in.h.opcode = opcode; 769 763 req->in.h.nodeid = get_node_id(olddir); 770 764 req->in.numargs = 3; 771 - req->in.args[0].size = sizeof(inarg); 765 + req->in.args[0].size = argsize; 772 766 req->in.args[0].value = &inarg; 773 767 req->in.args[1].size = oldent->d_name.len + 1; 774 768 req->in.args[1].value = oldent->d_name.name; ··· 783 771 if (!err) { 784 772 /* ctime changes */ 785 773 fuse_invalidate_attr(oldent->d_inode); 774 + 
fuse_update_ctime(oldent->d_inode); 775 + 776 + if (flags & RENAME_EXCHANGE) { 777 + fuse_invalidate_attr(newent->d_inode); 778 + fuse_update_ctime(newent->d_inode); 779 + } 786 780 787 781 fuse_invalidate_attr(olddir); 788 782 if (olddir != newdir) 789 783 fuse_invalidate_attr(newdir); 790 784 791 785 /* newent will end up negative */ 792 - if (newent->d_inode) { 786 + if (!(flags & RENAME_EXCHANGE) && newent->d_inode) { 793 787 fuse_invalidate_attr(newent->d_inode); 794 788 fuse_invalidate_entry_cache(newent); 789 + fuse_update_ctime(newent->d_inode); 795 790 } 796 791 } else if (err == -EINTR) { 797 792 /* If request was interrupted, DEITY only knows if the ··· 812 793 } 813 794 814 795 return err; 796 + } 797 + 798 + static int fuse_rename(struct inode *olddir, struct dentry *oldent, 799 + struct inode *newdir, struct dentry *newent) 800 + { 801 + return fuse_rename_common(olddir, oldent, newdir, newent, 0, 802 + FUSE_RENAME, sizeof(struct fuse_rename_in)); 803 + } 804 + 805 + static int fuse_rename2(struct inode *olddir, struct dentry *oldent, 806 + struct inode *newdir, struct dentry *newent, 807 + unsigned int flags) 808 + { 809 + struct fuse_conn *fc = get_fuse_conn(olddir); 810 + int err; 811 + 812 + if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) 813 + return -EINVAL; 814 + 815 + if (fc->no_rename2 || fc->minor < 23) 816 + return -EINVAL; 817 + 818 + err = fuse_rename_common(olddir, oldent, newdir, newent, flags, 819 + FUSE_RENAME2, sizeof(struct fuse_rename2_in)); 820 + if (err == -ENOSYS) { 821 + fc->no_rename2 = 1; 822 + err = -EINVAL; 823 + } 824 + return err; 825 + 815 826 } 816 827 817 828 static int fuse_link(struct dentry *entry, struct inode *newdir, ··· 878 829 inc_nlink(inode); 879 830 spin_unlock(&fc->lock); 880 831 fuse_invalidate_attr(inode); 832 + fuse_update_ctime(inode); 881 833 } else if (err == -EINTR) { 882 834 fuse_invalidate_attr(inode); 883 835 } ··· 896 846 attr->size = i_size_read(inode); 897 847 attr->mtime = 
inode->i_mtime.tv_sec; 898 848 attr->mtimensec = inode->i_mtime.tv_nsec; 849 + attr->ctime = inode->i_ctime.tv_sec; 850 + attr->ctimensec = inode->i_ctime.tv_nsec; 899 851 } 900 852 901 853 stat->dev = inode->i_sb->s_dev; ··· 1556 1504 } 1557 1505 1558 1506 static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg, 1559 - bool trust_local_mtime) 1507 + bool trust_local_cmtime) 1560 1508 { 1561 1509 unsigned ivalid = iattr->ia_valid; 1562 1510 ··· 1575 1523 if (!(ivalid & ATTR_ATIME_SET)) 1576 1524 arg->valid |= FATTR_ATIME_NOW; 1577 1525 } 1578 - if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_mtime)) { 1526 + if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_cmtime)) { 1579 1527 arg->valid |= FATTR_MTIME; 1580 1528 arg->mtime = iattr->ia_mtime.tv_sec; 1581 1529 arg->mtimensec = iattr->ia_mtime.tv_nsec; 1582 - if (!(ivalid & ATTR_MTIME_SET) && !trust_local_mtime) 1530 + if (!(ivalid & ATTR_MTIME_SET) && !trust_local_cmtime) 1583 1531 arg->valid |= FATTR_MTIME_NOW; 1532 + } 1533 + if ((ivalid & ATTR_CTIME) && trust_local_cmtime) { 1534 + arg->valid |= FATTR_CTIME; 1535 + arg->ctime = iattr->ia_ctime.tv_sec; 1536 + arg->ctimensec = iattr->ia_ctime.tv_nsec; 1584 1537 } 1585 1538 } 1586 1539 ··· 1654 1597 /* 1655 1598 * Flush inode->i_mtime to the server 1656 1599 */ 1657 - int fuse_flush_mtime(struct file *file, bool nofail) 1600 + int fuse_flush_times(struct inode *inode, struct fuse_file *ff) 1658 1601 { 1659 - struct inode *inode = file->f_mapping->host; 1660 - struct fuse_inode *fi = get_fuse_inode(inode); 1661 1602 struct fuse_conn *fc = get_fuse_conn(inode); 1662 - struct fuse_req *req = NULL; 1603 + struct fuse_req *req; 1663 1604 struct fuse_setattr_in inarg; 1664 1605 struct fuse_attr_out outarg; 1665 1606 int err; 1666 1607 1667 - if (nofail) { 1668 - req = fuse_get_req_nofail_nopages(fc, file); 1669 - } else { 1670 - req = fuse_get_req_nopages(fc); 1671 - if (IS_ERR(req)) 1672 - return PTR_ERR(req); 1673 - } 1608 
+ req = fuse_get_req_nopages(fc); 1609 + if (IS_ERR(req)) 1610 + return PTR_ERR(req); 1674 1611 1675 1612 memset(&inarg, 0, sizeof(inarg)); 1676 1613 memset(&outarg, 0, sizeof(outarg)); 1677 1614 1678 - inarg.valid |= FATTR_MTIME; 1615 + inarg.valid = FATTR_MTIME; 1679 1616 inarg.mtime = inode->i_mtime.tv_sec; 1680 1617 inarg.mtimensec = inode->i_mtime.tv_nsec; 1681 - 1618 + if (fc->minor >= 23) { 1619 + inarg.valid |= FATTR_CTIME; 1620 + inarg.ctime = inode->i_ctime.tv_sec; 1621 + inarg.ctimensec = inode->i_ctime.tv_nsec; 1622 + } 1623 + if (ff) { 1624 + inarg.valid |= FATTR_FH; 1625 + inarg.fh = ff->fh; 1626 + } 1682 1627 fuse_setattr_fill(fc, req, inode, &inarg, &outarg); 1683 1628 fuse_request_send(fc, req); 1684 1629 err = req->out.h.error; 1685 1630 fuse_put_request(fc, req); 1686 - 1687 - if (!err) 1688 - clear_bit(FUSE_I_MTIME_DIRTY, &fi->state); 1689 1631 1690 1632 return err; 1691 1633 } ··· 1709 1653 bool is_wb = fc->writeback_cache; 1710 1654 loff_t oldsize; 1711 1655 int err; 1712 - bool trust_local_mtime = is_wb && S_ISREG(inode->i_mode); 1656 + bool trust_local_cmtime = is_wb && S_ISREG(inode->i_mode); 1713 1657 1714 1658 if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) 1715 1659 attr->ia_valid |= ATTR_FORCE; ··· 1734 1678 if (is_truncate) { 1735 1679 fuse_set_nowrite(inode); 1736 1680 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); 1681 + if (trust_local_cmtime && attr->ia_size != inode->i_size) 1682 + attr->ia_valid |= ATTR_MTIME | ATTR_CTIME; 1737 1683 } 1738 1684 1739 1685 memset(&inarg, 0, sizeof(inarg)); 1740 1686 memset(&outarg, 0, sizeof(outarg)); 1741 - iattr_to_fattr(attr, &inarg, trust_local_mtime); 1687 + iattr_to_fattr(attr, &inarg, trust_local_cmtime); 1742 1688 if (file) { 1743 1689 struct fuse_file *ff = file->private_data; 1744 1690 inarg.valid |= FATTR_FH; ··· 1769 1711 1770 1712 spin_lock(&fc->lock); 1771 1713 /* the kernel maintains i_mtime locally */ 1772 - if (trust_local_mtime && (attr->ia_valid & ATTR_MTIME)) { 1773 - inode->i_mtime = 
attr->ia_mtime; 1774 - clear_bit(FUSE_I_MTIME_DIRTY, &fi->state); 1714 + if (trust_local_cmtime) { 1715 + if (attr->ia_valid & ATTR_MTIME) 1716 + inode->i_mtime = attr->ia_mtime; 1717 + if (attr->ia_valid & ATTR_CTIME) 1718 + inode->i_ctime = attr->ia_ctime; 1719 + /* FIXME: clear I_DIRTY_SYNC? */ 1775 1720 } 1776 1721 1777 1722 fuse_change_attributes_common(inode, &outarg.attr, ··· 1871 1810 fc->no_setxattr = 1; 1872 1811 err = -EOPNOTSUPP; 1873 1812 } 1874 - if (!err) 1813 + if (!err) { 1875 1814 fuse_invalidate_attr(inode); 1815 + fuse_update_ctime(inode); 1816 + } 1876 1817 return err; 1877 1818 } 1878 1819 ··· 2004 1941 fc->no_removexattr = 1; 2005 1942 err = -EOPNOTSUPP; 2006 1943 } 2007 - if (!err) 1944 + if (!err) { 2008 1945 fuse_invalidate_attr(inode); 2009 - return err; 2010 - } 2011 - 2012 - static int fuse_update_time(struct inode *inode, struct timespec *now, 2013 - int flags) 2014 - { 2015 - if (flags & S_MTIME) { 2016 - inode->i_mtime = *now; 2017 - set_bit(FUSE_I_MTIME_DIRTY, &get_fuse_inode(inode)->state); 2018 - BUG_ON(!S_ISREG(inode->i_mode)); 1946 + fuse_update_ctime(inode); 2019 1947 } 2020 - return 0; 1948 + return err; 2021 1949 } 2022 1950 2023 1951 static const struct inode_operations fuse_dir_inode_operations = { ··· 2018 1964 .unlink = fuse_unlink, 2019 1965 .rmdir = fuse_rmdir, 2020 1966 .rename = fuse_rename, 1967 + .rename2 = fuse_rename2, 2021 1968 .link = fuse_link, 2022 1969 .setattr = fuse_setattr, 2023 1970 .create = fuse_create, ··· 2051 1996 .getxattr = fuse_getxattr, 2052 1997 .listxattr = fuse_listxattr, 2053 1998 .removexattr = fuse_removexattr, 2054 - .update_time = fuse_update_time, 2055 1999 }; 2056 2000 2057 2001 static const struct inode_operations fuse_symlink_inode_operations = {
+53 -31
fs/fuse/file.c
··· 223 223 i_size_write(inode, 0); 224 224 spin_unlock(&fc->lock); 225 225 fuse_invalidate_attr(inode); 226 + if (fc->writeback_cache) 227 + file_update_time(file); 226 228 } 227 229 if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache) 228 230 fuse_link_write_file(file); ··· 234 232 { 235 233 struct fuse_conn *fc = get_fuse_conn(inode); 236 234 int err; 235 + bool lock_inode = (file->f_flags & O_TRUNC) && 236 + fc->atomic_o_trunc && 237 + fc->writeback_cache; 237 238 238 239 err = generic_file_open(inode, file); 239 240 if (err) 240 241 return err; 241 242 243 + if (lock_inode) 244 + mutex_lock(&inode->i_mutex); 245 + 242 246 err = fuse_do_open(fc, get_node_id(inode), file, isdir); 243 - if (err) 244 - return err; 245 247 246 - fuse_finish_open(inode, file); 248 + if (!err) 249 + fuse_finish_open(inode, file); 247 250 248 - return 0; 251 + if (lock_inode) 252 + mutex_unlock(&inode->i_mutex); 253 + 254 + return err; 249 255 } 250 256 251 257 static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode) ··· 324 314 325 315 /* see fuse_vma_close() for !writeback_cache case */ 326 316 if (fc->writeback_cache) 327 - filemap_write_and_wait(file->f_mapping); 328 - 329 - if (test_bit(FUSE_I_MTIME_DIRTY, &get_fuse_inode(inode)->state)) 330 - fuse_flush_mtime(file, true); 317 + write_inode_now(inode, 1); 331 318 332 319 fuse_release_common(file, FUSE_RELEASE); 333 320 ··· 446 439 if (fc->no_flush) 447 440 return 0; 448 441 449 - err = filemap_write_and_wait(file->f_mapping); 442 + err = write_inode_now(inode, 1); 450 443 if (err) 451 444 return err; 452 445 ··· 487 480 if (is_bad_inode(inode)) 488 481 return -EIO; 489 482 490 - err = filemap_write_and_wait_range(inode->i_mapping, start, end); 491 - if (err) 492 - return err; 493 - 494 - if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir)) 495 - return 0; 496 - 497 483 mutex_lock(&inode->i_mutex); 498 484 499 485 /* ··· 494 494 * wait for all outstanding writes, before sending the FSYNC 495 495 * 
request. 496 496 */ 497 - err = write_inode_now(inode, 0); 497 + err = filemap_write_and_wait_range(inode->i_mapping, start, end); 498 498 if (err) 499 499 goto out; 500 500 501 501 fuse_sync_writes(inode); 502 + err = sync_inode_metadata(inode, 1); 503 + if (err) 504 + goto out; 502 505 503 - if (test_bit(FUSE_I_MTIME_DIRTY, &get_fuse_inode(inode)->state)) { 504 - int err = fuse_flush_mtime(file, false); 505 - if (err) 506 - goto out; 507 - } 506 + if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir)) 507 + goto out; 508 508 509 509 req = fuse_get_req_nopages(fc); 510 510 if (IS_ERR(req)) { ··· 1659 1659 fuse_writepage_free(fc, req); 1660 1660 } 1661 1661 1662 - static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc, 1663 - struct fuse_inode *fi) 1662 + static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc, 1663 + struct fuse_inode *fi) 1664 1664 { 1665 1665 struct fuse_file *ff = NULL; 1666 1666 1667 1667 spin_lock(&fc->lock); 1668 - if (!WARN_ON(list_empty(&fi->write_files))) { 1668 + if (!list_empty(&fi->write_files)) { 1669 1669 ff = list_entry(fi->write_files.next, struct fuse_file, 1670 1670 write_entry); 1671 1671 fuse_file_get(ff); ··· 1673 1673 spin_unlock(&fc->lock); 1674 1674 1675 1675 return ff; 1676 + } 1677 + 1678 + static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc, 1679 + struct fuse_inode *fi) 1680 + { 1681 + struct fuse_file *ff = __fuse_write_file_get(fc, fi); 1682 + WARN_ON(!ff); 1683 + return ff; 1684 + } 1685 + 1686 + int fuse_write_inode(struct inode *inode, struct writeback_control *wbc) 1687 + { 1688 + struct fuse_conn *fc = get_fuse_conn(inode); 1689 + struct fuse_inode *fi = get_fuse_inode(inode); 1690 + struct fuse_file *ff; 1691 + int err; 1692 + 1693 + ff = __fuse_write_file_get(fc, fi); 1694 + err = fuse_flush_times(inode, ff); 1695 + if (ff) 1696 + fuse_file_put(ff, 0); 1697 + 1698 + return err; 1676 1699 } 1677 1700 1678 1701 static int fuse_writepage_locked(struct page *page) ··· 2995 
2972 bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) || 2996 2973 (mode & FALLOC_FL_PUNCH_HOLE); 2997 2974 2975 + if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 2976 + return -EOPNOTSUPP; 2977 + 2998 2978 if (fc->no_fallocate) 2999 2979 return -EOPNOTSUPP; 3000 2980 ··· 3043 3017 if (!(mode & FALLOC_FL_KEEP_SIZE)) { 3044 3018 bool changed = fuse_write_update_size(inode, offset + length); 3045 3019 3046 - if (changed && fc->writeback_cache) { 3047 - struct fuse_inode *fi = get_fuse_inode(inode); 3048 - 3049 - inode->i_mtime = current_fs_time(inode->i_sb); 3050 - set_bit(FUSE_I_MTIME_DIRTY, &fi->state); 3051 - } 3020 + if (changed && fc->writeback_cache) 3021 + file_update_time(file); 3052 3022 } 3053 3023 3054 3024 if (mode & FALLOC_FL_PUNCH_HOLE)
+6 -4
fs/fuse/fuse_i.h
··· 119 119 FUSE_I_INIT_RDPLUS, 120 120 /** An operation changing file size is in progress */ 121 121 FUSE_I_SIZE_UNSTABLE, 122 - /** i_mtime has been updated locally; a flush to userspace needed */ 123 - FUSE_I_MTIME_DIRTY, 124 122 }; 125 123 126 124 struct fuse_conn; ··· 542 544 /** Is fallocate not implemented by fs? */ 543 545 unsigned no_fallocate:1; 544 546 547 + /** Is rename with flags implemented by fs? */ 548 + unsigned no_rename2:1; 549 + 545 550 /** Use enhanced/automatic page cache invalidation. */ 546 551 unsigned auto_inval_data:1; 547 552 ··· 726 725 void fuse_dev_cleanup(void); 727 726 728 727 int fuse_ctl_init(void); 729 - void fuse_ctl_cleanup(void); 728 + void __exit fuse_ctl_cleanup(void); 730 729 731 730 /** 732 731 * Allocate a request ··· 892 891 893 892 bool fuse_write_update_size(struct inode *inode, loff_t pos); 894 893 895 - int fuse_flush_mtime(struct file *file, bool nofail); 894 + int fuse_flush_times(struct inode *inode, struct fuse_file *ff); 895 + int fuse_write_inode(struct inode *inode, struct writeback_control *wbc); 896 896 897 897 int fuse_do_setattr(struct inode *inode, struct iattr *attr, 898 898 struct file *file);
+12 -4
fs/fuse/inode.c
··· 175 175 if (!fc->writeback_cache || !S_ISREG(inode->i_mode)) { 176 176 inode->i_mtime.tv_sec = attr->mtime; 177 177 inode->i_mtime.tv_nsec = attr->mtimensec; 178 + inode->i_ctime.tv_sec = attr->ctime; 179 + inode->i_ctime.tv_nsec = attr->ctimensec; 178 180 } 179 - inode->i_ctime.tv_sec = attr->ctime; 180 - inode->i_ctime.tv_nsec = attr->ctimensec; 181 181 182 182 if (attr->blksize != 0) 183 183 inode->i_blkbits = ilog2(attr->blksize); ··· 256 256 inode->i_size = attr->size; 257 257 inode->i_mtime.tv_sec = attr->mtime; 258 258 inode->i_mtime.tv_nsec = attr->mtimensec; 259 + inode->i_ctime.tv_sec = attr->ctime; 260 + inode->i_ctime.tv_nsec = attr->ctimensec; 259 261 if (S_ISREG(inode->i_mode)) { 260 262 fuse_init_common(inode); 261 263 fuse_init_file_inode(inode); ··· 305 303 306 304 if ((inode->i_state & I_NEW)) { 307 305 inode->i_flags |= S_NOATIME; 308 - if (!fc->writeback_cache || !S_ISREG(inode->i_mode)) 306 + if (!fc->writeback_cache || !S_ISREG(attr->mode)) 309 307 inode->i_flags |= S_NOCMTIME; 310 308 inode->i_generation = generation; 311 309 inode->i_data.backing_dev_info = &fc->bdi; ··· 790 788 .alloc_inode = fuse_alloc_inode, 791 789 .destroy_inode = fuse_destroy_inode, 792 790 .evict_inode = fuse_evict_inode, 791 + .write_inode = fuse_write_inode, 793 792 .drop_inode = generic_delete_inode, 794 793 .remount_fs = fuse_remount_fs, 795 794 .put_super = fuse_put_super, ··· 893 890 fc->async_dio = 1; 894 891 if (arg->flags & FUSE_WRITEBACK_CACHE) 895 892 fc->writeback_cache = 1; 893 + if (arg->time_gran && arg->time_gran <= 1000000000) 894 + fc->sb->s_time_gran = arg->time_gran; 895 + else 896 + fc->sb->s_time_gran = 1000000000; 897 + 896 898 } else { 897 899 ra_pages = fc->max_read / PAGE_CACHE_SIZE; 898 900 fc->no_lock = 1; ··· 1004 996 if (sb->s_flags & MS_MANDLOCK) 1005 997 goto err; 1006 998 1007 - sb->s_flags &= ~MS_NOSEC; 999 + sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION); 1008 1000 1009 1001 if (!parse_fuse_opt((char *) data, &d, is_bdev)) 1010 1002 
goto err;
+5
fs/hugetlbfs/inode.c
··· 1030 1030 int error; 1031 1031 int i; 1032 1032 1033 + if (!hugepages_supported()) { 1034 + pr_info("hugetlbfs: disabling because there are no supported hugepage sizes\n"); 1035 + return -ENOTSUPP; 1036 + } 1037 + 1033 1038 error = bdi_init(&hugetlbfs_backing_dev_info); 1034 1039 if (error) 1035 1040 return error;
+3 -3
fs/namei.c
··· 1542 1542 inode = path->dentry->d_inode; 1543 1543 } 1544 1544 err = -ENOENT; 1545 - if (!inode) 1545 + if (!inode || d_is_negative(path->dentry)) 1546 1546 goto out_path_put; 1547 1547 1548 1548 if (should_follow_link(path->dentry, follow)) { ··· 2249 2249 mutex_unlock(&dir->d_inode->i_mutex); 2250 2250 2251 2251 done: 2252 - if (!dentry->d_inode) { 2252 + if (!dentry->d_inode || d_is_negative(dentry)) { 2253 2253 error = -ENOENT; 2254 2254 dput(dentry); 2255 2255 goto out; ··· 2994 2994 finish_lookup: 2995 2995 /* we _can_ be in RCU mode here */ 2996 2996 error = -ENOENT; 2997 - if (d_is_negative(path->dentry)) { 2997 + if (!inode || d_is_negative(path->dentry)) { 2998 2998 path_to_nameidata(path, nd); 2999 2999 goto out; 3000 3000 }
+2
fs/notify/fanotify/fanotify_user.c
··· 698 698 } 699 699 group->overflow_event = &oevent->fse; 700 700 701 + if (force_o_largefile()) 702 + event_f_flags |= O_LARGEFILE; 701 703 group->fanotify_data.f_flags = event_f_flags; 702 704 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS 703 705 spin_lock_init(&group->fanotify_data.access_lock);
+6
fs/posix_acl.c
··· 246 246 umode_t mode = 0; 247 247 int not_equiv = 0; 248 248 249 + /* 250 + * A null ACL can always be presented as mode bits. 251 + */ 252 + if (!acl) 253 + return 0; 254 + 249 255 FOREACH_ACL_ENTRY(pa, acl, pe) { 250 256 switch (pa->e_tag) { 251 257 case ACL_USER_OBJ:
+1 -1
fs/ubifs/super.c
··· 1556 1556 if (c->space_fixup) { 1557 1557 err = ubifs_fixup_free_space(c); 1558 1558 if (err) 1559 - return err; 1559 + goto out; 1560 1560 } 1561 1561 1562 1562 err = check_free_space(c);
+23 -1
fs/xfs/xfs_attr.c
··· 213 213 * Out of line attribute, cannot double split, but 214 214 * make room for the attribute value itself. 215 215 */ 216 - uint dblocks = XFS_B_TO_FSB(mp, valuelen); 216 + uint dblocks = xfs_attr3_rmt_blocks(mp, valuelen); 217 217 nblks += dblocks; 218 218 nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK); 219 219 } ··· 698 698 699 699 trace_xfs_attr_leaf_replace(args); 700 700 701 + /* save the attribute state for later removal*/ 701 702 args->op_flags |= XFS_DA_OP_RENAME; /* an atomic rename */ 702 703 args->blkno2 = args->blkno; /* set 2nd entry info*/ 703 704 args->index2 = args->index; 704 705 args->rmtblkno2 = args->rmtblkno; 705 706 args->rmtblkcnt2 = args->rmtblkcnt; 707 + args->rmtvaluelen2 = args->rmtvaluelen; 708 + 709 + /* 710 + * clear the remote attr state now that it is saved so that the 711 + * values reflect the state of the attribute we are about to 712 + * add, not the attribute we just found and will remove later. 713 + */ 714 + args->rmtblkno = 0; 715 + args->rmtblkcnt = 0; 716 + args->rmtvaluelen = 0; 706 717 } 707 718 708 719 /* ··· 805 794 args->blkno = args->blkno2; 806 795 args->rmtblkno = args->rmtblkno2; 807 796 args->rmtblkcnt = args->rmtblkcnt2; 797 + args->rmtvaluelen = args->rmtvaluelen2; 808 798 if (args->rmtblkno) { 809 799 error = xfs_attr_rmtval_remove(args); 810 800 if (error) ··· 1011 999 1012 1000 trace_xfs_attr_node_replace(args); 1013 1001 1002 + /* save the attribute state for later removal*/ 1014 1003 args->op_flags |= XFS_DA_OP_RENAME; /* atomic rename op */ 1015 1004 args->blkno2 = args->blkno; /* set 2nd entry info*/ 1016 1005 args->index2 = args->index; 1017 1006 args->rmtblkno2 = args->rmtblkno; 1018 1007 args->rmtblkcnt2 = args->rmtblkcnt; 1008 + args->rmtvaluelen2 = args->rmtvaluelen; 1009 + 1010 + /* 1011 + * clear the remote attr state now that it is saved so that the 1012 + * values reflect the state of the attribute we are about to 1013 + * add, not the attribute we just found and will remove 
later. 1014 + */ 1019 1015 args->rmtblkno = 0; 1020 1016 args->rmtblkcnt = 0; 1017 + args->rmtvaluelen = 0; 1021 1018 } 1022 1019 1023 1020 retval = xfs_attr3_leaf_add(blk->bp, state->args); ··· 1154 1133 args->blkno = args->blkno2; 1155 1134 args->rmtblkno = args->rmtblkno2; 1156 1135 args->rmtblkcnt = args->rmtblkcnt2; 1136 + args->rmtvaluelen = args->rmtvaluelen2; 1157 1137 if (args->rmtblkno) { 1158 1138 error = xfs_attr_rmtval_remove(args); 1159 1139 if (error)
+11 -10
fs/xfs/xfs_attr_leaf.c
··· 1229 1229 name_rmt->valueblk = 0; 1230 1230 args->rmtblkno = 1; 1231 1231 args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen); 1232 + args->rmtvaluelen = args->valuelen; 1232 1233 } 1233 1234 xfs_trans_log_buf(args->trans, bp, 1234 1235 XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index), ··· 2168 2167 if (!xfs_attr_namesp_match(args->flags, entry->flags)) 2169 2168 continue; 2170 2169 args->index = probe; 2171 - args->valuelen = be32_to_cpu(name_rmt->valuelen); 2170 + args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen); 2172 2171 args->rmtblkno = be32_to_cpu(name_rmt->valueblk); 2173 2172 args->rmtblkcnt = xfs_attr3_rmt_blocks( 2174 2173 args->dp->i_mount, 2175 - args->valuelen); 2174 + args->rmtvaluelen); 2176 2175 return XFS_ERROR(EEXIST); 2177 2176 } 2178 2177 } ··· 2221 2220 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index); 2222 2221 ASSERT(name_rmt->namelen == args->namelen); 2223 2222 ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0); 2224 - valuelen = be32_to_cpu(name_rmt->valuelen); 2223 + args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen); 2225 2224 args->rmtblkno = be32_to_cpu(name_rmt->valueblk); 2226 2225 args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount, 2227 - valuelen); 2226 + args->rmtvaluelen); 2228 2227 if (args->flags & ATTR_KERNOVAL) { 2229 - args->valuelen = valuelen; 2228 + args->valuelen = args->rmtvaluelen; 2230 2229 return 0; 2231 2230 } 2232 - if (args->valuelen < valuelen) { 2233 - args->valuelen = valuelen; 2231 + if (args->valuelen < args->rmtvaluelen) { 2232 + args->valuelen = args->rmtvaluelen; 2234 2233 return XFS_ERROR(ERANGE); 2235 2234 } 2236 - args->valuelen = valuelen; 2235 + args->valuelen = args->rmtvaluelen; 2237 2236 } 2238 2237 return 0; 2239 2238 } ··· 2520 2519 ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0); 2521 2520 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index); 2522 2521 name_rmt->valueblk = cpu_to_be32(args->rmtblkno); 2523 - name_rmt->valuelen = 
cpu_to_be32(args->valuelen); 2522 + name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen); 2524 2523 xfs_trans_log_buf(args->trans, bp, 2525 2524 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); 2526 2525 } ··· 2678 2677 ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0); 2679 2678 name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index); 2680 2679 name_rmt->valueblk = cpu_to_be32(args->rmtblkno); 2681 - name_rmt->valuelen = cpu_to_be32(args->valuelen); 2680 + name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen); 2682 2681 xfs_trans_log_buf(args->trans, bp1, 2683 2682 XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt))); 2684 2683 }
+1
fs/xfs/xfs_attr_list.c
··· 447 447 args.dp = context->dp; 448 448 args.whichfork = XFS_ATTR_FORK; 449 449 args.valuelen = valuelen; 450 + args.rmtvaluelen = valuelen; 450 451 args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS); 451 452 args.rmtblkno = be32_to_cpu(name_rmt->valueblk); 452 453 args.rmtblkcnt = xfs_attr3_rmt_blocks(
+5 -3
fs/xfs/xfs_attr_remote.c
··· 337 337 struct xfs_buf *bp; 338 338 xfs_dablk_t lblkno = args->rmtblkno; 339 339 __uint8_t *dst = args->value; 340 - int valuelen = args->valuelen; 340 + int valuelen; 341 341 int nmap; 342 342 int error; 343 343 int blkcnt = args->rmtblkcnt; ··· 347 347 trace_xfs_attr_rmtval_get(args); 348 348 349 349 ASSERT(!(args->flags & ATTR_KERNOVAL)); 350 + ASSERT(args->rmtvaluelen == args->valuelen); 350 351 352 + valuelen = args->rmtvaluelen; 351 353 while (valuelen > 0) { 352 354 nmap = ATTR_RMTVALUE_MAPSIZE; 353 355 error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno, ··· 417 415 * attributes have headers, we can't just do a straight byte to FSB 418 416 * conversion and have to take the header space into account. 419 417 */ 420 - blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen); 418 + blkcnt = xfs_attr3_rmt_blocks(mp, args->rmtvaluelen); 421 419 error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff, 422 420 XFS_ATTR_FORK); 423 421 if (error) ··· 482 480 */ 483 481 lblkno = args->rmtblkno; 484 482 blkcnt = args->rmtblkcnt; 485 - valuelen = args->valuelen; 483 + valuelen = args->rmtvaluelen; 486 484 while (valuelen > 0) { 487 485 struct xfs_buf *bp; 488 486 xfs_daddr_t dblkno;
+2
fs/xfs/xfs_da_btree.h
··· 60 60 int index; /* index of attr of interest in blk */ 61 61 xfs_dablk_t rmtblkno; /* remote attr value starting blkno */ 62 62 int rmtblkcnt; /* remote attr value block count */ 63 + int rmtvaluelen; /* remote attr value length in bytes */ 63 64 xfs_dablk_t blkno2; /* blkno of 2nd attr leaf of interest */ 64 65 int index2; /* index of 2nd attr in blk */ 65 66 xfs_dablk_t rmtblkno2; /* remote attr value starting blkno */ 66 67 int rmtblkcnt2; /* remote attr value block count */ 68 + int rmtvaluelen2; /* remote attr value length in bytes */ 67 69 int op_flags; /* operation flags */ 68 70 enum xfs_dacmp cmpresult; /* name compare result for lookups */ 69 71 } xfs_da_args_t;
+29 -26
fs/xfs/xfs_iops.c
··· 124 124 xfs_dentry_to_name(&teardown, dentry, 0); 125 125 126 126 xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); 127 - iput(inode); 128 127 } 129 128 130 129 STATIC int 131 - xfs_vn_mknod( 130 + xfs_generic_create( 132 131 struct inode *dir, 133 132 struct dentry *dentry, 134 133 umode_t mode, 135 - dev_t rdev) 134 + dev_t rdev, 135 + bool tmpfile) /* unnamed file */ 136 136 { 137 137 struct inode *inode; 138 138 struct xfs_inode *ip = NULL; ··· 156 156 if (error) 157 157 return error; 158 158 159 - xfs_dentry_to_name(&name, dentry, mode); 160 - error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); 159 + if (!tmpfile) { 160 + xfs_dentry_to_name(&name, dentry, mode); 161 + error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); 162 + } else { 163 + error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip); 164 + } 161 165 if (unlikely(error)) 162 166 goto out_free_acl; 163 167 ··· 184 180 } 185 181 #endif 186 182 187 - d_instantiate(dentry, inode); 183 + if (tmpfile) 184 + d_tmpfile(dentry, inode); 185 + else 186 + d_instantiate(dentry, inode); 187 + 188 188 out_free_acl: 189 189 if (default_acl) 190 190 posix_acl_release(default_acl); ··· 197 189 return -error; 198 190 199 191 out_cleanup_inode: 200 - xfs_cleanup_inode(dir, inode, dentry); 192 + if (!tmpfile) 193 + xfs_cleanup_inode(dir, inode, dentry); 194 + iput(inode); 201 195 goto out_free_acl; 196 + } 197 + 198 + STATIC int 199 + xfs_vn_mknod( 200 + struct inode *dir, 201 + struct dentry *dentry, 202 + umode_t mode, 203 + dev_t rdev) 204 + { 205 + return xfs_generic_create(dir, dentry, mode, rdev, false); 202 206 } 203 207 204 208 STATIC int ··· 373 353 374 354 out_cleanup_inode: 375 355 xfs_cleanup_inode(dir, inode, dentry); 356 + iput(inode); 376 357 out: 377 358 return -error; 378 359 } ··· 1074 1053 struct dentry *dentry, 1075 1054 umode_t mode) 1076 1055 { 1077 - int error; 1078 - struct xfs_inode *ip; 1079 - struct inode *inode; 1080 - 1081 - error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, 
&ip); 1082 - if (unlikely(error)) 1083 - return -error; 1084 - 1085 - inode = VFS_I(ip); 1086 - 1087 - error = xfs_init_security(inode, dir, &dentry->d_name); 1088 - if (unlikely(error)) { 1089 - iput(inode); 1090 - return -error; 1091 - } 1092 - 1093 - d_tmpfile(dentry, inode); 1094 - 1095 - return 0; 1056 + return xfs_generic_create(dir, dentry, mode, 0, true); 1096 1057 } 1097 1058 1098 1059 static const struct inode_operations xfs_inode_operations = {
+6 -4
fs/xfs/xfs_log.c
··· 616 616 int error = 0; 617 617 int min_logfsbs; 618 618 619 - if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) 620 - xfs_notice(mp, "Mounting Filesystem"); 621 - else { 619 + if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) { 620 + xfs_notice(mp, "Mounting V%d Filesystem", 621 + XFS_SB_VERSION_NUM(&mp->m_sb)); 622 + } else { 622 623 xfs_notice(mp, 623 - "Mounting filesystem in no-recovery mode. Filesystem will be inconsistent."); 624 + "Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.", 625 + XFS_SB_VERSION_NUM(&mp->m_sb)); 624 626 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); 625 627 } 626 628
-2
fs/xfs/xfs_mount.c
··· 743 743 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE; 744 744 if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size)) 745 745 mp->m_inode_cluster_size = new_size; 746 - xfs_info(mp, "Using inode cluster size of %d bytes", 747 - mp->m_inode_cluster_size); 748 746 } 749 747 750 748 /*
-4
fs/xfs/xfs_sb.c
··· 201 201 * write validation, we don't need to check feature masks. 202 202 */ 203 203 if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) { 204 - xfs_alert(mp, 205 - "Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n" 206 - "Use of these features in this kernel is at your own risk!"); 207 - 208 204 if (xfs_sb_has_compat_feature(sbp, 209 205 XFS_SB_FEAT_COMPAT_UNKNOWN)) { 210 206 xfs_warn(mp,
+2 -1
include/acpi/acpi_bus.h
··· 261 261 u32 inrush_current:1; /* Serialize Dx->D0 */ 262 262 u32 power_removed:1; /* Optimize Dx->D0 */ 263 263 u32 ignore_parent:1; /* Power is independent of parent power state */ 264 - u32 reserved:27; 264 + u32 dsw_present:1; /* _DSW present? */ 265 + u32 reserved:26; 265 266 }; 266 267 267 268 struct acpi_device_power_state {
+3
include/asm-generic/fixmap.h
··· 93 93 #define set_fixmap_io(idx, phys) \ 94 94 __set_fixmap(idx, phys, FIXMAP_PAGE_IO) 95 95 96 + #define set_fixmap_offset_io(idx, phys) \ 97 + __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO) 98 + 96 99 #endif /* __ASSEMBLY__ */ 97 100 #endif /* __ASM_GENERIC_FIXMAP_H */
+1 -1
include/asm-generic/word-at-a-time.h
··· 50 50 } 51 51 52 52 #ifndef zero_bytemask 53 - #define zero_bytemask(mask) (~0ul << __fls(mask) << 1) 53 + #define zero_bytemask(mask) (~1ul << __fls(mask)) 54 54 #endif 55 55 56 56 #endif /* _ASM_WORD_AT_A_TIME_H */
+16
include/drm/drm_pciids.h
··· 637 637 {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 638 638 {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 639 639 {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 640 + {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 641 + {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 642 + {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 643 + {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 644 + {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 645 + {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 646 + {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 647 + {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 648 + {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 649 + {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 650 + {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 651 + {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 652 + {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 653 + {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 654 + {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 655 + {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 640 656 {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 641 657 {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 642 658 {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+2 -2
include/drm/i915_pciids.h
··· 191 191 INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \ 192 192 INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \ 193 193 INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \ 194 - INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \ 195 - INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \ 194 + INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \ 195 + INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \ 196 196 INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \ 197 197 INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \ 198 198 INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
+6
include/linux/acpi.h
··· 554 554 int acpi_dev_suspend_late(struct device *dev); 555 555 int acpi_dev_resume_early(struct device *dev); 556 556 int acpi_subsys_prepare(struct device *dev); 557 + void acpi_subsys_complete(struct device *dev); 557 558 int acpi_subsys_suspend_late(struct device *dev); 558 559 int acpi_subsys_resume_early(struct device *dev); 560 + int acpi_subsys_suspend(struct device *dev); 561 + int acpi_subsys_freeze(struct device *dev); 559 562 #else 560 563 static inline int acpi_dev_suspend_late(struct device *dev) { return 0; } 561 564 static inline int acpi_dev_resume_early(struct device *dev) { return 0; } 562 565 static inline int acpi_subsys_prepare(struct device *dev) { return 0; } 566 + static inline void acpi_subsys_complete(struct device *dev) {} 563 567 static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } 564 568 static inline int acpi_subsys_resume_early(struct device *dev) { return 0; } 569 + static inline int acpi_subsys_suspend(struct device *dev) { return 0; } 570 + static inline int acpi_subsys_freeze(struct device *dev) { return 0; } 565 571 #endif 566 572 567 573 #if defined(CONFIG_ACPI) && defined(CONFIG_PM)
+2
include/linux/dcache.h
··· 221 221 #define DCACHE_SYMLINK_TYPE 0x00300000 /* Symlink */ 222 222 #define DCACHE_FILE_TYPE 0x00400000 /* Other file type */ 223 223 224 + #define DCACHE_MAY_FREE 0x00800000 225 + 224 226 extern seqlock_t rename_lock; 225 227 226 228 static inline int dname_external(const struct dentry *dentry)
+2
include/linux/ftrace.h
··· 535 535 extern int ftrace_arch_read_dyn_info(char *buf, int size); 536 536 537 537 extern int skip_trace(unsigned long ip); 538 + extern void ftrace_module_init(struct module *mod); 538 539 539 540 extern void ftrace_disable_daemon(void); 540 541 extern void ftrace_enable_daemon(void); ··· 545 544 static inline void ftrace_disable_daemon(void) { } 546 545 static inline void ftrace_enable_daemon(void) { } 547 546 static inline void ftrace_release_mod(struct module *mod) {} 547 + static inline void ftrace_module_init(struct module *mod) {} 548 548 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) 549 549 { 550 550 return -EINVAL;
+10
include/linux/hugetlb.h
··· 412 412 return &mm->page_table_lock; 413 413 } 414 414 415 + static inline bool hugepages_supported(void) 416 + { 417 + /* 418 + * Some platform decide whether they support huge pages at boot 419 + * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when 420 + * there is no such support 421 + */ 422 + return HPAGE_SHIFT != 0; 423 + } 424 + 415 425 #else /* CONFIG_HUGETLB_PAGE */ 416 426 struct hstate {}; 417 427 #define alloc_huge_page_node(h, nid) NULL
+2 -2
include/linux/interrupt.h
··· 210 210 /** 211 211 * irq_set_affinity - Set the irq affinity of a given irq 212 212 * @irq: Interrupt to set affinity 213 - * @mask: cpumask 213 + * @cpumask: cpumask 214 214 * 215 215 * Fails if cpumask does not contain an online CPU 216 216 */ ··· 223 223 /** 224 224 * irq_force_affinity - Force the irq affinity of a given irq 225 225 * @irq: Interrupt to set affinity 226 - * @mask: cpumask 226 + * @cpumask: cpumask 227 227 * 228 228 * Same as irq_set_affinity, but without checking the mask against 229 229 * online cpus.
+2
include/linux/irq.h
··· 603 603 return d ? irqd_get_trigger_type(d) : 0; 604 604 } 605 605 606 + unsigned int arch_dynirq_lower_bound(unsigned int from); 607 + 606 608 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, 607 609 struct module *owner); 608 610
+2 -2
include/linux/linkage.h
··· 12 12 #endif 13 13 14 14 #ifdef __cplusplus 15 - #define CPP_ASMLINKAGE extern "C" __visible 15 + #define CPP_ASMLINKAGE extern "C" 16 16 #else 17 - #define CPP_ASMLINKAGE __visible 17 + #define CPP_ASMLINKAGE 18 18 #endif 19 19 20 20 #ifndef asmlinkage
-1
include/linux/mfd/rtsx_common.h
··· 45 45 struct rtsx_slot { 46 46 struct platform_device *p_dev; 47 47 void (*card_event)(struct platform_device *p_dev); 48 - void (*done_transfer)(struct platform_device *p_dev); 49 48 }; 50 49 51 50 #endif
-6
include/linux/mfd/rtsx_pci.h
··· 943 943 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout); 944 944 int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist, 945 945 int num_sg, bool read, int timeout); 946 - int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, 947 - int num_sg, bool read); 948 - int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, 949 - int num_sg, bool read); 950 - int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist, 951 - int sg_count, bool read); 952 946 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len); 953 947 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len); 954 948 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card);
+2
include/linux/mm.h
··· 370 370 } 371 371 #endif 372 372 373 + extern void kvfree(const void *addr); 374 + 373 375 static inline void compound_lock(struct page *page) 374 376 { 375 377 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+7
include/linux/netlink.h
··· 169 169 extern int netlink_add_tap(struct netlink_tap *nt); 170 170 extern int netlink_remove_tap(struct netlink_tap *nt); 171 171 172 + bool __netlink_ns_capable(const struct netlink_skb_parms *nsp, 173 + struct user_namespace *ns, int cap); 174 + bool netlink_ns_capable(const struct sk_buff *skb, 175 + struct user_namespace *ns, int cap); 176 + bool netlink_capable(const struct sk_buff *skb, int cap); 177 + bool netlink_net_capable(const struct sk_buff *skb, int cap); 178 + 172 179 #endif /* __LINUX_NETLINK_H */
+5
include/linux/of_irq.h
··· 44 44 45 45 #ifdef CONFIG_OF_IRQ 46 46 extern int of_irq_count(struct device_node *dev); 47 + extern int of_irq_get(struct device_node *dev, int index); 47 48 #else 48 49 static inline int of_irq_count(struct device_node *dev) 50 + { 51 + return 0; 52 + } 53 + static inline int of_irq_get(struct device_node *dev, int index) 49 54 { 50 55 return 0; 51 56 }
+9
include/linux/slub_def.h
··· 101 101 struct kmem_cache_node *node[MAX_NUMNODES]; 102 102 }; 103 103 104 + #ifdef CONFIG_SYSFS 105 + #define SLAB_SUPPORTS_SYSFS 106 + void sysfs_slab_remove(struct kmem_cache *); 107 + #else 108 + static inline void sysfs_slab_remove(struct kmem_cache *s) 109 + { 110 + } 111 + #endif 112 + 104 113 #endif /* _LINUX_SLUB_DEF_H */
+1 -1
include/linux/sock_diag.h
··· 23 23 void sock_diag_save_cookie(void *sk, __u32 *cookie); 24 24 25 25 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); 26 - int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk, 26 + int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, 27 27 struct sk_buff *skb, int attrtype); 28 28 29 29 #endif
+7
include/linux/suspend.h
··· 187 187 void (*recover)(void); 188 188 }; 189 189 190 + struct platform_freeze_ops { 191 + int (*begin)(void); 192 + void (*end)(void); 193 + }; 194 + 190 195 #ifdef CONFIG_SUSPEND 191 196 /** 192 197 * suspend_set_ops - set platform dependent suspend operations ··· 199 194 */ 200 195 extern void suspend_set_ops(const struct platform_suspend_ops *ops); 201 196 extern int suspend_valid_only_mem(suspend_state_t state); 197 + extern void freeze_set_ops(const struct platform_freeze_ops *ops); 202 198 extern void freeze_wake(void); 203 199 204 200 /** ··· 226 220 227 221 static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} 228 222 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } 223 + static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {} 229 224 static inline void freeze_wake(void) {} 230 225 #endif /* !CONFIG_SUSPEND */ 231 226
-1
include/linux/tty.h
··· 61 61 struct tty_buffer *head; /* Queue head */ 62 62 struct work_struct work; 63 63 struct mutex lock; 64 - spinlock_t flush_lock; 65 64 atomic_t priority; 66 65 struct tty_buffer sentinel; 67 66 struct llist_head free; /* Free queue head */
+5 -1
include/net/af_vsock.h
··· 155 155 156 156 /**** CORE ****/ 157 157 158 - int vsock_core_init(const struct vsock_transport *t); 158 + int __vsock_core_init(const struct vsock_transport *t, struct module *owner); 159 + static inline int vsock_core_init(const struct vsock_transport *t) 160 + { 161 + return __vsock_core_init(t, THIS_MODULE); 162 + } 159 163 void vsock_core_exit(void); 160 164 161 165 /**** UTILS ****/
+5
include/net/sock.h
··· 2255 2255 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level, 2256 2256 int type); 2257 2257 2258 + bool sk_ns_capable(const struct sock *sk, 2259 + struct user_namespace *user_ns, int cap); 2260 + bool sk_capable(const struct sock *sk, int cap); 2261 + bool sk_net_capable(const struct sock *sk, int cap); 2262 + 2258 2263 /* 2259 2264 * Enable debug/info messages 2260 2265 */
+2 -2
include/trace/events/module.h
··· 25 25 { (1UL << TAINT_OOT_MODULE), "O" }, \ 26 26 { (1UL << TAINT_FORCED_MODULE), "F" }, \ 27 27 { (1UL << TAINT_CRAP), "C" }, \ 28 - { (1UL << TAINT_UNSIGNED_MODULE), "X" }) 28 + { (1UL << TAINT_UNSIGNED_MODULE), "E" }) 29 29 30 30 TRACE_EVENT(module_load, 31 31 ··· 80 80 81 81 TP_fast_assign( 82 82 __entry->ip = ip; 83 - __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs); 83 + __entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs); 84 84 __assign_str(name, mod->name); 85 85 ), 86 86
+20 -2
include/uapi/linux/fuse.h
··· 96 96 * 97 97 * 7.23 98 98 * - add FUSE_WRITEBACK_CACHE 99 + * - add time_gran to fuse_init_out 100 + * - add reserved space to fuse_init_out 101 + * - add FATTR_CTIME 102 + * - add ctime and ctimensec to fuse_setattr_in 103 + * - add FUSE_RENAME2 request 99 104 */ 100 105 101 106 #ifndef _LINUX_FUSE_H ··· 196 191 #define FATTR_ATIME_NOW (1 << 7) 197 192 #define FATTR_MTIME_NOW (1 << 8) 198 193 #define FATTR_LOCKOWNER (1 << 9) 194 + #define FATTR_CTIME (1 << 10) 199 195 200 196 /** 201 197 * Flags returned by the OPEN request ··· 354 348 FUSE_BATCH_FORGET = 42, 355 349 FUSE_FALLOCATE = 43, 356 350 FUSE_READDIRPLUS = 44, 351 + FUSE_RENAME2 = 45, 357 352 358 353 /* CUSE specific operations */ 359 354 CUSE_INIT = 4096, ··· 433 426 uint64_t newdir; 434 427 }; 435 428 429 + struct fuse_rename2_in { 430 + uint64_t newdir; 431 + uint32_t flags; 432 + uint32_t padding; 433 + }; 434 + 436 435 struct fuse_link_in { 437 436 uint64_t oldnodeid; 438 437 }; ··· 451 438 uint64_t lock_owner; 452 439 uint64_t atime; 453 440 uint64_t mtime; 454 - uint64_t unused2; 441 + uint64_t ctime; 455 442 uint32_t atimensec; 456 443 uint32_t mtimensec; 457 - uint32_t unused3; 444 + uint32_t ctimensec; 458 445 uint32_t mode; 459 446 uint32_t unused4; 460 447 uint32_t uid; ··· 572 559 uint32_t flags; 573 560 }; 574 561 562 + #define FUSE_COMPAT_INIT_OUT_SIZE 8 563 + #define FUSE_COMPAT_22_INIT_OUT_SIZE 24 564 + 575 565 struct fuse_init_out { 576 566 uint32_t major; 577 567 uint32_t minor; ··· 583 567 uint16_t max_background; 584 568 uint16_t congestion_threshold; 585 569 uint32_t max_write; 570 + uint32_t time_gran; 571 + uint32_t unused[9]; 586 572 }; 587 573 588 574 #define CUSE_INIT_INFO_MAX 4096
+1 -1
init/main.c
··· 476 476 vmalloc_init(); 477 477 } 478 478 479 - asmlinkage void __init start_kernel(void) 479 + asmlinkage __visible void __init start_kernel(void) 480 480 { 481 481 char * command_line; 482 482 extern const struct kernel_param __start___param[], __stop___param[];
+2 -2
kernel/audit.c
··· 643 643 if ((task_active_pid_ns(current) != &init_pid_ns)) 644 644 return -EPERM; 645 645 646 - if (!capable(CAP_AUDIT_CONTROL)) 646 + if (!netlink_capable(skb, CAP_AUDIT_CONTROL)) 647 647 err = -EPERM; 648 648 break; 649 649 case AUDIT_USER: 650 650 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG: 651 651 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2: 652 - if (!capable(CAP_AUDIT_WRITE)) 652 + if (!netlink_capable(skb, CAP_AUDIT_WRITE)) 653 653 err = -EPERM; 654 654 break; 655 655 default: /* bad msg */
+1 -1
kernel/context_tracking.c
··· 120 120 * instead of preempt_schedule() to exit user context if needed before 121 121 * calling the scheduler. 122 122 */ 123 - asmlinkage void __sched notrace preempt_schedule_context(void) 123 + asmlinkage __visible void __sched notrace preempt_schedule_context(void) 124 124 { 125 125 enum ctx_state prev_ctx; 126 126
+22
kernel/hrtimer.c
··· 234 234 goto again; 235 235 } 236 236 timer->base = new_base; 237 + } else { 238 + if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) { 239 + cpu = this_cpu; 240 + goto again; 241 + } 237 242 } 238 243 return new_base; 239 244 } ··· 573 568 return; 574 569 575 570 cpu_base->expires_next.tv64 = expires_next.tv64; 571 + 572 + /* 573 + * If a hang was detected in the last timer interrupt then we 574 + * leave the hang delay active in the hardware. We want the 575 + * system to make progress. That also prevents the following 576 + * scenario: 577 + * T1 expires 50ms from now 578 + * T2 expires 5s from now 579 + * 580 + * T1 is removed, so this code is called and would reprogram 581 + * the hardware to 5s from now. Any hrtimer_start after that 582 + * will not reprogram the hardware due to hang_detected being 583 + * set. So we'd effectivly block all timers until the T2 event 584 + * fires. 585 + */ 586 + if (cpu_base->hang_detected) 587 + return; 576 588 577 589 if (cpu_base->expires_next.tv64 != KTIME_MAX) 578 590 tick_program_event(cpu_base->expires_next, 1);
+7
kernel/irq/irqdesc.c
··· 363 363 if (from > irq) 364 364 return -EINVAL; 365 365 from = irq; 366 + } else { 367 + /* 368 + * For interrupts which are freely allocated the 369 + * architecture can force a lower bound to the @from 370 + * argument. x86 uses this to exclude the GSI space. 371 + */ 372 + from = arch_dynirq_lower_bound(from); 366 373 } 367 374 368 375 mutex_lock(&sparse_irq_lock);
+1 -1
kernel/locking/lockdep.c
··· 4188 4188 } 4189 4189 EXPORT_SYMBOL_GPL(debug_show_held_locks); 4190 4190 4191 - asmlinkage void lockdep_sys_exit(void) 4191 + asmlinkage __visible void lockdep_sys_exit(void) 4192 4192 { 4193 4193 struct task_struct *curr = current; 4194 4194
+3 -3
kernel/module.c
··· 815 815 return -EFAULT; 816 816 name[MODULE_NAME_LEN-1] = '\0'; 817 817 818 - if (!(flags & O_NONBLOCK)) 819 - pr_warn("waiting module removal not supported: please upgrade\n"); 820 - 821 818 if (mutex_lock_interruptible(&module_mutex) != 0) 822 819 return -EINTR; 823 820 ··· 3267 3270 } 3268 3271 3269 3272 dynamic_debug_setup(info->debug, info->num_debug); 3273 + 3274 + /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */ 3275 + ftrace_module_init(mod); 3270 3276 3271 3277 /* Finally it's fully formed, ready to start executing. */ 3272 3278 err = complete_formation(mod, info);
+1 -1
kernel/power/snapshot.c
··· 1586 1586 return -ENOMEM; 1587 1587 } 1588 1588 1589 - asmlinkage int swsusp_save(void) 1589 + asmlinkage __visible int swsusp_save(void) 1590 1590 { 1591 1591 unsigned int nr_pages, nr_highmem; 1592 1592
+15
kernel/power/suspend.c
··· 38 38 }; 39 39 40 40 static const struct platform_suspend_ops *suspend_ops; 41 + static const struct platform_freeze_ops *freeze_ops; 41 42 42 43 static bool need_suspend_ops(suspend_state_t state) 43 44 { ··· 47 46 48 47 static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); 49 48 static bool suspend_freeze_wake; 49 + 50 + void freeze_set_ops(const struct platform_freeze_ops *ops) 51 + { 52 + lock_system_sleep(); 53 + freeze_ops = ops; 54 + unlock_system_sleep(); 55 + } 50 56 51 57 static void freeze_begin(void) 52 58 { ··· 299 291 error = suspend_ops->begin(state); 300 292 if (error) 301 293 goto Close; 294 + } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) { 295 + error = freeze_ops->begin(); 296 + if (error) 297 + goto Close; 302 298 } 303 299 suspend_console(); 304 300 suspend_test_start(); ··· 328 316 Close: 329 317 if (need_suspend_ops(state) && suspend_ops->end) 330 318 suspend_ops->end(); 319 + else if (state == PM_SUSPEND_FREEZE && freeze_ops->end) 320 + freeze_ops->end(); 321 + 331 322 trace_machine_suspend(PWR_EVENT_EXIT); 332 323 return error; 333 324
+2 -2
kernel/printk/printk.c
··· 1674 1674 * 1675 1675 * See the vsnprintf() documentation for format string extensions over C99. 1676 1676 */ 1677 - asmlinkage int printk(const char *fmt, ...) 1677 + asmlinkage __visible int printk(const char *fmt, ...) 1678 1678 { 1679 1679 va_list args; 1680 1680 int r; ··· 1737 1737 } 1738 1738 } 1739 1739 1740 - asmlinkage void early_printk(const char *fmt, ...) 1740 + asmlinkage __visible void early_printk(const char *fmt, ...) 1741 1741 { 1742 1742 va_list ap; 1743 1743
+5 -5
kernel/sched/core.c
··· 2192 2192 * schedule_tail - first thing a freshly forked thread must call. 2193 2193 * @prev: the thread we just switched away from. 2194 2194 */ 2195 - asmlinkage void schedule_tail(struct task_struct *prev) 2195 + asmlinkage __visible void schedule_tail(struct task_struct *prev) 2196 2196 __releases(rq->lock) 2197 2197 { 2198 2198 struct rq *rq = this_rq(); ··· 2741 2741 blk_schedule_flush_plug(tsk); 2742 2742 } 2743 2743 2744 - asmlinkage void __sched schedule(void) 2744 + asmlinkage __visible void __sched schedule(void) 2745 2745 { 2746 2746 struct task_struct *tsk = current; 2747 2747 ··· 2751 2751 EXPORT_SYMBOL(schedule); 2752 2752 2753 2753 #ifdef CONFIG_CONTEXT_TRACKING 2754 - asmlinkage void __sched schedule_user(void) 2754 + asmlinkage __visible void __sched schedule_user(void) 2755 2755 { 2756 2756 /* 2757 2757 * If we come here after a random call to set_need_resched(), ··· 2783 2783 * off of preempt_enable. Kernel preemptions off return from interrupt 2784 2784 * occur there and call schedule directly. 2785 2785 */ 2786 - asmlinkage void __sched notrace preempt_schedule(void) 2786 + asmlinkage __visible void __sched notrace preempt_schedule(void) 2787 2787 { 2788 2788 /* 2789 2789 * If there is a non-zero preempt_count or interrupts are disabled, ··· 2813 2813 * Note, that this is called and return with irqs disabled. This will 2814 2814 * protect us against recursive calling from irq. 2815 2815 */ 2816 - asmlinkage void __sched preempt_schedule_irq(void) 2816 + asmlinkage __visible void __sched preempt_schedule_irq(void) 2817 2817 { 2818 2818 enum ctx_state prev_state; 2819 2819
+7 -2
kernel/softirq.c
··· 223 223 static inline void lockdep_softirq_end(bool in_hardirq) { } 224 224 #endif 225 225 226 - asmlinkage void __do_softirq(void) 226 + asmlinkage __visible void __do_softirq(void) 227 227 { 228 228 unsigned long end = jiffies + MAX_SOFTIRQ_TIME; 229 229 unsigned long old_flags = current->flags; ··· 299 299 tsk_restore_flags(current, old_flags, PF_MEMALLOC); 300 300 } 301 301 302 - asmlinkage void do_softirq(void) 302 + asmlinkage __visible void do_softirq(void) 303 303 { 304 304 __u32 pending; 305 305 unsigned long flags; ··· 778 778 int __init __weak arch_early_irq_init(void) 779 779 { 780 780 return 0; 781 + } 782 + 783 + unsigned int __weak arch_dynirq_lower_bound(unsigned int from) 784 + { 785 + return from; 781 786 }
+1 -1
kernel/timer.c
··· 838 838 839 839 bit = find_last_bit(&mask, BITS_PER_LONG); 840 840 841 - mask = (1 << bit) - 1; 841 + mask = (1UL << bit) - 1; 842 842 843 843 expires_limit = expires_limit & ~(mask); 844 844
+4 -23
kernel/trace/ftrace.c
··· 4330 4330 ftrace_process_locs(mod, start, end); 4331 4331 } 4332 4332 4333 - static int ftrace_module_notify_enter(struct notifier_block *self, 4334 - unsigned long val, void *data) 4333 + void ftrace_module_init(struct module *mod) 4335 4334 { 4336 - struct module *mod = data; 4337 - 4338 - if (val == MODULE_STATE_COMING) 4339 - ftrace_init_module(mod, mod->ftrace_callsites, 4340 - mod->ftrace_callsites + 4341 - mod->num_ftrace_callsites); 4342 - return 0; 4335 + ftrace_init_module(mod, mod->ftrace_callsites, 4336 + mod->ftrace_callsites + 4337 + mod->num_ftrace_callsites); 4343 4338 } 4344 4339 4345 4340 static int ftrace_module_notify_exit(struct notifier_block *self, ··· 4348 4353 return 0; 4349 4354 } 4350 4355 #else 4351 - static int ftrace_module_notify_enter(struct notifier_block *self, 4352 - unsigned long val, void *data) 4353 - { 4354 - return 0; 4355 - } 4356 4356 static int ftrace_module_notify_exit(struct notifier_block *self, 4357 4357 unsigned long val, void *data) 4358 4358 { 4359 4359 return 0; 4360 4360 } 4361 4361 #endif /* CONFIG_MODULES */ 4362 - 4363 - struct notifier_block ftrace_module_enter_nb = { 4364 - .notifier_call = ftrace_module_notify_enter, 4365 - .priority = INT_MAX, /* Run before anything that can use kprobes */ 4366 - }; 4367 4362 4368 4363 struct notifier_block ftrace_module_exit_nb = { 4369 4364 .notifier_call = ftrace_module_notify_exit, ··· 4387 4402 ret = ftrace_process_locs(NULL, 4388 4403 __start_mcount_loc, 4389 4404 __stop_mcount_loc); 4390 - 4391 - ret = register_module_notifier(&ftrace_module_enter_nb); 4392 - if (ret) 4393 - pr_warning("Failed to register trace ftrace module enter notifier\n"); 4394 4405 4395 4406 ret = register_module_notifier(&ftrace_module_exit_nb); 4396 4407 if (ret)
+1 -1
kernel/trace/trace_events_trigger.c
··· 77 77 data->ops->func(data); 78 78 continue; 79 79 } 80 - filter = rcu_dereference(data->filter); 80 + filter = rcu_dereference_sched(data->filter); 81 81 if (filter && !filter_match_preds(filter, rec)) 82 82 continue; 83 83 if (data->cmd_ops->post_trigger) {
+2 -2
kernel/tracepoint.c
··· 188 188 WARN_ON_ONCE(1); 189 189 return PTR_ERR(old); 190 190 } 191 - release_probes(old); 192 191 193 192 /* 194 193 * rcu_assign_pointer has a smp_wmb() which makes sure that the new ··· 199 200 rcu_assign_pointer(tp->funcs, tp_funcs); 200 201 if (!static_key_enabled(&tp->key)) 201 202 static_key_slow_inc(&tp->key); 203 + release_probes(old); 202 204 return 0; 203 205 } 204 206 ··· 221 221 WARN_ON_ONCE(1); 222 222 return PTR_ERR(old); 223 223 } 224 - release_probes(old); 225 224 226 225 if (!tp_funcs) { 227 226 /* Removed last function */ ··· 231 232 static_key_slow_dec(&tp->key); 232 233 } 233 234 rcu_assign_pointer(tp->funcs, tp_funcs); 235 + release_probes(old); 234 236 return 0; 235 237 } 236 238
+2 -2
lib/dump_stack.c
··· 23 23 #ifdef CONFIG_SMP 24 24 static atomic_t dump_lock = ATOMIC_INIT(-1); 25 25 26 - asmlinkage void dump_stack(void) 26 + asmlinkage __visible void dump_stack(void) 27 27 { 28 28 int was_locked; 29 29 int old; ··· 55 55 preempt_enable(); 56 56 } 57 57 #else 58 - asmlinkage void dump_stack(void) 58 + asmlinkage __visible void dump_stack(void) 59 59 { 60 60 __dump_stack(); 61 61 }
+12 -10
mm/compaction.c
··· 671 671 struct compact_control *cc) 672 672 { 673 673 struct page *page; 674 - unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn; 674 + unsigned long high_pfn, low_pfn, pfn, z_end_pfn; 675 675 int nr_freepages = cc->nr_freepages; 676 676 struct list_head *freelist = &cc->freepages; 677 677 678 678 /* 679 679 * Initialise the free scanner. The starting point is where we last 680 - * scanned from (or the end of the zone if starting). The low point 681 - * is the end of the pageblock the migration scanner is using. 680 + * successfully isolated from, zone-cached value, or the end of the 681 + * zone when isolating for the first time. We need this aligned to 682 + * the pageblock boundary, because we do pfn -= pageblock_nr_pages 683 + * in the for loop. 684 + * The low boundary is the end of the pageblock the migration scanner 685 + * is using. 682 686 */ 683 - pfn = cc->free_pfn; 687 + pfn = cc->free_pfn & ~(pageblock_nr_pages-1); 684 688 low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages); 685 689 686 690 /* ··· 704 700 for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages; 705 701 pfn -= pageblock_nr_pages) { 706 702 unsigned long isolated; 703 + unsigned long end_pfn; 707 704 708 705 /* 709 706 * This can iterate a massively long zone without finding any ··· 739 734 isolated = 0; 740 735 741 736 /* 742 - * As pfn may not start aligned, pfn+pageblock_nr_page 743 - * may cross a MAX_ORDER_NR_PAGES boundary and miss 744 - * a pfn_valid check. Ensure isolate_freepages_block() 745 - * only scans within a pageblock 737 + * Take care when isolating in last pageblock of a zone which 738 + * ends in the middle of a pageblock. 746 739 */ 747 - end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); 748 - end_pfn = min(end_pfn, z_end_pfn); 740 + end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn); 749 741 isolated = isolate_freepages_block(cc, pfn, end_pfn, 750 742 freelist, false); 751 743 nr_freepages += isolated;
+28 -21
mm/filemap.c
··· 906 906 * Looks up the page cache slot at @mapping & @offset. If there is a 907 907 * page cache page, it is returned with an increased refcount. 908 908 * 909 - * If the slot holds a shadow entry of a previously evicted page, it 910 - * is returned. 909 + * If the slot holds a shadow entry of a previously evicted page, or a 910 + * swap entry from shmem/tmpfs, it is returned. 911 911 * 912 912 * Otherwise, %NULL is returned. 913 913 */ ··· 928 928 if (radix_tree_deref_retry(page)) 929 929 goto repeat; 930 930 /* 931 - * Otherwise, shmem/tmpfs must be storing a swap entry 932 - * here as an exceptional entry: so return it without 933 - * attempting to raise page count. 931 + * A shadow entry of a recently evicted page, 932 + * or a swap entry from shmem/tmpfs. Return 933 + * it without attempting to raise page count. 934 934 */ 935 935 goto out; 936 936 } ··· 983 983 * page cache page, it is returned locked and with an increased 984 984 * refcount. 985 985 * 986 - * If the slot holds a shadow entry of a previously evicted page, it 987 - * is returned. 986 + * If the slot holds a shadow entry of a previously evicted page, or a 987 + * swap entry from shmem/tmpfs, it is returned. 988 988 * 989 989 * Otherwise, %NULL is returned. 990 990 * ··· 1099 1099 * with ascending indexes. There may be holes in the indices due to 1100 1100 * not-present pages. 1101 1101 * 1102 - * Any shadow entries of evicted pages are included in the returned 1103 - * array. 1102 + * Any shadow entries of evicted pages, or swap entries from 1103 + * shmem/tmpfs, are included in the returned array. 1104 1104 * 1105 1105 * find_get_entries() returns the number of pages and shadow entries 1106 1106 * which were found. ··· 1128 1128 if (radix_tree_deref_retry(page)) 1129 1129 goto restart; 1130 1130 /* 1131 - * Otherwise, we must be storing a swap entry 1132 - * here as an exceptional entry: so return it 1133 - * without attempting to raise page count. 
1131 + * A shadow entry of a recently evicted page, 1132 + * or a swap entry from shmem/tmpfs. Return 1133 + * it without attempting to raise page count. 1134 1134 */ 1135 1135 goto export; 1136 1136 } ··· 1198 1198 goto restart; 1199 1199 } 1200 1200 /* 1201 - * Otherwise, shmem/tmpfs must be storing a swap entry 1202 - * here as an exceptional entry: so skip over it - 1203 - * we only reach this from invalidate_mapping_pages(). 1201 + * A shadow entry of a recently evicted page, 1202 + * or a swap entry from shmem/tmpfs. Skip 1203 + * over it. 1204 1204 */ 1205 1205 continue; 1206 1206 } ··· 1265 1265 goto restart; 1266 1266 } 1267 1267 /* 1268 - * Otherwise, shmem/tmpfs must be storing a swap entry 1269 - * here as an exceptional entry: so stop looking for 1270 - * contiguous pages. 1268 + * A shadow entry of a recently evicted page, 1269 + * or a swap entry from shmem/tmpfs. Stop 1270 + * looking for contiguous pages. 1271 1271 */ 1272 1272 break; 1273 1273 } ··· 1341 1341 goto restart; 1342 1342 } 1343 1343 /* 1344 - * This function is never used on a shmem/tmpfs 1345 - * mapping, so a swap entry won't be found here. 1344 + * A shadow entry of a recently evicted page. 1345 + * 1346 + * Those entries should never be tagged, but 1347 + * this tree walk is lockless and the tags are 1348 + * looked up in bulk, one radix tree node at a 1349 + * time, so there is a sizable window for page 1350 + * reclaim to evict a page we saw tagged. 1351 + * 1352 + * Skip over it. 1346 1353 */ 1347 - BUG(); 1354 + continue; 1348 1355 } 1349 1356 1350 1357 if (!page_cache_get_speculative(page))
+14 -5
mm/hugetlb.c
··· 1981 1981 { 1982 1982 int i; 1983 1983 1984 - /* Some platform decide whether they support huge pages at boot 1985 - * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when 1986 - * there is no such support 1987 - */ 1988 - if (HPAGE_SHIFT == 0) 1984 + if (!hugepages_supported()) 1989 1985 return 0; 1990 1986 1991 1987 if (!size_to_hstate(default_hstate_size)) { ··· 2108 2112 unsigned long tmp; 2109 2113 int ret; 2110 2114 2115 + if (!hugepages_supported()) 2116 + return -ENOTSUPP; 2117 + 2111 2118 tmp = h->max_huge_pages; 2112 2119 2113 2120 if (write && h->order >= MAX_ORDER) ··· 2164 2165 unsigned long tmp; 2165 2166 int ret; 2166 2167 2168 + if (!hugepages_supported()) 2169 + return -ENOTSUPP; 2170 + 2167 2171 tmp = h->nr_overcommit_huge_pages; 2168 2172 2169 2173 if (write && h->order >= MAX_ORDER) ··· 2192 2190 void hugetlb_report_meminfo(struct seq_file *m) 2193 2191 { 2194 2192 struct hstate *h = &default_hstate; 2193 + if (!hugepages_supported()) 2194 + return; 2195 2195 seq_printf(m, 2196 2196 "HugePages_Total: %5lu\n" 2197 2197 "HugePages_Free: %5lu\n" ··· 2210 2206 int hugetlb_report_node_meminfo(int nid, char *buf) 2211 2207 { 2212 2208 struct hstate *h = &default_hstate; 2209 + if (!hugepages_supported()) 2210 + return 0; 2213 2211 return sprintf(buf, 2214 2212 "Node %d HugePages_Total: %5u\n" 2215 2213 "Node %d HugePages_Free: %5u\n" ··· 2225 2219 { 2226 2220 struct hstate *h; 2227 2221 int nid; 2222 + 2223 + if (!hugepages_supported()) 2224 + return; 2228 2225 2229 2226 for_each_node_state(nid, N_MEMORY) 2230 2227 for_each_hstate(h)
+12 -8
mm/memcontrol.c
··· 6686 6686 pgoff = pte_to_pgoff(ptent); 6687 6687 6688 6688 /* page is moved even if it's not RSS of this task(page-faulted). */ 6689 - page = find_get_page(mapping, pgoff); 6690 - 6691 6689 #ifdef CONFIG_SWAP 6692 6690 /* shmem/tmpfs may report page out on swap: account for that too. */ 6693 - if (radix_tree_exceptional_entry(page)) { 6694 - swp_entry_t swap = radix_to_swp_entry(page); 6695 - if (do_swap_account) 6696 - *entry = swap; 6697 - page = find_get_page(swap_address_space(swap), swap.val); 6698 - } 6691 + if (shmem_mapping(mapping)) { 6692 + page = find_get_entry(mapping, pgoff); 6693 + if (radix_tree_exceptional_entry(page)) { 6694 + swp_entry_t swp = radix_to_swp_entry(page); 6695 + if (do_swap_account) 6696 + *entry = swp; 6697 + page = find_get_page(swap_address_space(swp), swp.val); 6698 + } 6699 + } else 6700 + page = find_get_page(mapping, pgoff); 6701 + #else 6702 + page = find_get_page(mapping, pgoff); 6699 6703 #endif 6700 6704 return page; 6701 6705 }
+3 -3
mm/page-writeback.c
··· 593 593 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse) 594 594 * => fast response on large errors; small oscillation near setpoint 595 595 */ 596 - static inline long long pos_ratio_polynom(unsigned long setpoint, 596 + static long long pos_ratio_polynom(unsigned long setpoint, 597 597 unsigned long dirty, 598 598 unsigned long limit) 599 599 { 600 600 long long pos_ratio; 601 601 long x; 602 602 603 - x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, 603 + x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, 604 604 limit - setpoint + 1); 605 605 pos_ratio = x; 606 606 pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; ··· 842 842 x_intercept = bdi_setpoint + span; 843 843 844 844 if (bdi_dirty < x_intercept - span / 4) { 845 - pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty), 845 + pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty), 846 846 x_intercept - bdi_setpoint + 1); 847 847 } else 848 848 pos_ratio /= 4;
+3 -3
mm/slab.c
··· 166 166 typedef unsigned short freelist_idx_t; 167 167 #endif 168 168 169 - #define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) 169 + #define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1) 170 170 171 171 /* 172 172 * true if a page was allocated from pfmemalloc reserves for network-based ··· 2572 2572 return freelist; 2573 2573 } 2574 2574 2575 - static inline freelist_idx_t get_free_obj(struct page *page, unsigned char idx) 2575 + static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx) 2576 2576 { 2577 2577 return ((freelist_idx_t *)page->freelist)[idx]; 2578 2578 } 2579 2579 2580 2580 static inline void set_free_obj(struct page *page, 2581 - unsigned char idx, freelist_idx_t val) 2581 + unsigned int idx, freelist_idx_t val) 2582 2582 { 2583 2583 ((freelist_idx_t *)(page->freelist))[idx] = val; 2584 2584 }
+1
mm/slab.h
··· 91 91 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS) 92 92 93 93 int __kmem_cache_shutdown(struct kmem_cache *); 94 + void slab_kmem_cache_release(struct kmem_cache *); 94 95 95 96 struct seq_file; 96 97 struct file;
+11 -2
mm/slab_common.c
··· 323 323 } 324 324 #endif /* CONFIG_MEMCG_KMEM */ 325 325 326 + void slab_kmem_cache_release(struct kmem_cache *s) 327 + { 328 + kfree(s->name); 329 + kmem_cache_free(kmem_cache, s); 330 + } 331 + 326 332 void kmem_cache_destroy(struct kmem_cache *s) 327 333 { 328 334 get_online_cpus(); ··· 358 352 rcu_barrier(); 359 353 360 354 memcg_free_cache_params(s); 361 - kfree(s->name); 362 - kmem_cache_free(kmem_cache, s); 355 + #ifdef SLAB_SUPPORTS_SYSFS 356 + sysfs_slab_remove(s); 357 + #else 358 + slab_kmem_cache_release(s); 359 + #endif 363 360 goto out_put_cpus; 364 361 365 362 out_unlock:
+15 -26
mm/slub.c
··· 210 210 #ifdef CONFIG_SYSFS 211 211 static int sysfs_slab_add(struct kmem_cache *); 212 212 static int sysfs_slab_alias(struct kmem_cache *, const char *); 213 - static void sysfs_slab_remove(struct kmem_cache *); 214 213 static void memcg_propagate_slab_attrs(struct kmem_cache *s); 215 214 #else 216 215 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 217 216 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 218 217 { return 0; } 219 - static inline void sysfs_slab_remove(struct kmem_cache *s) { } 220 - 221 218 static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { } 222 219 #endif 223 220 ··· 3235 3238 3236 3239 int __kmem_cache_shutdown(struct kmem_cache *s) 3237 3240 { 3238 - int rc = kmem_cache_close(s); 3239 - 3240 - if (!rc) { 3241 - /* 3242 - * Since slab_attr_store may take the slab_mutex, we should 3243 - * release the lock while removing the sysfs entry in order to 3244 - * avoid a deadlock. Because this is pretty much the last 3245 - * operation we do and the lock will be released shortly after 3246 - * that in slab_common.c, we could just move sysfs_slab_remove 3247 - * to a later point in common code. We should do that when we 3248 - * have a common sysfs framework for all allocators. 3249 - */ 3250 - mutex_unlock(&slab_mutex); 3251 - sysfs_slab_remove(s); 3252 - mutex_lock(&slab_mutex); 3253 - } 3254 - 3255 - return rc; 3241 + return kmem_cache_close(s); 3256 3242 } 3257 3243 3258 3244 /******************************************************************** ··· 5051 5071 #ifdef CONFIG_MEMCG_KMEM 5052 5072 int i; 5053 5073 char *buffer = NULL; 5074 + struct kmem_cache *root_cache; 5054 5075 5055 - if (!is_root_cache(s)) 5076 + if (is_root_cache(s)) 5056 5077 return; 5078 + 5079 + root_cache = s->memcg_params->root_cache; 5057 5080 5058 5081 /* 5059 5082 * This mean this cache had no attribute written. 
Therefore, no point 5060 5083 * in copying default values around 5061 5084 */ 5062 - if (!s->max_attr_size) 5085 + if (!root_cache->max_attr_size) 5063 5086 return; 5064 5087 5065 5088 for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) { ··· 5084 5101 */ 5085 5102 if (buffer) 5086 5103 buf = buffer; 5087 - else if (s->max_attr_size < ARRAY_SIZE(mbuf)) 5104 + else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf)) 5088 5105 buf = mbuf; 5089 5106 else { 5090 5107 buffer = (char *) get_zeroed_page(GFP_KERNEL); ··· 5093 5110 buf = buffer; 5094 5111 } 5095 5112 5096 - attr->show(s->memcg_params->root_cache, buf); 5113 + attr->show(root_cache, buf); 5097 5114 attr->store(s, buf, strlen(buf)); 5098 5115 } 5099 5116 5100 5117 if (buffer) 5101 5118 free_page((unsigned long)buffer); 5102 5119 #endif 5120 + } 5121 + 5122 + static void kmem_cache_release(struct kobject *k) 5123 + { 5124 + slab_kmem_cache_release(to_slab(k)); 5103 5125 } 5104 5126 5105 5127 static const struct sysfs_ops slab_sysfs_ops = { ··· 5114 5126 5115 5127 static struct kobj_type slab_ktype = { 5116 5128 .sysfs_ops = &slab_sysfs_ops, 5129 + .release = kmem_cache_release, 5117 5130 }; 5118 5131 5119 5132 static int uevent_filter(struct kset *kset, struct kobject *kobj) ··· 5241 5252 goto out; 5242 5253 } 5243 5254 5244 - static void sysfs_slab_remove(struct kmem_cache *s) 5255 + void sysfs_slab_remove(struct kmem_cache *s) 5245 5256 { 5246 5257 if (slab_state < FULL) 5247 5258 /*
-8
mm/truncate.c
··· 484 484 unsigned long count = 0; 485 485 int i; 486 486 487 - /* 488 - * Note: this function may get called on a shmem/tmpfs mapping: 489 - * pagevec_lookup() might then return 0 prematurely (because it 490 - * got a gangful of swap entries); but it's hardly worth worrying 491 - * about - it can rarely have anything to free from such a mapping 492 - * (most pages are dirty), and already skips over any difficulties. 493 - */ 494 - 495 487 pagevec_init(&pvec, 0); 496 488 while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, 497 489 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+10
mm/util.c
··· 10 10 #include <linux/swapops.h> 11 11 #include <linux/mman.h> 12 12 #include <linux/hugetlb.h> 13 + #include <linux/vmalloc.h> 13 14 14 15 #include <asm/uaccess.h> 15 16 ··· 387 386 return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); 388 387 } 389 388 EXPORT_SYMBOL(vm_mmap); 389 + 390 + void kvfree(const void *addr) 391 + { 392 + if (is_vmalloc_addr(addr)) 393 + vfree(addr); 394 + else 395 + kfree(addr); 396 + } 397 + EXPORT_SYMBOL(kvfree); 390 398 391 399 struct address_space *page_mapping(struct page *page) 392 400 {
+5 -3
mm/vmacache.c
··· 81 81 for (i = 0; i < VMACACHE_SIZE; i++) { 82 82 struct vm_area_struct *vma = current->vmacache[i]; 83 83 84 - if (vma && vma->vm_start <= addr && vma->vm_end > addr) { 85 - BUG_ON(vma->vm_mm != mm); 84 + if (!vma) 85 + continue; 86 + if (WARN_ON_ONCE(vma->vm_mm != mm)) 87 + break; 88 + if (vma->vm_start <= addr && vma->vm_end > addr) 86 89 return vma; 87 - } 88 90 } 89 91 90 92 return NULL;
+18
mm/vmscan.c
··· 1916 1916 get_lru_size(lruvec, LRU_INACTIVE_FILE); 1917 1917 1918 1918 /* 1919 + * Prevent the reclaimer from falling into the cache trap: as 1920 + * cache pages start out inactive, every cache fault will tip 1921 + * the scan balance towards the file LRU. And as the file LRU 1922 + * shrinks, so does the window for rotation from references. 1923 + * This means we have a runaway feedback loop where a tiny 1924 + * thrashing file LRU becomes infinitely more attractive than 1925 + * anon pages. Try to detect this based on file LRU size. 1926 + */ 1927 + if (global_reclaim(sc)) { 1928 + unsigned long free = zone_page_state(zone, NR_FREE_PAGES); 1929 + 1930 + if (unlikely(file + free <= high_wmark_pages(zone))) { 1931 + scan_balance = SCAN_ANON; 1932 + goto out; 1933 + } 1934 + } 1935 + 1936 + /* 1919 1937 * There is enough inactive page cache, do not reclaim 1920 1938 * anything from the anonymous working set right now. 1921 1939 */
+6 -3
net/bluetooth/hci_conn.c
··· 819 819 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 820 820 struct hci_cp_auth_requested cp; 821 821 822 - /* encrypt must be pending if auth is also pending */ 823 - set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 824 - 825 822 cp.handle = cpu_to_le16(conn->handle); 826 823 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, 827 824 sizeof(cp), &cp); 825 + 826 + /* If we're already encrypted set the REAUTH_PEND flag, 827 + * otherwise set the ENCRYPT_PEND. 828 + */ 828 829 if (conn->key_type != 0xff) 829 830 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 831 + else 832 + set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 830 833 } 831 834 832 835 return 0;
+6
net/bluetooth/hci_event.c
··· 3330 3330 if (!conn) 3331 3331 goto unlock; 3332 3332 3333 + /* For BR/EDR the necessary steps are taken through the 3334 + * auth_complete event. 3335 + */ 3336 + if (conn->type != LE_LINK) 3337 + goto unlock; 3338 + 3333 3339 if (!ev->status) 3334 3340 conn->sec_level = conn->pending_sec_level; 3335 3341
+15
net/bridge/br_netlink.c
··· 445 445 return 0; 446 446 } 447 447 448 + static int br_dev_newlink(struct net *src_net, struct net_device *dev, 449 + struct nlattr *tb[], struct nlattr *data[]) 450 + { 451 + struct net_bridge *br = netdev_priv(dev); 452 + 453 + if (tb[IFLA_ADDRESS]) { 454 + spin_lock_bh(&br->lock); 455 + br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS])); 456 + spin_unlock_bh(&br->lock); 457 + } 458 + 459 + return register_netdevice(dev); 460 + } 461 + 448 462 static size_t br_get_link_af_size(const struct net_device *dev) 449 463 { 450 464 struct net_port_vlans *pv; ··· 487 473 .priv_size = sizeof(struct net_bridge), 488 474 .setup = br_dev_setup, 489 475 .validate = br_validate, 476 + .newlink = br_dev_newlink, 490 477 .dellink = br_dev_delete, 491 478 }; 492 479
+2 -2
net/can/gw.c
··· 804 804 u8 limhops = 0; 805 805 int err = 0; 806 806 807 - if (!capable(CAP_NET_ADMIN)) 807 + if (!netlink_capable(skb, CAP_NET_ADMIN)) 808 808 return -EPERM; 809 809 810 810 if (nlmsg_len(nlh) < sizeof(*r)) ··· 893 893 u8 limhops = 0; 894 894 int err = 0; 895 895 896 - if (!capable(CAP_NET_ADMIN)) 896 + if (!netlink_capable(skb, CAP_NET_ADMIN)) 897 897 return -EPERM; 898 898 899 899 if (nlmsg_len(nlh) < sizeof(*r))
+5 -4
net/ceph/osdmap.c
··· 1548 1548 return; 1549 1549 1550 1550 for (i = 0; i < len; i++) { 1551 - if (osds[i] != CRUSH_ITEM_NONE && 1552 - osdmap->osd_primary_affinity[i] != 1551 + int osd = osds[i]; 1552 + 1553 + if (osd != CRUSH_ITEM_NONE && 1554 + osdmap->osd_primary_affinity[osd] != 1553 1555 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) { 1554 1556 break; 1555 1557 } ··· 1565 1563 * osd's pgs get rejected as primary. 1566 1564 */ 1567 1565 for (i = 0; i < len; i++) { 1568 - int osd; 1566 + int osd = osds[i]; 1569 1567 u32 aff; 1570 1568 1571 - osd = osds[i]; 1572 1569 if (osd == CRUSH_ITEM_NONE) 1573 1570 continue; 1574 1571
+9 -7
net/core/filter.c
··· 122 122 return 0; 123 123 } 124 124 125 + /* Register mappings for user programs. */ 126 + #define A_REG 0 127 + #define X_REG 7 128 + #define TMP_REG 8 129 + #define ARG2_REG 2 130 + #define ARG3_REG 3 131 + 125 132 /** 126 133 * __sk_run_filter - run a filter on a given context 127 134 * @ctx: buffer to run the filter on ··· 249 242 250 243 regs[FP_REG] = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; 251 244 regs[ARG1_REG] = (u64) (unsigned long) ctx; 245 + regs[A_REG] = 0; 246 + regs[X_REG] = 0; 252 247 253 248 select_insn: 254 249 goto *jumptable[insn->code]; ··· 651 642 { 652 643 return raw_smp_processor_id(); 653 644 } 654 - 655 - /* Register mappings for user programs. */ 656 - #define A_REG 0 657 - #define X_REG 7 658 - #define TMP_REG 8 659 - #define ARG2_REG 2 660 - #define ARG3_REG 3 661 645 662 646 static bool convert_bpf_extensions(struct sock_filter *fp, 663 647 struct sock_filter_int **insnp)
+33 -20
net/core/rtnetlink.c
··· 774 774 return 0; 775 775 } 776 776 777 - static size_t rtnl_port_size(const struct net_device *dev) 777 + static size_t rtnl_port_size(const struct net_device *dev, 778 + u32 ext_filter_mask) 778 779 { 779 780 size_t port_size = nla_total_size(4) /* PORT_VF */ 780 781 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */ ··· 791 790 size_t port_self_size = nla_total_size(sizeof(struct nlattr)) 792 791 + port_size; 793 792 794 - if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent) 793 + if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || 794 + !(ext_filter_mask & RTEXT_FILTER_VF)) 795 795 return 0; 796 796 if (dev_num_vf(dev->dev.parent)) 797 797 return port_self_size + vf_ports_size + ··· 828 826 + nla_total_size(ext_filter_mask 829 827 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ 830 828 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ 831 - + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ 829 + + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ 832 830 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */ 833 831 + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */ 834 832 + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */ ··· 890 888 return 0; 891 889 } 892 890 893 - static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev) 891 + static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev, 892 + u32 ext_filter_mask) 894 893 { 895 894 int err; 896 895 897 - if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent) 896 + if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || 897 + !(ext_filter_mask & RTEXT_FILTER_VF)) 898 898 return 0; 899 899 900 900 err = rtnl_port_self_fill(skb, dev); ··· 1083 1079 nla_nest_end(skb, vfinfo); 1084 1080 } 1085 1081 1086 - if (rtnl_port_fill(skb, dev)) 1082 + if (rtnl_port_fill(skb, dev, ext_filter_mask)) 1087 1083 goto nla_put_failure; 1088 1084 1089 1085 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) { ··· 1202 1198 
struct hlist_head *head; 1203 1199 struct nlattr *tb[IFLA_MAX+1]; 1204 1200 u32 ext_filter_mask = 0; 1201 + int err; 1205 1202 1206 1203 s_h = cb->args[0]; 1207 1204 s_idx = cb->args[1]; ··· 1223 1218 hlist_for_each_entry_rcu(dev, head, index_hlist) { 1224 1219 if (idx < s_idx) 1225 1220 goto cont; 1226 - if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, 1227 - NETLINK_CB(cb->skb).portid, 1228 - cb->nlh->nlmsg_seq, 0, 1229 - NLM_F_MULTI, 1230 - ext_filter_mask) <= 0) 1221 + err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, 1222 + NETLINK_CB(cb->skb).portid, 1223 + cb->nlh->nlmsg_seq, 0, 1224 + NLM_F_MULTI, 1225 + ext_filter_mask); 1226 + /* If we ran out of room on the first message, 1227 + * we're in trouble 1228 + */ 1229 + WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); 1230 + 1231 + if (err <= 0) 1231 1232 goto out; 1232 1233 1233 1234 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); ··· 1406 1395 return 0; 1407 1396 } 1408 1397 1409 - static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, 1398 + static int do_setlink(const struct sk_buff *skb, 1399 + struct net_device *dev, struct ifinfomsg *ifm, 1410 1400 struct nlattr **tb, char *ifname, int modified) 1411 1401 { 1412 1402 const struct net_device_ops *ops = dev->netdev_ops; ··· 1419 1407 err = PTR_ERR(net); 1420 1408 goto errout; 1421 1409 } 1422 - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) { 1410 + if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { 1423 1411 err = -EPERM; 1424 1412 goto errout; 1425 1413 } ··· 1673 1661 if (err < 0) 1674 1662 goto errout; 1675 1663 1676 - err = do_setlink(dev, ifm, tb, ifname, 0); 1664 + err = do_setlink(skb, dev, ifm, tb, ifname, 0); 1677 1665 errout: 1678 1666 return err; 1679 1667 } ··· 1790 1778 } 1791 1779 EXPORT_SYMBOL(rtnl_create_link); 1792 1780 1793 - static int rtnl_group_changelink(struct net *net, int group, 1781 + static int rtnl_group_changelink(const struct sk_buff *skb, 1782 + struct net *net, int group, 1794 1783 struct ifinfomsg *ifm, 1795 
1784 struct nlattr **tb) 1796 1785 { ··· 1800 1787 1801 1788 for_each_netdev(net, dev) { 1802 1789 if (dev->group == group) { 1803 - err = do_setlink(dev, ifm, tb, NULL, 0); 1790 + err = do_setlink(skb, dev, ifm, tb, NULL, 0); 1804 1791 if (err < 0) 1805 1792 return err; 1806 1793 } ··· 1942 1929 modified = 1; 1943 1930 } 1944 1931 1945 - return do_setlink(dev, ifm, tb, ifname, modified); 1932 + return do_setlink(skb, dev, ifm, tb, ifname, modified); 1946 1933 } 1947 1934 1948 1935 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 1949 1936 if (ifm->ifi_index == 0 && tb[IFLA_GROUP]) 1950 - return rtnl_group_changelink(net, 1937 + return rtnl_group_changelink(skb, net, 1951 1938 nla_get_u32(tb[IFLA_GROUP]), 1952 1939 ifm, tb); 1953 1940 return -ENODEV; ··· 2334 2321 int err = -EINVAL; 2335 2322 __u8 *addr; 2336 2323 2337 - if (!capable(CAP_NET_ADMIN)) 2324 + if (!netlink_capable(skb, CAP_NET_ADMIN)) 2338 2325 return -EPERM; 2339 2326 2340 2327 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); ··· 2786 2773 sz_idx = type>>2; 2787 2774 kind = type&3; 2788 2775 2789 - if (kind != 2 && !ns_capable(net->user_ns, CAP_NET_ADMIN)) 2776 + if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN)) 2790 2777 return -EPERM; 2791 2778 2792 2779 if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
+49
net/core/sock.c
··· 145 145 static DEFINE_MUTEX(proto_list_mutex); 146 146 static LIST_HEAD(proto_list); 147 147 148 + /** 149 + * sk_ns_capable - General socket capability test 150 + * @sk: Socket to use a capability on or through 151 + * @user_ns: The user namespace of the capability to use 152 + * @cap: The capability to use 153 + * 154 + * Test to see if the opener of the socket had when the socket was 155 + * created and the current process has the capability @cap in the user 156 + * namespace @user_ns. 157 + */ 158 + bool sk_ns_capable(const struct sock *sk, 159 + struct user_namespace *user_ns, int cap) 160 + { 161 + return file_ns_capable(sk->sk_socket->file, user_ns, cap) && 162 + ns_capable(user_ns, cap); 163 + } 164 + EXPORT_SYMBOL(sk_ns_capable); 165 + 166 + /** 167 + * sk_capable - Socket global capability test 168 + * @sk: Socket to use a capability on or through 169 + * @cap: The global capbility to use 170 + * 171 + * Test to see if the opener of the socket had when the socket was 172 + * created and the current process has the capability @cap in all user 173 + * namespaces. 174 + */ 175 + bool sk_capable(const struct sock *sk, int cap) 176 + { 177 + return sk_ns_capable(sk, &init_user_ns, cap); 178 + } 179 + EXPORT_SYMBOL(sk_capable); 180 + 181 + /** 182 + * sk_net_capable - Network namespace socket capability test 183 + * @sk: Socket to use a capability on or through 184 + * @cap: The capability to use 185 + * 186 + * Test to see if the opener of the socket had when the socke was created 187 + * and the current process has the capability @cap over the network namespace 188 + * the socket is a member of. 189 + */ 190 + bool sk_net_capable(const struct sock *sk, int cap) 191 + { 192 + return sk_ns_capable(sk, sock_net(sk)->user_ns, cap); 193 + } 194 + EXPORT_SYMBOL(sk_net_capable); 195 + 196 + 148 197 #ifdef CONFIG_MEMCG_KMEM 149 198 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 150 199 {
+2 -2
net/core/sock_diag.c
··· 49 49 } 50 50 EXPORT_SYMBOL_GPL(sock_diag_put_meminfo); 51 51 52 - int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk, 52 + int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, 53 53 struct sk_buff *skb, int attrtype) 54 54 { 55 55 struct sock_fprog_kern *fprog; ··· 58 58 unsigned int flen; 59 59 int err = 0; 60 60 61 - if (!ns_capable(user_ns, CAP_NET_ADMIN)) { 61 + if (!may_report_filterinfo) { 62 62 nla_reserve(skb, attrtype, 0); 63 63 return 0; 64 64 }
+1 -1
net/dcb/dcbnl.c
··· 1669 1669 struct nlmsghdr *reply_nlh = NULL; 1670 1670 const struct reply_func *fn; 1671 1671 1672 - if ((nlh->nlmsg_type == RTM_SETDCB) && !capable(CAP_NET_ADMIN)) 1672 + if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN)) 1673 1673 return -EPERM; 1674 1674 1675 1675 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
+2 -2
net/decnet/dn_dev.c
··· 574 574 struct dn_ifaddr __rcu **ifap; 575 575 int err = -EINVAL; 576 576 577 - if (!capable(CAP_NET_ADMIN)) 577 + if (!netlink_capable(skb, CAP_NET_ADMIN)) 578 578 return -EPERM; 579 579 580 580 if (!net_eq(net, &init_net)) ··· 618 618 struct dn_ifaddr *ifa; 619 619 int err; 620 620 621 - if (!capable(CAP_NET_ADMIN)) 621 + if (!netlink_capable(skb, CAP_NET_ADMIN)) 622 622 return -EPERM; 623 623 624 624 if (!net_eq(net, &init_net))
+2 -2
net/decnet/dn_fib.c
··· 505 505 struct nlattr *attrs[RTA_MAX+1]; 506 506 int err; 507 507 508 - if (!capable(CAP_NET_ADMIN)) 508 + if (!netlink_capable(skb, CAP_NET_ADMIN)) 509 509 return -EPERM; 510 510 511 511 if (!net_eq(net, &init_net)) ··· 530 530 struct nlattr *attrs[RTA_MAX+1]; 531 531 int err; 532 532 533 - if (!capable(CAP_NET_ADMIN)) 533 + if (!netlink_capable(skb, CAP_NET_ADMIN)) 534 534 return -EPERM; 535 535 536 536 if (!net_eq(net, &init_net))
+1 -1
net/decnet/netfilter/dn_rtmsg.c
··· 107 107 if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) 108 108 return; 109 109 110 - if (!capable(CAP_NET_ADMIN)) 110 + if (!netlink_capable(skb, CAP_NET_ADMIN)) 111 111 RCV_SKB_FAIL(-EPERM); 112 112 113 113 /* Eventually we might send routing messages too */
+2
net/ipv4/ip_tunnel.c
··· 442 442 tunnel->i_seqno = ntohl(tpi->seq) + 1; 443 443 } 444 444 445 + skb_reset_network_header(skb); 446 + 445 447 err = IP_ECN_decapsulate(iph, skb); 446 448 if (unlikely(err)) { 447 449 if (log_ecn_error)
+1 -1
net/ipv4/tcp_cubic.c
··· 409 409 ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT; 410 410 ratio += cnt; 411 411 412 - ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT); 412 + ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT); 413 413 } 414 414 415 415 /* Some calls are for duplicates without timetamps */
+7 -7
net/ipv4/tcp_output.c
··· 2441 2441 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2442 2442 } 2443 2443 2444 - if (likely(!err)) 2444 + if (likely(!err)) { 2445 2445 TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; 2446 + /* Update global TCP statistics. */ 2447 + TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); 2448 + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) 2449 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); 2450 + tp->total_retrans++; 2451 + } 2446 2452 return err; 2447 2453 } 2448 2454 ··· 2458 2452 int err = __tcp_retransmit_skb(sk, skb); 2459 2453 2460 2454 if (err == 0) { 2461 - /* Update global TCP statistics. */ 2462 - TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); 2463 - if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) 2464 - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); 2465 - tp->total_retrans++; 2466 - 2467 2455 #if FASTRETRANS_DEBUG > 0 2468 2456 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 2469 2457 net_dbg_ratelimited("retrans_out leaked\n");
+2 -1
net/ipv6/ip6_fib.c
··· 1459 1459 1460 1460 if (w->skip) { 1461 1461 w->skip--; 1462 - continue; 1462 + goto skip; 1463 1463 } 1464 1464 1465 1465 err = w->func(w); ··· 1469 1469 w->count++; 1470 1470 continue; 1471 1471 } 1472 + skip: 1472 1473 w->state = FWS_U; 1473 1474 case FWS_U: 1474 1475 if (fn == w->root)
+1 -1
net/ipv6/ip6mr.c
··· 1633 1633 { 1634 1634 struct mr6_table *mrt; 1635 1635 struct flowi6 fl6 = { 1636 - .flowi6_iif = skb->skb_iif, 1636 + .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX, 1637 1637 .flowi6_oif = skb->dev->ifindex, 1638 1638 .flowi6_mark = skb->mark, 1639 1639 };
+1
net/ipv6/netfilter/ip6t_rpfilter.c
··· 33 33 struct ipv6hdr *iph = ipv6_hdr(skb); 34 34 bool ret = false; 35 35 struct flowi6 fl6 = { 36 + .flowi6_iif = LOOPBACK_IFINDEX, 36 37 .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK, 37 38 .flowi6_proto = iph->nexthdr, 38 39 .daddr = iph->saddr,
+2
net/ipv6/route.c
··· 1273 1273 struct flowi6 fl6; 1274 1274 1275 1275 memset(&fl6, 0, sizeof(fl6)); 1276 + fl6.flowi6_iif = LOOPBACK_IFINDEX; 1276 1277 fl6.flowi6_oif = oif; 1277 1278 fl6.flowi6_mark = mark; 1278 1279 fl6.daddr = iph->daddr; ··· 1295 1294 struct flowi6 fl6; 1296 1295 1297 1296 memset(&fl6, 0, sizeof(fl6)); 1297 + fl6.flowi6_iif = LOOPBACK_IFINDEX; 1298 1298 fl6.flowi6_oif = oif; 1299 1299 fl6.flowi6_mark = mark; 1300 1300 fl6.daddr = msg->dest;
+1 -2
net/netfilter/nfnetlink.c
··· 368 368 static void nfnetlink_rcv(struct sk_buff *skb) 369 369 { 370 370 struct nlmsghdr *nlh = nlmsg_hdr(skb); 371 - struct net *net = sock_net(skb->sk); 372 371 int msglen; 373 372 374 373 if (nlh->nlmsg_len < NLMSG_HDRLEN || 375 374 skb->len < nlh->nlmsg_len) 376 375 return; 377 376 378 - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) { 377 + if (!netlink_net_capable(skb, CAP_NET_ADMIN)) { 379 378 netlink_ack(skb, nlh, -EPERM); 380 379 return; 381 380 }
+70 -5
net/netlink/af_netlink.c
··· 1360 1360 return err; 1361 1361 } 1362 1362 1363 - static inline int netlink_capable(const struct socket *sock, unsigned int flag) 1363 + /** 1364 + * __netlink_ns_capable - General netlink message capability test 1365 + * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace. 1366 + * @user_ns: The user namespace of the capability to use 1367 + * @cap: The capability to use 1368 + * 1369 + * Test to see if the opener of the socket we received the message 1370 + * from had when the netlink socket was created and the sender of the 1371 + * message has has the capability @cap in the user namespace @user_ns. 1372 + */ 1373 + bool __netlink_ns_capable(const struct netlink_skb_parms *nsp, 1374 + struct user_namespace *user_ns, int cap) 1375 + { 1376 + return sk_ns_capable(nsp->sk, user_ns, cap); 1377 + } 1378 + EXPORT_SYMBOL(__netlink_ns_capable); 1379 + 1380 + /** 1381 + * netlink_ns_capable - General netlink message capability test 1382 + * @skb: socket buffer holding a netlink command from userspace 1383 + * @user_ns: The user namespace of the capability to use 1384 + * @cap: The capability to use 1385 + * 1386 + * Test to see if the opener of the socket we received the message 1387 + * from had when the netlink socket was created and the sender of the 1388 + * message has has the capability @cap in the user namespace @user_ns. 
1389 + */ 1390 + bool netlink_ns_capable(const struct sk_buff *skb, 1391 + struct user_namespace *user_ns, int cap) 1392 + { 1393 + return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap); 1394 + } 1395 + EXPORT_SYMBOL(netlink_ns_capable); 1396 + 1397 + /** 1398 + * netlink_capable - Netlink global message capability test 1399 + * @skb: socket buffer holding a netlink command from userspace 1400 + * @cap: The capability to use 1401 + * 1402 + * Test to see if the opener of the socket we received the message 1403 + * from had when the netlink socket was created and the sender of the 1404 + * message has has the capability @cap in all user namespaces. 1405 + */ 1406 + bool netlink_capable(const struct sk_buff *skb, int cap) 1407 + { 1408 + return netlink_ns_capable(skb, &init_user_ns, cap); 1409 + } 1410 + EXPORT_SYMBOL(netlink_capable); 1411 + 1412 + /** 1413 + * netlink_net_capable - Netlink network namespace message capability test 1414 + * @skb: socket buffer holding a netlink command from userspace 1415 + * @cap: The capability to use 1416 + * 1417 + * Test to see if the opener of the socket we received the message 1418 + * from had when the netlink socket was created and the sender of the 1419 + * message has has the capability @cap over the network namespace of 1420 + * the socket we received the message from. 
1421 + */ 1422 + bool netlink_net_capable(const struct sk_buff *skb, int cap) 1423 + { 1424 + return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap); 1425 + } 1426 + EXPORT_SYMBOL(netlink_net_capable); 1427 + 1428 + static inline int netlink_allowed(const struct socket *sock, unsigned int flag) 1364 1429 { 1365 1430 return (nl_table[sock->sk->sk_protocol].flags & flag) || 1366 1431 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN); ··· 1493 1428 1494 1429 /* Only superuser is allowed to listen multicasts */ 1495 1430 if (nladdr->nl_groups) { 1496 - if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV)) 1431 + if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV)) 1497 1432 return -EPERM; 1498 1433 err = netlink_realloc_groups(sk); 1499 1434 if (err) ··· 1555 1490 return -EINVAL; 1556 1491 1557 1492 if ((nladdr->nl_groups || nladdr->nl_pid) && 1558 - !netlink_capable(sock, NL_CFG_F_NONROOT_SEND)) 1493 + !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) 1559 1494 return -EPERM; 1560 1495 1561 1496 if (!nlk->portid) ··· 2161 2096 break; 2162 2097 case NETLINK_ADD_MEMBERSHIP: 2163 2098 case NETLINK_DROP_MEMBERSHIP: { 2164 - if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV)) 2099 + if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV)) 2165 2100 return -EPERM; 2166 2101 err = netlink_realloc_groups(sk); 2167 2102 if (err) ··· 2312 2247 dst_group = ffs(addr->nl_groups); 2313 2248 err = -EPERM; 2314 2249 if ((dst_group || dst_portid) && 2315 - !netlink_capable(sock, NL_CFG_F_NONROOT_SEND)) 2250 + !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) 2316 2251 goto out; 2317 2252 } else { 2318 2253 dst_portid = nlk->dst_portid;
+1 -1
net/netlink/genetlink.c
··· 561 561 return -EOPNOTSUPP; 562 562 563 563 if ((ops->flags & GENL_ADMIN_PERM) && 564 - !capable(CAP_NET_ADMIN)) 564 + !netlink_capable(skb, CAP_NET_ADMIN)) 565 565 return -EPERM; 566 566 567 567 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+6 -1
net/packet/diag.c
··· 128 128 129 129 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, 130 130 struct packet_diag_req *req, 131 + bool may_report_filterinfo, 131 132 struct user_namespace *user_ns, 132 133 u32 portid, u32 seq, u32 flags, int sk_ino) 133 134 { ··· 173 172 goto out_nlmsg_trim; 174 173 175 174 if ((req->pdiag_show & PACKET_SHOW_FILTER) && 176 - sock_diag_put_filterinfo(user_ns, sk, skb, PACKET_DIAG_FILTER)) 175 + sock_diag_put_filterinfo(may_report_filterinfo, sk, skb, 176 + PACKET_DIAG_FILTER)) 177 177 goto out_nlmsg_trim; 178 178 179 179 return nlmsg_end(skb, nlh); ··· 190 188 struct packet_diag_req *req; 191 189 struct net *net; 192 190 struct sock *sk; 191 + bool may_report_filterinfo; 193 192 194 193 net = sock_net(skb->sk); 195 194 req = nlmsg_data(cb->nlh); 195 + may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN); 196 196 197 197 mutex_lock(&net->packet.sklist_lock); 198 198 sk_for_each(sk, &net->packet.sklist) { ··· 204 200 goto next; 205 201 206 202 if (sk_diag_fill(sk, skb, req, 203 + may_report_filterinfo, 207 204 sk_user_ns(NETLINK_CB(cb->skb).sk), 208 205 NETLINK_CB(cb->skb).portid, 209 206 cb->nlh->nlmsg_seq, NLM_F_MULTI,
+4 -4
net/phonet/pn_netlink.c
··· 70 70 int err; 71 71 u8 pnaddr; 72 72 73 - if (!capable(CAP_NET_ADMIN)) 73 + if (!netlink_capable(skb, CAP_NET_ADMIN)) 74 74 return -EPERM; 75 75 76 - if (!capable(CAP_SYS_ADMIN)) 76 + if (!netlink_capable(skb, CAP_SYS_ADMIN)) 77 77 return -EPERM; 78 78 79 79 ASSERT_RTNL(); ··· 233 233 int err; 234 234 u8 dst; 235 235 236 - if (!capable(CAP_NET_ADMIN)) 236 + if (!netlink_capable(skb, CAP_NET_ADMIN)) 237 237 return -EPERM; 238 238 239 - if (!capable(CAP_SYS_ADMIN)) 239 + if (!netlink_capable(skb, CAP_SYS_ADMIN)) 240 240 return -EPERM; 241 241 242 242 ASSERT_RTNL();
+1 -1
net/sched/act_api.c
··· 948 948 u32 portid = skb ? NETLINK_CB(skb).portid : 0; 949 949 int ret = 0, ovr = 0; 950 950 951 - if ((n->nlmsg_type != RTM_GETACTION) && !capable(CAP_NET_ADMIN)) 951 + if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN)) 952 952 return -EPERM; 953 953 954 954 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
+1 -1
net/sched/cls_api.c
··· 134 134 int err; 135 135 int tp_created = 0; 136 136 137 - if ((n->nlmsg_type != RTM_GETTFILTER) && !capable(CAP_NET_ADMIN)) 137 + if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_capable(skb, CAP_NET_ADMIN)) 138 138 return -EPERM; 139 139 140 140 replay:
+3 -3
net/sched/sch_api.c
··· 1084 1084 struct Qdisc *p = NULL; 1085 1085 int err; 1086 1086 1087 - if ((n->nlmsg_type != RTM_GETQDISC) && !capable(CAP_NET_ADMIN)) 1087 + if ((n->nlmsg_type != RTM_GETQDISC) && !netlink_capable(skb, CAP_NET_ADMIN)) 1088 1088 return -EPERM; 1089 1089 1090 1090 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); ··· 1151 1151 struct Qdisc *q, *p; 1152 1152 int err; 1153 1153 1154 - if (!capable(CAP_NET_ADMIN)) 1154 + if (!netlink_capable(skb, CAP_NET_ADMIN)) 1155 1155 return -EPERM; 1156 1156 1157 1157 replay: ··· 1490 1490 u32 qid; 1491 1491 int err; 1492 1492 1493 - if ((n->nlmsg_type != RTM_GETTCLASS) && !capable(CAP_NET_ADMIN)) 1493 + if ((n->nlmsg_type != RTM_GETTCLASS) && !netlink_capable(skb, CAP_NET_ADMIN)) 1494 1494 return -EPERM; 1495 1495 1496 1496 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
+6 -5
net/sched/sch_hhf.c
··· 553 553 if (err < 0) 554 554 return err; 555 555 556 - sch_tree_lock(sch); 557 - 558 - if (tb[TCA_HHF_BACKLOG_LIMIT]) 559 - sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]); 560 - 561 556 if (tb[TCA_HHF_QUANTUM]) 562 557 new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]); 563 558 ··· 562 567 non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight; 563 568 if (non_hh_quantum > INT_MAX) 564 569 return -EINVAL; 570 + 571 + sch_tree_lock(sch); 572 + 573 + if (tb[TCA_HHF_BACKLOG_LIMIT]) 574 + sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]); 575 + 565 576 q->quantum = new_quantum; 566 577 q->hhf_non_hh_weight = new_hhf_non_hh_weight; 567 578
+6 -1
net/sctp/protocol.c
··· 491 491 continue; 492 492 if ((laddr->state == SCTP_ADDR_SRC) && 493 493 (AF_INET == laddr->a.sa.sa_family)) { 494 - fl4->saddr = laddr->a.v4.sin_addr.s_addr; 495 494 fl4->fl4_sport = laddr->a.v4.sin_port; 495 + flowi4_update_output(fl4, 496 + asoc->base.sk->sk_bound_dev_if, 497 + RT_CONN_FLAGS(asoc->base.sk), 498 + daddr->v4.sin_addr.s_addr, 499 + laddr->a.v4.sin_addr.s_addr); 500 + 496 501 rt = ip_route_output_key(sock_net(sk), fl4); 497 502 if (!IS_ERR(rt)) { 498 503 dst = &rt->dst;
+3 -4
net/sctp/sm_sideeffect.c
··· 496 496 497 497 /* If the transport error count is greater than the pf_retrans 498 498 * threshold, and less than pathmaxrtx, and if the current state 499 - * is not SCTP_UNCONFIRMED, then mark this transport as Partially 500 - * Failed, see SCTP Quick Failover Draft, section 5.1 499 + * is SCTP_ACTIVE, then mark this transport as Partially Failed, 500 + * see SCTP Quick Failover Draft, section 5.1 501 501 */ 502 - if ((transport->state != SCTP_PF) && 503 - (transport->state != SCTP_UNCONFIRMED) && 502 + if ((transport->state == SCTP_ACTIVE) && 504 503 (asoc->pf_retrans < transport->pathmaxrxt) && 505 504 (transport->error_count > asoc->pf_retrans)) { 506 505
+1 -1
net/tipc/netlink.c
··· 47 47 int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN); 48 48 u16 cmd; 49 49 50 - if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN))) 50 + if ((req_userhdr->cmd & 0xC000) && (!netlink_capable(skb, CAP_NET_ADMIN))) 51 51 cmd = TIPC_CMD_NOT_NET_ADMIN; 52 52 else 53 53 cmd = req_userhdr->cmd;
+22 -25
net/vmw_vsock/af_vsock.c
··· 1925 1925 .fops = &vsock_device_ops, 1926 1926 }; 1927 1927 1928 - static int __vsock_core_init(void) 1928 + int __vsock_core_init(const struct vsock_transport *t, struct module *owner) 1929 1929 { 1930 - int err; 1930 + int err = mutex_lock_interruptible(&vsock_register_mutex); 1931 + 1932 + if (err) 1933 + return err; 1934 + 1935 + if (transport) { 1936 + err = -EBUSY; 1937 + goto err_busy; 1938 + } 1939 + 1940 + /* Transport must be the owner of the protocol so that it can't 1941 + * unload while there are open sockets. 1942 + */ 1943 + vsock_proto.owner = owner; 1944 + transport = t; 1931 1945 1932 1946 vsock_init_tables(); 1933 1947 ··· 1965 1951 goto err_unregister_proto; 1966 1952 } 1967 1953 1954 + mutex_unlock(&vsock_register_mutex); 1968 1955 return 0; 1969 1956 1970 1957 err_unregister_proto: 1971 1958 proto_unregister(&vsock_proto); 1972 1959 err_misc_deregister: 1973 1960 misc_deregister(&vsock_device); 1961 + transport = NULL; 1962 + err_busy: 1963 + mutex_unlock(&vsock_register_mutex); 1974 1964 return err; 1975 1965 } 1976 - 1977 - int vsock_core_init(const struct vsock_transport *t) 1978 - { 1979 - int retval = mutex_lock_interruptible(&vsock_register_mutex); 1980 - if (retval) 1981 - return retval; 1982 - 1983 - if (transport) { 1984 - retval = -EBUSY; 1985 - goto out; 1986 - } 1987 - 1988 - transport = t; 1989 - retval = __vsock_core_init(); 1990 - if (retval) 1991 - transport = NULL; 1992 - 1993 - out: 1994 - mutex_unlock(&vsock_register_mutex); 1995 - return retval; 1996 - } 1997 - EXPORT_SYMBOL_GPL(vsock_core_init); 1966 + EXPORT_SYMBOL_GPL(__vsock_core_init); 1998 1967 1999 1968 void vsock_core_exit(void) 2000 1969 { ··· 1997 2000 1998 2001 MODULE_AUTHOR("VMware, Inc."); 1999 2002 MODULE_DESCRIPTION("VMware Virtual Socket Family"); 2000 - MODULE_VERSION("1.0.0.0-k"); 2003 + MODULE_VERSION("1.0.1.0-k"); 2001 2004 MODULE_LICENSE("GPL v2");
+1 -1
net/xfrm/xfrm_user.c
··· 2377 2377 link = &xfrm_dispatch[type]; 2378 2378 2379 2379 /* All operations require privileges, even GET */ 2380 - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2380 + if (!netlink_net_capable(skb, CAP_NET_ADMIN)) 2381 2381 return -EPERM; 2382 2382 2383 2383 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
+5
scripts/sortextable.c
··· 35 35 #define EM_ARCOMPACT 93 36 36 #endif 37 37 38 + #ifndef EM_XTENSA 39 + #define EM_XTENSA 94 40 + #endif 41 + 38 42 #ifndef EM_AARCH64 39 43 #define EM_AARCH64 183 40 44 #endif ··· 285 281 case EM_AARCH64: 286 282 case EM_MICROBLAZE: 287 283 case EM_MIPS: 284 + case EM_XTENSA: 288 285 break; 289 286 } /* end switch */ 290 287
-1
security/apparmor/include/apparmor.h
··· 66 66 char *aa_split_fqname(char *args, char **ns_name); 67 67 void aa_info_message(const char *str); 68 68 void *__aa_kvmalloc(size_t size, gfp_t flags); 69 - void kvfree(void *buffer); 70 69 71 70 static inline void *kvmalloc(size_t size) 72 71 {
-14
security/apparmor/lib.c
··· 104 104 } 105 105 return buffer; 106 106 } 107 - 108 - /** 109 - * kvfree - free an allocation do by kvmalloc 110 - * @buffer: buffer to free (MAYBE_NULL) 111 - * 112 - * Free a buffer allocated by kvmalloc 113 - */ 114 - void kvfree(void *buffer) 115 - { 116 - if (is_vmalloc_addr(buffer)) 117 - vfree(buffer); 118 - else 119 - kfree(buffer); 120 - }
+18 -16
sound/pci/hda/hda_controller.c
··· 1059 1059 1060 1060 /* reset the corb hw read pointer */ 1061 1061 azx_writew(chip, CORBRP, ICH6_CORBRP_RST); 1062 - for (timeout = 1000; timeout > 0; timeout--) { 1063 - if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST) 1064 - break; 1065 - udelay(1); 1066 - } 1067 - if (timeout <= 0) 1068 - dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n", 1069 - azx_readw(chip, CORBRP)); 1062 + if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) { 1063 + for (timeout = 1000; timeout > 0; timeout--) { 1064 + if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST) 1065 + break; 1066 + udelay(1); 1067 + } 1068 + if (timeout <= 0) 1069 + dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n", 1070 + azx_readw(chip, CORBRP)); 1070 1071 1071 - azx_writew(chip, CORBRP, 0); 1072 - for (timeout = 1000; timeout > 0; timeout--) { 1073 - if (azx_readw(chip, CORBRP) == 0) 1074 - break; 1075 - udelay(1); 1072 + azx_writew(chip, CORBRP, 0); 1073 + for (timeout = 1000; timeout > 0; timeout--) { 1074 + if (azx_readw(chip, CORBRP) == 0) 1075 + break; 1076 + udelay(1); 1077 + } 1078 + if (timeout <= 0) 1079 + dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n", 1080 + azx_readw(chip, CORBRP)); 1076 1081 } 1077 - if (timeout <= 0) 1078 - dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n", 1079 - azx_readw(chip, CORBRP)); 1080 1082 1081 1083 /* enable corb dma */ 1082 1084 azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
+2 -1
sound/pci/hda/hda_intel.c
··· 249 249 /* quirks for Nvidia */ 250 250 #define AZX_DCAPS_PRESET_NVIDIA \ 251 251 (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\ 252 - AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT) 252 + AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT |\ 253 + AZX_DCAPS_CORBRP_SELF_CLEAR) 253 254 254 255 #define AZX_DCAPS_PRESET_CTHDA \ 255 256 (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
+1
sound/pci/hda/hda_priv.h
··· 189 189 #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ 190 190 #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ 191 191 #define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */ 192 + #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ 192 193 193 194 /* position fix mode */ 194 195 enum {
+3 -1
sound/pci/hda/patch_hdmi.c
··· 1127 1127 AMP_OUT_UNMUTE); 1128 1128 1129 1129 eld = &per_pin->sink_eld; 1130 - if (!eld->monitor_present) 1130 + if (!eld->monitor_present) { 1131 + hdmi_set_channel_count(codec, per_pin->cvt_nid, channels); 1131 1132 return; 1133 + } 1132 1134 1133 1135 if (!non_pcm && per_pin->chmap_set) 1134 1136 ca = hdmi_manual_channel_allocation(channels, per_pin->chmap);
+2
sound/pci/hda/patch_realtek.c
··· 4621 4621 SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4622 4622 SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE), 4623 4623 SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE), 4624 + SND_PCI_QUIRK(0x1028, 0x0674, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4625 + SND_PCI_QUIRK(0x1028, 0x067e, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4624 4626 SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4625 4627 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4626 4628 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+1 -1
sound/soc/codecs/alc5623.c
··· 1018 1018 dev_err(&client->dev, "failed to read vendor ID1: %d\n", ret); 1019 1019 return ret; 1020 1020 } 1021 - vid1 = ((vid1 & 0xff) << 8) | (vid1 >> 8); 1022 1021 1023 1022 ret = regmap_read(alc5623->regmap, ALC5623_VENDOR_ID2, &vid2); 1024 1023 if (ret < 0) { 1025 1024 dev_err(&client->dev, "failed to read vendor ID2: %d\n", ret); 1026 1025 return ret; 1027 1026 } 1027 + vid2 >>= 8; 1028 1028 1029 1029 if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) { 1030 1030 dev_err(&client->dev, "unknown or wrong codec\n");
+4 -2
sound/soc/codecs/cs42l52.c
··· 1229 1229 } 1230 1230 1231 1231 if (cs42l52->pdata.reset_gpio) { 1232 - ret = gpio_request_one(cs42l52->pdata.reset_gpio, 1233 - GPIOF_OUT_INIT_HIGH, "CS42L52 /RST"); 1232 + ret = devm_gpio_request_one(&i2c_client->dev, 1233 + cs42l52->pdata.reset_gpio, 1234 + GPIOF_OUT_INIT_HIGH, 1235 + "CS42L52 /RST"); 1234 1236 if (ret < 0) { 1235 1237 dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n", 1236 1238 cs42l52->pdata.reset_gpio, ret);
+4 -2
sound/soc/codecs/cs42l73.c
··· 1443 1443 i2c_set_clientdata(i2c_client, cs42l73); 1444 1444 1445 1445 if (cs42l73->pdata.reset_gpio) { 1446 - ret = gpio_request_one(cs42l73->pdata.reset_gpio, 1447 - GPIOF_OUT_INIT_HIGH, "CS42L73 /RST"); 1446 + ret = devm_gpio_request_one(&i2c_client->dev, 1447 + cs42l73->pdata.reset_gpio, 1448 + GPIOF_OUT_INIT_HIGH, 1449 + "CS42L73 /RST"); 1448 1450 if (ret < 0) { 1449 1451 dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n", 1450 1452 cs42l73->pdata.reset_gpio, ret);
+7 -2
sound/soc/codecs/tlv320aic3x.c
··· 1399 1399 } 1400 1400 1401 1401 aic3x_add_widgets(codec); 1402 - list_add(&aic3x->list, &reset_list); 1403 1402 1404 1403 return 0; 1405 1404 ··· 1568 1569 1569 1570 ret = snd_soc_register_codec(&i2c->dev, 1570 1571 &soc_codec_dev_aic3x, &aic3x_dai, 1); 1571 - return ret; 1572 + 1573 + if (ret != 0) 1574 + goto err_gpio; 1575 + 1576 + list_add(&aic3x->list, &reset_list); 1577 + 1578 + return 0; 1572 1579 1573 1580 err_gpio: 1574 1581 if (gpio_is_valid(aic3x->gpio_reset) &&
+2 -2
sound/soc/fsl/fsl_spdif.h
··· 144 144 145 145 /* SPDIF Clock register */ 146 146 #define STC_SYSCLK_DIV_OFFSET 11 147 - #define STC_SYSCLK_DIV_MASK (0x1ff << STC_TXCLK_SRC_OFFSET) 148 - #define STC_SYSCLK_DIV(x) ((((x) - 1) << STC_TXCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK) 147 + #define STC_SYSCLK_DIV_MASK (0x1ff << STC_SYSCLK_DIV_OFFSET) 148 + #define STC_SYSCLK_DIV(x) ((((x) - 1) << STC_SYSCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK) 149 149 #define STC_TXCLK_SRC_OFFSET 8 150 150 #define STC_TXCLK_SRC_MASK (0x7 << STC_TXCLK_SRC_OFFSET) 151 151 #define STC_TXCLK_SRC_SET(x) ((x << STC_TXCLK_SRC_OFFSET) & STC_TXCLK_SRC_MASK)
+1 -1
sound/soc/intel/sst-dsp-priv.h
··· 136 136 enum sst_data_type data_type; /* type of module data */ 137 137 138 138 u32 size; /* size in bytes */ 139 - u32 offset; /* offset in FW file */ 139 + int32_t offset; /* offset in FW file */ 140 140 u32 data_offset; /* offset in ADSP memory space */ 141 141 void *data; /* module data */ 142 142 };
+4 -3
sound/soc/intel/sst-haswell-ipc.c
··· 617 617 case IPC_POSITION_CHANGED: 618 618 trace_ipc_notification("DSP stream position changed for", 619 619 stream->reply.stream_hw_id); 620 - sst_dsp_inbox_read(hsw->dsp, pos, sizeof(pos)); 620 + sst_dsp_inbox_read(hsw->dsp, pos, sizeof(*pos)); 621 621 622 622 if (stream->notify_position) 623 623 stream->notify_position(stream, stream->pdata); ··· 991 991 return -EINVAL; 992 992 993 993 sst_dsp_read(hsw->dsp, volume, 994 - stream->reply.volume_register_address[channel], sizeof(volume)); 994 + stream->reply.volume_register_address[channel], 995 + sizeof(*volume)); 995 996 996 997 return 0; 997 998 } ··· 1610 1609 trace_ipc_request("PM enter Dx state", state); 1611 1610 1612 1611 ret = ipc_tx_message_wait(hsw, header, &state_, sizeof(state_), 1613 - dx, sizeof(dx)); 1612 + dx, sizeof(*dx)); 1614 1613 if (ret < 0) { 1615 1614 dev_err(hsw->dev, "ipc: error set dx state %d failed\n", state); 1616 1615 return ret;
-2
sound/soc/jz4740/Makefile
··· 1 1 # 2 2 # Jz4740 Platform Support 3 3 # 4 - snd-soc-jz4740-objs := jz4740-pcm.o 5 4 snd-soc-jz4740-i2s-objs := jz4740-i2s.o 6 5 7 - obj-$(CONFIG_SND_JZ4740_SOC) += snd-soc-jz4740.o 8 6 obj-$(CONFIG_SND_JZ4740_SOC_I2S) += snd-soc-jz4740-i2s.o 9 7 10 8 # Jz4740 Machine Support
+2 -2
sound/soc/sh/rcar/src.c
··· 258 258 { 259 259 struct rsnd_src *src = rsnd_mod_to_src(mod); 260 260 261 - clk_enable(src->clk); 261 + clk_prepare_enable(src->clk); 262 262 263 263 return 0; 264 264 } ··· 269 269 { 270 270 struct rsnd_src *src = rsnd_mod_to_src(mod); 271 271 272 - clk_disable(src->clk); 272 + clk_disable_unprepare(src->clk); 273 273 274 274 return 0; 275 275 }
+2 -2
sound/soc/sh/rcar/ssi.c
··· 171 171 u32 cr; 172 172 173 173 if (0 == ssi->usrcnt) { 174 - clk_enable(ssi->clk); 174 + clk_prepare_enable(ssi->clk); 175 175 176 176 if (rsnd_dai_is_clk_master(rdai)) { 177 177 if (rsnd_ssi_clk_from_parent(ssi)) ··· 230 230 rsnd_ssi_master_clk_stop(ssi); 231 231 } 232 232 233 - clk_disable(ssi->clk); 233 + clk_disable_unprepare(ssi->clk); 234 234 } 235 235 236 236 dev_dbg(dev, "ssi%d hw stopped\n", rsnd_mod_id(&ssi->mod));
-1
sound/soc/soc-dapm.c
··· 254 254 static void dapm_kcontrol_free(struct snd_kcontrol *kctl) 255 255 { 256 256 struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl); 257 - kfree(data->widget); 258 257 kfree(data->wlist); 259 258 kfree(data); 260 259 }
+8 -4
sound/usb/card.c
··· 651 651 int err = -ENODEV; 652 652 653 653 down_read(&chip->shutdown_rwsem); 654 - if (chip->probing) 654 + if (chip->probing && chip->in_pm) 655 655 err = 0; 656 656 else if (!chip->shutdown) 657 657 err = usb_autopm_get_interface(chip->pm_intf); ··· 663 663 void snd_usb_autosuspend(struct snd_usb_audio *chip) 664 664 { 665 665 down_read(&chip->shutdown_rwsem); 666 - if (!chip->shutdown && !chip->probing) 666 + if (!chip->shutdown && !chip->probing && !chip->in_pm) 667 667 usb_autopm_put_interface(chip->pm_intf); 668 668 up_read(&chip->shutdown_rwsem); 669 669 } ··· 695 695 chip->autosuspended = 1; 696 696 } 697 697 698 - list_for_each_entry(mixer, &chip->mixer_list, list) 699 - snd_usb_mixer_suspend(mixer); 698 + if (chip->num_suspended_intf == 1) 699 + list_for_each_entry(mixer, &chip->mixer_list, list) 700 + snd_usb_mixer_suspend(mixer); 700 701 701 702 return 0; 702 703 } ··· 712 711 return 0; 713 712 if (--chip->num_suspended_intf) 714 713 return 0; 714 + 715 + chip->in_pm = 1; 715 716 /* 716 717 * ALSA leaves material resumption to user space 717 718 * we just notify and restart the mixers ··· 729 726 chip->autosuspended = 0; 730 727 731 728 err_out: 729 + chip->in_pm = 0; 732 730 return err; 733 731 } 734 732
+1
sound/usb/card.h
··· 92 92 unsigned int curframesize; /* current packet size in frames (for capture) */ 93 93 unsigned int syncmaxsize; /* sync endpoint packet size */ 94 94 unsigned int fill_max:1; /* fill max packet size always */ 95 + unsigned int udh01_fb_quirk:1; /* corrupted feedback data */ 95 96 unsigned int datainterval; /* log_2 of data packet interval */ 96 97 unsigned int syncinterval; /* P for adaptive mode, 0 otherwise */ 97 98 unsigned char silence_value;
+14 -1
sound/usb/endpoint.c
··· 471 471 ep->syncinterval = 3; 472 472 473 473 ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize); 474 + 475 + if (chip->usb_id == USB_ID(0x0644, 0x8038) /* TEAC UD-H01 */ && 476 + ep->syncmaxsize == 4) 477 + ep->udh01_fb_quirk = 1; 474 478 } 475 479 476 480 list_add_tail(&ep->list, &chip->ep_list); ··· 1109 1105 if (f == 0) 1110 1106 return; 1111 1107 1112 - if (unlikely(ep->freqshift == INT_MIN)) { 1108 + if (unlikely(sender->udh01_fb_quirk)) { 1109 + /* 1110 + * The TEAC UD-H01 firmware sometimes changes the feedback value 1111 + * by +/- 0x1.0000. 1112 + */ 1113 + if (f < ep->freqn - 0x8000) 1114 + f += 0x10000; 1115 + else if (f > ep->freqn + 0x8000) 1116 + f -= 0x10000; 1117 + } else if (unlikely(ep->freqshift == INT_MIN)) { 1113 1118 /* 1114 1119 * The first time we see a feedback value, determine its format 1115 1120 * by shifting it left or right until it matches the nominal
+2 -3
sound/usb/pcm.c
··· 1501 1501 * The error should be lower than 2ms since the estimate relies 1502 1502 * on two reads of a counter updated every ms. 1503 1503 */ 1504 - if (printk_ratelimit() && 1505 - abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2) 1506 - dev_dbg(&subs->dev->dev, 1504 + if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2) 1505 + dev_dbg_ratelimited(&subs->dev->dev, 1507 1506 "delay: estimated %d, actual %d\n", 1508 1507 est_delay, subs->last_delay); 1509 1508
+1
sound/usb/usbaudio.h
··· 40 40 struct rw_semaphore shutdown_rwsem; 41 41 unsigned int shutdown:1; 42 42 unsigned int probing:1; 43 + unsigned int in_pm:1; 43 44 unsigned int autosuspended:1; 44 45 unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ 45 46
+2 -2
tools/lib/api/fs/debugfs.c
··· 12 12 char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug"; 13 13 14 14 static const char * const debugfs_known_mountpoints[] = { 15 - "/sys/kernel/debug/", 16 - "/debug/", 15 + "/sys/kernel/debug", 16 + "/debug", 17 17 0, 18 18 }; 19 19
+1
tools/lib/traceevent/event-parse.c
··· 4344 4344 format, len_arg, arg); 4345 4345 trace_seq_terminate(&p); 4346 4346 trace_seq_puts(s, p.buffer); 4347 + trace_seq_destroy(&p); 4347 4348 arg = arg->next; 4348 4349 break; 4349 4350 default:
+2 -2
tools/lib/traceevent/event-parse.h
··· 876 876 struct event_filter *pevent_filter_alloc(struct pevent *pevent); 877 877 878 878 /* for backward compatibility */ 879 - #define FILTER_NONE PEVENT_ERRNO__FILTER_NOT_FOUND 880 - #define FILTER_NOEXIST PEVENT_ERRNO__NO_FILTER 879 + #define FILTER_NONE PEVENT_ERRNO__NO_FILTER 880 + #define FILTER_NOEXIST PEVENT_ERRNO__FILTER_NOT_FOUND 881 881 #define FILTER_MISS PEVENT_ERRNO__FILTER_MISS 882 882 #define FILTER_MATCH PEVENT_ERRNO__FILTER_MATCH 883 883
+1 -1
tools/net/bpf_dbg.c
··· 820 820 r->A &= r->X; 821 821 break; 822 822 case BPF_ALU_AND | BPF_K: 823 - r->A &= r->X; 823 + r->A &= K; 824 824 break; 825 825 case BPF_ALU_OR | BPF_X: 826 826 r->A |= r->X;
+1 -1
tools/perf/Makefile.perf
··· 589 589 $(QUIET_CC)$(CC) -o $@ -c -fPIC $(CFLAGS) $(GTK_CFLAGS) $< 590 590 591 591 $(OUTPUT)libperf-gtk.so: $(GTK_OBJS) $(PERFLIBS) 592 - $(QUIET_LINK)$(CC) -o $@ -shared $(ALL_LDFLAGS) $(filter %.o,$^) $(GTK_LIBS) 592 + $(QUIET_LINK)$(CC) -o $@ -shared $(LDFLAGS) $(filter %.o,$^) $(GTK_LIBS) 593 593 594 594 $(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS 595 595 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
+2 -1
tools/perf/arch/x86/tests/dwarf-unwind.c
··· 23 23 24 24 sp = (unsigned long) regs[PERF_REG_X86_SP]; 25 25 26 - map = map_groups__find(&thread->mg, MAP__FUNCTION, (u64) sp); 26 + map = map_groups__find(&thread->mg, MAP__VARIABLE, (u64) sp); 27 27 if (!map) { 28 28 pr_debug("failed to get stack map\n"); 29 + free(buf); 29 30 return -1; 30 31 } 31 32
+7 -1
tools/perf/arch/x86/tests/regs_load.S
··· 1 - 2 1 #include <linux/linkage.h> 3 2 4 3 #define AX 0 ··· 89 90 ret 90 91 ENDPROC(perf_regs_load) 91 92 #endif 93 + 94 + /* 95 + * We need to provide note.GNU-stack section, saying that we want 96 + * NOT executable stack. Otherwise the final linking will assume that 97 + * the ELF stack should not be restricted at all and set it RWX. 98 + */ 99 + .section .note.GNU-stack,"",@progbits
+35 -11
tools/perf/config/Makefile
··· 34 34 LIBUNWIND_LIBS = -lunwind -lunwind-arm 35 35 endif 36 36 37 + # So far there's only x86 libdw unwind support merged in perf. 38 + # Disable it on all other architectures in case libdw unwind 39 + # support is detected in system. Add supported architectures 40 + # to the check. 41 + ifneq ($(ARCH),x86) 42 + NO_LIBDW_DWARF_UNWIND := 1 43 + endif 44 + 37 45 ifeq ($(LIBUNWIND_LIBS),) 38 46 NO_LIBUNWIND := 1 39 47 else ··· 116 108 CFLAGS += -Wall 117 109 CFLAGS += -Wextra 118 110 CFLAGS += -std=gnu99 111 + 112 + # Enforce a non-executable stack, as we may regress (again) in the future by 113 + # adding assembler files missing the .GNU-stack linker note. 114 + LDFLAGS += -Wl,-z,noexecstack 119 115 120 116 EXTLIBS = -lelf -lpthread -lrt -lm -ldl 121 117 ··· 198 186 stackprotector-all \ 199 187 timerfd \ 200 188 libunwind-debug-frame \ 201 - bionic 189 + bionic \ 190 + liberty \ 191 + liberty-z \ 192 + cplus-demangle 202 193 203 194 # Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features. 
204 195 # If in the future we need per-feature checks/flags for features not ··· 519 504 endif 520 505 521 506 ifeq ($(feature-libbfd), 1) 522 - EXTLIBS += -lbfd -lz -liberty 507 + EXTLIBS += -lbfd 508 + 509 + # call all detections now so we get correct 510 + # status in VF output 511 + $(call feature_check,liberty) 512 + $(call feature_check,liberty-z) 513 + $(call feature_check,cplus-demangle) 514 + 515 + ifeq ($(feature-liberty), 1) 516 + EXTLIBS += -liberty 517 + else 518 + ifeq ($(feature-liberty-z), 1) 519 + EXTLIBS += -liberty -lz 520 + endif 521 + endif 523 522 endif 524 523 525 524 ifdef NO_DEMANGLE ··· 544 515 CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT 545 516 else 546 517 ifneq ($(feature-libbfd), 1) 547 - $(call feature_check,liberty) 548 - ifeq ($(feature-liberty), 1) 549 - EXTLIBS += -lbfd -liberty 550 - else 551 - $(call feature_check,liberty-z) 552 - ifeq ($(feature-liberty-z), 1) 553 - EXTLIBS += -lbfd -liberty -lz 554 - else 555 - $(call feature_check,cplus-demangle) 518 + ifneq ($(feature-liberty), 1) 519 + ifneq ($(feature-liberty-z), 1) 520 + # we dont have neither HAVE_CPLUS_DEMANGLE_SUPPORT 521 + # or any of 'bfd iberty z' trinity 556 522 ifeq ($(feature-cplus-demangle), 1) 557 523 EXTLIBS += -liberty 558 524 CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
+2
tools/perf/tests/make
··· 46 46 make_install_html := install-html 47 47 make_install_info := install-info 48 48 make_install_pdf := install-pdf 49 + make_static := LDFLAGS=-static 49 50 50 51 # all the NO_* variable combined 51 52 make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1 ··· 88 87 # run += make_install_info 89 88 # run += make_install_pdf 90 89 run += make_minimal 90 + run += make_static 91 91 92 92 ifneq ($(call has,ctags),) 93 93 run += make_tags
+12 -4
tools/perf/util/machine.c
··· 717 717 } 718 718 719 719 static int map_groups__set_modules_path_dir(struct map_groups *mg, 720 - const char *dir_name) 720 + const char *dir_name, int depth) 721 721 { 722 722 struct dirent *dent; 723 723 DIR *dir = opendir(dir_name); ··· 742 742 !strcmp(dent->d_name, "..")) 743 743 continue; 744 744 745 - ret = map_groups__set_modules_path_dir(mg, path); 745 + /* Do not follow top-level source and build symlinks */ 746 + if (depth == 0) { 747 + if (!strcmp(dent->d_name, "source") || 748 + !strcmp(dent->d_name, "build")) 749 + continue; 750 + } 751 + 752 + ret = map_groups__set_modules_path_dir(mg, path, 753 + depth + 1); 746 754 if (ret < 0) 747 755 goto out; 748 756 } else { ··· 794 786 if (!version) 795 787 return -1; 796 788 797 - snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", 789 + snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s", 798 790 machine->root_dir, version); 799 791 free(version); 800 792 801 - return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); 793 + return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0); 802 794 } 803 795 804 796 static int machine__create_module(void *arg, const char *name, u64 start)
+8 -7
virt/kvm/arm/vgic.c
··· 548 548 u32 val; 549 549 u32 *reg; 550 550 551 - offset >>= 1; 552 551 reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg, 553 - vcpu->vcpu_id, offset); 552 + vcpu->vcpu_id, offset >> 1); 554 553 555 - if (offset & 2) 554 + if (offset & 4) 556 555 val = *reg >> 16; 557 556 else 558 557 val = *reg & 0xffff; ··· 560 561 vgic_reg_access(mmio, &val, offset, 561 562 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); 562 563 if (mmio->is_write) { 563 - if (offset < 4) { 564 + if (offset < 8) { 564 565 *reg = ~0U; /* Force PPIs/SGIs to 1 */ 565 566 return false; 566 567 } 567 568 568 569 val = vgic_cfg_compress(val); 569 - if (offset & 2) { 570 + if (offset & 4) { 570 571 *reg &= 0xffff; 571 572 *reg |= val << 16; 572 573 } else { ··· 915 916 case 0: 916 917 if (!target_cpus) 917 918 return; 919 + break; 918 920 919 921 case 1: 920 922 target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff; ··· 1667 1667 if (addr + size < addr) 1668 1668 return -EINVAL; 1669 1669 1670 + *ioaddr = addr; 1670 1671 ret = vgic_ioaddr_overlap(kvm); 1671 1672 if (ret) 1672 - return ret; 1673 - *ioaddr = addr; 1673 + *ioaddr = VGIC_ADDR_UNDEF; 1674 + 1674 1675 return ret; 1675 1676 } 1676 1677
+2 -1
virt/kvm/assigned-dev.c
··· 395 395 if (dev->entries_nr == 0) 396 396 return r; 397 397 398 - r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr); 398 + r = pci_enable_msix_exact(dev->dev, 399 + dev->host_msix_entries, dev->entries_nr); 399 400 if (r) 400 401 return r; 401 402
+4 -4
virt/kvm/async_pf.c
··· 101 101 if (waitqueue_active(&vcpu->wq)) 102 102 wake_up_interruptible(&vcpu->wq); 103 103 104 - mmdrop(mm); 104 + mmput(mm); 105 105 kvm_put_kvm(vcpu->kvm); 106 106 } 107 107 ··· 118 118 flush_work(&work->work); 119 119 #else 120 120 if (cancel_work_sync(&work->work)) { 121 - mmdrop(work->mm); 121 + mmput(work->mm); 122 122 kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */ 123 123 kmem_cache_free(async_pf_cache, work); 124 124 } ··· 183 183 work->addr = hva; 184 184 work->arch = *arch; 185 185 work->mm = current->mm; 186 - atomic_inc(&work->mm->mm_count); 186 + atomic_inc(&work->mm->mm_users); 187 187 kvm_get_kvm(work->vcpu->kvm); 188 188 189 189 /* this can't really happen otherwise gfn_to_pfn_async ··· 201 201 return 1; 202 202 retry_sync: 203 203 kvm_put_kvm(work->vcpu->kvm); 204 - mmdrop(work->mm); 204 + mmput(work->mm); 205 205 kmem_cache_free(async_pf_cache, work); 206 206 return 0; 207 207 }