Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
drivers/net/ethernet/mellanox/mlx4/cmd.c
net/core/fib_rules.c
net/ipv4/fib_frontend.c

The fib_rules.c and fib_frontend.c conflicts were locking adjustments
in 'net' overlapping addition and removal of code in 'net-next'.

The mlx4 conflict was a bug fix in 'net' happening in the same
place a constant was being replaced with a more suitable macro.

Signed-off-by: David S. Miller <davem@davemloft.net>

+575 -275
+3 -1
Documentation/devicetree/bindings/net/dsa/dsa.txt
··· 19 19 (DSA_MAX_SWITCHES). 20 20 Each of these switch child nodes should have the following required properties: 21 21 22 - - reg : Describes the switch address on the MII bus 22 + - reg : Contains two fields. The first one describes the 23 + address on the MII bus. The second is the switch 24 + number that must be unique in cascaded configurations 23 25 - #address-cells : Must be 1 24 26 - #size-cells : Must be 0 25 27
+8
Documentation/input/alps.txt
··· 114 114 byte 4: 0 y6 y5 y4 y3 y2 y1 y0 115 115 byte 5: 0 z6 z5 z4 z3 z2 z1 z0 116 116 117 + Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for 118 + the DualPoint Stick. 119 + 117 120 Dualpoint device -- interleaved packet format 118 121 --------------------------------------------- 119 122 ··· 129 126 byte 6: 0 y9 y8 y7 1 m r l 130 127 byte 7: 0 y6 y5 y4 y3 y2 y1 y0 131 128 byte 8: 0 z6 z5 z4 z3 z2 z1 z0 129 + 130 + Devices which use the interleaving format normally send standard PS/2 mouse 131 + packets for the DualPoint Stick + ALPS Absolute Mode packets for the 132 + touchpad, switching to the interleaved packet format when both the stick and 133 + the touchpad are used at the same time. 132 134 133 135 ALPS Absolute Mode - Protocol Version 3 134 136 ---------------------------------------
+6
Documentation/input/event-codes.txt
··· 294 294 The kernel does not provide button emulation for such devices but treats 295 295 them as any other INPUT_PROP_BUTTONPAD device. 296 296 297 + INPUT_PROP_ACCELEROMETER 298 + ------------------------- 299 + Directional axes on this device (absolute and/or relative x, y, z) represent 300 + accelerometer data. All other axes retain their meaning. A device must not mix 301 + regular directional axes and accelerometer axes on the same event node. 302 + 297 303 Guidelines: 298 304 ========== 299 305 The guidelines below ensure proper single-touch and multi-finger functionality.
+6 -3
Documentation/input/multi-touch-protocol.txt
··· 312 312 313 313 The type of approaching tool. A lot of kernel drivers cannot distinguish 314 314 between different tool types, such as a finger or a pen. In such cases, the 315 - event should be omitted. The protocol currently supports MT_TOOL_FINGER and 316 - MT_TOOL_PEN [2]. For type B devices, this event is handled by input core; 317 - drivers should instead use input_mt_report_slot_state(). 315 + event should be omitted. The protocol currently supports MT_TOOL_FINGER, 316 + MT_TOOL_PEN, and MT_TOOL_PALM [2]. For type B devices, this event is handled 317 + by input core; drivers should instead use input_mt_report_slot_state(). 318 + A contact's ABS_MT_TOOL_TYPE may change over time while still touching the 319 + device, because the firmware may not be able to determine which tool is being 320 + used when it first appears. 318 321 319 322 ABS_MT_BLOB_ID 320 323
+2 -3
MAINTAINERS
··· 637 637 F: include/uapi/linux/kfd_ioctl.h 638 638 639 639 AMD MICROCODE UPDATE SUPPORT 640 - M: Andreas Herrmann <herrmann.der.user@googlemail.com> 641 - L: amd64-microcode@amd64.org 640 + M: Borislav Petkov <bp@alien8.de> 642 641 S: Maintained 643 642 F: arch/x86/kernel/cpu/microcode/amd* 644 643 ··· 5094 5095 F: drivers/platform/x86/intel_menlow.c 5095 5096 5096 5097 INTEL IA32 MICROCODE UPDATE SUPPORT 5097 - M: Tigran Aivazian <tigran@aivazian.fsnet.co.uk> 5098 + M: Borislav Petkov <bp@alien8.de> 5098 5099 S: Maintained 5099 5100 F: arch/x86/kernel/cpu/microcode/core* 5100 5101 F: arch/x86/kernel/cpu/microcode/intel*
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 0 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc6 4 + EXTRAVERSION = -rc7 5 5 NAME = Hurr durr I'ma sheep 6 6 7 7 # *DOCUMENTATION*
+1 -1
arch/powerpc/include/asm/cputhreads.h
··· 55 55 56 56 static inline int cpu_nr_cores(void) 57 57 { 58 - return NR_CPUS >> threads_shift; 58 + return nr_cpu_ids >> threads_shift; 59 59 } 60 60 61 61 static inline cpumask_t cpu_online_cores_map(void)
+5 -5
arch/x86/kernel/cpu/perf_event_intel.c
··· 212 212 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 213 213 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 214 214 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ 215 - INTEL_EVENT_CONSTRAINT(0x08a3, 0x4), 215 + INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), 216 216 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ 217 - INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4), 217 + INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), 218 218 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ 219 - INTEL_EVENT_CONSTRAINT(0x04a3, 0xf), 219 + INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), 220 220 EVENT_CONSTRAINT_END 221 221 }; 222 222 ··· 1649 1649 if (c) 1650 1650 return c; 1651 1651 1652 - c = intel_pebs_constraints(event); 1652 + c = intel_shared_regs_constraints(cpuc, event); 1653 1653 if (c) 1654 1654 return c; 1655 1655 1656 - c = intel_shared_regs_constraints(cpuc, event); 1656 + c = intel_pebs_constraints(event); 1657 1657 if (c) 1658 1658 return c; 1659 1659
+15 -1
arch/x86/kernel/entry_64.S
··· 799 799 cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */ 800 800 jne opportunistic_sysret_failed 801 801 802 - testq $X86_EFLAGS_RF,%r11 /* sysret can't restore RF */ 802 + /* 803 + * SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET, 804 + * restoring TF results in a trap from userspace immediately after 805 + * SYSRET. This would cause an infinite loop whenever #DB happens 806 + * with register state that satisfies the opportunistic SYSRET 807 + * conditions. For example, single-stepping this user code: 808 + * 809 + * movq $stuck_here,%rcx 810 + * pushfq 811 + * popq %r11 812 + * stuck_here: 813 + * 814 + * would never get past 'stuck_here'. 815 + */ 816 + testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 803 817 jnz opportunistic_sysret_failed 804 818 805 819 /* nothing to check for RSP */
+1 -1
arch/x86/kernel/kgdb.c
··· 72 72 { "bx", 8, offsetof(struct pt_regs, bx) }, 73 73 { "cx", 8, offsetof(struct pt_regs, cx) }, 74 74 { "dx", 8, offsetof(struct pt_regs, dx) }, 75 - { "si", 8, offsetof(struct pt_regs, dx) }, 75 + { "si", 8, offsetof(struct pt_regs, si) }, 76 76 { "di", 8, offsetof(struct pt_regs, di) }, 77 77 { "bp", 8, offsetof(struct pt_regs, bp) }, 78 78 { "sp", 8, offsetof(struct pt_regs, sp) },
+10
arch/x86/kernel/reboot.c
··· 183 183 }, 184 184 }, 185 185 186 + /* ASRock */ 187 + { /* Handle problems with rebooting on ASRock Q1900DC-ITX */ 188 + .callback = set_pci_reboot, 189 + .ident = "ASRock Q1900DC-ITX", 190 + .matches = { 191 + DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"), 192 + DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"), 193 + }, 194 + }, 195 + 186 196 /* ASUS */ 187 197 { /* Handle problems with rebooting on ASUS P4S800 */ 188 198 .callback = set_bios_reboot,
+9 -1
arch/x86/xen/p2m.c
··· 91 91 unsigned long xen_max_p2m_pfn __read_mostly; 92 92 EXPORT_SYMBOL_GPL(xen_max_p2m_pfn); 93 93 94 + #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT 95 + #define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT 96 + #else 97 + #define P2M_LIMIT 0 98 + #endif 99 + 94 100 static DEFINE_SPINLOCK(p2m_update_lock); 95 101 96 102 static unsigned long *p2m_mid_missing_mfn; ··· 391 385 void __init xen_vmalloc_p2m_tree(void) 392 386 { 393 387 static struct vm_struct vm; 388 + unsigned long p2m_limit; 394 389 390 + p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; 395 391 vm.flags = VM_ALLOC; 396 - vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn, 392 + vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), 397 393 PMD_SIZE * PMDS_PER_MID_PAGE); 398 394 vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE); 399 395 pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);
+3 -3
block/blk-settings.c
··· 585 585 b->physical_block_size); 586 586 587 587 t->io_min = max(t->io_min, b->io_min); 588 - t->io_opt = lcm(t->io_opt, b->io_opt); 588 + t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); 589 589 590 590 t->cluster &= b->cluster; 591 591 t->discard_zeroes_data &= b->discard_zeroes_data; ··· 616 616 b->raid_partial_stripes_expensive); 617 617 618 618 /* Find lowest common alignment_offset */ 619 - t->alignment_offset = lcm(t->alignment_offset, alignment) 619 + t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment) 620 620 % max(t->physical_block_size, t->io_min); 621 621 622 622 /* Verify that new alignment_offset is on a logical block boundary */ ··· 643 643 b->max_discard_sectors); 644 644 t->discard_granularity = max(t->discard_granularity, 645 645 b->discard_granularity); 646 - t->discard_alignment = lcm(t->discard_alignment, alignment) % 646 + t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) % 647 647 t->discard_granularity; 648 648 } 649 649
+7 -15
drivers/firmware/dmi_scan.c
··· 86 86 int i = 0; 87 87 88 88 /* 89 - * Stop when we see all the items the table claimed to have 90 - * OR we run off the end of the table (also happens) 89 + * Stop when we have seen all the items the table claimed to have 90 + * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run 91 + * off the end of the table (should never happen but sometimes does 92 + * on bogus implementations.) 91 93 */ 92 - while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) { 94 + while ((!num || i < num) && 95 + (data - buf + sizeof(struct dmi_header)) <= len) { 93 96 const struct dmi_header *dm = (const struct dmi_header *)data; 94 97 95 98 /* ··· 532 529 if (memcmp(buf, "_SM3_", 5) == 0 && 533 530 buf[6] < 32 && dmi_checksum(buf, buf[6])) { 534 531 dmi_ver = get_unaligned_be16(buf + 7); 532 + dmi_num = 0; /* No longer specified */ 535 533 dmi_len = get_unaligned_le32(buf + 12); 536 534 dmi_base = get_unaligned_le64(buf + 16); 537 - 538 - /* 539 - * The 64-bit SMBIOS 3.0 entry point no longer has a field 540 - * containing the number of structures present in the table. 541 - * Instead, it defines the table size as a maximum size, and 542 - * relies on the end-of-table structure type (#127) to be used 543 - * to signal the end of the table. 544 - * So let's define dmi_num as an upper bound as well: each 545 - * structure has a 4 byte header, so dmi_len / 4 is an upper 546 - * bound for the number of structures in the table. 547 - */ 548 - dmi_num = dmi_len / 4; 549 535 550 536 if (dmi_walk_early(dmi_decode) == 0) { 551 537 pr_info("SMBIOS %d.%d present.\n",
+1
drivers/gpu/drm/drm_edid_load.c
··· 287 287 288 288 drm_mode_connector_update_edid_property(connector, edid); 289 289 ret = drm_add_edid_modes(connector, edid); 290 + drm_edid_to_eld(connector, edid); 290 291 kfree(edid); 291 292 292 293 return ret;
+1
drivers/gpu/drm/drm_probe_helper.c
··· 174 174 struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; 175 175 176 176 count = drm_add_edid_modes(connector, edid); 177 + drm_edid_to_eld(connector, edid); 177 178 } else 178 179 count = (*connector_funcs->get_modes)(connector); 179 180 }
+5 -3
drivers/gpu/drm/exynos/exynos_drm_fimd.c
··· 147 147 unsigned int ovl_height; 148 148 unsigned int fb_width; 149 149 unsigned int fb_height; 150 + unsigned int fb_pitch; 150 151 unsigned int bpp; 151 152 unsigned int pixel_format; 152 153 dma_addr_t dma_addr; ··· 533 532 win_data->offset_y = plane->crtc_y; 534 533 win_data->ovl_width = plane->crtc_width; 535 534 win_data->ovl_height = plane->crtc_height; 535 + win_data->fb_pitch = plane->pitch; 536 536 win_data->fb_width = plane->fb_width; 537 537 win_data->fb_height = plane->fb_height; 538 538 win_data->dma_addr = plane->dma_addr[0] + offset; 539 539 win_data->bpp = plane->bpp; 540 540 win_data->pixel_format = plane->pixel_format; 541 - win_data->buf_offsize = (plane->fb_width - plane->crtc_width) * 542 - (plane->bpp >> 3); 541 + win_data->buf_offsize = 542 + plane->pitch - (plane->crtc_width * (plane->bpp >> 3)); 543 543 win_data->line_size = plane->crtc_width * (plane->bpp >> 3); 544 544 545 545 DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", ··· 706 704 writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); 707 705 708 706 /* buffer end address */ 709 - size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); 707 + size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3); 710 708 val = (unsigned long)(win_data->dma_addr + size); 711 709 writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); 712 710
+10 -7
drivers/gpu/drm/exynos/exynos_mixer.c
··· 55 55 unsigned int fb_x; 56 56 unsigned int fb_y; 57 57 unsigned int fb_width; 58 + unsigned int fb_pitch; 58 59 unsigned int fb_height; 59 60 unsigned int src_width; 60 61 unsigned int src_height; ··· 439 438 } else { 440 439 luma_addr[0] = win_data->dma_addr; 441 440 chroma_addr[0] = win_data->dma_addr 442 - + (win_data->fb_width * win_data->fb_height); 441 + + (win_data->fb_pitch * win_data->fb_height); 443 442 } 444 443 445 444 if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) { ··· 448 447 luma_addr[1] = luma_addr[0] + 0x40; 449 448 chroma_addr[1] = chroma_addr[0] + 0x40; 450 449 } else { 451 - luma_addr[1] = luma_addr[0] + win_data->fb_width; 452 - chroma_addr[1] = chroma_addr[0] + win_data->fb_width; 450 + luma_addr[1] = luma_addr[0] + win_data->fb_pitch; 451 + chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch; 453 452 } 454 453 } else { 455 454 ctx->interlace = false; ··· 470 469 vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK); 471 470 472 471 /* setting size of input image */ 473 - vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) | 472 + vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) | 474 473 VP_IMG_VSIZE(win_data->fb_height)); 475 474 /* chroma height has to reduced by 2 to avoid chroma distorions */ 476 - vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) | 475 + vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) | 477 476 VP_IMG_VSIZE(win_data->fb_height / 2)); 478 477 479 478 vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width); ··· 560 559 /* converting dma address base and source offset */ 561 560 dma_addr = win_data->dma_addr 562 561 + (win_data->fb_x * win_data->bpp >> 3) 563 - + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3); 562 + + (win_data->fb_y * win_data->fb_pitch); 564 563 src_x_offset = 0; 565 564 src_y_offset = 0; 566 565 ··· 577 576 MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK); 578 577 579 578 /* setup geometry */ 580 - 
mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width); 579 + mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), 580 + win_data->fb_pitch / (win_data->bpp >> 3)); 581 581 582 582 /* setup display size */ 583 583 if (ctx->mxr_ver == MXR_VER_128_0_0_184 && ··· 963 961 win_data->fb_y = plane->fb_y; 964 962 win_data->fb_width = plane->fb_width; 965 963 win_data->fb_height = plane->fb_height; 964 + win_data->fb_pitch = plane->pitch; 966 965 win_data->src_width = plane->src_width; 967 966 win_data->src_height = plane->src_height; 968 967
+1 -1
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 1487 1487 goto err; 1488 1488 } 1489 1489 1490 - if (i915_needs_cmd_parser(ring)) { 1490 + if (i915_needs_cmd_parser(ring) && args->batch_len) { 1491 1491 batch_obj = i915_gem_execbuffer_parse(ring, 1492 1492 &shadow_exec_entry, 1493 1493 eb,
+2 -2
drivers/gpu/drm/i915/intel_sprite.c
··· 1322 1322 drm_modeset_lock_all(dev); 1323 1323 1324 1324 plane = drm_plane_find(dev, set->plane_id); 1325 - if (!plane) { 1325 + if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { 1326 1326 ret = -ENOENT; 1327 1327 goto out_unlock; 1328 1328 } ··· 1349 1349 drm_modeset_lock_all(dev); 1350 1350 1351 1351 plane = drm_plane_find(dev, get->plane_id); 1352 - if (!plane) { 1352 + if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { 1353 1353 ret = -ENOENT; 1354 1354 goto out_unlock; 1355 1355 }
+1
drivers/gpu/drm/radeon/cikd.h
··· 2129 2129 #define VCE_UENC_REG_CLOCK_GATING 0x207c0 2130 2130 #define VCE_SYS_INT_EN 0x21300 2131 2131 # define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3) 2132 + #define VCE_LMI_VCPU_CACHE_40BIT_BAR 0x2145c 2132 2133 #define VCE_LMI_CTRL2 0x21474 2133 2134 #define VCE_LMI_CTRL 0x21498 2134 2135 #define VCE_LMI_VM_CTRL 0x214a0
+1
drivers/gpu/drm/radeon/radeon.h
··· 1565 1565 int new_active_crtc_count; 1566 1566 u32 current_active_crtcs; 1567 1567 int current_active_crtc_count; 1568 + bool single_display; 1568 1569 struct radeon_dpm_dynamic_state dyn_state; 1569 1570 struct radeon_dpm_fan fan; 1570 1571 u32 tdp_limit;
+7 -3
drivers/gpu/drm/radeon/radeon_bios.c
··· 76 76 77 77 static bool radeon_read_bios(struct radeon_device *rdev) 78 78 { 79 - uint8_t __iomem *bios; 79 + uint8_t __iomem *bios, val1, val2; 80 80 size_t size; 81 81 82 82 rdev->bios = NULL; ··· 86 86 return false; 87 87 } 88 88 89 - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { 89 + val1 = readb(&bios[0]); 90 + val2 = readb(&bios[1]); 91 + 92 + if (size == 0 || val1 != 0x55 || val2 != 0xaa) { 90 93 pci_unmap_rom(rdev->pdev, bios); 91 94 return false; 92 95 } 93 - rdev->bios = kmemdup(bios, size, GFP_KERNEL); 96 + rdev->bios = kzalloc(size, GFP_KERNEL); 94 97 if (rdev->bios == NULL) { 95 98 pci_unmap_rom(rdev->pdev, bios); 96 99 return false; 97 100 } 101 + memcpy_fromio(rdev->bios, bios, size); 98 102 pci_unmap_rom(rdev->pdev, bios); 99 103 return true; 100 104 }
+4 -7
drivers/gpu/drm/radeon/radeon_mn.c
··· 122 122 it = interval_tree_iter_first(&rmn->objects, start, end); 123 123 while (it) { 124 124 struct radeon_bo *bo; 125 - struct fence *fence; 126 125 int r; 127 126 128 127 bo = container_of(it, struct radeon_bo, mn_it); ··· 133 134 continue; 134 135 } 135 136 136 - fence = reservation_object_get_excl(bo->tbo.resv); 137 - if (fence) { 138 - r = radeon_fence_wait((struct radeon_fence *)fence, false); 139 - if (r) 140 - DRM_ERROR("(%d) failed to wait for user bo\n", r); 141 - } 137 + r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, 138 + false, MAX_SCHEDULE_TIMEOUT); 139 + if (r) 140 + DRM_ERROR("(%d) failed to wait for user bo\n", r); 142 141 143 142 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); 144 143 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+17 -5
drivers/gpu/drm/radeon/radeon_pm.c
··· 837 837 radeon_pm_compute_clocks(rdev); 838 838 } 839 839 840 - static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, 841 - enum radeon_pm_state_type dpm_state) 840 + static bool radeon_dpm_single_display(struct radeon_device *rdev) 842 841 { 843 - int i; 844 - struct radeon_ps *ps; 845 - u32 ui_class; 846 842 bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ? 847 843 true : false; 848 844 ··· 853 857 */ 854 858 if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120)) 855 859 single_display = false; 860 + 861 + return single_display; 862 + } 863 + 864 + static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, 865 + enum radeon_pm_state_type dpm_state) 866 + { 867 + int i; 868 + struct radeon_ps *ps; 869 + u32 ui_class; 870 + bool single_display = radeon_dpm_single_display(rdev); 856 871 857 872 /* certain older asics have a separare 3D performance state, 858 873 * so try that first if the user selected performance ··· 990 983 struct radeon_ps *ps; 991 984 enum radeon_pm_state_type dpm_state; 992 985 int ret; 986 + bool single_display = radeon_dpm_single_display(rdev); 993 987 994 988 /* if dpm init failed */ 995 989 if (!rdev->pm.dpm_enabled) ··· 1014 1006 if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) { 1015 1007 /* vce just modifies an existing state so force a change */ 1016 1008 if (ps->vce_active != rdev->pm.dpm.vce_active) 1009 + goto force; 1010 + /* user has made a display change (such as timing) */ 1011 + if (rdev->pm.dpm.single_display != single_display) 1017 1012 goto force; 1018 1013 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { 1019 1014 /* for pre-BTC and APUs if the num crtcs changed but state is the same, ··· 1080 1069 1081 1070 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 1082 1071 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 1072 + rdev->pm.dpm.single_display = single_display; 1083 1073 1084 
1074 /* wait for the rings to drain */ 1085 1075 for (i = 0; i < RADEON_NUM_RINGS; i++) {
+1 -1
drivers/gpu/drm/radeon/radeon_ring.c
··· 495 495 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 496 496 seq_printf(m, "%u dwords in ring\n", count); 497 497 498 - if (!ring->ready) 498 + if (!ring->ring) 499 499 return 0; 500 500 501 501 /* print 8 dw before current rptr as often it's the last executed
+4
drivers/gpu/drm/radeon/radeon_ttm.c
··· 598 598 enum dma_data_direction direction = write ? 599 599 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 600 600 601 + /* double check that we don't free the table twice */ 602 + if (!ttm->sg->sgl) 603 + return; 604 + 601 605 /* free the sg table and pages again */ 602 606 dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); 603 607
+3
drivers/gpu/drm/radeon/vce_v2_0.c
··· 156 156 WREG32(VCE_LMI_SWAP_CNTL1, 0); 157 157 WREG32(VCE_LMI_VM_CTRL, 0); 158 158 159 + WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8); 160 + 161 + addr &= 0xff; 159 162 size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size); 160 163 WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); 161 164 WREG32(VCE_VCPU_CACHE_SIZE0, size);
+1 -1
drivers/iio/accel/bma180.c
··· 659 659 660 660 mutex_lock(&data->mutex); 661 661 662 - for_each_set_bit(bit, indio_dev->buffer->scan_mask, 662 + for_each_set_bit(bit, indio_dev->active_scan_mask, 663 663 indio_dev->masklength) { 664 664 ret = bma180_get_data_reg(data, bit); 665 665 if (ret < 0) {
+10 -10
drivers/iio/accel/bmc150-accel.c
··· 168 168 int val; 169 169 int val2; 170 170 u8 bw_bits; 171 - } bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08}, 172 - {15, 630000, 0x09}, 173 - {31, 250000, 0x0A}, 174 - {62, 500000, 0x0B}, 175 - {125, 0, 0x0C}, 176 - {250, 0, 0x0D}, 177 - {500, 0, 0x0E}, 178 - {1000, 0, 0x0F} }; 171 + } bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08}, 172 + {31, 260000, 0x09}, 173 + {62, 500000, 0x0A}, 174 + {125, 0, 0x0B}, 175 + {250, 0, 0x0C}, 176 + {500, 0, 0x0D}, 177 + {1000, 0, 0x0E}, 178 + {2000, 0, 0x0F} }; 179 179 180 180 static const struct { 181 181 int bw_bits; ··· 840 840 } 841 841 842 842 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( 843 - "7.810000 15.630000 31.250000 62.500000 125 250 500 1000"); 843 + "15.620000 31.260000 62.50000 125 250 500 1000 2000"); 844 844 845 845 static struct attribute *bmc150_accel_attributes[] = { 846 846 &iio_const_attr_sampling_frequency_available.dev_attr.attr, ··· 986 986 int bit, ret, i = 0; 987 987 988 988 mutex_lock(&data->mutex); 989 - for_each_set_bit(bit, indio_dev->buffer->scan_mask, 989 + for_each_set_bit(bit, indio_dev->active_scan_mask, 990 990 indio_dev->masklength) { 991 991 ret = i2c_smbus_read_word_data(data->client, 992 992 BMC150_ACCEL_AXIS_TO_REG(bit));
+1 -1
drivers/iio/accel/kxcjk-1013.c
··· 956 956 957 957 mutex_lock(&data->mutex); 958 958 959 - for_each_set_bit(bit, indio_dev->buffer->scan_mask, 959 + for_each_set_bit(bit, indio_dev->active_scan_mask, 960 960 indio_dev->masklength) { 961 961 ret = kxcjk1013_get_acc_reg(data, bit); 962 962 if (ret < 0) {
+2 -1
drivers/iio/adc/Kconfig
··· 137 137 138 138 config CC10001_ADC 139 139 tristate "Cosmic Circuits 10001 ADC driver" 140 - depends on HAS_IOMEM || HAVE_CLK || REGULATOR 140 + depends on HAVE_CLK || REGULATOR 141 + depends on HAS_IOMEM 141 142 select IIO_BUFFER 142 143 select IIO_TRIGGERED_BUFFER 143 144 help
+2 -3
drivers/iio/adc/at91_adc.c
··· 544 544 { 545 545 struct iio_dev *idev = iio_trigger_get_drvdata(trig); 546 546 struct at91_adc_state *st = iio_priv(idev); 547 - struct iio_buffer *buffer = idev->buffer; 548 547 struct at91_adc_reg_desc *reg = st->registers; 549 548 u32 status = at91_adc_readl(st, reg->trigger_register); 550 549 int value; ··· 563 564 at91_adc_writel(st, reg->trigger_register, 564 565 status | value); 565 566 566 - for_each_set_bit(bit, buffer->scan_mask, 567 + for_each_set_bit(bit, idev->active_scan_mask, 567 568 st->num_channels) { 568 569 struct iio_chan_spec const *chan = idev->channels + bit; 569 570 at91_adc_writel(st, AT91_ADC_CHER, ··· 578 579 at91_adc_writel(st, reg->trigger_register, 579 580 status & ~value); 580 581 581 - for_each_set_bit(bit, buffer->scan_mask, 582 + for_each_set_bit(bit, idev->active_scan_mask, 582 583 st->num_channels) { 583 584 struct iio_chan_spec const *chan = idev->channels + bit; 584 585 at91_adc_writel(st, AT91_ADC_CHDR,
+1 -2
drivers/iio/adc/ti_am335x_adc.c
··· 188 188 static int tiadc_buffer_postenable(struct iio_dev *indio_dev) 189 189 { 190 190 struct tiadc_device *adc_dev = iio_priv(indio_dev); 191 - struct iio_buffer *buffer = indio_dev->buffer; 192 191 unsigned int enb = 0; 193 192 u8 bit; 194 193 195 194 tiadc_step_config(indio_dev); 196 - for_each_set_bit(bit, buffer->scan_mask, adc_dev->channels) 195 + for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) 197 196 enb |= (get_adc_step_bit(adc_dev, bit) << 1); 198 197 adc_dev->buffer_en_ch_steps = enb; 199 198
+61 -30
drivers/iio/adc/vf610_adc.c
··· 141 141 struct regulator *vref; 142 142 struct vf610_adc_feature adc_feature; 143 143 144 + u32 sample_freq_avail[5]; 145 + 144 146 struct completion completion; 145 147 }; 148 + 149 + static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 }; 146 150 147 151 #define VF610_ADC_CHAN(_idx, _chan_type) { \ 148 152 .type = (_chan_type), \ ··· 184 180 /* sentinel */ 185 181 }; 186 182 187 - /* 188 - * ADC sample frequency, unit is ADCK cycles. 189 - * ADC clk source is ipg clock, which is the same as bus clock. 190 - * 191 - * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder) 192 - * SFCAdder: fixed to 6 ADCK cycles 193 - * AverageNum: 1, 4, 8, 16, 32 samples for hardware average. 194 - * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode 195 - * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles 196 - * 197 - * By default, enable 12 bit resolution mode, clock source 198 - * set to ipg clock, So get below frequency group: 199 - */ 200 - static const u32 vf610_sample_freq_avail[5] = 201 - {1941176, 559332, 286957, 145374, 73171}; 183 + static inline void vf610_adc_calculate_rates(struct vf610_adc *info) 184 + { 185 + unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk); 186 + int i; 187 + 188 + /* 189 + * Calculate ADC sample frequencies 190 + * Sample time unit is ADCK cycles. ADCK clk source is ipg clock, 191 + * which is the same as bus clock. 192 + * 193 + * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder) 194 + * SFCAdder: fixed to 6 ADCK cycles 195 + * AverageNum: 1, 4, 8, 16, 32 samples for hardware average. 
196 + * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode 197 + * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles 198 + */ 199 + adck_rate = ipg_rate / info->adc_feature.clk_div; 200 + for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++) 201 + info->sample_freq_avail[i] = 202 + adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3)); 203 + } 202 204 203 205 static inline void vf610_adc_cfg_init(struct vf610_adc *info) 204 206 { 207 + struct vf610_adc_feature *adc_feature = &info->adc_feature; 208 + 205 209 /* set default Configuration for ADC controller */ 206 - info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET; 207 - info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET; 210 + adc_feature->clk_sel = VF610_ADCIOC_BUSCLK_SET; 211 + adc_feature->vol_ref = VF610_ADCIOC_VR_VREF_SET; 208 212 209 - info->adc_feature.calibration = true; 210 - info->adc_feature.ovwren = true; 213 + adc_feature->calibration = true; 214 + adc_feature->ovwren = true; 211 215 212 - info->adc_feature.clk_div = 1; 213 - info->adc_feature.res_mode = 12; 214 - info->adc_feature.sample_rate = 1; 215 - info->adc_feature.lpm = true; 216 + adc_feature->res_mode = 12; 217 + adc_feature->sample_rate = 1; 218 + adc_feature->lpm = true; 219 + 220 + /* Use a save ADCK which is below 20MHz on all devices */ 221 + adc_feature->clk_div = 8; 222 + 223 + vf610_adc_calculate_rates(info); 216 224 } 217 225 218 226 static void vf610_adc_cfg_post_set(struct vf610_adc *info) ··· 306 290 307 291 cfg_data = readl(info->regs + VF610_REG_ADC_CFG); 308 292 309 - /* low power configuration */ 310 293 cfg_data &= ~VF610_ADC_ADLPC_EN; 311 294 if (adc_feature->lpm) 312 295 cfg_data |= VF610_ADC_ADLPC_EN; 313 296 314 - /* disable high speed */ 315 297 cfg_data &= ~VF610_ADC_ADHSC_EN; 316 298 317 299 writel(cfg_data, info->regs + VF610_REG_ADC_CFG); ··· 449 435 return IRQ_HANDLED; 450 436 } 451 437 452 - static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171"); 438 + static ssize_t 
vf610_show_samp_freq_avail(struct device *dev, 439 + struct device_attribute *attr, char *buf) 440 + { 441 + struct vf610_adc *info = iio_priv(dev_to_iio_dev(dev)); 442 + size_t len = 0; 443 + int i; 444 + 445 + for (i = 0; i < ARRAY_SIZE(info->sample_freq_avail); i++) 446 + len += scnprintf(buf + len, PAGE_SIZE - len, 447 + "%u ", info->sample_freq_avail[i]); 448 + 449 + /* replace trailing space by newline */ 450 + buf[len - 1] = '\n'; 451 + 452 + return len; 453 + } 454 + 455 + static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(vf610_show_samp_freq_avail); 453 456 454 457 static struct attribute *vf610_attributes[] = { 455 - &iio_const_attr_sampling_frequency_available.dev_attr.attr, 458 + &iio_dev_attr_sampling_frequency_available.dev_attr.attr, 456 459 NULL 457 460 }; 458 461 ··· 533 502 return IIO_VAL_FRACTIONAL_LOG2; 534 503 535 504 case IIO_CHAN_INFO_SAMP_FREQ: 536 - *val = vf610_sample_freq_avail[info->adc_feature.sample_rate]; 505 + *val = info->sample_freq_avail[info->adc_feature.sample_rate]; 537 506 *val2 = 0; 538 507 return IIO_VAL_INT; 539 508 ··· 556 525 switch (mask) { 557 526 case IIO_CHAN_INFO_SAMP_FREQ: 558 527 for (i = 0; 559 - i < ARRAY_SIZE(vf610_sample_freq_avail); 528 + i < ARRAY_SIZE(info->sample_freq_avail); 560 529 i++) 561 - if (val == vf610_sample_freq_avail[i]) { 530 + if (val == info->sample_freq_avail[i]) { 562 531 info->adc_feature.sample_rate = i; 563 532 vf610_adc_sample_set(info); 564 533 return 0;
+1 -1
drivers/iio/gyro/bmg160.c
··· 822 822 int bit, ret, i = 0; 823 823 824 824 mutex_lock(&data->mutex); 825 - for_each_set_bit(bit, indio_dev->buffer->scan_mask, 825 + for_each_set_bit(bit, indio_dev->active_scan_mask, 826 826 indio_dev->masklength) { 827 827 ret = i2c_smbus_read_word_data(data->client, 828 828 BMG160_AXIS_TO_REG(bit));
+1 -1
drivers/iio/imu/adis_trigger.c
··· 60 60 iio_trigger_set_drvdata(adis->trig, adis); 61 61 ret = iio_trigger_register(adis->trig); 62 62 63 - indio_dev->trig = adis->trig; 63 + indio_dev->trig = iio_trigger_get(adis->trig); 64 64 if (ret) 65 65 goto error_free_irq; 66 66
+30 -26
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
··· 410 410 } 411 411 } 412 412 413 - static int inv_mpu6050_write_fsr(struct inv_mpu6050_state *st, int fsr) 413 + static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val) 414 414 { 415 - int result; 415 + int result, i; 416 416 u8 d; 417 417 418 - if (fsr < 0 || fsr > INV_MPU6050_MAX_GYRO_FS_PARAM) 419 - return -EINVAL; 420 - if (fsr == st->chip_config.fsr) 421 - return 0; 418 + for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) { 419 + if (gyro_scale_6050[i] == val) { 420 + d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT); 421 + result = inv_mpu6050_write_reg(st, 422 + st->reg->gyro_config, d); 423 + if (result) 424 + return result; 422 425 423 - d = (fsr << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT); 424 - result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d); 425 - if (result) 426 - return result; 427 - st->chip_config.fsr = fsr; 426 + st->chip_config.fsr = i; 427 + return 0; 428 + } 429 + } 428 430 429 - return 0; 431 + return -EINVAL; 430 432 } 431 433 432 - static int inv_mpu6050_write_accel_fs(struct inv_mpu6050_state *st, int fs) 434 + static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val) 433 435 { 434 - int result; 436 + int result, i; 435 437 u8 d; 436 438 437 - if (fs < 0 || fs > INV_MPU6050_MAX_ACCL_FS_PARAM) 438 - return -EINVAL; 439 - if (fs == st->chip_config.accl_fs) 440 - return 0; 439 + for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) { 440 + if (accel_scale[i] == val) { 441 + d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT); 442 + result = inv_mpu6050_write_reg(st, 443 + st->reg->accl_config, d); 444 + if (result) 445 + return result; 441 446 442 - d = (fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT); 443 - result = inv_mpu6050_write_reg(st, st->reg->accl_config, d); 444 - if (result) 445 - return result; 446 - st->chip_config.accl_fs = fs; 447 + st->chip_config.accl_fs = i; 448 + return 0; 449 + } 450 + } 447 451 448 - return 0; 452 + return -EINVAL; 449 453 } 450 454 451 455 static int inv_mpu6050_write_raw(struct 
iio_dev *indio_dev, ··· 475 471 case IIO_CHAN_INFO_SCALE: 476 472 switch (chan->type) { 477 473 case IIO_ANGL_VEL: 478 - result = inv_mpu6050_write_fsr(st, val); 474 + result = inv_mpu6050_write_gyro_scale(st, val2); 479 475 break; 480 476 case IIO_ACCEL: 481 - result = inv_mpu6050_write_accel_fs(st, val); 477 + result = inv_mpu6050_write_accel_scale(st, val2); 482 478 break; 483 479 default: 484 480 result = -EINVAL;
+14 -11
drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
··· 24 24 #include <linux/poll.h> 25 25 #include "inv_mpu_iio.h" 26 26 27 + static void inv_clear_kfifo(struct inv_mpu6050_state *st) 28 + { 29 + unsigned long flags; 30 + 31 + /* take the spin lock sem to avoid interrupt kick in */ 32 + spin_lock_irqsave(&st->time_stamp_lock, flags); 33 + kfifo_reset(&st->timestamps); 34 + spin_unlock_irqrestore(&st->time_stamp_lock, flags); 35 + } 36 + 27 37 int inv_reset_fifo(struct iio_dev *indio_dev) 28 38 { 29 39 int result; ··· 60 50 INV_MPU6050_BIT_FIFO_RST); 61 51 if (result) 62 52 goto reset_fifo_fail; 53 + 54 + /* clear timestamps fifo */ 55 + inv_clear_kfifo(st); 56 + 63 57 /* enable interrupt */ 64 58 if (st->chip_config.accl_fifo_enable || 65 59 st->chip_config.gyro_fifo_enable) { ··· 95 81 INV_MPU6050_BIT_DATA_RDY_EN); 96 82 97 83 return result; 98 - } 99 - 100 - static void inv_clear_kfifo(struct inv_mpu6050_state *st) 101 - { 102 - unsigned long flags; 103 - 104 - /* take the spin lock sem to avoid interrupt kick in */ 105 - spin_lock_irqsave(&st->time_stamp_lock, flags); 106 - kfifo_reset(&st->timestamps); 107 - spin_unlock_irqrestore(&st->time_stamp_lock, flags); 108 84 } 109 85 110 86 /** ··· 188 184 flush_fifo: 189 185 /* Flush HW and SW FIFOs. */ 190 186 inv_reset_fifo(indio_dev); 191 - inv_clear_kfifo(st); 192 187 mutex_unlock(&indio_dev->mlock); 193 188 iio_trigger_notify_done(indio_dev->trig); 194 189
+1 -1
drivers/iio/imu/kmx61.c
··· 1227 1227 base = KMX61_MAG_XOUT_L; 1228 1228 1229 1229 mutex_lock(&data->lock); 1230 - for_each_set_bit(bit, indio_dev->buffer->scan_mask, 1230 + for_each_set_bit(bit, indio_dev->active_scan_mask, 1231 1231 indio_dev->masklength) { 1232 1232 ret = kmx61_read_measurement(data, base, bit); 1233 1233 if (ret < 0) {
+3 -2
drivers/iio/industrialio-core.c
··· 847 847 * @attr_list: List of IIO device attributes 848 848 * 849 849 * This function frees the memory allocated for each of the IIO device 850 - * attributes in the list. Note: if you want to reuse the list after calling 851 - * this function you have to reinitialize it using INIT_LIST_HEAD(). 850 + * attributes in the list. 852 851 */ 853 852 void iio_free_chan_devattr_list(struct list_head *attr_list) 854 853 { ··· 855 856 856 857 list_for_each_entry_safe(p, n, attr_list, l) { 857 858 kfree(p->dev_attr.attr.name); 859 + list_del(&p->l); 858 860 kfree(p); 859 861 } 860 862 } ··· 936 936 937 937 iio_free_chan_devattr_list(&indio_dev->channel_attr_list); 938 938 kfree(indio_dev->chan_attr_group.attrs); 939 + indio_dev->chan_attr_group.attrs = NULL; 939 940 } 940 941 941 942 static void iio_dev_release(struct device *device)
+1
drivers/iio/industrialio-event.c
··· 500 500 error_free_setup_event_lines: 501 501 iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); 502 502 kfree(indio_dev->event_interface); 503 + indio_dev->event_interface = NULL; 503 504 return ret; 504 505 } 505 506
+1 -1
drivers/iio/proximity/sx9500.c
··· 494 494 495 495 mutex_lock(&data->mutex); 496 496 497 - for_each_set_bit(bit, indio_dev->buffer->scan_mask, 497 + for_each_set_bit(bit, indio_dev->active_scan_mask, 498 498 indio_dev->masklength) { 499 499 ret = sx9500_read_proximity(data, &indio_dev->channels[bit], 500 500 &val);
+8
drivers/infiniband/core/umem.c
··· 99 99 if (dmasync) 100 100 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); 101 101 102 + /* 103 + * If the combination of the addr and size requested for this memory 104 + * region causes an integer overflow, return error. 105 + */ 106 + if ((PAGE_ALIGN(addr + size) <= size) || 107 + (PAGE_ALIGN(addr + size) <= addr)) 108 + return ERR_PTR(-EINVAL); 109 + 102 110 if (!can_do_mlock()) 103 111 return ERR_PTR(-EPERM); 104 112
+29 -19
drivers/input/mouse/alps.c
··· 1154 1154 mutex_unlock(&alps_mutex); 1155 1155 } 1156 1156 1157 - static void alps_report_bare_ps2_packet(struct input_dev *dev, 1157 + static void alps_report_bare_ps2_packet(struct psmouse *psmouse, 1158 1158 unsigned char packet[], 1159 1159 bool report_buttons) 1160 1160 { 1161 + struct alps_data *priv = psmouse->private; 1162 + struct input_dev *dev; 1163 + 1164 + /* Figure out which device to use to report the bare packet */ 1165 + if (priv->proto_version == ALPS_PROTO_V2 && 1166 + (priv->flags & ALPS_DUALPOINT)) { 1167 + /* On V2 devices the DualPoint Stick reports bare packets */ 1168 + dev = priv->dev2; 1169 + } else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) { 1170 + /* Register dev3 mouse if we received PS/2 packet first time */ 1171 + if (!IS_ERR(priv->dev3)) 1172 + psmouse_queue_work(psmouse, &priv->dev3_register_work, 1173 + 0); 1174 + return; 1175 + } else { 1176 + dev = priv->dev3; 1177 + } 1178 + 1161 1179 if (report_buttons) 1162 1180 alps_report_buttons(dev, NULL, 1163 1181 packet[0] & 1, packet[0] & 2, packet[0] & 4); ··· 1250 1232 * de-synchronization. 1251 1233 */ 1252 1234 1253 - alps_report_bare_ps2_packet(priv->dev2, 1254 - &psmouse->packet[3], false); 1235 + alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3], 1236 + false); 1255 1237 1256 1238 /* 1257 1239 * Continue with the standard ALPS protocol handling, ··· 1307 1289 * properly we only do this if the device is fully synchronized. 
1308 1290 */ 1309 1291 if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) { 1310 - 1311 - /* Register dev3 mouse if we received PS/2 packet first time */ 1312 - if (unlikely(!priv->dev3)) 1313 - psmouse_queue_work(psmouse, 1314 - &priv->dev3_register_work, 0); 1315 - 1316 1292 if (psmouse->pktcnt == 3) { 1317 - /* Once dev3 mouse device is registered report data */ 1318 - if (likely(!IS_ERR_OR_NULL(priv->dev3))) 1319 - alps_report_bare_ps2_packet(priv->dev3, 1320 - psmouse->packet, 1321 - true); 1293 + alps_report_bare_ps2_packet(psmouse, psmouse->packet, 1294 + true); 1322 1295 return PSMOUSE_FULL_PACKET; 1323 1296 } 1324 1297 return PSMOUSE_GOOD_DATA; ··· 2290 2281 priv->set_abs_params = alps_set_abs_params_mt; 2291 2282 priv->nibble_commands = alps_v3_nibble_commands; 2292 2283 priv->addr_command = PSMOUSE_CMD_RESET_WRAP; 2293 - priv->x_max = 1360; 2294 - priv->y_max = 660; 2295 2284 priv->x_bits = 23; 2296 2285 priv->y_bits = 12; 2286 + 2287 + if (alps_dolphin_get_device_area(psmouse, priv)) 2288 + return -EIO; 2289 + 2297 2290 break; 2298 2291 2299 2292 case ALPS_PROTO_V6: ··· 2314 2303 priv->set_abs_params = alps_set_abs_params_mt; 2315 2304 priv->nibble_commands = alps_v3_nibble_commands; 2316 2305 priv->addr_command = PSMOUSE_CMD_RESET_WRAP; 2317 - 2318 - if (alps_dolphin_get_device_area(psmouse, priv)) 2319 - return -EIO; 2306 + priv->x_max = 0xfff; 2307 + priv->y_max = 0x7ff; 2320 2308 2321 2309 if (priv->fw_ver[1] != 0xba) 2322 2310 priv->flags |= ALPS_BUTTONPAD;
+6 -1
drivers/input/mouse/synaptics.c
··· 154 154 }, 155 155 { 156 156 (const char * const []){"LEN2006", NULL}, 157 + {2691, 2691}, 158 + 1024, 5045, 2457, 4832 159 + }, 160 + { 161 + (const char * const []){"LEN2006", NULL}, 157 162 {ANY_BOARD_ID, ANY_BOARD_ID}, 158 163 1264, 5675, 1171, 4688 159 164 }, ··· 194 189 "LEN2003", 195 190 "LEN2004", /* L440 */ 196 191 "LEN2005", 197 - "LEN2006", 192 + "LEN2006", /* Edge E440/E540 */ 198 193 "LEN2007", 199 194 "LEN2008", 200 195 "LEN2009",
+48 -9
drivers/irqchip/irq-gic-v3-its.c
··· 169 169 170 170 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) 171 171 { 172 - cmd->raw_cmd[0] &= ~(0xffffUL << 32); 172 + cmd->raw_cmd[0] &= BIT_ULL(32) - 1; 173 173 cmd->raw_cmd[0] |= ((u64)devid) << 32; 174 174 } 175 175 ··· 802 802 int i; 803 803 int psz = SZ_64K; 804 804 u64 shr = GITS_BASER_InnerShareable; 805 + u64 cache = GITS_BASER_WaWb; 805 806 806 807 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 807 808 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); ··· 849 848 val = (virt_to_phys(base) | 850 849 (type << GITS_BASER_TYPE_SHIFT) | 851 850 ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | 852 - GITS_BASER_WaWb | 851 + cache | 853 852 shr | 854 853 GITS_BASER_VALID); 855 854 ··· 875 874 * Shareability didn't stick. Just use 876 875 * whatever the read reported, which is likely 877 876 * to be the only thing this redistributor 878 - * supports. 877 + * supports. If that's zero, make it 878 + * non-cacheable as well. 879 879 */ 880 880 shr = tmp & GITS_BASER_SHAREABILITY_MASK; 881 + if (!shr) 882 + cache = GITS_BASER_nC; 881 883 goto retry_baser; 882 884 } 883 885 ··· 984 980 tmp = readq_relaxed(rbase + GICR_PROPBASER); 985 981 986 982 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { 983 + if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { 984 + /* 985 + * The HW reports non-shareable, we must 986 + * remove the cacheability attributes as 987 + * well. 
988 + */ 989 + val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | 990 + GICR_PROPBASER_CACHEABILITY_MASK); 991 + val |= GICR_PROPBASER_nC; 992 + writeq_relaxed(val, rbase + GICR_PROPBASER); 993 + } 987 994 pr_info_once("GIC: using cache flushing for LPI property table\n"); 988 995 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; 989 996 } 990 997 991 998 /* set PENDBASE */ 992 999 val = (page_to_phys(pend_page) | 993 - GICR_PROPBASER_InnerShareable | 994 - GICR_PROPBASER_WaWb); 1000 + GICR_PENDBASER_InnerShareable | 1001 + GICR_PENDBASER_WaWb); 995 1002 996 1003 writeq_relaxed(val, rbase + GICR_PENDBASER); 1004 + tmp = readq_relaxed(rbase + GICR_PENDBASER); 1005 + 1006 + if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { 1007 + /* 1008 + * The HW reports non-shareable, we must remove the 1009 + * cacheability attributes as well. 1010 + */ 1011 + val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | 1012 + GICR_PENDBASER_CACHEABILITY_MASK); 1013 + val |= GICR_PENDBASER_nC; 1014 + writeq_relaxed(val, rbase + GICR_PENDBASER); 1015 + } 997 1016 998 1017 /* Enable LPIs */ 999 1018 val = readl_relaxed(rbase + GICR_CTLR); ··· 1053 1026 * This ITS wants a linear CPU number. 1054 1027 */ 1055 1028 target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER); 1056 - target = GICR_TYPER_CPU_NUMBER(target); 1029 + target = GICR_TYPER_CPU_NUMBER(target) << 16; 1057 1030 } 1058 1031 1059 1032 /* Perform collection mapping */ ··· 1449 1422 1450 1423 writeq_relaxed(baser, its->base + GITS_CBASER); 1451 1424 tmp = readq_relaxed(its->base + GITS_CBASER); 1452 - writeq_relaxed(0, its->base + GITS_CWRITER); 1453 - writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); 1454 1425 1455 - if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { 1426 + if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { 1427 + if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { 1428 + /* 1429 + * The HW reports non-shareable, we must 1430 + * remove the cacheability attributes as 1431 + * well. 
1432 + */ 1433 + baser &= ~(GITS_CBASER_SHAREABILITY_MASK | 1434 + GITS_CBASER_CACHEABILITY_MASK); 1435 + baser |= GITS_CBASER_nC; 1436 + writeq_relaxed(baser, its->base + GITS_CBASER); 1437 + } 1456 1438 pr_info("ITS: using cache flushing for cmd queue\n"); 1457 1439 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; 1458 1440 } 1441 + 1442 + writeq_relaxed(0, its->base + GITS_CWRITER); 1443 + writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); 1459 1444 1460 1445 if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { 1461 1446 its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
+1 -6
drivers/net/ethernet/marvell/mvneta.c
··· 2729 2729 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2730 2730 { 2731 2731 struct mvneta_port *pp = netdev_priv(dev); 2732 - int ret; 2733 2732 2734 2733 if (!pp->phy_dev) 2735 2734 return -ENOTSUPP; 2736 2735 2737 - ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd); 2738 - if (!ret) 2739 - mvneta_adjust_link(dev); 2740 - 2741 - return ret; 2736 + return phy_mii_ioctl(pp->phy_dev, ifr, cmd); 2742 2737 } 2743 2738 2744 2739 /* Ethtool methods */
+2 -1
drivers/net/ethernet/mellanox/mlx4/cmd.c
··· 725 725 * on the host, we deprecate the error message for this 726 726 * specific command/input_mod/opcode_mod/fw-status to be debug. 727 727 */ 728 - if (op == MLX4_CMD_SET_PORT && in_modifier == 1 && 728 + if (op == MLX4_CMD_SET_PORT && 729 + (in_modifier == 1 || in_modifier == 2) && 729 730 op_modifier == MLX4_SET_PORT_IB_OPCODE && 730 731 context->fw_status == CMD_STAT_BAD_SIZE) 731 732 mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
+1 -4
drivers/net/xen-netfront.c
··· 1008 1008 1009 1009 static int xennet_change_mtu(struct net_device *dev, int mtu) 1010 1010 { 1011 - int max = xennet_can_sg(dev) ? 1012 - XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN; 1011 + int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN; 1013 1012 1014 1013 if (mtu > max) 1015 1014 return -EINVAL; ··· 1277 1278 1278 1279 netdev->ethtool_ops = &xennet_ethtool_ops; 1279 1280 SET_NETDEV_DEV(netdev, &dev->dev); 1280 - 1281 - netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER); 1282 1281 1283 1282 np->netdev = netdev; 1284 1283
+8 -3
drivers/of/address.c
··· 450 450 return NULL; 451 451 } 452 452 453 - static int of_empty_ranges_quirk(void) 453 + static int of_empty_ranges_quirk(struct device_node *np) 454 454 { 455 455 if (IS_ENABLED(CONFIG_PPC)) { 456 - /* To save cycles, we cache the result */ 456 + /* To save cycles, we cache the result for global "Mac" setting */ 457 457 static int quirk_state = -1; 458 458 459 + /* PA-SEMI sdc DT bug */ 460 + if (of_device_is_compatible(np, "1682m-sdc")) 461 + return true; 462 + 463 + /* Make quirk cached */ 459 464 if (quirk_state < 0) 460 465 quirk_state = 461 466 of_machine_is_compatible("Power Macintosh") || ··· 495 490 * This code is only enabled on powerpc. --gcl 496 491 */ 497 492 ranges = of_get_property(parent, rprop, &rlen); 498 - if (ranges == NULL && !of_empty_ranges_quirk()) { 493 + if (ranges == NULL && !of_empty_ranges_quirk(parent)) { 499 494 pr_debug("OF: no ranges; cannot translate\n"); 500 495 return 1; 501 496 }
+1
drivers/staging/iio/Kconfig
··· 38 38 config IIO_SIMPLE_DUMMY_BUFFER 39 39 bool "Buffered capture support" 40 40 select IIO_BUFFER 41 + select IIO_TRIGGER 41 42 select IIO_KFIFO_BUF 42 43 help 43 44 Add buffered data capture to the simple dummy driver.
+1
drivers/staging/iio/magnetometer/hmc5843_core.c
··· 592 592 mutex_init(&data->lock); 593 593 594 594 indio_dev->dev.parent = dev; 595 + indio_dev->name = dev->driver->name; 595 596 indio_dev->info = &hmc5843_info; 596 597 indio_dev->modes = INDIO_DIRECT_MODE; 597 598 indio_dev->channels = data->variant->channels;
+5
drivers/tty/serial/fsl_lpuart.c
··· 921 921 writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE, 922 922 sport->port.membase + UARTPFIFO); 923 923 924 + /* explicitly clear RDRF */ 925 + readb(sport->port.membase + UARTSR1); 926 + 924 927 /* flush Tx and Rx FIFO */ 925 928 writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH, 926 929 sport->port.membase + UARTCFIFO); ··· 1078 1075 1079 1076 sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) & 1080 1077 UARTPFIFO_FIFOSIZE_MASK) + 1); 1078 + 1079 + sport->port.fifosize = sport->txfifo_size; 1081 1080 1082 1081 sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) & 1083 1082 UARTPFIFO_FIFOSIZE_MASK) + 1);
+1
drivers/tty/serial/samsung.c
··· 963 963 free_irq(ourport->tx_irq, ourport); 964 964 tx_enabled(port) = 0; 965 965 ourport->tx_claimed = 0; 966 + ourport->tx_mode = 0; 966 967 } 967 968 968 969 if (ourport->rx_claimed) {
+8 -1
drivers/usb/host/xhci-hub.c
··· 387 387 status = PORT_PLC; 388 388 port_change_bit = "link state"; 389 389 break; 390 + case USB_PORT_FEAT_C_PORT_CONFIG_ERROR: 391 + status = PORT_CEC; 392 + port_change_bit = "config error"; 393 + break; 390 394 default: 391 395 /* Should never happen */ 392 396 return; ··· 592 588 status |= USB_PORT_STAT_C_LINK_STATE << 16; 593 589 if ((raw_port_status & PORT_WRC)) 594 590 status |= USB_PORT_STAT_C_BH_RESET << 16; 591 + if ((raw_port_status & PORT_CEC)) 592 + status |= USB_PORT_STAT_C_CONFIG_ERROR << 16; 595 593 } 596 594 597 595 if (hcd->speed != HCD_USB3) { ··· 1011 1005 case USB_PORT_FEAT_C_OVER_CURRENT: 1012 1006 case USB_PORT_FEAT_C_ENABLE: 1013 1007 case USB_PORT_FEAT_C_PORT_LINK_STATE: 1008 + case USB_PORT_FEAT_C_PORT_CONFIG_ERROR: 1014 1009 xhci_clear_port_change_bit(xhci, wValue, wIndex, 1015 1010 port_array[wIndex], temp); 1016 1011 break; ··· 1076 1069 */ 1077 1070 status = bus_state->resuming_ports; 1078 1071 1079 - mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC; 1072 + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC; 1080 1073 1081 1074 spin_lock_irqsave(&xhci->lock, flags); 1082 1075 /* For each port, did anything change? If so, set that bit in buf. */
+1 -1
drivers/usb/host/xhci-pci.c
··· 115 115 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 116 116 xhci->quirks |= XHCI_LPM_SUPPORT; 117 117 xhci->quirks |= XHCI_INTEL_HOST; 118 + xhci->quirks |= XHCI_AVOID_BEI; 118 119 } 119 120 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 120 121 pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { ··· 131 130 * PPT chipsets. 132 131 */ 133 132 xhci->quirks |= XHCI_SPURIOUS_REBOOT; 134 - xhci->quirks |= XHCI_AVOID_BEI; 135 133 } 136 134 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 137 135 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+1 -1
drivers/usb/isp1760/isp1760-udc.c
··· 1203 1203 1204 1204 if (udc->driver) { 1205 1205 dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); 1206 - spin_unlock(&udc->lock); 1206 + spin_unlock_irqrestore(&udc->lock, flags); 1207 1207 return -EBUSY; 1208 1208 } 1209 1209
+7 -2
drivers/usb/serial/ftdi_sio.c
··· 604 604 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 605 605 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), 606 606 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 607 + { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, 607 608 /* 608 609 * ELV devices: 609 610 */ ··· 1884 1883 { 1885 1884 struct usb_device *udev = serial->dev; 1886 1885 1887 - if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) || 1888 - (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2"))) 1886 + if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) 1887 + return ftdi_jtag_probe(serial); 1888 + 1889 + if (udev->product && 1890 + (!strcmp(udev->product, "BeagleBone/XDS100V2") || 1891 + !strcmp(udev->product, "SNAP Connect E10"))) 1889 1892 return ftdi_jtag_probe(serial); 1890 1893 1891 1894 return 0;
+6
drivers/usb/serial/ftdi_sio_ids.h
··· 561 561 */ 562 562 #define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ 563 563 564 + /* 565 + * Synapse Wireless product ids (FTDI_VID) 566 + * http://www.synapse-wireless.com 567 + */ 568 + #define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */ 569 + 564 570 565 571 /********************************/ 566 572 /** third-party VID/PID combos **/
+3
drivers/usb/serial/keyspan_pda.c
··· 61 61 /* For Xircom PGSDB9 and older Entrega version of the same device */ 62 62 #define XIRCOM_VENDOR_ID 0x085a 63 63 #define XIRCOM_FAKE_ID 0x8027 64 + #define XIRCOM_FAKE_ID_2 0x8025 /* "PGMFHUB" serial */ 64 65 #define ENTREGA_VENDOR_ID 0x1645 65 66 #define ENTREGA_FAKE_ID 0x8093 66 67 ··· 71 70 #endif 72 71 #ifdef XIRCOM 73 72 { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, 73 + { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) }, 74 74 { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, 75 75 #endif 76 76 { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) }, ··· 95 93 #ifdef XIRCOM 96 94 static const struct usb_device_id id_table_fake_xircom[] = { 97 95 { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, 96 + { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) }, 98 97 { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, 99 98 { } 100 99 };
+17
drivers/xen/Kconfig
··· 55 55 

56 56 

In that case step 3 should be omitted.

58 + config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
59 + int "Hotplugged memory limit (in GiB) for a PV guest"
60 + default 512 if X86_64
61 + default 4 if X86_32
62 + range 0 64 if X86_32
63 + depends on XEN_HAVE_PVMMU
64 + depends on XEN_BALLOON_MEMORY_HOTPLUG
65 + help
66 + Maximum amount of memory (in GiB) that a PV guest can be
67 + expanded to when using memory hotplug.
68 + 
69 + A PV guest can have more memory than this limit if it is
70 + started with a larger maximum.
71 + 
72 + This value is used to allocate enough space in internal
73 + tables needed for physical memory administration.
74 + 
58 75 config XEN_SCRUB_PAGES
59 76 bool "Scrub pages before returning them to system"
60 77 depends on XEN_BALLOON
+23
drivers/xen/balloon.c
··· 229 229 balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION); 230 230 nid = memory_add_physaddr_to_nid(hotplug_start_paddr); 231 231 232 + #ifdef CONFIG_XEN_HAVE_PVMMU 233 + /* 234 + * add_memory() will build page tables for the new memory so 235 + * the p2m must contain invalid entries so the correct 236 + * non-present PTEs will be written. 237 + * 238 + * If a failure occurs, the original (identity) p2m entries 239 + * are not restored since this region is now known not to 240 + * conflict with any devices. 241 + */ 242 + if (!xen_feature(XENFEAT_auto_translated_physmap)) { 243 + unsigned long pfn, i; 244 + 245 + pfn = PFN_DOWN(hotplug_start_paddr); 246 + for (i = 0; i < balloon_hotplug; i++) { 247 + if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) { 248 + pr_warn("set_phys_to_machine() failed, no memory added\n"); 249 + return BP_ECANCELED; 250 + } 251 + } 252 + } 253 + #endif 254 + 232 255 rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT); 233 256 234 257 if (rc) {
+5 -1
fs/cifs/cifsencrypt.c
··· 1 1 /* 2 2 * fs/cifs/cifsencrypt.c 3 3 * 4 + * Encryption and hashing operations relating to NTLM, NTLMv2. See MS-NLMP 5 + * for more detailed information 6 + * 4 7 * Copyright (C) International Business Machines Corp., 2005,2013 5 8 * Author(s): Steve French (sfrench@us.ibm.com) 6 9 * ··· 518 515 __func__); 519 516 return rc; 520 517 } 521 - } else if (ses->serverName) { 518 + } else { 519 + /* We use ses->serverName if no domain name available */ 522 520 len = strlen(ses->serverName); 523 521 524 522 server = kmalloc(2 + (len * 2), GFP_KERNEL);
+11 -2
fs/cifs/connect.c
··· 1599 1599 pr_warn("CIFS: username too long\n"); 1600 1600 goto cifs_parse_mount_err; 1601 1601 } 1602 + 1603 + kfree(vol->username); 1602 1604 vol->username = kstrdup(string, GFP_KERNEL); 1603 1605 if (!vol->username) 1604 1606 goto cifs_parse_mount_err; ··· 1702 1700 goto cifs_parse_mount_err; 1703 1701 } 1704 1702 1703 + kfree(vol->domainname); 1705 1704 vol->domainname = kstrdup(string, GFP_KERNEL); 1706 1705 if (!vol->domainname) { 1707 1706 pr_warn("CIFS: no memory for domainname\n"); ··· 1734 1731 } 1735 1732 1736 1733 if (strncasecmp(string, "default", 7) != 0) { 1734 + kfree(vol->iocharset); 1737 1735 vol->iocharset = kstrdup(string, 1738 1736 GFP_KERNEL); 1739 1737 if (!vol->iocharset) { ··· 2917 2913 * calling name ends in null (byte 16) from old smb 2918 2914 * convention. 2919 2915 */ 2920 - if (server->workstation_RFC1001_name && 2921 - server->workstation_RFC1001_name[0] != 0) 2916 + if (server->workstation_RFC1001_name[0] != 0) 2922 2917 rfc1002mangle(ses_init_buf->trailer. 2923 2918 session_req.calling_name, 2924 2919 server->workstation_RFC1001_name, ··· 3695 3692 #endif /* CIFS_WEAK_PW_HASH */ 3696 3693 rc = SMBNTencrypt(tcon->password, ses->server->cryptkey, 3697 3694 bcc_ptr, nls_codepage); 3695 + if (rc) { 3696 + cifs_dbg(FYI, "%s Can't generate NTLM rsp. Error: %d\n", 3697 + __func__, rc); 3698 + cifs_buf_release(smb_buffer); 3699 + return rc; 3700 + } 3698 3701 3699 3702 bcc_ptr += CIFS_AUTH_RESP_SIZE; 3700 3703 if (ses->capabilities & CAP_UNICODE) {
+1
fs/cifs/file.c
··· 1823 1823 cifsFileInfo_put(inv_file); 1824 1824 spin_lock(&cifs_file_list_lock); 1825 1825 ++refind; 1826 + inv_file = NULL; 1826 1827 goto refind_writable; 1827 1828 } 1828 1829 }
+2
fs/cifs/inode.c
··· 771 771 cifs_buf_release(srchinf->ntwrk_buf_start); 772 772 } 773 773 kfree(srchinf); 774 + if (rc) 775 + goto cgii_exit; 774 776 } else 775 777 goto cgii_exit; 776 778
+1 -1
fs/cifs/smb2misc.c
··· 322 322 323 323 /* return pointer to beginning of data area, ie offset from SMB start */ 324 324 if ((*off != 0) && (*len != 0)) 325 - return hdr->ProtocolId + *off; 325 + return (char *)(&hdr->ProtocolId[0]) + *off; 326 326 else 327 327 return NULL; 328 328 }
+2 -1
fs/cifs/smb2ops.c
··· 684 684 685 685 /* No need to change MaxChunks since already set to 1 */ 686 686 chunk_sizes_updated = true; 687 - } 687 + } else 688 + goto cchunk_out; 688 689 } 689 690 690 691 cchunk_out:
+10 -7
fs/cifs/smb2pdu.c
··· 1218 1218 struct smb2_ioctl_req *req; 1219 1219 struct smb2_ioctl_rsp *rsp; 1220 1220 struct TCP_Server_Info *server; 1221 - struct cifs_ses *ses = tcon->ses; 1221 + struct cifs_ses *ses; 1222 1222 struct kvec iov[2]; 1223 1223 int resp_buftype; 1224 1224 int num_iovecs; ··· 1232 1232 /* zero out returned data len, in case of error */ 1233 1233 if (plen) 1234 1234 *plen = 0; 1235 + 1236 + if (tcon) 1237 + ses = tcon->ses; 1238 + else 1239 + return -EIO; 1235 1240 1236 1241 if (ses && (ses->server)) 1237 1242 server = ses->server; ··· 1301 1296 rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base; 1302 1297 1303 1298 if ((rc != 0) && (rc != -EINVAL)) { 1304 - if (tcon) 1305 - cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 1299 + cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 1306 1300 goto ioctl_exit; 1307 1301 } else if (rc == -EINVAL) { 1308 1302 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) && 1309 1303 (opcode != FSCTL_SRV_COPYCHUNK)) { 1310 - if (tcon) 1311 - cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 1304 + cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 1312 1305 goto ioctl_exit; 1313 1306 } 1314 1307 } ··· 1632 1629 1633 1630 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0); 1634 1631 1635 - if ((rc != 0) && tcon) 1632 + if (rc != 0) 1636 1633 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); 1637 1634 1638 1635 free_rsp_buf(resp_buftype, iov[0].iov_base); ··· 2117 2114 struct kvec iov[2]; 2118 2115 int rc = 0; 2119 2116 int len; 2120 - int resp_buftype; 2117 + int resp_buftype = CIFS_NO_BUFFER; 2121 2118 unsigned char *bufptr; 2122 2119 struct TCP_Server_Info *server; 2123 2120 struct cifs_ses *ses = tcon->ses;
+17
include/linux/irqchip/arm-gic-v3.h
··· 126 126 #define GICR_PROPBASER_WaWb (5U << 7) 127 127 #define GICR_PROPBASER_RaWaWt (6U << 7) 128 128 #define GICR_PROPBASER_RaWaWb (7U << 7) 129 + #define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7) 129 130 #define GICR_PROPBASER_IDBITS_MASK (0x1f) 131 + 132 + #define GICR_PENDBASER_NonShareable (0U << 10) 133 + #define GICR_PENDBASER_InnerShareable (1U << 10) 134 + #define GICR_PENDBASER_OuterShareable (2U << 10) 135 + #define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10) 136 + #define GICR_PENDBASER_nCnB (0U << 7) 137 + #define GICR_PENDBASER_nC (1U << 7) 138 + #define GICR_PENDBASER_RaWt (2U << 7) 139 + #define GICR_PENDBASER_RaWb (3U << 7) 140 + #define GICR_PENDBASER_WaWt (4U << 7) 141 + #define GICR_PENDBASER_WaWb (5U << 7) 142 + #define GICR_PENDBASER_RaWaWt (6U << 7) 143 + #define GICR_PENDBASER_RaWaWb (7U << 7) 144 + #define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7) 130 145 131 146 /* 132 147 * Re-Distributor registers, offsets from SGI_base ··· 197 182 #define GITS_CBASER_WaWb (5UL << 59) 198 183 #define GITS_CBASER_RaWaWt (6UL << 59) 199 184 #define GITS_CBASER_RaWaWb (7UL << 59) 185 + #define GITS_CBASER_CACHEABILITY_MASK (7UL << 59) 200 186 #define GITS_CBASER_NonShareable (0UL << 10) 201 187 #define GITS_CBASER_InnerShareable (1UL << 10) 202 188 #define GITS_CBASER_OuterShareable (2UL << 10) ··· 214 198 #define GITS_BASER_WaWb (5UL << 59) 215 199 #define GITS_BASER_RaWaWt (6UL << 59) 216 200 #define GITS_BASER_RaWaWb (7UL << 59) 201 + #define GITS_BASER_CACHEABILITY_MASK (7UL << 59) 217 202 #define GITS_BASER_TYPE_SHIFT (56) 218 203 #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) 219 204 #define GITS_BASER_ENTRY_SIZE_SHIFT (48)
+1
include/linux/lcm.h
··· 4 4 #include <linux/compiler.h> 5 5 6 6 unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__; 7 + unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__; 7 8 8 9 #endif /* _LCM_H */
+6
include/linux/netdevice.h
··· 2182 2182 void synchronize_net(void); 2183 2183 int init_dummy_netdev(struct net_device *dev); 2184 2184 2185 + DECLARE_PER_CPU(int, xmit_recursion); 2186 + static inline int dev_recursion_level(void) 2187 + { 2188 + return this_cpu_read(xmit_recursion); 2189 + } 2190 + 2185 2191 struct net_device *dev_get_by_index(struct net *net, int ifindex); 2186 2192 struct net_device *__dev_get_by_index(struct net *net, int ifindex); 2187 2193 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-16
include/net/ip.h
··· 455 455 456 456 #endif 457 457 458 - static inline int sk_mc_loop(struct sock *sk) 459 - { 460 - if (!sk) 461 - return 1; 462 - switch (sk->sk_family) { 463 - case AF_INET: 464 - return inet_sk(sk)->mc_loop; 465 - #if IS_ENABLED(CONFIG_IPV6) 466 - case AF_INET6: 467 - return inet6_sk(sk)->mc_loop; 468 - #endif 469 - } 470 - WARN_ON(1); 471 - return 1; 472 - } 473 - 474 458 bool ip_call_ra_chain(struct sk_buff *skb); 475 459 476 460 /*
+2 -1
include/net/ip6_route.h
··· 174 174 175 175 static inline int ip6_skb_dst_mtu(struct sk_buff *skb) 176 176 { 177 - struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; 177 + struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? 178 + inet6_sk(skb->sk) : NULL; 178 179 179 180 return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ? 180 181 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+2
include/net/sock.h
··· 1762 1762 1763 1763 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); 1764 1764 1765 + bool sk_mc_loop(struct sock *sk); 1766 + 1765 1767 static inline bool sk_can_gso(const struct sock *sk) 1766 1768 { 1767 1769 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
+2 -1
include/uapi/linux/input.h
··· 973 973 */ 974 974 #define MT_TOOL_FINGER 0 975 975 #define MT_TOOL_PEN 1 976 - #define MT_TOOL_MAX 1 976 + #define MT_TOOL_PALM 2 977 + #define MT_TOOL_MAX 2 977 978 978 979 /* 979 980 * Values describing the status of a force-feedback effect
+11
lib/lcm.c
··· 12 12 return 0; 13 13 } 14 14 EXPORT_SYMBOL_GPL(lcm); 15 + 16 + unsigned long lcm_not_zero(unsigned long a, unsigned long b) 17 + { 18 + unsigned long l = lcm(a, b); 19 + 20 + if (l) 21 + return l; 22 + 23 + return (b ? : a); 24 + } 25 + EXPORT_SYMBOL_GPL(lcm_not_zero);
+3 -1
net/core/dev.c
··· 2870 2870 #define skb_update_prio(skb) 2871 2871 #endif 2872 2872 2873 - static DEFINE_PER_CPU(int, xmit_recursion); 2873 + DEFINE_PER_CPU(int, xmit_recursion); 2874 + EXPORT_SYMBOL(xmit_recursion); 2875 + 2874 2876 #define RECURSION_LIMIT 10 2875 2877 2876 2878 /**
+1 -1
net/core/fib_rules.c
··· 165 165 166 166 spin_lock(&net->rules_mod_lock); 167 167 list_del_rcu(&ops->list); 168 - fib_rules_cleanup_ops(ops); 169 168 spin_unlock(&net->rules_mod_lock); 170 169 170 + fib_rules_cleanup_ops(ops); 171 171 kfree_rcu(ops, rcu); 172 172 } 173 173 EXPORT_SYMBOL_GPL(fib_rules_unregister);
+12 -16
net/core/net_namespace.c
··· 198 198 */ 199 199 int peernet2id(struct net *net, struct net *peer) 200 200 { 201 - int id = __peernet2id(net, peer, true); 201 + bool alloc = atomic_read(&peer->count) == 0 ? false : true; 202 + int id; 202 203 204 + id = __peernet2id(net, peer, alloc); 203 205 return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED; 204 206 } 205 207 EXPORT_SYMBOL(peernet2id); ··· 340 338 static void cleanup_net(struct work_struct *work) 341 339 { 342 340 const struct pernet_operations *ops; 343 - struct net *net, *tmp, *peer; 341 + struct net *net, *tmp; 344 342 struct list_head net_kill_list; 345 343 LIST_HEAD(net_exit_list); 346 344 ··· 356 354 list_for_each_entry(net, &net_kill_list, cleanup_list) { 357 355 list_del_rcu(&net->list); 358 356 list_add_tail(&net->exit_list, &net_exit_list); 357 + for_each_net(tmp) { 358 + int id = __peernet2id(tmp, net, false); 359 + 360 + if (id >= 0) 361 + idr_remove(&tmp->netns_ids, id); 362 + } 363 + idr_destroy(&net->netns_ids); 364 + 359 365 } 360 366 rtnl_unlock(); 361 367 ··· 389 379 */ 390 380 rcu_barrier(); 391 381 392 - rtnl_lock(); 393 382 /* Finally it is safe to free my network namespace structure */ 394 383 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { 395 - /* Unreference net from all peers (no need to loop over 396 - * net_exit_list because idr_destroy() will be called for each 397 - * element of this list. 398 - */ 399 - for_each_net(peer) { 400 - int id = __peernet2id(peer, net, false); 401 - 402 - if (id >= 0) 403 - idr_remove(&peer->netns_ids, id); 404 - } 405 - idr_destroy(&net->netns_ids); 406 - 407 384 list_del_init(&net->exit_list); 408 385 put_user_ns(net->user_ns); 409 386 net_drop_ns(net); 410 387 } 411 - rtnl_unlock(); 412 388 } 413 389 static DECLARE_WORK(net_cleanup_work, cleanup_net); 414 390
+19
net/core/sock.c
··· 653 653 sock_reset_flag(sk, bit); 654 654 } 655 655 656 + bool sk_mc_loop(struct sock *sk) 657 + { 658 + if (dev_recursion_level()) 659 + return false; 660 + if (!sk) 661 + return true; 662 + switch (sk->sk_family) { 663 + case AF_INET: 664 + return inet_sk(sk)->mc_loop; 665 + #if IS_ENABLED(CONFIG_IPV6) 666 + case AF_INET6: 667 + return inet6_sk(sk)->mc_loop; 668 + #endif 669 + } 670 + WARN_ON(1); 671 + return true; 672 + } 673 + EXPORT_SYMBOL(sk_mc_loop); 674 + 656 675 /* 657 676 * This is meant for all protocols to use and covers goings on 658 677 * at the socket level. Everything here is generic.
+2
net/decnet/dn_rules.c
··· 248 248 249 249 void __exit dn_fib_rules_cleanup(void) 250 250 { 251 + rtnl_lock(); 251 252 fib_rules_unregister(dn_fib_rules_ops); 253 + rtnl_unlock(); 252 254 rcu_barrier(); 253 255 } 254 256
+7 -16
net/dsa/dsa.c
··· 513 513 #ifdef CONFIG_OF 514 514 static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, 515 515 struct dsa_chip_data *cd, 516 - int chip_index, 516 + int chip_index, int port_index, 517 517 struct device_node *link) 518 518 { 519 - int ret; 520 519 const __be32 *reg; 521 - int link_port_addr; 522 520 int link_sw_addr; 523 521 struct device_node *parent_sw; 524 522 int len; ··· 529 531 if (!reg || (len != sizeof(*reg) * 2)) 530 532 return -EINVAL; 531 533 534 + /* 535 + * Get the destination switch number from the second field of its 'reg' 536 + * property, i.e. for "reg = <0x19 1>" sw_addr is '1'. 537 + */ 532 538 link_sw_addr = be32_to_cpup(reg + 1); 533 539 534 540 if (link_sw_addr >= pd->nr_chips) ··· 549 547 memset(cd->rtable, -1, pd->nr_chips * sizeof(s8)); 550 548 } 551 549 552 - reg = of_get_property(link, "reg", NULL); 553 - if (!reg) { 554 - ret = -EINVAL; 555 - goto out; 556 - } 557 - 558 - link_port_addr = be32_to_cpup(reg); 559 - 560 - cd->rtable[link_sw_addr] = link_port_addr; 550 + cd->rtable[link_sw_addr] = port_index; 561 551 562 552 return 0; 563 - out: 564 - kfree(cd->rtable); 565 - return ret; 566 553 } 567 554 568 555 static void dsa_of_free_platform_data(struct dsa_platform_data *pd) ··· 661 670 if (!strcmp(port_name, "dsa") && link && 662 671 pd->nr_chips > 1) { 663 672 ret = dsa_of_setup_routing_table(pd, cd, 664 - chip_index, link); 673 + chip_index, port_index, link); 665 674 if (ret) 666 675 goto out_free_chip; 667 676 }
-2
net/ipv4/fib_frontend.c
··· 1175 1175 unsigned int i; 1176 1176 1177 1177 rtnl_lock(); 1178 - 1179 1178 #ifdef CONFIG_IP_MULTIPLE_TABLES 1180 1179 RCU_INIT_POINTER(net->ipv4.fib_local, NULL); 1181 1180 RCU_INIT_POINTER(net->ipv4.fib_main, NULL); 1182 1181 RCU_INIT_POINTER(net->ipv4.fib_default, NULL); 1183 1182 #endif 1184 - 1185 1183 for (i = 0; i < FIB_TABLE_HASHSZ; i++) { 1186 1184 struct hlist_head *head = &net->ipv4.fib_table_hash[i]; 1187 1185 struct hlist_node *tmp;
+5
net/ipv4/ipmr.c
··· 276 276 { 277 277 struct mr_table *mrt, *next; 278 278 279 + rtnl_lock(); 279 280 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { 280 281 list_del(&mrt->list); 281 282 ipmr_free_table(mrt); 282 283 } 283 284 fib_rules_unregister(net->ipv4.mr_rules_ops); 285 + rtnl_unlock(); 284 286 } 285 287 #else 286 288 #define ipmr_for_each_table(mrt, net) \ ··· 308 306 309 307 static void __net_exit ipmr_rules_exit(struct net *net) 310 308 { 309 + rtnl_lock(); 311 310 ipmr_free_table(net->ipv4.mrt); 311 + net->ipv4.mrt = NULL; 312 + rtnl_unlock(); 312 313 } 313 314 #endif 314 315
+4 -3
net/ipv4/tcp_input.c
··· 3105 3105 if (!first_ackt.v64) 3106 3106 first_ackt = last_ackt; 3107 3107 3108 - if (!(sacked & TCPCB_SACKED_ACKED)) 3108 + if (!(sacked & TCPCB_SACKED_ACKED)) { 3109 3109 reord = min(pkts_acked, reord); 3110 - if (!after(scb->end_seq, tp->high_seq)) 3111 - flag |= FLAG_ORIG_SACK_ACKED; 3110 + if (!after(scb->end_seq, tp->high_seq)) 3111 + flag |= FLAG_ORIG_SACK_ACKED; 3112 + } 3112 3113 } 3113 3114 3114 3115 if (sacked & TCPCB_SACKED_ACKED)
+2
net/ipv6/fib6_rules.c
··· 315 315 316 316 static void __net_exit fib6_rules_net_exit(struct net *net) 317 317 { 318 + rtnl_lock(); 318 319 fib_rules_unregister(net->ipv6.fib6_rules_ops); 320 + rtnl_unlock(); 319 321 } 320 322 321 323 static struct pernet_operations fib6_rules_net_ops = {
+2 -1
net/ipv6/ip6_output.c
··· 542 542 { 543 543 struct sk_buff *frag; 544 544 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); 545 - struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; 545 + struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? 546 + inet6_sk(skb->sk) : NULL; 546 547 struct ipv6hdr *tmp_hdr; 547 548 struct frag_hdr *fh; 548 549 unsigned int mtu, hlen, left, len;
+2 -2
net/ipv6/ip6mr.c
··· 265 265 list_del(&mrt->list); 266 266 ip6mr_free_table(mrt); 267 267 } 268 - rtnl_unlock(); 269 268 fib_rules_unregister(net->ipv6.mr6_rules_ops); 269 + rtnl_unlock(); 270 270 } 271 271 #else 272 272 #define ip6mr_for_each_table(mrt, net) \ ··· 334 334 335 335 static void ip6mr_free_table(struct mr6_table *mrt) 336 336 { 337 - del_timer(&mrt->ipmr_expire_timer); 337 + del_timer_sync(&mrt->ipmr_expire_timer); 338 338 mroute_clean_tables(mrt); 339 339 kfree(mrt); 340 340 }
+1
net/l2tp/l2tp_core.c
··· 1871 1871 l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0); 1872 1872 if (!l2tp_wq) { 1873 1873 pr_err("alloc_workqueue failed\n"); 1874 + unregister_pernet_device(&l2tp_net_ops); 1874 1875 rc = -ENOMEM; 1875 1876 goto out; 1876 1877 }