Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'perf/urgent' into perf/core to fix conflicts

Conflicts:
tools/perf/bench/numa.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+730 -682
+3 -50
MAINTAINERS
··· 8917 8917 S: Maintained 8918 8918 F: drivers/net/usb/rtl8150.c 8919 8919 8920 - USB SERIAL BELKIN F5U103 DRIVER 8921 - M: William Greathouse <wgreathouse@smva.com> 8920 + USB SERIAL SUBSYSTEM 8921 + M: Johan Hovold <jhovold@gmail.com> 8922 8922 L: linux-usb@vger.kernel.org 8923 8923 S: Maintained 8924 - F: drivers/usb/serial/belkin_sa.* 8925 - 8926 - USB SERIAL CYPRESS M8 DRIVER 8927 - M: Lonnie Mendez <dignome@gmail.com> 8928 - L: linux-usb@vger.kernel.org 8929 - S: Maintained 8930 - W: http://geocities.com/i0xox0i 8931 - W: http://firstlight.net/cvs 8932 - F: drivers/usb/serial/cypress_m8.* 8933 - 8934 - USB SERIAL CYBERJACK DRIVER 8935 - M: Matthias Bruestle and Harald Welte <support@reiner-sct.com> 8936 - W: http://www.reiner-sct.de/support/treiber_cyberjack.php 8937 - S: Maintained 8938 - F: drivers/usb/serial/cyberjack.c 8939 - 8940 - USB SERIAL DIGI ACCELEPORT DRIVER 8941 - M: Peter Berger <pberger@brimson.com> 8942 - M: Al Borchers <alborchers@steinerpoint.com> 8943 - L: linux-usb@vger.kernel.org 8944 - S: Maintained 8945 - F: drivers/usb/serial/digi_acceleport.c 8946 - 8947 - USB SERIAL DRIVER 8948 - M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 8949 - L: linux-usb@vger.kernel.org 8950 - S: Supported 8951 8924 F: Documentation/usb/usb-serial.txt 8952 - F: drivers/usb/serial/generic.c 8953 - F: drivers/usb/serial/usb-serial.c 8925 + F: drivers/usb/serial/ 8954 8926 F: include/linux/usb/serial.h 8955 - 8956 - USB SERIAL EMPEG EMPEG-CAR MARK I/II DRIVER 8957 - M: Gary Brubaker <xavyer@ix.netcom.com> 8958 - L: linux-usb@vger.kernel.org 8959 - S: Maintained 8960 - F: drivers/usb/serial/empeg.c 8961 - 8962 - USB SERIAL KEYSPAN DRIVER 8963 - M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 8964 - L: linux-usb@vger.kernel.org 8965 - S: Maintained 8966 - F: drivers/usb/serial/*keyspan* 8967 - 8968 - USB SERIAL WHITEHEAT DRIVER 8969 - M: Support Department <support@connecttech.com> 8970 - L: linux-usb@vger.kernel.org 8971 - W: http://www.connecttech.com 
8972 - S: Supported 8973 - F: drivers/usb/serial/whiteheat* 8974 8927 8975 8928 USB SMSC75XX ETHERNET DRIVER 8976 8929 M: Steve Glendinning <steve.glendinning@shawell.net>
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 12 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc7 4 + EXTRAVERSION = 5 5 NAME = One Giant Leap for Frogkind 6 6 7 7 # *DOCUMENTATION*
+3 -3
arch/arc/mm/fault.c
··· 17 17 #include <asm/pgalloc.h> 18 18 #include <asm/mmu.h> 19 19 20 - static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address) 20 + static int handle_vmalloc_fault(unsigned long address) 21 21 { 22 22 /* 23 23 * Synchronize this task's top level page-table ··· 27 27 pud_t *pud, *pud_k; 28 28 pmd_t *pmd, *pmd_k; 29 29 30 - pgd = pgd_offset_fast(mm, address); 30 + pgd = pgd_offset_fast(current->active_mm, address); 31 31 pgd_k = pgd_offset_k(address); 32 32 33 33 if (!pgd_present(*pgd_k)) ··· 72 72 * nothing more. 73 73 */ 74 74 if (address >= VMALLOC_START && address <= VMALLOC_END) { 75 - ret = handle_vmalloc_fault(mm, address); 75 + ret = handle_vmalloc_fault(address); 76 76 if (unlikely(ret)) 77 77 goto bad_area_nosemaphore; 78 78 else
+2 -2
arch/mips/kernel/perf_event_mipsxx.c
··· 971 971 [C(LL)] = { 972 972 [C(OP_READ)] = { 973 973 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, 974 - [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P }, 974 + [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P }, 975 975 }, 976 976 [C(OP_WRITE)] = { 977 977 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, 978 - [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P }, 978 + [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P }, 979 979 }, 980 980 }, 981 981 [C(ITLB)] = {
+5 -4
arch/mips/mti-malta/malta-int.c
··· 473 473 { 474 474 int cpu; 475 475 476 - for (cpu = 0; cpu < NR_CPUS; cpu++) { 476 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) { 477 477 fill_ipi_map1(gic_resched_int_base, cpu, GIC_CPU_INT1); 478 478 fill_ipi_map1(gic_call_int_base, cpu, GIC_CPU_INT2); 479 479 } ··· 574 574 /* FIXME */ 575 575 int i; 576 576 #if defined(CONFIG_MIPS_MT_SMP) 577 - gic_call_int_base = GIC_NUM_INTRS - NR_CPUS; 578 - gic_resched_int_base = gic_call_int_base - NR_CPUS; 577 + gic_call_int_base = GIC_NUM_INTRS - 578 + (NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids; 579 + gic_resched_int_base = gic_call_int_base - nr_cpu_ids; 579 580 fill_ipi_map(); 580 581 #endif 581 582 gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map, ··· 600 599 printk("CPU%d: status register now %08x\n", smp_processor_id(), read_c0_status()); 601 600 write_c0_status(0x1100dc00); 602 601 printk("CPU%d: status register frc %08x\n", smp_processor_id(), read_c0_status()); 603 - for (i = 0; i < NR_CPUS; i++) { 602 + for (i = 0; i < nr_cpu_ids; i++) { 604 603 arch_init_ipiirq(MIPS_GIC_IRQ_BASE + 605 604 GIC_RESCHED_INT(i), &irq_resched); 606 605 arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
+1 -1
arch/mips/ralink/timer.c
··· 126 126 return -ENOENT; 127 127 } 128 128 129 - rt->membase = devm_request_and_ioremap(&pdev->dev, res); 129 + rt->membase = devm_ioremap_resource(&pdev->dev, res); 130 130 if (IS_ERR(rt->membase)) 131 131 return PTR_ERR(rt->membase); 132 132
+3 -1
arch/um/kernel/exitcode.c
··· 40 40 const char __user *buffer, size_t count, loff_t *pos) 41 41 { 42 42 char *end, buf[sizeof("nnnnn\0")]; 43 + size_t size; 43 44 int tmp; 44 45 45 - if (copy_from_user(buf, buffer, count)) 46 + size = min(count, sizeof(buf)); 47 + if (copy_from_user(buf, buffer, size)) 46 48 return -EFAULT; 47 49 48 50 tmp = simple_strtol(buf, &end, 0);
+2 -1
arch/x86/include/asm/percpu.h
··· 128 128 do { \ 129 129 typedef typeof(var) pao_T__; \ 130 130 const int pao_ID__ = (__builtin_constant_p(val) && \ 131 - ((val) == 1 || (val) == -1)) ? (val) : 0; \ 131 + ((val) == 1 || (val) == -1)) ? \ 132 + (int)(val) : 0; \ 132 133 if (0) { \ 133 134 pao_T__ pao_tmp__; \ 134 135 pao_tmp__ = (val); \
+3 -3
arch/x86/kernel/cpu/perf_event.c
··· 1276 1276 static int __kprobes 1277 1277 perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) 1278 1278 { 1279 - int ret; 1280 1279 u64 start_clock; 1281 1280 u64 finish_clock; 1281 + int ret; 1282 1282 1283 1283 if (!atomic_read(&active_events)) 1284 1284 return NMI_DONE; 1285 1285 1286 - start_clock = local_clock(); 1286 + start_clock = sched_clock(); 1287 1287 ret = x86_pmu.handle_irq(regs); 1288 - finish_clock = local_clock(); 1288 + finish_clock = sched_clock(); 1289 1289 1290 1290 perf_sample_event_took(finish_clock - start_clock); 1291 1291
+1 -1
arch/x86/kernel/kvm.c
··· 609 609 610 610 struct dentry *kvm_init_debugfs(void) 611 611 { 612 - d_kvm_debug = debugfs_create_dir("kvm", NULL); 612 + d_kvm_debug = debugfs_create_dir("kvm-guest", NULL); 613 613 if (!d_kvm_debug) 614 614 printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n"); 615 615
+2 -2
arch/x86/kernel/nmi.c
··· 113 113 u64 before, delta, whole_msecs; 114 114 int remainder_ns, decimal_msecs, thishandled; 115 115 116 - before = local_clock(); 116 + before = sched_clock(); 117 117 thishandled = a->handler(type, regs); 118 118 handled += thishandled; 119 - delta = local_clock() - before; 119 + delta = sched_clock() - before; 120 120 trace_nmi_handler(a->handler, (int)delta, thishandled); 121 121 122 122 if (delta < nmi_longest_ns)
+30 -19
arch/xtensa/kernel/entry.S
··· 1122 1122 * a3: exctable, original value in excsave1 1123 1123 */ 1124 1124 1125 - fast_syscall_spill_registers_fixup: 1125 + ENTRY(fast_syscall_spill_registers_fixup) 1126 1126 1127 1127 rsr a2, windowbase # get current windowbase (a2 is saved) 1128 1128 xsr a0, depc # restore depc and a0 ··· 1134 1134 */ 1135 1135 1136 1136 xsr a3, excsave1 # get spill-mask 1137 - slli a2, a3, 1 # shift left by one 1137 + slli a3, a3, 1 # shift left by one 1138 1138 1139 - slli a3, a2, 32-WSBITS 1140 - src a2, a2, a3 # a1 = xxwww1yyxxxwww1yy...... 1139 + slli a2, a3, 32-WSBITS 1140 + src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... 1141 1141 wsr a2, windowstart # set corrected windowstart 1142 1142 1143 - rsr a3, excsave1 1144 - l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2 1145 - l32i a3, a3, EXC_TABLE_PARAM # original WB (in user task) 1143 + srli a3, a3, 1 1144 + rsr a2, excsave1 1145 + l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2 1146 + xsr a2, excsave1 1147 + s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3 1148 + l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task) 1149 + xsr a2, excsave1 1146 1150 1147 1151 /* Return to the original (user task) WINDOWBASE. 1148 1152 * We leave the following frame behind: 1149 1153 * a0, a1, a2 same 1150 - * a3: trashed (saved in excsave_1) 1154 + * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE) 1151 1155 * depc: depc (we have to return to that address) 1152 - * excsave_1: a3 1156 + * excsave_1: exctable 1153 1157 */ 1154 1158 1155 1159 wsr a3, windowbase ··· 1163 1159 * a0: return address 1164 1160 * a1: used, stack pointer 1165 1161 * a2: kernel stack pointer 1166 - * a3: available, saved in EXCSAVE_1 1162 + * a3: available 1167 1163 * depc: exception address 1168 - * excsave: a3 1164 + * excsave: exctable 1169 1165 * Note: This frame might be the same as above. 
1170 1166 */ 1171 1167 ··· 1185 1181 rsr a0, exccause 1186 1182 addx4 a0, a0, a3 # find entry in table 1187 1183 l32i a0, a0, EXC_TABLE_FAST_USER # load handler 1184 + l32i a3, a3, EXC_TABLE_DOUBLE_SAVE 1188 1185 jx a0 1189 1186 1190 - fast_syscall_spill_registers_fixup_return: 1187 + ENDPROC(fast_syscall_spill_registers_fixup) 1188 + 1189 + ENTRY(fast_syscall_spill_registers_fixup_return) 1191 1190 1192 1191 /* When we return here, all registers have been restored (a2: DEPC) */ 1193 1192 ··· 1198 1191 1199 1192 /* Restore fixup handler. */ 1200 1193 1201 - xsr a3, excsave1 1202 - movi a2, fast_syscall_spill_registers_fixup 1203 - s32i a2, a3, EXC_TABLE_FIXUP 1204 - s32i a0, a3, EXC_TABLE_DOUBLE_SAVE 1205 - rsr a2, windowbase 1206 - s32i a2, a3, EXC_TABLE_PARAM 1207 - l32i a2, a3, EXC_TABLE_KSTK 1194 + rsr a2, excsave1 1195 + s32i a3, a2, EXC_TABLE_DOUBLE_SAVE 1196 + movi a3, fast_syscall_spill_registers_fixup 1197 + s32i a3, a2, EXC_TABLE_FIXUP 1198 + rsr a3, windowbase 1199 + s32i a3, a2, EXC_TABLE_PARAM 1200 + l32i a2, a2, EXC_TABLE_KSTK 1208 1201 1209 1202 /* Load WB at the time the exception occurred. */ 1210 1203 ··· 1213 1206 wsr a3, windowbase 1214 1207 rsync 1215 1208 1209 + rsr a3, excsave1 1210 + l32i a3, a3, EXC_TABLE_DOUBLE_SAVE 1211 + 1216 1212 rfde 1217 1213 1214 + ENDPROC(fast_syscall_spill_registers_fixup_return) 1218 1215 1219 1216 /* 1220 1217 * spill all registers.
+1 -1
arch/xtensa/kernel/signal.c
··· 341 341 342 342 sp = regs->areg[1]; 343 343 344 - if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) { 344 + if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) { 345 345 sp = current->sas_ss_sp + current->sas_ss_size; 346 346 } 347 347
+2 -1
arch/xtensa/platforms/iss/network.c
··· 737 737 return 1; 738 738 } 739 739 740 - if ((new = alloc_bootmem(sizeof new)) == NULL) { 740 + new = alloc_bootmem(sizeof(*new)); 741 + if (new == NULL) { 741 742 printk("Alloc_bootmem failed\n"); 742 743 return 1; 743 744 }
+21
drivers/clk/clk-nomadik.c
··· 27 27 */ 28 28 29 29 #define SRC_CR 0x00U 30 + #define SRC_CR_T0_ENSEL BIT(15) 31 + #define SRC_CR_T1_ENSEL BIT(17) 32 + #define SRC_CR_T2_ENSEL BIT(19) 33 + #define SRC_CR_T3_ENSEL BIT(21) 34 + #define SRC_CR_T4_ENSEL BIT(23) 35 + #define SRC_CR_T5_ENSEL BIT(25) 36 + #define SRC_CR_T6_ENSEL BIT(27) 37 + #define SRC_CR_T7_ENSEL BIT(29) 30 38 #define SRC_XTALCR 0x0CU 31 39 #define SRC_XTALCR_XTALTIMEN BIT(20) 32 40 #define SRC_XTALCR_SXTALDIS BIT(19) ··· 551 543 __func__, np->name); 552 544 return; 553 545 } 546 + 547 + /* Set all timers to use the 2.4 MHz TIMCLK */ 548 + val = readl(src_base + SRC_CR); 549 + val |= SRC_CR_T0_ENSEL; 550 + val |= SRC_CR_T1_ENSEL; 551 + val |= SRC_CR_T2_ENSEL; 552 + val |= SRC_CR_T3_ENSEL; 553 + val |= SRC_CR_T4_ENSEL; 554 + val |= SRC_CR_T5_ENSEL; 555 + val |= SRC_CR_T6_ENSEL; 556 + val |= SRC_CR_T7_ENSEL; 557 + writel(val, src_base + SRC_CR); 558 + 554 559 val = readl(src_base + SRC_XTALCR); 555 560 pr_info("SXTALO is %s\n", 556 561 (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
+2 -2
drivers/clk/mvebu/armada-370.c
··· 39 39 }; 40 40 41 41 static const u32 a370_tclk_freqs[] __initconst = { 42 - 16600000, 43 - 20000000, 42 + 166000000, 43 + 200000000, 44 44 }; 45 45 46 46 static u32 __init a370_get_tclk_freq(void __iomem *sar)
+1 -1
drivers/clk/socfpga/clk.c
··· 49 49 #define SOCFPGA_L4_SP_CLK "l4_sp_clk" 50 50 #define SOCFPGA_NAND_CLK "nand_clk" 51 51 #define SOCFPGA_NAND_X_CLK "nand_x_clk" 52 - #define SOCFPGA_MMC_CLK "mmc_clk" 52 + #define SOCFPGA_MMC_CLK "sdmmc_clk" 53 53 #define SOCFPGA_DB_CLK "gpio_db_clk" 54 54 55 55 #define div_mask(width) ((1 << (width)) - 1)
+1 -1
drivers/clk/versatile/clk-icst.c
··· 107 107 108 108 vco = icst_hz_to_vco(icst->params, rate); 109 109 icst->rate = icst_hz(icst->params, vco); 110 - vco_set(icst->vcoreg, icst->lockreg, vco); 110 + vco_set(icst->lockreg, icst->vcoreg, vco); 111 111 return 0; 112 112 } 113 113
+1 -1
drivers/gpu/drm/drm_drv.c
··· 61 61 62 62 /** Ioctl table */ 63 63 static const struct drm_ioctl_desc drm_ioctls[] = { 64 - DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED), 64 + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), 65 65 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), 66 66 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), 67 67 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+24 -4
drivers/gpu/drm/i915/intel_crt.c
··· 83 83 return true; 84 84 } 85 85 86 - static void intel_crt_get_config(struct intel_encoder *encoder, 87 - struct intel_crtc_config *pipe_config) 86 + static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) 88 87 { 89 88 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 90 89 struct intel_crt *crt = intel_encoder_to_crt(encoder); ··· 101 102 else 102 103 flags |= DRM_MODE_FLAG_NVSYNC; 103 104 104 - pipe_config->adjusted_mode.flags |= flags; 105 + return flags; 106 + } 107 + 108 + static void intel_crt_get_config(struct intel_encoder *encoder, 109 + struct intel_crtc_config *pipe_config) 110 + { 111 + pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder); 112 + } 113 + 114 + static void hsw_crt_get_config(struct intel_encoder *encoder, 115 + struct intel_crtc_config *pipe_config) 116 + { 117 + intel_ddi_get_config(encoder, pipe_config); 118 + 119 + pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC | 120 + DRM_MODE_FLAG_NHSYNC | 121 + DRM_MODE_FLAG_PVSYNC | 122 + DRM_MODE_FLAG_NVSYNC); 123 + pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder); 105 124 } 106 125 107 126 /* Note: The caller is required to filter out dpms modes not supported by the ··· 816 799 crt->base.mode_set = intel_crt_mode_set; 817 800 crt->base.disable = intel_disable_crt; 818 801 crt->base.enable = intel_enable_crt; 819 - crt->base.get_config = intel_crt_get_config; 802 + if (IS_HASWELL(dev)) 803 + crt->base.get_config = hsw_crt_get_config; 804 + else 805 + crt->base.get_config = intel_crt_get_config; 820 806 if (I915_HAS_HOTPLUG(dev)) 821 807 crt->base.hpd_pin = HPD_CRT; 822 808 if (HAS_DDI(dev))
+19 -2
drivers/gpu/drm/i915/intel_ddi.c
··· 1249 1249 intel_dp_check_link_status(intel_dp); 1250 1250 } 1251 1251 1252 - static void intel_ddi_get_config(struct intel_encoder *encoder, 1253 - struct intel_crtc_config *pipe_config) 1252 + void intel_ddi_get_config(struct intel_encoder *encoder, 1253 + struct intel_crtc_config *pipe_config) 1254 1254 { 1255 1255 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 1256 1256 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); ··· 1268 1268 flags |= DRM_MODE_FLAG_NVSYNC; 1269 1269 1270 1270 pipe_config->adjusted_mode.flags |= flags; 1271 + 1272 + switch (temp & TRANS_DDI_BPC_MASK) { 1273 + case TRANS_DDI_BPC_6: 1274 + pipe_config->pipe_bpp = 18; 1275 + break; 1276 + case TRANS_DDI_BPC_8: 1277 + pipe_config->pipe_bpp = 24; 1278 + break; 1279 + case TRANS_DDI_BPC_10: 1280 + pipe_config->pipe_bpp = 30; 1281 + break; 1282 + case TRANS_DDI_BPC_12: 1283 + pipe_config->pipe_bpp = 36; 1284 + break; 1285 + default: 1286 + break; 1287 + } 1271 1288 } 1272 1289 1273 1290 static void intel_ddi_destroy(struct drm_encoder *encoder)
+84 -47
drivers/gpu/drm/i915/intel_display.c
··· 2327 2327 FDI_FE_ERRC_ENABLE); 2328 2328 } 2329 2329 2330 - static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc) 2330 + static bool pipe_has_enabled_pch(struct intel_crtc *crtc) 2331 2331 { 2332 - return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder; 2332 + return crtc->base.enabled && crtc->active && 2333 + crtc->config.has_pch_encoder; 2333 2334 } 2334 2335 2335 2336 static void ivb_modeset_global_resources(struct drm_device *dev) ··· 2980 2979 I915_READ(VSYNCSHIFT(cpu_transcoder))); 2981 2980 } 2982 2981 2982 + static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) 2983 + { 2984 + struct drm_i915_private *dev_priv = dev->dev_private; 2985 + uint32_t temp; 2986 + 2987 + temp = I915_READ(SOUTH_CHICKEN1); 2988 + if (temp & FDI_BC_BIFURCATION_SELECT) 2989 + return; 2990 + 2991 + WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 2992 + WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 2993 + 2994 + temp |= FDI_BC_BIFURCATION_SELECT; 2995 + DRM_DEBUG_KMS("enabling fdi C rx\n"); 2996 + I915_WRITE(SOUTH_CHICKEN1, temp); 2997 + POSTING_READ(SOUTH_CHICKEN1); 2998 + } 2999 + 3000 + static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) 3001 + { 3002 + struct drm_device *dev = intel_crtc->base.dev; 3003 + struct drm_i915_private *dev_priv = dev->dev_private; 3004 + 3005 + switch (intel_crtc->pipe) { 3006 + case PIPE_A: 3007 + break; 3008 + case PIPE_B: 3009 + if (intel_crtc->config.fdi_lanes > 2) 3010 + WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); 3011 + else 3012 + cpt_enable_fdi_bc_bifurcation(dev); 3013 + 3014 + break; 3015 + case PIPE_C: 3016 + cpt_enable_fdi_bc_bifurcation(dev); 3017 + 3018 + break; 3019 + default: 3020 + BUG(); 3021 + } 3022 + } 3023 + 2983 3024 /* 2984 3025 * Enable PCH resources required for PCH ports: 2985 3026 * - PCH PLLs ··· 3039 2996 u32 reg, temp; 3040 2997 3041 2998 assert_pch_transcoder_disabled(dev_priv, pipe); 2999 + 3000 + if 
(IS_IVYBRIDGE(dev)) 3001 + ivybridge_update_fdi_bc_bifurcation(intel_crtc); 3042 3002 3043 3003 /* Write the TU size bits before fdi link training, so that error 3044 3004 * detection works. */ ··· 5029 4983 if (!(tmp & PIPECONF_ENABLE)) 5030 4984 return false; 5031 4985 4986 + if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { 4987 + switch (tmp & PIPECONF_BPC_MASK) { 4988 + case PIPECONF_6BPC: 4989 + pipe_config->pipe_bpp = 18; 4990 + break; 4991 + case PIPECONF_8BPC: 4992 + pipe_config->pipe_bpp = 24; 4993 + break; 4994 + case PIPECONF_10BPC: 4995 + pipe_config->pipe_bpp = 30; 4996 + break; 4997 + default: 4998 + break; 4999 + } 5000 + } 5001 + 5032 5002 intel_get_pipe_timings(crtc, pipe_config); 5033 5003 5034 5004 i9xx_get_pfit_config(crtc, pipe_config); ··· 5638 5576 return true; 5639 5577 } 5640 5578 5641 - static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) 5642 - { 5643 - struct drm_i915_private *dev_priv = dev->dev_private; 5644 - uint32_t temp; 5645 - 5646 - temp = I915_READ(SOUTH_CHICKEN1); 5647 - if (temp & FDI_BC_BIFURCATION_SELECT) 5648 - return; 5649 - 5650 - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 5651 - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 5652 - 5653 - temp |= FDI_BC_BIFURCATION_SELECT; 5654 - DRM_DEBUG_KMS("enabling fdi C rx\n"); 5655 - I915_WRITE(SOUTH_CHICKEN1, temp); 5656 - POSTING_READ(SOUTH_CHICKEN1); 5657 - } 5658 - 5659 - static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) 5660 - { 5661 - struct drm_device *dev = intel_crtc->base.dev; 5662 - struct drm_i915_private *dev_priv = dev->dev_private; 5663 - 5664 - switch (intel_crtc->pipe) { 5665 - case PIPE_A: 5666 - break; 5667 - case PIPE_B: 5668 - if (intel_crtc->config.fdi_lanes > 2) 5669 - WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); 5670 - else 5671 - cpt_enable_fdi_bc_bifurcation(dev); 5672 - 5673 - break; 5674 - case PIPE_C: 5675 - cpt_enable_fdi_bc_bifurcation(dev); 5676 - 5677 - break; 5678 - 
default: 5679 - BUG(); 5680 - } 5681 - } 5682 - 5683 5579 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 5684 5580 { 5685 5581 /* ··· 5831 5811 &intel_crtc->config.fdi_m_n); 5832 5812 } 5833 5813 5834 - if (IS_IVYBRIDGE(dev)) 5835 - ivybridge_update_fdi_bc_bifurcation(intel_crtc); 5836 - 5837 5814 ironlake_set_pipeconf(crtc); 5838 5815 5839 5816 /* Set up the display plane register */ ··· 5897 5880 tmp = I915_READ(PIPECONF(crtc->pipe)); 5898 5881 if (!(tmp & PIPECONF_ENABLE)) 5899 5882 return false; 5883 + 5884 + switch (tmp & PIPECONF_BPC_MASK) { 5885 + case PIPECONF_6BPC: 5886 + pipe_config->pipe_bpp = 18; 5887 + break; 5888 + case PIPECONF_8BPC: 5889 + pipe_config->pipe_bpp = 24; 5890 + break; 5891 + case PIPECONF_10BPC: 5892 + pipe_config->pipe_bpp = 30; 5893 + break; 5894 + case PIPECONF_12BPC: 5895 + pipe_config->pipe_bpp = 36; 5896 + break; 5897 + default: 5898 + break; 5899 + } 5900 5900 5901 5901 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 5902 5902 struct intel_shared_dpll *pll; ··· 8645 8611 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 8646 8612 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 8647 8613 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 8614 + 8615 + if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 8616 + PIPE_CONF_CHECK_I(pipe_bpp); 8648 8617 8649 8618 #undef PIPE_CONF_CHECK_X 8650 8619 #undef PIPE_CONF_CHECK_I
+20
drivers/gpu/drm/i915/intel_dp.c
··· 1401 1401 else 1402 1402 pipe_config->port_clock = 270000; 1403 1403 } 1404 + 1405 + if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && 1406 + pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { 1407 + /* 1408 + * This is a big fat ugly hack. 1409 + * 1410 + * Some machines in UEFI boot mode provide us a VBT that has 18 1411 + * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 1412 + * unknown we fail to light up. Yet the same BIOS boots up with 1413 + * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 1414 + * max, not what it tells us to use. 1415 + * 1416 + * Note: This will still be broken if the eDP panel is not lit 1417 + * up by the BIOS, and thus we can't get the mode at module 1418 + * load. 1419 + */ 1420 + DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 1421 + pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp); 1422 + dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; 1423 + } 1404 1424 } 1405 1425 1406 1426 static bool is_edp_psr(struct intel_dp *intel_dp)
+2
drivers/gpu/drm/i915/intel_drv.h
··· 765 765 extern bool 766 766 intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); 767 767 extern void intel_ddi_fdi_disable(struct drm_crtc *crtc); 768 + extern void intel_ddi_get_config(struct intel_encoder *encoder, 769 + struct intel_crtc_config *pipe_config); 768 770 769 771 extern void intel_display_handle_reset(struct drm_device *dev); 770 772 extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+16
drivers/gpu/drm/i915/intel_lvds.c
··· 700 700 }, 701 701 { 702 702 .callback = intel_no_lvds_dmi_callback, 703 + .ident = "Intel D410PT", 704 + .matches = { 705 + DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), 706 + DMI_MATCH(DMI_BOARD_NAME, "D410PT"), 707 + }, 708 + }, 709 + { 710 + .callback = intel_no_lvds_dmi_callback, 711 + .ident = "Intel D425KT", 712 + .matches = { 713 + DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), 714 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"), 715 + }, 716 + }, 717 + { 718 + .callback = intel_no_lvds_dmi_callback, 703 719 .ident = "Intel D510MO", 704 720 .matches = { 705 721 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+1
drivers/gpu/drm/radeon/evergreen_hdmi.c
··· 291 291 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */ 292 292 293 293 WREG32(HDMI_ACR_PACKET_CONTROL + offset, 294 + HDMI_ACR_SOURCE | /* select SW CTS value */ 294 295 HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ 295 296 296 297 evergreen_hdmi_update_ACR(encoder, mode->clock);
+1 -1
drivers/gpu/drm/radeon/kv_dpm.c
··· 2635 2635 pi->caps_sclk_ds = true; 2636 2636 pi->enable_auto_thermal_throttling = true; 2637 2637 pi->disable_nb_ps3_in_battery = false; 2638 - pi->bapm_enable = true; 2638 + pi->bapm_enable = false; 2639 2639 pi->voltage_drop_t = 0; 2640 2640 pi->caps_sclk_throttle_low_notification = false; 2641 2641 pi->caps_fps = false; /* true? */
+2 -2
drivers/gpu/drm/radeon/radeon.h
··· 1272 1272 struct radeon_clock_and_voltage_limits { 1273 1273 u32 sclk; 1274 1274 u32 mclk; 1275 - u32 vddc; 1276 - u32 vddci; 1275 + u16 vddc; 1276 + u16 vddci; 1277 1277 }; 1278 1278 1279 1279 struct radeon_clock_array {
+5 -5
drivers/input/input.c
··· 1734 1734 */ 1735 1735 struct input_dev *input_allocate_device(void) 1736 1736 { 1737 + static atomic_t input_no = ATOMIC_INIT(0); 1737 1738 struct input_dev *dev; 1738 1739 1739 1740 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL); ··· 1744 1743 device_initialize(&dev->dev); 1745 1744 mutex_init(&dev->mutex); 1746 1745 spin_lock_init(&dev->event_lock); 1746 + init_timer(&dev->timer); 1747 1747 INIT_LIST_HEAD(&dev->h_list); 1748 1748 INIT_LIST_HEAD(&dev->node); 1749 + 1750 + dev_set_name(&dev->dev, "input%ld", 1751 + (unsigned long) atomic_inc_return(&input_no) - 1); 1749 1752 1750 1753 __module_get(THIS_MODULE); 1751 1754 } ··· 2024 2019 */ 2025 2020 int input_register_device(struct input_dev *dev) 2026 2021 { 2027 - static atomic_t input_no = ATOMIC_INIT(0); 2028 2022 struct input_devres *devres = NULL; 2029 2023 struct input_handler *handler; 2030 2024 unsigned int packet_size; ··· 2063 2059 * If delay and period are pre-set by the driver, then autorepeating 2064 2060 * is handled by the driver itself and we don't do it in input.c. 2065 2061 */ 2066 - init_timer(&dev->timer); 2067 2062 if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) { 2068 2063 dev->timer.data = (long) dev; 2069 2064 dev->timer.function = input_repeat_key; ··· 2075 2072 2076 2073 if (!dev->setkeycode) 2077 2074 dev->setkeycode = input_default_setkeycode; 2078 - 2079 - dev_set_name(&dev->dev, "input%ld", 2080 - (unsigned long) atomic_inc_return(&input_no) - 1); 2081 2075 2082 2076 error = device_add(&dev->dev); 2083 2077 if (error)
+9 -2
drivers/input/keyboard/pxa27x_keypad.c
··· 786 786 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); 787 787 input_set_capability(input_dev, EV_MSC, MSC_SCAN); 788 788 789 - if (pdata) 789 + if (pdata) { 790 790 error = pxa27x_keypad_build_keycode(keypad); 791 - else 791 + } else { 792 792 error = pxa27x_keypad_build_keycode_from_dt(keypad); 793 + /* 794 + * Data that we get from DT resides in dynamically 795 + * allocated memory so we need to update our pdata 796 + * pointer. 797 + */ 798 + pdata = keypad->pdata; 799 + } 793 800 if (error) { 794 801 dev_err(&pdev->dev, "failed to build keycode\n"); 795 802 goto failed_put_clk;
+10 -4
drivers/input/misc/cm109.c
··· 351 351 if (status) { 352 352 if (status == -ESHUTDOWN) 353 353 return; 354 - dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status); 354 + dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n", 355 + __func__, status); 356 + goto out; 355 357 } 356 358 357 359 /* Special keys */ ··· 420 418 dev->ctl_data->byte[2], 421 419 dev->ctl_data->byte[3]); 422 420 423 - if (status) 424 - dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status); 421 + if (status) { 422 + if (status == -ESHUTDOWN) 423 + return; 424 + dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n", 425 + __func__, status); 426 + } 425 427 426 428 spin_lock(&dev->ctl_submit_lock); 427 429 ··· 433 427 434 428 if (likely(!dev->shutdown)) { 435 429 436 - if (dev->buzzer_pending) { 430 + if (dev->buzzer_pending || status) { 437 431 dev->buzzer_pending = 0; 438 432 dev->ctl_urb_pending = 1; 439 433 cm109_submit_buzz_toggle(dev);
+1
drivers/input/mouse/alps.c
··· 103 103 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ 104 104 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, 105 105 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, 106 + { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_DUALPOINT }, /* Dell XT2 */ 106 107 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ 107 108 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, 108 109 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
+14 -9
drivers/input/serio/i8042.c
··· 223 223 { 224 224 unsigned long flags; 225 225 unsigned char data, str; 226 - int i = 0; 226 + int count = 0; 227 + int retval = 0; 227 228 228 229 spin_lock_irqsave(&i8042_lock, flags); 229 230 230 - while (((str = i8042_read_status()) & I8042_STR_OBF) && (i < I8042_BUFFER_SIZE)) { 231 - udelay(50); 232 - data = i8042_read_data(); 233 - i++; 234 - dbg("%02x <- i8042 (flush, %s)\n", 235 - data, str & I8042_STR_AUXDATA ? "aux" : "kbd"); 231 + while ((str = i8042_read_status()) & I8042_STR_OBF) { 232 + if (count++ < I8042_BUFFER_SIZE) { 233 + udelay(50); 234 + data = i8042_read_data(); 235 + dbg("%02x <- i8042 (flush, %s)\n", 236 + data, str & I8042_STR_AUXDATA ? "aux" : "kbd"); 237 + } else { 238 + retval = -EIO; 239 + break; 240 + } 236 241 } 237 242 238 243 spin_unlock_irqrestore(&i8042_lock, flags); 239 244 240 - return i; 245 + return retval; 241 246 } 242 247 243 248 /* ··· 854 849 855 850 static int i8042_controller_check(void) 856 851 { 857 - if (i8042_flush() == I8042_BUFFER_SIZE) { 852 + if (i8042_flush()) { 858 853 pr_err("No controller found\n"); 859 854 return -ENODEV; 860 855 }
+4
drivers/input/tablet/wacom_sys.c
··· 1031 1031 } 1032 1032 1033 1033 static enum power_supply_property wacom_battery_props[] = { 1034 + POWER_SUPPLY_PROP_SCOPE, 1034 1035 POWER_SUPPLY_PROP_CAPACITY 1035 1036 }; 1036 1037 ··· 1043 1042 int ret = 0; 1044 1043 1045 1044 switch (psp) { 1045 + case POWER_SUPPLY_PROP_SCOPE: 1046 + val->intval = POWER_SUPPLY_SCOPE_DEVICE; 1047 + break; 1046 1048 case POWER_SUPPLY_PROP_CAPACITY: 1047 1049 val->intval = 1048 1050 wacom->wacom_wac.battery_capacity * 100 / 31;
+8
drivers/input/tablet/wacom_wac.c
··· 2054 2054 static const struct wacom_features wacom_features_0x10D = 2055 2055 { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 2056 2056 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2057 + static const struct wacom_features wacom_features_0x10E = 2058 + { "Wacom ISDv4 10E", WACOM_PKGLEN_MTTPC, 27760, 15694, 255, 2059 + 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2060 + static const struct wacom_features wacom_features_0x10F = 2061 + { "Wacom ISDv4 10F", WACOM_PKGLEN_MTTPC, 27760, 15694, 255, 2062 + 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2057 2063 static const struct wacom_features wacom_features_0x4001 = 2058 2064 { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 2059 2065 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; ··· 2254 2248 { USB_DEVICE_WACOM(0x100) }, 2255 2249 { USB_DEVICE_WACOM(0x101) }, 2256 2250 { USB_DEVICE_WACOM(0x10D) }, 2251 + { USB_DEVICE_WACOM(0x10E) }, 2252 + { USB_DEVICE_WACOM(0x10F) }, 2257 2253 { USB_DEVICE_WACOM(0x300) }, 2258 2254 { USB_DEVICE_WACOM(0x301) }, 2259 2255 { USB_DEVICE_WACOM(0x304) },
+1 -5
drivers/pci/hotplug/acpiphp_glue.c
··· 552 552 struct acpiphp_func *func; 553 553 int max, pass; 554 554 LIST_HEAD(add_list); 555 - int nr_found; 556 555 557 - nr_found = acpiphp_rescan_slot(slot); 556 + acpiphp_rescan_slot(slot); 558 557 max = acpiphp_max_busnr(bus); 559 558 for (pass = 0; pass < 2; pass++) { 560 559 list_for_each_entry(dev, &bus->devices, bus_list) { ··· 573 574 } 574 575 } 575 576 __pci_bus_assign_resources(bus, &add_list, NULL); 576 - /* Nothing more to do here if there are no new devices on this bus. */ 577 - if (!nr_found && (slot->flags & SLOT_ENABLED)) 578 - return; 579 577 580 578 acpiphp_sanitize_bus(bus); 581 579 acpiphp_set_hpp_values(bus);
+2
drivers/scsi/aacraid/linit.c
··· 771 771 static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 772 772 { 773 773 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; 774 + if (!capable(CAP_SYS_RAWIO)) 775 + return -EPERM; 774 776 return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg); 775 777 } 776 778
+95 -81
drivers/scsi/sg.c
··· 105 105 static int sg_add(struct device *, struct class_interface *); 106 106 static void sg_remove(struct device *, struct class_interface *); 107 107 108 + static DEFINE_SPINLOCK(sg_open_exclusive_lock); 109 + 108 110 static DEFINE_IDR(sg_index_idr); 109 - static DEFINE_RWLOCK(sg_index_lock); 111 + static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock 112 + file descriptor list for device */ 110 113 111 114 static struct class_interface sg_interface = { 112 115 .add_dev = sg_add, ··· 146 143 } Sg_request; 147 144 148 145 typedef struct sg_fd { /* holds the state of a file descriptor */ 149 - struct list_head sfd_siblings; /* protected by sfd_lock of device */ 146 + /* sfd_siblings is protected by sg_index_lock */ 147 + struct list_head sfd_siblings; 150 148 struct sg_device *parentdp; /* owning device */ 151 149 wait_queue_head_t read_wait; /* queue read until command done */ 152 150 rwlock_t rq_list_lock; /* protect access to list in req_arr */ ··· 170 166 171 167 typedef struct sg_device { /* holds the state of each scsi generic device */ 172 168 struct scsi_device *device; 169 + wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */ 173 170 int sg_tablesize; /* adapter's max scatter-gather table size */ 174 171 u32 index; /* device index number */ 175 - spinlock_t sfd_lock; /* protect file descriptor list for device */ 172 + /* sfds is protected by sg_index_lock */ 176 173 struct list_head sfds; 177 - struct rw_semaphore o_sem; /* exclude open should hold this rwsem */ 178 174 volatile char detached; /* 0->attached, 1->detached pending removal */ 175 + /* exclude protected by sg_open_exclusive_lock */ 179 176 char exclude; /* opened for exclusive access */ 180 177 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ 181 178 struct gendisk *disk; ··· 225 220 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); 226 221 } 227 222 223 + static int get_exclude(Sg_device *sdp) 224 + { 225 + unsigned long flags; 226 + int ret; 
227 + 228 + spin_lock_irqsave(&sg_open_exclusive_lock, flags); 229 + ret = sdp->exclude; 230 + spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); 231 + return ret; 232 + } 233 + 234 + static int set_exclude(Sg_device *sdp, char val) 235 + { 236 + unsigned long flags; 237 + 238 + spin_lock_irqsave(&sg_open_exclusive_lock, flags); 239 + sdp->exclude = val; 240 + spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); 241 + return val; 242 + } 243 + 228 244 static int sfds_list_empty(Sg_device *sdp) 229 245 { 230 246 unsigned long flags; 231 247 int ret; 232 248 233 - spin_lock_irqsave(&sdp->sfd_lock, flags); 249 + read_lock_irqsave(&sg_index_lock, flags); 234 250 ret = list_empty(&sdp->sfds); 235 - spin_unlock_irqrestore(&sdp->sfd_lock, flags); 251 + read_unlock_irqrestore(&sg_index_lock, flags); 236 252 return ret; 237 253 } 238 254 ··· 265 239 struct request_queue *q; 266 240 Sg_device *sdp; 267 241 Sg_fd *sfp; 242 + int res; 268 243 int retval; 269 244 270 245 nonseekable_open(inode, filp); ··· 294 267 goto error_out; 295 268 } 296 269 297 - if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) { 298 - retval = -EPERM; /* Can't lock it with read only access */ 270 + if (flags & O_EXCL) { 271 + if (O_RDONLY == (flags & O_ACCMODE)) { 272 + retval = -EPERM; /* Can't lock it with read only access */ 273 + goto error_out; 274 + } 275 + if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) { 276 + retval = -EBUSY; 277 + goto error_out; 278 + } 279 + res = wait_event_interruptible(sdp->o_excl_wait, 280 + ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 
0 : set_exclude(sdp, 1))); 281 + if (res) { 282 + retval = res; /* -ERESTARTSYS because signal hit process */ 283 + goto error_out; 284 + } 285 + } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */ 286 + if (flags & O_NONBLOCK) { 287 + retval = -EBUSY; 288 + goto error_out; 289 + } 290 + res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp)); 291 + if (res) { 292 + retval = res; /* -ERESTARTSYS because signal hit process */ 293 + goto error_out; 294 + } 295 + } 296 + if (sdp->detached) { 297 + retval = -ENODEV; 299 298 goto error_out; 300 299 } 301 - if (flags & O_NONBLOCK) { 302 - if (flags & O_EXCL) { 303 - if (!down_write_trylock(&sdp->o_sem)) { 304 - retval = -EBUSY; 305 - goto error_out; 306 - } 307 - } else { 308 - if (!down_read_trylock(&sdp->o_sem)) { 309 - retval = -EBUSY; 310 - goto error_out; 311 - } 312 - } 313 - } else { 314 - if (flags & O_EXCL) 315 - down_write(&sdp->o_sem); 316 - else 317 - down_read(&sdp->o_sem); 318 - } 319 - /* Since write lock is held, no need to check sfd_list */ 320 - if (flags & O_EXCL) 321 - sdp->exclude = 1; /* used by release lock */ 322 - 323 300 if (sfds_list_empty(sdp)) { /* no existing opens on this device */ 324 301 sdp->sgdebug = 0; 325 302 q = sdp->device->request_queue; 326 303 sdp->sg_tablesize = queue_max_segments(q); 327 304 } 328 - sfp = sg_add_sfp(sdp, dev); 329 - if (!IS_ERR(sfp)) 305 + if ((sfp = sg_add_sfp(sdp, dev))) 330 306 filp->private_data = sfp; 331 - /* retval is already provably zero at this point because of the 332 - * check after retval = scsi_autopm_get_device(sdp->device)) 333 - */ 334 307 else { 335 - retval = PTR_ERR(sfp); 336 - 337 308 if (flags & O_EXCL) { 338 - sdp->exclude = 0; /* undo if error */ 339 - up_write(&sdp->o_sem); 340 - } else 341 - up_read(&sdp->o_sem); 309 + set_exclude(sdp, 0); /* undo if error */ 310 + wake_up_interruptible(&sdp->o_excl_wait); 311 + } 312 + retval = -ENOMEM; 313 + goto error_out; 314 + } 315 + retval = 0; 342 316 
error_out: 317 + if (retval) { 343 318 scsi_autopm_put_device(sdp->device); 344 319 sdp_put: 345 320 scsi_device_put(sdp->device); ··· 358 329 { 359 330 Sg_device *sdp; 360 331 Sg_fd *sfp; 361 - int excl; 362 332 363 333 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 364 334 return -ENXIO; 365 335 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); 366 336 367 - excl = sdp->exclude; 368 - sdp->exclude = 0; 369 - if (excl) 370 - up_write(&sdp->o_sem); 371 - else 372 - up_read(&sdp->o_sem); 337 + set_exclude(sdp, 0); 338 + wake_up_interruptible(&sdp->o_excl_wait); 373 339 374 340 scsi_autopm_put_device(sdp->device); 375 341 kref_put(&sfp->f_ref, sg_remove_sfp); ··· 1415 1391 disk->first_minor = k; 1416 1392 sdp->disk = disk; 1417 1393 sdp->device = scsidp; 1418 - spin_lock_init(&sdp->sfd_lock); 1419 1394 INIT_LIST_HEAD(&sdp->sfds); 1420 - init_rwsem(&sdp->o_sem); 1395 + init_waitqueue_head(&sdp->o_excl_wait); 1421 1396 sdp->sg_tablesize = queue_max_segments(q); 1422 1397 sdp->index = k; 1423 1398 kref_init(&sdp->d_ref); ··· 1549 1526 1550 1527 /* Need a write lock to set sdp->detached. 
*/ 1551 1528 write_lock_irqsave(&sg_index_lock, iflags); 1552 - spin_lock(&sdp->sfd_lock); 1553 1529 sdp->detached = 1; 1554 1530 list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { 1555 1531 wake_up_interruptible(&sfp->read_wait); 1556 1532 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); 1557 1533 } 1558 - spin_unlock(&sdp->sfd_lock); 1559 1534 write_unlock_irqrestore(&sg_index_lock, iflags); 1560 1535 1561 1536 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); ··· 2064 2043 2065 2044 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); 2066 2045 if (!sfp) 2067 - return ERR_PTR(-ENOMEM); 2046 + return NULL; 2068 2047 2069 2048 init_waitqueue_head(&sfp->read_wait); 2070 2049 rwlock_init(&sfp->rq_list_lock); ··· 2078 2057 sfp->cmd_q = SG_DEF_COMMAND_Q; 2079 2058 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; 2080 2059 sfp->parentdp = sdp; 2081 - spin_lock_irqsave(&sdp->sfd_lock, iflags); 2082 - if (sdp->detached) { 2083 - spin_unlock_irqrestore(&sdp->sfd_lock, iflags); 2084 - return ERR_PTR(-ENODEV); 2085 - } 2060 + write_lock_irqsave(&sg_index_lock, iflags); 2086 2061 list_add_tail(&sfp->sfd_siblings, &sdp->sfds); 2087 - spin_unlock_irqrestore(&sdp->sfd_lock, iflags); 2062 + write_unlock_irqrestore(&sg_index_lock, iflags); 2088 2063 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); 2089 2064 if (unlikely(sg_big_buff != def_reserved_size)) 2090 2065 sg_big_buff = def_reserved_size; ··· 2130 2113 struct sg_device *sdp = sfp->parentdp; 2131 2114 unsigned long iflags; 2132 2115 2133 - spin_lock_irqsave(&sdp->sfd_lock, iflags); 2116 + write_lock_irqsave(&sg_index_lock, iflags); 2134 2117 list_del(&sfp->sfd_siblings); 2135 - spin_unlock_irqrestore(&sdp->sfd_lock, iflags); 2118 + write_unlock_irqrestore(&sg_index_lock, iflags); 2119 + wake_up_interruptible(&sdp->o_excl_wait); 2136 2120 2137 2121 INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); 2138 2122 schedule_work(&sfp->ew.work); ··· 2520 2502 return 0; 2521 2503 } 2522 2504 2523 - /* must be called 
while holding sg_index_lock and sfd_lock */ 2505 + /* must be called while holding sg_index_lock */ 2524 2506 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) 2525 2507 { 2526 2508 int k, m, new_interface, blen, usg; ··· 2605 2587 2606 2588 read_lock_irqsave(&sg_index_lock, iflags); 2607 2589 sdp = it ? sg_lookup_dev(it->index) : NULL; 2608 - if (sdp) { 2609 - spin_lock(&sdp->sfd_lock); 2610 - if (!list_empty(&sdp->sfds)) { 2611 - struct scsi_device *scsidp = sdp->device; 2590 + if (sdp && !list_empty(&sdp->sfds)) { 2591 + struct scsi_device *scsidp = sdp->device; 2612 2592 2613 - seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); 2614 - if (sdp->detached) 2615 - seq_printf(s, "detached pending close "); 2616 - else 2617 - seq_printf 2618 - (s, "scsi%d chan=%d id=%d lun=%d em=%d", 2619 - scsidp->host->host_no, 2620 - scsidp->channel, scsidp->id, 2621 - scsidp->lun, 2622 - scsidp->host->hostt->emulated); 2623 - seq_printf(s, " sg_tablesize=%d excl=%d\n", 2624 - sdp->sg_tablesize, sdp->exclude); 2625 - sg_proc_debug_helper(s, sdp); 2626 - } 2627 - spin_unlock(&sdp->sfd_lock); 2593 + seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); 2594 + if (sdp->detached) 2595 + seq_printf(s, "detached pending close "); 2596 + else 2597 + seq_printf 2598 + (s, "scsi%d chan=%d id=%d lun=%d em=%d", 2599 + scsidp->host->host_no, 2600 + scsidp->channel, scsidp->id, 2601 + scsidp->lun, 2602 + scsidp->host->hostt->emulated); 2603 + seq_printf(s, " sg_tablesize=%d excl=%d\n", 2604 + sdp->sg_tablesize, get_exclude(sdp)); 2605 + sg_proc_debug_helper(s, sdp); 2628 2606 } 2629 2607 read_unlock_irqrestore(&sg_index_lock, iflags); 2630 2608 return 0;
+1
drivers/staging/bcm/Bcmchar.c
··· 1960 1960 1961 1961 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n"); 1962 1962 1963 + memset(&DevInfo, 0, sizeof(DevInfo)); 1963 1964 DevInfo.MaxRDMBufferSize = BUFFER_4K; 1964 1965 DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START; 1965 1966 DevInfo.u32RxAlignmentCorrection = 0;
+3
drivers/staging/ozwpan/ozcdev.c
··· 155 155 struct oz_app_hdr *app_hdr; 156 156 struct oz_serial_ctx *ctx; 157 157 158 + if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr)) 159 + return -EINVAL; 160 + 158 161 spin_lock_bh(&g_cdev.lock); 159 162 pd = g_cdev.active_pd; 160 163 if (pd)
+1 -1
drivers/staging/sb105x/sb_pci_mp.c
··· 1063 1063 1064 1064 static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt) 1065 1065 { 1066 - struct serial_icounter_struct icount; 1066 + struct serial_icounter_struct icount = {}; 1067 1067 struct sb_uart_icount cnow; 1068 1068 struct sb_uart_port *port = state->port; 1069 1069
+6 -3
drivers/staging/wlags49_h2/wl_priv.c
··· 570 570 ltv_t *pLtv; 571 571 bool_t ltvAllocated = FALSE; 572 572 ENCSTRCT sEncryption; 573 + size_t len; 573 574 574 575 #ifdef USE_WDS 575 576 hcf_16 hcfPort = HCF_PORT_0; ··· 687 686 break; 688 687 case CFG_CNF_OWN_NAME: 689 688 memset(lp->StationName, 0, sizeof(lp->StationName)); 690 - memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]); 689 + len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName)); 690 + strlcpy(lp->StationName, &pLtv->u.u8[2], len); 691 691 pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]); 692 692 break; 693 693 case CFG_CNF_LOAD_BALANCING: ··· 1785 1783 { 1786 1784 struct wl_private *lp = wl_priv(dev); 1787 1785 unsigned long flags; 1786 + size_t len; 1788 1787 int ret = 0; 1789 1788 /*------------------------------------------------------------------------*/ 1790 1789 ··· 1796 1793 wl_lock(lp, &flags); 1797 1794 1798 1795 memset(lp->StationName, 0, sizeof(lp->StationName)); 1799 - 1800 - memcpy(lp->StationName, extra, wrqu->data.length); 1796 + len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName)); 1797 + strlcpy(lp->StationName, extra, len); 1801 1798 1802 1799 /* Commit the adapter parameters */ 1803 1800 wl_apply(lp);
+2 -7
drivers/tty/serial/atmel_serial.c
··· 1499 1499 /* 1500 1500 * Get ip name usart or uart 1501 1501 */ 1502 - static int atmel_get_ip_name(struct uart_port *port) 1502 + static void atmel_get_ip_name(struct uart_port *port) 1503 1503 { 1504 1504 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1505 1505 int name = UART_GET_IP_NAME(port); ··· 1518 1518 atmel_port->is_usart = false; 1519 1519 } else { 1520 1520 dev_err(port->dev, "Not supported ip name, set to uart\n"); 1521 - return -EINVAL; 1522 1521 } 1523 - 1524 - return 0; 1525 1522 } 1526 1523 1527 1524 /* ··· 2402 2405 /* 2403 2406 * Get port name of usart or uart 2404 2407 */ 2405 - ret = atmel_get_ip_name(&port->uart); 2406 - if (ret < 0) 2407 - goto err_add_port; 2408 + atmel_get_ip_name(&port->uart); 2408 2409 2409 2410 return 0; 2410 2411
+15 -2
drivers/uio/uio.c
··· 642 642 { 643 643 struct uio_device *idev = vma->vm_private_data; 644 644 int mi = uio_find_mem_index(vma); 645 + struct uio_mem *mem; 645 646 if (mi < 0) 647 + return -EINVAL; 648 + mem = idev->info->mem + mi; 649 + 650 + if (vma->vm_end - vma->vm_start > mem->size) 646 651 return -EINVAL; 647 652 648 653 vma->vm_ops = &uio_physical_vm_ops; 649 - 650 654 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 651 655 656 + /* 657 + * We cannot use the vm_iomap_memory() helper here, 658 + * because vma->vm_pgoff is the map index we looked 659 + * up above in uio_find_mem_index(), rather than an 660 + * actual page offset into the mmap. 661 + * 662 + * So we just do the physical mmap without a page 663 + * offset. 664 + */ 652 665 return remap_pfn_range(vma, 653 666 vma->vm_start, 654 - idev->info->mem[mi].addr >> PAGE_SHIFT, 667 + mem->addr >> PAGE_SHIFT, 655 668 vma->vm_end - vma->vm_start, 656 669 vma->vm_page_prot); 657 670 }
+1
drivers/usb/serial/ftdi_sio.c
··· 904 904 { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) }, 905 905 /* Crucible Devices */ 906 906 { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) }, 907 + { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) }, 907 908 { } /* Terminating entry */ 908 909 }; 909 910
+6
drivers/usb/serial/ftdi_sio_ids.h
··· 1307 1307 * Manufacturer: Crucible Technologies 1308 1308 */ 1309 1309 #define FTDI_CT_COMET_PID 0x8e08 1310 + 1311 + /* 1312 + * Product: Z3X Box 1313 + * Manufacturer: Smart GSM Team 1314 + */ 1315 + #define FTDI_Z3X_PID 0x0011
+57 -219
drivers/usb/serial/pl2303.c
··· 4 4 * Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com) 5 5 * Copyright (C) 2003 IBM Corp. 6 6 * 7 - * Copyright (C) 2009, 2013 Frank Schäfer <fschaefer.oss@googlemail.com> 8 - * - fixes, improvements and documentation for the baud rate encoding methods 9 - * Copyright (C) 2013 Reinhard Max <max@suse.de> 10 - * - fixes and improvements for the divisor based baud rate encoding method 11 - * 12 7 * Original driver for 2.2.x by anonymous 13 8 * 14 9 * This program is free software; you can redistribute it and/or ··· 129 134 130 135 131 136 enum pl2303_type { 132 - type_0, /* H version ? */ 133 - type_1, /* H version ? */ 134 - HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */ 135 - HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */ 136 - TB, /* TB version */ 137 - HX_CLONE, /* Cheap and less functional clone of the HX chip */ 137 + type_0, /* don't know the difference between type 0 and */ 138 + type_1, /* type 1, until someone from prolific tells us... */ 139 + HX, /* HX version of the pl2303 chip */ 138 140 }; 139 - /* 140 - * NOTE: don't know the difference between type 0 and type 1, 141 - * until someone from Prolific tells us... 
142 - * TODO: distinguish between X/HX, TA and HXD, EA, RA, SA variants 143 - */ 144 141 145 142 struct pl2303_serial_private { 146 143 enum pl2303_type type; ··· 172 185 { 173 186 struct pl2303_serial_private *spriv; 174 187 enum pl2303_type type = type_0; 175 - char *type_str = "unknown (treating as type_0)"; 176 188 unsigned char *buf; 177 189 178 190 spriv = kzalloc(sizeof(*spriv), GFP_KERNEL); ··· 184 198 return -ENOMEM; 185 199 } 186 200 187 - if (serial->dev->descriptor.bDeviceClass == 0x02) { 201 + if (serial->dev->descriptor.bDeviceClass == 0x02) 188 202 type = type_0; 189 - type_str = "type_0"; 190 - } else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40) { 191 - /* 192 - * NOTE: The bcdDevice version is the only difference between 193 - * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB 194 - */ 195 - if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) { 196 - /* Check if the device is a clone */ 197 - pl2303_vendor_read(0x9494, 0, serial, buf); 198 - /* 199 - * NOTE: Not sure if this read is really needed. 200 - * The HX returns 0x00, the clone 0x02, but the Windows 201 - * driver seems to ignore the value and continues. 
202 - */ 203 - pl2303_vendor_write(0x0606, 0xaa, serial); 204 - pl2303_vendor_read(0x8686, 0, serial, buf); 205 - if (buf[0] != 0xaa) { 206 - type = HX_CLONE; 207 - type_str = "X/HX clone (limited functionality)"; 208 - } else { 209 - type = HX_TA; 210 - type_str = "X/HX/TA"; 211 - } 212 - pl2303_vendor_write(0x0606, 0x00, serial); 213 - } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice) 214 - == 0x400) { 215 - type = HXD_EA_RA_SA; 216 - type_str = "HXD/EA/RA/SA"; 217 - } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice) 218 - == 0x500) { 219 - type = TB; 220 - type_str = "TB"; 221 - } else { 222 - dev_info(&serial->interface->dev, 223 - "unknown/unsupported device type\n"); 224 - kfree(spriv); 225 - kfree(buf); 226 - return -ENODEV; 227 - } 228 - } else if (serial->dev->descriptor.bDeviceClass == 0x00 229 - || serial->dev->descriptor.bDeviceClass == 0xFF) { 203 + else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40) 204 + type = HX; 205 + else if (serial->dev->descriptor.bDeviceClass == 0x00) 230 206 type = type_1; 231 - type_str = "type_1"; 232 - } 233 - dev_dbg(&serial->interface->dev, "device type: %s\n", type_str); 207 + else if (serial->dev->descriptor.bDeviceClass == 0xFF) 208 + type = type_1; 209 + dev_dbg(&serial->interface->dev, "device type: %d\n", type); 234 210 235 211 spriv->type = type; 236 212 usb_set_serial_data(serial, spriv); ··· 207 259 pl2303_vendor_read(0x8383, 0, serial, buf); 208 260 pl2303_vendor_write(0, 1, serial); 209 261 pl2303_vendor_write(1, 0, serial); 210 - if (type == type_0 || type == type_1) 211 - pl2303_vendor_write(2, 0x24, serial); 212 - else 262 + if (type == HX) 213 263 pl2303_vendor_write(2, 0x44, serial); 264 + else 265 + pl2303_vendor_write(2, 0x24, serial); 214 266 215 267 kfree(buf); 216 268 return 0; ··· 264 316 return retval; 265 317 } 266 318 267 - static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type, 268 - u8 buf[4]) 319 + static void pl2303_encode_baudrate(struct tty_struct 
*tty, 320 + struct usb_serial_port *port, 321 + u8 buf[4]) 269 322 { 270 - /* 271 - * NOTE: Only the values defined in baud_sup are supported ! 272 - * => if unsupported values are set, the PL2303 uses 9600 baud instead 273 - * => HX clones just don't work at unsupported baud rates < 115200 baud, 274 - * for baud rates > 115200 they run at 115200 baud 275 - */ 276 323 const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600, 277 - 4800, 7200, 9600, 14400, 19200, 28800, 38400, 278 - 57600, 115200, 230400, 460800, 614400, 921600, 279 - 1228800, 2457600, 3000000, 6000000, 12000000 }; 280 - /* 281 - * NOTE: With the exception of type_0/1 devices, the following 282 - * additional baud rates are supported (tested with HX rev. 3A only): 283 - * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800, 284 - * 403200, 806400. (*: not HX and HX clones) 285 - * 286 - * Maximum values: HXD, TB: 12000000; HX, TA: 6000000; 287 - * type_0+1: 1228800; RA: 921600; HX clones, SA: 115200 288 - * 289 - * As long as we are not using this encoding method for anything else 290 - * than the type_0+1, HX and HX clone chips, there is no point in 291 - * complicating the code to support them. 292 - */ 324 + 4800, 7200, 9600, 14400, 19200, 28800, 38400, 325 + 57600, 115200, 230400, 460800, 500000, 614400, 326 + 921600, 1228800, 2457600, 3000000, 6000000 }; 327 + 328 + struct usb_serial *serial = port->serial; 329 + struct pl2303_serial_private *spriv = usb_get_serial_data(serial); 330 + int baud; 293 331 int i; 332 + 333 + /* 334 + * NOTE: Only the values defined in baud_sup are supported! 
335 + * => if unsupported values are set, the PL2303 seems to use 336 + * 9600 baud (at least my PL2303X always does) 337 + */ 338 + baud = tty_get_baud_rate(tty); 339 + dev_dbg(&port->dev, "baud requested = %d\n", baud); 340 + if (!baud) 341 + return; 294 342 295 343 /* Set baudrate to nearest supported value */ 296 344 for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) { 297 345 if (baud_sup[i] > baud) 298 346 break; 299 347 } 348 + 300 349 if (i == ARRAY_SIZE(baud_sup)) 301 350 baud = baud_sup[i - 1]; 302 351 else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1])) 303 352 baud = baud_sup[i - 1]; 304 353 else 305 354 baud = baud_sup[i]; 306 - /* Respect the chip type specific baud rate limits */ 307 - /* 308 - * FIXME: as long as we don't know how to distinguish between the 309 - * HXD, EA, RA, and SA chip variants, allow the max. value of 12M. 310 - */ 311 - if (type == HX_TA) 312 - baud = min_t(int, baud, 6000000); 313 - else if (type == type_0 || type == type_1) 355 + 356 + /* type_0, type_1 only support up to 1228800 baud */ 357 + if (spriv->type != HX) 314 358 baud = min_t(int, baud, 1228800); 315 - else if (type == HX_CLONE) 316 - baud = min_t(int, baud, 115200); 317 - /* Direct (standard) baud rate encoding method */ 318 - put_unaligned_le32(baud, buf); 319 359 320 - return baud; 321 - } 322 - 323 - static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type, 324 - u8 buf[4]) 325 - { 326 - /* 327 - * Divisor based baud rate encoding method 328 - * 329 - * NOTE: HX clones do NOT support this method. 330 - * It's not clear if the type_0/1 chips support it. 331 - * 332 - * divisor = 12MHz * 32 / baudrate = 2^A * B 333 - * 334 - * with 335 - * 336 - * A = buf[1] & 0x0e 337 - * B = buf[0] + (buf[1] & 0x01) << 8 338 - * 339 - * Special cases: 340 - * => 8 < B < 16: device seems to work not properly 341 - * => B <= 8: device uses the max. 
value B = 512 instead 342 - */ 343 - unsigned int A, B; 344 - 345 - /* 346 - * NOTE: The Windows driver allows maximum baud rates of 110% of the 347 - * specified maximium value. 348 - * Quick tests with early (2004) HX (rev. A) chips suggest, that even 349 - * higher baud rates (up to the maximum of 24M baud !) are working fine, 350 - * but that should really be tested carefully in "real life" scenarios 351 - * before removing the upper limit completely. 352 - * Baud rates smaller than the specified 75 baud are definitely working 353 - * fine. 354 - */ 355 - if (type == type_0 || type == type_1) 356 - baud = min_t(int, baud, 1228800 * 1.1); 357 - else if (type == HX_TA) 358 - baud = min_t(int, baud, 6000000 * 1.1); 359 - else if (type == HXD_EA_RA_SA) 360 - /* HXD, EA: 12Mbps; RA: 1Mbps; SA: 115200 bps */ 361 - /* 362 - * FIXME: as long as we don't know how to distinguish between 363 - * these chip variants, allow the max. of these values 364 - */ 365 - baud = min_t(int, baud, 12000000 * 1.1); 366 - else if (type == TB) 367 - baud = min_t(int, baud, 12000000 * 1.1); 368 - /* Determine factors A and B */ 369 - A = 0; 370 - B = 12000000 * 32 / baud; /* 12MHz */ 371 - B <<= 1; /* Add one bit for rounding */ 372 - while (B > (512 << 1) && A <= 14) { 373 - A += 2; 374 - B >>= 2; 375 - } 376 - if (A > 14) { /* max. divisor = min. baudrate reached */ 377 - A = 14; 378 - B = 512; 379 - /* => ~45.78 baud */ 360 + if (baud <= 115200) { 361 + put_unaligned_le32(baud, buf); 380 362 } else { 381 - B = (B + 1) >> 1; /* Round the last bit */ 382 - } 383 - /* Handle special cases */ 384 - if (B == 512) 385 - B = 0; /* also: 1 to 8 */ 386 - else if (B < 16) 387 363 /* 388 - * NOTE: With the current algorithm this happens 389 - * only for A=0 and means that the min. divisor 390 - * (respectively: the max. baudrate) is reached. 
364 + * Apparently the formula for higher speeds is: 365 + * baudrate = 12M * 32 / (2^buf[1]) / buf[0] 391 366 */ 392 - B = 16; /* => 24 MBaud */ 393 - /* Encode the baud rate */ 394 - buf[3] = 0x80; /* Select divisor encoding method */ 395 - buf[2] = 0; 396 - buf[1] = (A & 0x0e); /* A */ 397 - buf[1] |= ((B & 0x100) >> 8); /* MSB of B */ 398 - buf[0] = B & 0xff; /* 8 LSBs of B */ 399 - /* Calculate the actual/resulting baud rate */ 400 - if (B <= 8) 401 - B = 512; 402 - baud = 12000000 * 32 / ((1 << A) * B); 367 + unsigned tmp = 12000000 * 32 / baud; 368 + buf[3] = 0x80; 369 + buf[2] = 0; 370 + buf[1] = (tmp >= 256); 371 + while (tmp >= 256) { 372 + tmp >>= 2; 373 + buf[1] <<= 1; 374 + } 375 + buf[0] = tmp; 376 + } 403 377 404 - return baud; 405 - } 406 - 407 - static void pl2303_encode_baudrate(struct tty_struct *tty, 408 - struct usb_serial_port *port, 409 - enum pl2303_type type, 410 - u8 buf[4]) 411 - { 412 - int baud; 413 - 414 - baud = tty_get_baud_rate(tty); 415 - dev_dbg(&port->dev, "baud requested = %d\n", baud); 416 - if (!baud) 417 - return; 418 - /* 419 - * There are two methods for setting/encoding the baud rate 420 - * 1) Direct method: encodes the baud rate value directly 421 - * => supported by all chip types 422 - * 2) Divisor based method: encodes a divisor to a base value (12MHz*32) 423 - * => not supported by HX clones (and likely type_0/1 chips) 424 - * 425 - * NOTE: Although the divisor based baud rate encoding method is much 426 - * more flexible, some of the standard baud rate values can not be 427 - * realized exactly. But the difference is very small (max. 0.2%) and 428 - * the device likely uses the same baud rate generator for both methods 429 - * so that there is likley no difference. 
430 - */ 431 - if (type == type_0 || type == type_1 || type == HX_CLONE) 432 - baud = pl2303_baudrate_encode_direct(baud, type, buf); 433 - else 434 - baud = pl2303_baudrate_encode_divisor(baud, type, buf); 435 378 /* Save resulting baud rate */ 436 379 tty_encode_baud_rate(tty, baud, baud); 437 380 dev_dbg(&port->dev, "baud set = %d\n", baud); ··· 379 540 dev_dbg(&port->dev, "data bits = %d\n", buf[6]); 380 541 } 381 542 382 - /* For reference: buf[0]:buf[3] baud rate value */ 383 - pl2303_encode_baudrate(tty, port, spriv->type, buf); 543 + /* For reference buf[0]:buf[3] baud rate value */ 544 + pl2303_encode_baudrate(tty, port, &buf[0]); 384 545 385 546 /* For reference buf[4]=0 is 1 stop bits */ 386 547 /* For reference buf[4]=1 is 1.5 stop bits */ ··· 457 618 dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf); 458 619 459 620 if (C_CRTSCTS(tty)) { 460 - if (spriv->type == type_0 || spriv->type == type_1) 461 - pl2303_vendor_write(0x0, 0x41, serial); 462 - else 621 + if (spriv->type == HX) 463 622 pl2303_vendor_write(0x0, 0x61, serial); 623 + else 624 + pl2303_vendor_write(0x0, 0x41, serial); 464 625 } else { 465 626 pl2303_vendor_write(0x0, 0x0, serial); 466 627 } ··· 497 658 struct pl2303_serial_private *spriv = usb_get_serial_data(serial); 498 659 int result; 499 660 500 - if (spriv->type == type_0 || spriv->type == type_1) { 661 + if (spriv->type != HX) { 501 662 usb_clear_halt(serial->dev, port->write_urb->pipe); 502 663 usb_clear_halt(serial->dev, port->read_urb->pipe); 503 664 } else { ··· 672 833 result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 673 834 BREAK_REQUEST, BREAK_REQUEST_TYPE, state, 674 835 0, NULL, 0, 100); 675 - /* NOTE: HX clones don't support sending breaks, -EPIPE is returned */ 676 836 if (result) 677 837 dev_err(&port->dev, "error sending break = %d\n", result); 678 838 }
+1 -25
drivers/video/au1100fb.c
··· 361 361 int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) 362 362 { 363 363 struct au1100fb_device *fbdev; 364 - unsigned int len; 365 - unsigned long start=0, off; 366 364 367 365 fbdev = to_au1100fb_device(fbi); 368 - 369 - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) { 370 - return -EINVAL; 371 - } 372 - 373 - start = fbdev->fb_phys & PAGE_MASK; 374 - len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len); 375 - 376 - off = vma->vm_pgoff << PAGE_SHIFT; 377 - 378 - if ((vma->vm_end - vma->vm_start + off) > len) { 379 - return -EINVAL; 380 - } 381 - 382 - off += start; 383 - vma->vm_pgoff = off >> PAGE_SHIFT; 384 366 385 367 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 386 368 pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6 387 369 388 - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, 389 - vma->vm_end - vma->vm_start, 390 - vma->vm_page_prot)) { 391 - return -EAGAIN; 392 - } 393 - 394 - return 0; 370 + return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len); 395 371 } 396 372 397 373 static struct fb_ops au1100fb_ops =
+1 -22
drivers/video/au1200fb.c
··· 1233 1233 * method mainly to allow the use of the TLB streaming flag (CCA=6) 1234 1234 */ 1235 1235 static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) 1236 - 1237 1236 { 1238 - unsigned int len; 1239 - unsigned long start=0, off; 1240 1237 struct au1200fb_device *fbdev = info->par; 1241 - 1242 - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) { 1243 - return -EINVAL; 1244 - } 1245 - 1246 - start = fbdev->fb_phys & PAGE_MASK; 1247 - len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len); 1248 - 1249 - off = vma->vm_pgoff << PAGE_SHIFT; 1250 - 1251 - if ((vma->vm_end - vma->vm_start + off) > len) { 1252 - return -EINVAL; 1253 - } 1254 - 1255 - off += start; 1256 - vma->vm_pgoff = off >> PAGE_SHIFT; 1257 1238 1258 1239 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1259 1240 pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */ 1260 1241 1261 - return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, 1262 - vma->vm_end - vma->vm_start, 1263 - vma->vm_page_prot); 1242 + return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len); 1264 1243 } 1265 1244 1266 1245 static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
+3 -2
fs/dcache.c
··· 542 542 * If ref is non-zero, then decrement the refcount too. 543 543 * Returns dentry requiring refcount drop, or NULL if we're done. 544 544 */ 545 - static inline struct dentry * 545 + static struct dentry * 546 546 dentry_kill(struct dentry *dentry, int unlock_on_failure) 547 547 __releases(dentry->d_lock) 548 548 { ··· 630 630 goto kill_it; 631 631 } 632 632 633 - dentry->d_flags |= DCACHE_REFERENCED; 633 + if (!(dentry->d_flags & DCACHE_REFERENCED)) 634 + dentry->d_flags |= DCACHE_REFERENCED; 634 635 dentry_lru_add(dentry); 635 636 636 637 dentry->d_lockref.count--;
+1 -3
fs/eventpoll.c
··· 34 34 #include <linux/mutex.h> 35 35 #include <linux/anon_inodes.h> 36 36 #include <linux/device.h> 37 - #include <linux/freezer.h> 38 37 #include <asm/uaccess.h> 39 38 #include <asm/io.h> 40 39 #include <asm/mman.h> ··· 1604 1605 } 1605 1606 1606 1607 spin_unlock_irqrestore(&ep->lock, flags); 1607 - if (!freezable_schedule_hrtimeout_range(to, slack, 1608 - HRTIMER_MODE_ABS)) 1608 + if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) 1609 1609 timed_out = 1; 1610 1610 1611 1611 spin_lock_irqsave(&ep->lock, flags);
+1 -2
fs/select.c
··· 238 238 239 239 set_current_state(state); 240 240 if (!pwq->triggered) 241 - rc = freezable_schedule_hrtimeout_range(expires, slack, 242 - HRTIMER_MODE_ABS); 241 + rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS); 243 242 __set_current_state(TASK_RUNNING); 244 243 245 244 /*
+3 -3
include/linux/ipc_namespace.h
··· 34 34 int sem_ctls[4]; 35 35 int used_sems; 36 36 37 - int msg_ctlmax; 38 - int msg_ctlmnb; 39 - int msg_ctlmni; 37 + unsigned int msg_ctlmax; 38 + unsigned int msg_ctlmnb; 39 + unsigned int msg_ctlmni; 40 40 atomic_t msg_bytes; 41 41 atomic_t msg_hdrs; 42 42 int auto_msgmni;
+4 -4
include/linux/percpu.h
··· 332 332 #endif 333 333 334 334 #ifndef this_cpu_sub 335 - # define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val)) 335 + # define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val)) 336 336 #endif 337 337 338 338 #ifndef this_cpu_inc ··· 418 418 # define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) 419 419 #endif 420 420 421 - #define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val)) 421 + #define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) 422 422 #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) 423 423 #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) 424 424 ··· 586 586 #endif 587 587 588 588 #ifndef __this_cpu_sub 589 - # define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val)) 589 + # define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val)) 590 590 #endif 591 591 592 592 #ifndef __this_cpu_inc ··· 668 668 __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val) 669 669 #endif 670 670 671 - #define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val)) 671 + #define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) 672 672 #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) 673 673 #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) 674 674
+7 -5
include/uapi/linux/perf_event.h
··· 479 479 /* 480 480 * Control data for the mmap() data buffer. 481 481 * 482 - * User-space reading the @data_head value should issue an rmb(), on 483 - * SMP capable platforms, after reading this value -- see 484 - * perf_event_wakeup(). 482 + * User-space reading the @data_head value should issue an smp_rmb(), 483 + * after reading this value. 485 484 * 486 485 * When the mapping is PROT_WRITE the @data_tail value should be 487 - * written by userspace to reflect the last read data. In this case 488 - * the kernel will not over-write unread data. 486 + * written by userspace to reflect the last read data, after issuing 487 + * an smp_mb() to separate the data read from the ->data_tail store. 488 + * In this case the kernel will not over-write unread data. 489 + * 490 + * See perf_output_put_handle() for the data ordering. 489 491 */ 490 492 __u64 data_head; /* head in the data section */ 491 493 __u64 data_tail; /* user-space written tail */
+12 -8
ipc/ipc_sysctl.c
··· 62 62 return err; 63 63 } 64 64 65 - static int proc_ipc_callback_dointvec(ctl_table *table, int write, 65 + static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write, 66 66 void __user *buffer, size_t *lenp, loff_t *ppos) 67 67 { 68 68 struct ctl_table ipc_table; ··· 72 72 memcpy(&ipc_table, table, sizeof(ipc_table)); 73 73 ipc_table.data = get_ipc(table); 74 74 75 - rc = proc_dointvec(&ipc_table, write, buffer, lenp, ppos); 75 + rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); 76 76 77 77 if (write && !rc && lenp_bef == *lenp) 78 78 /* ··· 152 152 #define proc_ipc_dointvec NULL 153 153 #define proc_ipc_dointvec_minmax NULL 154 154 #define proc_ipc_dointvec_minmax_orphans NULL 155 - #define proc_ipc_callback_dointvec NULL 155 + #define proc_ipc_callback_dointvec_minmax NULL 156 156 #define proc_ipcauto_dointvec_minmax NULL 157 157 #endif 158 158 159 159 static int zero; 160 160 static int one = 1; 161 - #ifdef CONFIG_CHECKPOINT_RESTORE 162 161 static int int_max = INT_MAX; 163 - #endif 164 162 165 163 static struct ctl_table ipc_kern_table[] = { 166 164 { ··· 196 198 .data = &init_ipc_ns.msg_ctlmax, 197 199 .maxlen = sizeof (init_ipc_ns.msg_ctlmax), 198 200 .mode = 0644, 199 - .proc_handler = proc_ipc_dointvec, 201 + .proc_handler = proc_ipc_dointvec_minmax, 202 + .extra1 = &zero, 203 + .extra2 = &int_max, 200 204 }, 201 205 { 202 206 .procname = "msgmni", 203 207 .data = &init_ipc_ns.msg_ctlmni, 204 208 .maxlen = sizeof (init_ipc_ns.msg_ctlmni), 205 209 .mode = 0644, 206 - .proc_handler = proc_ipc_callback_dointvec, 210 + .proc_handler = proc_ipc_callback_dointvec_minmax, 211 + .extra1 = &zero, 212 + .extra2 = &int_max, 207 213 }, 208 214 { 209 215 .procname = "msgmnb", 210 216 .data = &init_ipc_ns.msg_ctlmnb, 211 217 .maxlen = sizeof (init_ipc_ns.msg_ctlmnb), 212 218 .mode = 0644, 213 - .proc_handler = proc_ipc_dointvec, 219 + .proc_handler = proc_ipc_dointvec_minmax, 220 + .extra1 = &zero, 221 + .extra2 = &int_max, 214 222 }, 
215 223 { 216 224 .procname = "sem",
+27 -4
kernel/events/ring_buffer.c
··· 87 87 goto out; 88 88 89 89 /* 90 - * Publish the known good head. Rely on the full barrier implied 91 - * by atomic_dec_and_test() order the rb->head read and this 92 - * write. 90 + * Since the mmap() consumer (userspace) can run on a different CPU: 91 + * 92 + * kernel user 93 + * 94 + * READ ->data_tail READ ->data_head 95 + * smp_mb() (A) smp_rmb() (C) 96 + * WRITE $data READ $data 97 + * smp_wmb() (B) smp_mb() (D) 98 + * STORE ->data_head WRITE ->data_tail 99 + * 100 + * Where A pairs with D, and B pairs with C. 101 + * 102 + * I don't think A needs to be a full barrier because we won't in fact 103 + * write data until we see the store from userspace. So we simply don't 104 + * issue the data WRITE until we observe it. Be conservative for now. 105 + * 106 + * OTOH, D needs to be a full barrier since it separates the data READ 107 + * from the tail WRITE. 108 + * 109 + * For B a WMB is sufficient since it separates two WRITEs, and for C 110 + * an RMB is sufficient since it separates two READs. 111 + * 112 + * See perf_output_begin(). 93 113 */ 114 + smp_wmb(); 94 115 rb->user_page->data_head = head; 95 116 96 117 /* ··· 175 154 * Userspace could choose to issue a mb() before updating the 176 155 * tail pointer. So that all reads will be completed before the 177 156 * write is issued. 157 + * 158 + * See perf_output_put_handle(). 178 159 */ 179 160 tail = ACCESS_ONCE(rb->user_page->data_tail); 180 - smp_rmb(); 161 + smp_mb(); 181 162 offset = head = local_read(&rb->head); 182 163 head += size; 183 164 if (unlikely(!perf_output_space(rb, tail, offset, head)))
+1 -1
lib/Kconfig.debug
··· 983 983 984 984 config DEBUG_KOBJECT_RELEASE 985 985 bool "kobject release debugging" 986 - depends on DEBUG_KERNEL 986 + depends on DEBUG_OBJECTS_TIMERS 987 987 help 988 988 kobjects are reference counted objects. This means that their 989 989 last reference count put is not predictable, and the kobject can
+2 -1
lib/scatterlist.c
··· 577 577 miter->__offset += miter->consumed; 578 578 miter->__remaining -= miter->consumed; 579 579 580 - if (miter->__flags & SG_MITER_TO_SG) 580 + if ((miter->__flags & SG_MITER_TO_SG) && 581 + !PageSlab(miter->page)) 581 582 flush_kernel_dcache_page(miter->page); 582 583 583 584 if (miter->__flags & SG_MITER_ATOMIC) {
+48 -22
mm/huge_memory.c
··· 1278 1278 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, 1279 1279 unsigned long addr, pmd_t pmd, pmd_t *pmdp) 1280 1280 { 1281 + struct anon_vma *anon_vma = NULL; 1281 1282 struct page *page; 1282 1283 unsigned long haddr = addr & HPAGE_PMD_MASK; 1284 + int page_nid = -1, this_nid = numa_node_id(); 1283 1285 int target_nid; 1284 - int current_nid = -1; 1285 - bool migrated; 1286 + bool page_locked; 1287 + bool migrated = false; 1286 1288 1287 1289 spin_lock(&mm->page_table_lock); 1288 1290 if (unlikely(!pmd_same(pmd, *pmdp))) 1289 1291 goto out_unlock; 1290 1292 1291 1293 page = pmd_page(pmd); 1292 - get_page(page); 1293 - current_nid = page_to_nid(page); 1294 + page_nid = page_to_nid(page); 1294 1295 count_vm_numa_event(NUMA_HINT_FAULTS); 1295 - if (current_nid == numa_node_id()) 1296 + if (page_nid == this_nid) 1296 1297 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 1297 1298 1299 + /* 1300 + * Acquire the page lock to serialise THP migrations but avoid dropping 1301 + * page_table_lock if at all possible 1302 + */ 1303 + page_locked = trylock_page(page); 1298 1304 target_nid = mpol_misplaced(page, vma, haddr); 1299 1305 if (target_nid == -1) { 1300 - put_page(page); 1301 - goto clear_pmdnuma; 1306 + /* If the page was locked, there are no parallel migrations */ 1307 + if (page_locked) 1308 + goto clear_pmdnuma; 1309 + 1310 + /* 1311 + * Otherwise wait for potential migrations and retry. We do 1312 + * relock and check_same as the page may no longer be mapped. 1313 + * As the fault is being retried, do not account for it. 
1314 + */ 1315 + spin_unlock(&mm->page_table_lock); 1316 + wait_on_page_locked(page); 1317 + page_nid = -1; 1318 + goto out; 1302 1319 } 1303 1320 1304 - /* Acquire the page lock to serialise THP migrations */ 1321 + /* Page is misplaced, serialise migrations and parallel THP splits */ 1322 + get_page(page); 1305 1323 spin_unlock(&mm->page_table_lock); 1306 - lock_page(page); 1324 + if (!page_locked) 1325 + lock_page(page); 1326 + anon_vma = page_lock_anon_vma_read(page); 1307 1327 1308 1328 /* Confirm the PTE did not while locked */ 1309 1329 spin_lock(&mm->page_table_lock); 1310 1330 if (unlikely(!pmd_same(pmd, *pmdp))) { 1311 1331 unlock_page(page); 1312 1332 put_page(page); 1333 + page_nid = -1; 1313 1334 goto out_unlock; 1314 1335 } 1315 - spin_unlock(&mm->page_table_lock); 1316 1336 1317 - /* Migrate the THP to the requested node */ 1337 + /* 1338 + * Migrate the THP to the requested node, returns with page unlocked 1339 + * and pmd_numa cleared. 1340 + */ 1341 + spin_unlock(&mm->page_table_lock); 1318 1342 migrated = migrate_misplaced_transhuge_page(mm, vma, 1319 1343 pmdp, pmd, addr, page, target_nid); 1320 - if (!migrated) 1321 - goto check_same; 1344 + if (migrated) 1345 + page_nid = target_nid; 1322 1346 1323 - task_numa_fault(target_nid, HPAGE_PMD_NR, true); 1324 - return 0; 1325 - 1326 - check_same: 1327 - spin_lock(&mm->page_table_lock); 1328 - if (unlikely(!pmd_same(pmd, *pmdp))) 1329 - goto out_unlock; 1347 + goto out; 1330 1348 clear_pmdnuma: 1349 + BUG_ON(!PageLocked(page)); 1331 1350 pmd = pmd_mknonnuma(pmd); 1332 1351 set_pmd_at(mm, haddr, pmdp, pmd); 1333 1352 VM_BUG_ON(pmd_numa(*pmdp)); 1334 1353 update_mmu_cache_pmd(vma, addr, pmdp); 1354 + unlock_page(page); 1335 1355 out_unlock: 1336 1356 spin_unlock(&mm->page_table_lock); 1337 - if (current_nid != -1) 1338 - task_numa_fault(current_nid, HPAGE_PMD_NR, false); 1357 + 1358 + out: 1359 + if (anon_vma) 1360 + page_unlock_anon_vma_read(anon_vma); 1361 + 1362 + if (page_nid != -1) 1363 + 
task_numa_fault(page_nid, HPAGE_PMD_NR, migrated); 1364 + 1339 1365 return 0; 1340 1366 } 1341 1367
+2 -1
mm/list_lru.c
··· 81 81 * decrement nr_to_walk first so that we don't livelock if we 82 82 * get stuck on large numbers of LRU_RETRY items 83 83 */ 84 - if (--(*nr_to_walk) == 0) 84 + if (!*nr_to_walk) 85 85 break; 86 + --*nr_to_walk; 86 87 87 88 ret = isolate(item, &nlru->lock, cb_arg); 88 89 switch (ret) {
+26 -31
mm/memcontrol.c
··· 54 54 #include <linux/page_cgroup.h> 55 55 #include <linux/cpu.h> 56 56 #include <linux/oom.h> 57 + #include <linux/lockdep.h> 57 58 #include "internal.h" 58 59 #include <net/sock.h> 59 60 #include <net/ip.h> ··· 2047 2046 return total; 2048 2047 } 2049 2048 2049 + #ifdef CONFIG_LOCKDEP 2050 + static struct lockdep_map memcg_oom_lock_dep_map = { 2051 + .name = "memcg_oom_lock", 2052 + }; 2053 + #endif 2054 + 2050 2055 static DEFINE_SPINLOCK(memcg_oom_lock); 2051 2056 2052 2057 /* ··· 2090 2083 } 2091 2084 iter->oom_lock = false; 2092 2085 } 2093 - } 2086 + } else 2087 + mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 2094 2088 2095 2089 spin_unlock(&memcg_oom_lock); 2096 2090 ··· 2103 2095 struct mem_cgroup *iter; 2104 2096 2105 2097 spin_lock(&memcg_oom_lock); 2098 + mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_); 2106 2099 for_each_mem_cgroup_tree(iter, memcg) 2107 2100 iter->oom_lock = false; 2108 2101 spin_unlock(&memcg_oom_lock); ··· 2774 2765 *ptr = memcg; 2775 2766 return 0; 2776 2767 nomem: 2777 - *ptr = NULL; 2778 - if (gfp_mask & __GFP_NOFAIL) 2779 - return 0; 2780 - return -ENOMEM; 2768 + if (!(gfp_mask & __GFP_NOFAIL)) { 2769 + *ptr = NULL; 2770 + return -ENOMEM; 2771 + } 2781 2772 bypass: 2782 2773 *ptr = root_mem_cgroup; 2783 2774 return -EINTR; ··· 3782 3773 { 3783 3774 /* Update stat data for mem_cgroup */ 3784 3775 preempt_disable(); 3785 - WARN_ON_ONCE(from->stat->count[idx] < nr_pages); 3786 - __this_cpu_add(from->stat->count[idx], -nr_pages); 3776 + __this_cpu_sub(from->stat->count[idx], nr_pages); 3787 3777 __this_cpu_add(to->stat->count[idx], nr_pages); 3788 3778 preempt_enable(); 3789 3779 } ··· 4958 4950 } while (usage > 0); 4959 4951 } 4960 4952 4961 - /* 4962 - * This mainly exists for tests during the setting of set of use_hierarchy. 
4963 - * Since this is the very setting we are changing, the current hierarchy value 4964 - * is meaningless 4965 - */ 4966 - static inline bool __memcg_has_children(struct mem_cgroup *memcg) 4967 - { 4968 - struct cgroup_subsys_state *pos; 4969 - 4970 - /* bounce at first found */ 4971 - css_for_each_child(pos, &memcg->css) 4972 - return true; 4973 - return false; 4974 - } 4975 - 4976 - /* 4977 - * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed 4978 - * to be already dead (as in mem_cgroup_force_empty, for instance). This is 4979 - * from mem_cgroup_count_children(), in the sense that we don't really care how 4980 - * many children we have; we only need to know if we have any. It also counts 4981 - * any memcg without hierarchy as infertile. 4982 - */ 4983 4953 static inline bool memcg_has_children(struct mem_cgroup *memcg) 4984 4954 { 4985 - return memcg->use_hierarchy && __memcg_has_children(memcg); 4955 + lockdep_assert_held(&memcg_create_mutex); 4956 + /* 4957 + * The lock does not prevent addition or deletion to the list 4958 + * of children, but it prevents a new child from being 4959 + * initialized based on this parent in css_online(), so it's 4960 + * enough to decide whether hierarchically inherited 4961 + * attributes can still be changed or not. 4962 + */ 4963 + return memcg->use_hierarchy && 4964 + !list_empty(&memcg->css.cgroup->children); 4986 4965 } 4987 4966 4988 4967 /* ··· 5049 5054 */ 5050 5055 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 5051 5056 (val == 1 || val == 0)) { 5052 - if (!__memcg_has_children(memcg)) 5057 + if (list_empty(&memcg->css.cgroup->children)) 5053 5058 memcg->use_hierarchy = val; 5054 5059 else 5055 5060 retval = -EBUSY;
+21 -32
mm/memory.c
··· 3521 3521 } 3522 3522 3523 3523 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, 3524 - unsigned long addr, int current_nid) 3524 + unsigned long addr, int page_nid) 3525 3525 { 3526 3526 get_page(page); 3527 3527 3528 3528 count_vm_numa_event(NUMA_HINT_FAULTS); 3529 - if (current_nid == numa_node_id()) 3529 + if (page_nid == numa_node_id()) 3530 3530 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 3531 3531 3532 3532 return mpol_misplaced(page, vma, addr); ··· 3537 3537 { 3538 3538 struct page *page = NULL; 3539 3539 spinlock_t *ptl; 3540 - int current_nid = -1; 3540 + int page_nid = -1; 3541 3541 int target_nid; 3542 3542 bool migrated = false; 3543 3543 ··· 3567 3567 return 0; 3568 3568 } 3569 3569 3570 - current_nid = page_to_nid(page); 3571 - target_nid = numa_migrate_prep(page, vma, addr, current_nid); 3570 + page_nid = page_to_nid(page); 3571 + target_nid = numa_migrate_prep(page, vma, addr, page_nid); 3572 3572 pte_unmap_unlock(ptep, ptl); 3573 3573 if (target_nid == -1) { 3574 - /* 3575 - * Account for the fault against the current node if it not 3576 - * being replaced regardless of where the page is located. 
3577 - */ 3578 - current_nid = numa_node_id(); 3579 3574 put_page(page); 3580 3575 goto out; 3581 3576 } ··· 3578 3583 /* Migrate to the requested node */ 3579 3584 migrated = migrate_misplaced_page(page, target_nid); 3580 3585 if (migrated) 3581 - current_nid = target_nid; 3586 + page_nid = target_nid; 3582 3587 3583 3588 out: 3584 - if (current_nid != -1) 3585 - task_numa_fault(current_nid, 1, migrated); 3589 + if (page_nid != -1) 3590 + task_numa_fault(page_nid, 1, migrated); 3586 3591 return 0; 3587 3592 } 3588 3593 ··· 3597 3602 unsigned long offset; 3598 3603 spinlock_t *ptl; 3599 3604 bool numa = false; 3600 - int local_nid = numa_node_id(); 3601 3605 3602 3606 spin_lock(&mm->page_table_lock); 3603 3607 pmd = *pmdp; ··· 3619 3625 for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) { 3620 3626 pte_t pteval = *pte; 3621 3627 struct page *page; 3622 - int curr_nid = local_nid; 3628 + int page_nid = -1; 3623 3629 int target_nid; 3624 - bool migrated; 3630 + bool migrated = false; 3631 + 3625 3632 if (!pte_present(pteval)) 3626 3633 continue; 3627 3634 if (!pte_numa(pteval)) ··· 3644 3649 if (unlikely(page_mapcount(page) != 1)) 3645 3650 continue; 3646 3651 3647 - /* 3648 - * Note that the NUMA fault is later accounted to either 3649 - * the node that is currently running or where the page is 3650 - * migrated to. 
3651 - */ 3652 - curr_nid = local_nid; 3653 - target_nid = numa_migrate_prep(page, vma, addr, 3654 - page_to_nid(page)); 3655 - if (target_nid == -1) { 3652 + page_nid = page_to_nid(page); 3653 + target_nid = numa_migrate_prep(page, vma, addr, page_nid); 3654 + pte_unmap_unlock(pte, ptl); 3655 + if (target_nid != -1) { 3656 + migrated = migrate_misplaced_page(page, target_nid); 3657 + if (migrated) 3658 + page_nid = target_nid; 3659 + } else { 3656 3660 put_page(page); 3657 - continue; 3658 3661 } 3659 3662 3660 - /* Migrate to the requested node */ 3661 - pte_unmap_unlock(pte, ptl); 3662 - migrated = migrate_misplaced_page(page, target_nid); 3663 - if (migrated) 3664 - curr_nid = target_nid; 3665 - task_numa_fault(curr_nid, 1, migrated); 3663 + if (page_nid != -1) 3664 + task_numa_fault(page_nid, 1, migrated); 3666 3665 3667 3666 pte = pte_offset_map_lock(mm, pmdp, addr, &ptl); 3668 3667 }
+11 -8
mm/migrate.c
··· 1715 1715 unlock_page(new_page); 1716 1716 put_page(new_page); /* Free it */ 1717 1717 1718 - unlock_page(page); 1718 + /* Retake the callers reference and putback on LRU */ 1719 + get_page(page); 1719 1720 putback_lru_page(page); 1720 - 1721 - count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); 1722 - isolated = 0; 1723 - goto out; 1721 + mod_zone_page_state(page_zone(page), 1722 + NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); 1723 + goto out_fail; 1724 1724 } 1725 1725 1726 1726 /* ··· 1737 1737 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1738 1738 entry = pmd_mkhuge(entry); 1739 1739 1740 - page_add_new_anon_rmap(new_page, vma, haddr); 1741 - 1740 + pmdp_clear_flush(vma, haddr, pmd); 1742 1741 set_pmd_at(mm, haddr, pmd, entry); 1742 + page_add_new_anon_rmap(new_page, vma, haddr); 1743 1743 update_mmu_cache_pmd(vma, address, &entry); 1744 1744 page_remove_rmap(page); 1745 1745 /* ··· 1758 1758 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR); 1759 1759 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR); 1760 1760 1761 - out: 1762 1761 mod_zone_page_state(page_zone(page), 1763 1762 NR_ISOLATED_ANON + page_lru, 1764 1763 -HPAGE_PMD_NR); ··· 1766 1767 out_fail: 1767 1768 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); 1768 1769 out_dropref: 1770 + entry = pmd_mknonnuma(entry); 1771 + set_pmd_at(mm, haddr, pmd, entry); 1772 + update_mmu_cache_pmd(vma, address, &entry); 1773 + 1769 1774 unlock_page(page); 1770 1775 put_page(page); 1771 1776 return 0;
+1 -1
mm/mprotect.c
··· 148 148 split_huge_page_pmd(vma, addr, pmd); 149 149 else if (change_huge_pmd(vma, pmd, addr, newprot, 150 150 prot_numa)) { 151 - pages += HPAGE_PMD_NR; 151 + pages++; 152 152 continue; 153 153 } 154 154 /* fall through */
+1 -1
mm/pagewalk.c
··· 242 242 if (err) 243 243 break; 244 244 pgd++; 245 - } while (addr = next, addr != end); 245 + } while (addr = next, addr < end); 246 246 247 247 return err; 248 248 }
+11 -1
scripts/kallsyms.c
··· 55 55 static unsigned int table_size, table_cnt; 56 56 static int all_symbols = 0; 57 57 static char symbol_prefix_char = '\0'; 58 + static unsigned long long kernel_start_addr = 0; 58 59 59 60 int token_profit[0x10000]; 60 61 ··· 66 65 67 66 static void usage(void) 68 67 { 69 - fprintf(stderr, "Usage: kallsyms [--all-symbols] [--symbol-prefix=<prefix char>] < in.map > out.S\n"); 68 + fprintf(stderr, "Usage: kallsyms [--all-symbols] " 69 + "[--symbol-prefix=<prefix char>] " 70 + "[--page-offset=<CONFIG_PAGE_OFFSET>] " 71 + "< in.map > out.S\n"); 70 72 exit(1); 71 73 } 72 74 ··· 197 193 NULL }; 198 194 int i; 199 195 int offset = 1; 196 + 197 + if (s->addr < kernel_start_addr) 198 + return 0; 200 199 201 200 /* skip prefix char */ 202 201 if (symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char) ··· 653 646 if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\'')) 654 647 p++; 655 648 symbol_prefix_char = *p; 649 + } else if (strncmp(argv[i], "--page-offset=", 14) == 0) { 650 + const char *p = &argv[i][14]; 651 + kernel_start_addr = strtoull(p, NULL, 16); 656 652 } else 657 653 usage(); 658 654 }
+4
sound/core/pcm.c
··· 49 49 struct snd_pcm *pcm; 50 50 51 51 list_for_each_entry(pcm, &snd_pcm_devices, list) { 52 + if (pcm->internal) 53 + continue; 52 54 if (pcm->card == card && pcm->device == device) 53 55 return pcm; 54 56 } ··· 62 60 struct snd_pcm *pcm; 63 61 64 62 list_for_each_entry(pcm, &snd_pcm_devices, list) { 63 + if (pcm->internal) 64 + continue; 65 65 if (pcm->card == card && pcm->device > device) 66 66 return pcm->device; 67 67 else if (pcm->card->number > card->number)
+2 -2
sound/pci/hda/hda_codec.c
··· 4864 4864 spin_unlock(&codec->power_lock); 4865 4865 4866 4866 state = hda_call_codec_suspend(codec, true); 4867 - codec->pm_down_notified = 0; 4868 - if (!bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) { 4867 + if (!codec->pm_down_notified && 4868 + !bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) { 4869 4869 codec->pm_down_notified = 1; 4870 4870 hda_call_pm_notify(bus, false); 4871 4871 }
+3 -1
sound/pci/hda/hda_generic.c
··· 4475 4475 true, &spec->vmaster_mute.sw_kctl); 4476 4476 if (err < 0) 4477 4477 return err; 4478 - if (spec->vmaster_mute.hook) 4478 + if (spec->vmaster_mute.hook) { 4479 4479 snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute, 4480 4480 spec->vmaster_mute_enum); 4481 + snd_hda_sync_vmaster_hook(&spec->vmaster_mute); 4482 + } 4481 4483 } 4482 4484 4483 4485 free_kctls(spec); /* no longer needed */
+17 -1
sound/pci/hda/patch_analog.c
··· 968 968 } 969 969 } 970 970 971 + static void ad1884_fixup_thinkpad(struct hda_codec *codec, 972 + const struct hda_fixup *fix, int action) 973 + { 974 + struct ad198x_spec *spec = codec->spec; 975 + 976 + if (action == HDA_FIXUP_ACT_PRE_PROBE) 977 + spec->gen.keep_eapd_on = 1; 978 + } 979 + 971 980 /* set magic COEFs for dmic */ 972 981 static const struct hda_verb ad1884_dmic_init_verbs[] = { 973 982 {0x01, AC_VERB_SET_COEF_INDEX, 0x13f7}, ··· 988 979 AD1884_FIXUP_AMP_OVERRIDE, 989 980 AD1884_FIXUP_HP_EAPD, 990 981 AD1884_FIXUP_DMIC_COEF, 982 + AD1884_FIXUP_THINKPAD, 991 983 AD1884_FIXUP_HP_TOUCHSMART, 992 984 }; 993 985 ··· 1007 997 .type = HDA_FIXUP_VERBS, 1008 998 .v.verbs = ad1884_dmic_init_verbs, 1009 999 }, 1000 + [AD1884_FIXUP_THINKPAD] = { 1001 + .type = HDA_FIXUP_FUNC, 1002 + .v.func = ad1884_fixup_thinkpad, 1003 + .chained = true, 1004 + .chain_id = AD1884_FIXUP_DMIC_COEF, 1005 + }, 1010 1006 [AD1884_FIXUP_HP_TOUCHSMART] = { 1011 1007 .type = HDA_FIXUP_VERBS, 1012 1008 .v.verbs = ad1884_dmic_init_verbs, ··· 1024 1008 static const struct snd_pci_quirk ad1884_fixup_tbl[] = { 1025 1009 SND_PCI_QUIRK(0x103c, 0x2a82, "HP Touchsmart", AD1884_FIXUP_HP_TOUCHSMART), 1026 1010 SND_PCI_QUIRK_VENDOR(0x103c, "HP", AD1884_FIXUP_HP_EAPD), 1027 - SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_DMIC_COEF), 1011 + SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_THINKPAD), 1028 1012 {} 1029 1013 }; 1030 1014
+1
sound/pci/hda/patch_realtek.c
··· 4623 4623 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 4624 4624 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), 4625 4625 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4), 4626 + SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4), 4626 4627 SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT), 4627 4628 SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2), 4628 4629 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+1
sound/soc/codecs/wm_hubs.c
··· 530 530 hubs->hp_startup_mode); 531 531 break; 532 532 } 533 + break; 533 534 534 535 case SND_SOC_DAPM_PRE_PMD: 535 536 snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
+3 -1
sound/soc/soc-dapm.c
··· 1949 1949 w->active ? "active" : "inactive"); 1950 1950 1951 1951 list_for_each_entry(p, &w->sources, list_sink) { 1952 - if (p->connected && !p->connected(w, p->sink)) 1952 + if (p->connected && !p->connected(w, p->source)) 1953 1953 continue; 1954 1954 1955 1955 if (p->connect) ··· 3495 3495 if (!w) { 3496 3496 dev_err(dapm->dev, "ASoC: Failed to create %s widget\n", 3497 3497 dai->driver->playback.stream_name); 3498 + return -ENOMEM; 3498 3499 } 3499 3500 3500 3501 w->priv = dai; ··· 3514 3513 if (!w) { 3515 3514 dev_err(dapm->dev, "ASoC: Failed to create %s widget\n", 3516 3515 dai->driver->capture.stream_name); 3516 + return -ENOMEM; 3517 3517 } 3518 3518 3519 3519 w->priv = dai;
+3 -3
tools/perf/tests/attr/README
··· 44 44 perf record -c 123 kill (test-record-count) 45 45 perf record -d kill (test-record-data) 46 46 perf record -F 100 kill (test-record-freq) 47 - perf record -g -- kill (test-record-graph-default) 48 - perf record -g dwarf -- kill (test-record-graph-dwarf) 49 - perf record -g fp kill (test-record-graph-fp) 47 + perf record -g kill (test-record-graph-default) 48 + perf record --call-graph dwarf kill (test-record-graph-dwarf) 49 + perf record --call-graph fp kill (test-record-graph-fp) 50 50 perf record --group -e cycles,instructions kill (test-record-group) 51 51 perf record -e '{cycles,instructions}' kill (test-record-group1) 52 52 perf record -D kill (test-record-no-delay)
+1 -1
tools/perf/tests/attr/test-record-graph-default
··· 1 1 [config] 2 2 command = record 3 - args = -g -- kill >/dev/null 2>&1 3 + args = -g kill >/dev/null 2>&1 4 4 5 5 [event:base-record] 6 6 sample_type=295
+1 -1
tools/perf/tests/attr/test-record-graph-dwarf
··· 1 1 [config] 2 2 command = record 3 - args = -g dwarf -- kill >/dev/null 2>&1 3 + args = --call-graph dwarf -- kill >/dev/null 2>&1 4 4 5 5 [event:base-record] 6 6 sample_type=12583
+1 -1
tools/perf/tests/attr/test-record-graph-fp
··· 1 1 [config] 2 2 command = record 3 - args = -g fp kill >/dev/null 2>&1 3 + args = --call-graph fp kill >/dev/null 2>&1 4 4 5 5 [event:base-record] 6 6 sample_type=295
+1 -1
tools/perf/ui/hist.c
··· 117 117 struct perf_hpp *hpp, struct hist_entry *he) \ 118 118 { \ 119 119 return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \ 120 - (hpp_snprint_fn)percent_color_snprintf, true); \ 120 + percent_color_snprintf, true); \ 121 121 } 122 122 123 123 #define __HPP_ENTRY_PERCENT_FN(_type, _field) \
+9 -2
tools/perf/util/color.c
··· 318 318 return r; 319 319 } 320 320 321 - int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent) 321 + int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...) 322 322 { 323 - const char *color = get_percent_color(percent); 323 + va_list args; 324 + double percent; 325 + const char *color; 326 + 327 + va_start(args, fmt); 328 + percent = va_arg(args, double); 329 + va_end(args); 330 + color = get_percent_color(percent); 324 331 return color_snprintf(bf, size, color, fmt, percent); 325 332 }
+1 -1
tools/perf/util/color.h
··· 39 39 int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...); 40 40 int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...); 41 41 int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf); 42 - int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent); 42 + int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...); 43 43 int percent_color_fprintf(FILE *fp, const char *fmt, double percent); 44 44 const char *get_percent_color(double percent); 45 45
+1 -1
virt/kvm/kvm_main.c
··· 3091 3091 3092 3092 static int kvm_init_debug(void) 3093 3093 { 3094 - int r = -EFAULT; 3094 + int r = -EEXIST; 3095 3095 struct kvm_stats_debugfs_item *p; 3096 3096 3097 3097 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);