···1313- touchscreen-size-y : See touchscreen.txt14141515Optional properties:1616+- firmware-name : File basename (string) for board specific firmware1617- touchscreen-inverted-x : See touchscreen.txt1718- touchscreen-inverted-y : See touchscreen.txt1819- touchscreen-swapped-x-y : See touchscreen.txt
+2-2
MAINTAINERS
···87538753F: include/linux/oprofile.h8754875487558755ORACLE CLUSTER FILESYSTEM 2 (OCFS2)87568756-M: Mark Fasheh <mfasheh@suse.com>87568756+M: Mark Fasheh <mfasheh@versity.com>87578757M: Joel Becker <jlbec@evilplan.org>87588758L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers)87598759W: http://ocfs2.wiki.kernel.org···1164111641THERMAL/CPU_COOLING1164211642M: Amit Daniel Kachhap <amit.kachhap@gmail.com>1164311643M: Viresh Kumar <viresh.kumar@linaro.org>1164411644-M: Javi Merino <javi.merino@arm.com>1164411644+M: Javi Merino <javi.merino@kernel.org>1164511645L: linux-pm@vger.kernel.org1164611646S: Supported1164711647F: Documentation/thermal/cpu-cooling-api.txt
···111111/* The ARM override for dma_max_pfn() */112112static inline unsigned long dma_max_pfn(struct device *dev)113113{114114- return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);114114+ return dma_to_pfn(dev, *dev->dma_mask);115115}116116#define dma_max_pfn(dev) dma_max_pfn(dev)117117
+11-3
arch/arm/kernel/devtree.c
···8888 return;89899090 for_each_child_of_node(cpus, cpu) {9191+ const __be32 *cell;9292+ int prop_bytes;9193 u32 hwid;92949395 if (of_node_cmp(cpu->type, "cpu"))···10199 * properties is considered invalid to build the102100 * cpu_logical_map.103101 */104104- if (of_property_read_u32(cpu, "reg", &hwid)) {102102+ cell = of_get_property(cpu, "reg", &prop_bytes);103103+ if (!cell || prop_bytes < sizeof(*cell)) {105104 pr_debug(" * %s missing reg property\n",106105 cpu->full_name);107106 of_node_put(cpu);···110107 }111108112109 /*113113- * 8 MSBs must be set to 0 in the DT since the reg property110110+ * Bits n:24 must be set to 0 in the DT since the reg property114111 * defines the MPIDR[23:0].115112 */116116- if (hwid & ~MPIDR_HWID_BITMASK) {113113+ do {114114+ hwid = be32_to_cpu(*cell++);115115+ prop_bytes -= sizeof(*cell);116116+ } while (!hwid && prop_bytes > 0);117117+118118+ if (prop_bytes || (hwid & ~MPIDR_HWID_BITMASK)) {117119 of_node_put(cpu);118120 return;119121 }
···1919 * along with this program. If not, see <http://www.gnu.org/licenses/>.2020 */21212222+#include <linux/bug.h>2223#include <linux/irq.h>2324#include <linux/kdebug.h>2425#include <linux/kgdb.h>2526#include <linux/kprobes.h>2727+#include <asm/debug-monitors.h>2828+#include <asm/insn.h>2629#include <asm/traps.h>27302831struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {···341338 unregister_die_notifier(&kgdb_notifier);342339}343340344344-/*345345- * ARM instructions are always in LE.346346- * Break instruction is encoded in LE format347347- */348348-struct kgdb_arch arch_kgdb_ops = {349349- .gdb_bpt_instr = {350350- KGDB_DYN_BRK_INS_BYTE(0),351351- KGDB_DYN_BRK_INS_BYTE(1),352352- KGDB_DYN_BRK_INS_BYTE(2),353353- KGDB_DYN_BRK_INS_BYTE(3),354354- }355355-};341341+struct kgdb_arch arch_kgdb_ops;342342+343343+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)344344+{345345+ int err;346346+347347+ BUILD_BUG_ON(AARCH64_INSN_SIZE != BREAK_INSTR_SIZE);348348+349349+ err = aarch64_insn_read((void *)bpt->bpt_addr, (u32 *)bpt->saved_instr);350350+ if (err)351351+ return err;352352+353353+ return aarch64_insn_write((void *)bpt->bpt_addr,354354+ (u32)AARCH64_BREAK_KGDB_DYN_DBG);355355+}356356+357357+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)358358+{359359+ return aarch64_insn_write((void *)bpt->bpt_addr,360360+ *(u32 *)bpt->saved_instr);361361+}
+6-8
arch/arm64/kernel/smp.c
···201201 return ret;202202}203203204204-static void smp_store_cpu_info(unsigned int cpuid)205205-{206206- store_cpu_topology(cpuid);207207- numa_store_cpu_info(cpuid);208208-}209209-210204/*211205 * This is the secondary CPU boot entry. We're using this CPUs212206 * idle thread stack, but a set of temporary page tables.···248254 */249255 notify_cpu_starting(cpu);250256251251- smp_store_cpu_info(cpu);257257+ store_cpu_topology(cpu);252258253259 /*254260 * OK, now it's safe to let the boot CPU continue. Wait for···683689{684690 int err;685691 unsigned int cpu;692692+ unsigned int this_cpu;686693687694 init_cpu_topology();688695689689- smp_store_cpu_info(smp_processor_id());696696+ this_cpu = smp_processor_id();697697+ store_cpu_topology(this_cpu);698698+ numa_store_cpu_info(this_cpu);690699691700 /*692701 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set···716719 continue;717720718721 set_cpu_present(cpu, true);722722+ numa_store_cpu_info(cpu);719723 }720724}721725
···113113 help114114 Add several files to the debugfs to test spinlock speed.115115116116-if CPU_MIPSR6117117-118118-choice119119- prompt "Compact branch policy"120120- default MIPS_COMPACT_BRANCHES_OPTIMAL121121-122122-config MIPS_COMPACT_BRANCHES_NEVER123123- bool "Never (force delay slot branches)"124124- help125125- Pass the -mcompact-branches=never flag to the compiler in order to126126- force it to always emit branches with delay slots, and make no use127127- of the compact branch instructions introduced by MIPSr6. This is128128- useful if you suspect there may be an issue with compact branches in129129- either the compiler or the CPU.130130-131131-config MIPS_COMPACT_BRANCHES_OPTIMAL132132- bool "Optimal (use where beneficial)"133133- help134134- Pass the -mcompact-branches=optimal flag to the compiler in order for135135- it to make use of compact branch instructions where it deems them136136- beneficial, and use branches with delay slots elsewhere. This is the137137- default compiler behaviour, and should be used unless you have a138138- reason to choose otherwise.139139-140140-config MIPS_COMPACT_BRANCHES_ALWAYS141141- bool "Always (force compact branches)"142142- help143143- Pass the -mcompact-branches=always flag to the compiler in order to144144- force it to always emit compact branches, making no use of branch145145- instructions with delay slots. This can result in more compact code146146- which may be beneficial in some scenarios.147147-148148-endchoice149149-150150-endif # CPU_MIPSR6151151-152116config SCACHE_DEBUGFS153117 bool "L2 cache debugfs entries"154118 depends on DEBUG_FS
···9696 struct clk *clk;97979898 clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);9999- if (!clk)9999+ if (IS_ERR(clk))100100 panic("failed to allocate %s clock structure", name);101101102102 return clk;
+6
arch/mips/cavium-octeon/octeon-irq.c
···16191619 return -ENOMEM;16201620 }1621162116221622+ /*16231623+ * Clear the OF_POPULATED flag that was set by of_irq_init()16241624+ * so that all GPIO devices will be probed.16251625+ */16261626+ of_node_clear_flag(gpio_node, OF_POPULATED);16271627+16221628 return 0;16231629}16241630/*
···458458static inline unsigned int mips_cm_max_vp_width(void)459459{460460 extern int smp_num_siblings;461461+ uint32_t cfg;461462462463 if (mips_cm_revision() >= CM_REV_CM3)463464 return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;465465+466466+ if (mips_cm_present()) {467467+ /*468468+ * We presume that all cores in the system will have the same469469+ * number of VP(E)s, and if that ever changes then this will470470+ * need revisiting.471471+ */472472+ cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;473473+ return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;474474+ }464475465476 if (IS_ENABLED(CONFIG_SMP))466477 return smp_num_siblings;
···11641164 regs->regs[31] = r31;11651165 regs->cp0_epc = epc;11661166 if (!used_math()) { /* First time FPU user. */11671167+ preempt_disable();11671168 err = init_fpu();11691169+ preempt_enable();11681170 set_used_math();11691171 }11701172 lose_fpu(1); /* Save FPU state for the emulator. */
+4-4
arch/mips/kernel/process.c
···605605 return -EOPNOTSUPP;606606607607 /* Avoid inadvertently triggering emulation */608608- if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&609609- !(current_cpu_data.fpu_id & MIPS_FPIR_F64))608608+ if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&609609+ !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))610610 return -EOPNOTSUPP;611611- if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)611611+ if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)612612 return -EOPNOTSUPP;613613614614 /* FR = 0 not supported in MIPS R6 */615615- if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)615615+ if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)616616 return -EOPNOTSUPP;617617618618 /* Proceed with the mode switch */
+8-1
arch/mips/kernel/setup.c
···8787 int x = boot_mem_map.nr_map;8888 int i;89899090+ /*9191+ * If the region reaches the top of the physical address space, adjust9292+ * the size slightly so that (start + size) doesn't overflow9393+ */9494+ if (start + size - 1 == (phys_addr_t)ULLONG_MAX)9595+ --size;9696+9097 /* Sanity check */9198 if (start + size < start) {9299 pr_warn("Trying to add an invalid memory region, skipped\n");···764757 device_tree_init();765758 sparse_init();766759 plat_swiotlb_setup();767767- paging_init();768760769761 dma_contiguous_reserve(PFN_PHYS(max_low_pfn));770762 /* Tell bootmem about cma reserved memblock section */···876870 prefill_possible_map();877871878872 cpu_cache_init();873873+ paging_init();879874}880875881876unsigned long kernelsp[NR_CPUS];
+1-1
arch/mips/kernel/smp-cps.c
···513513 * in which case the CPC will refuse to power down the core.514514 */515515 do {516516- mips_cm_lock_other(core, vpe_id);516516+ mips_cm_lock_other(core, 0);517517 mips_cpc_lock_other(core);518518 stat = read_cpc_co_stat_conf();519519 stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
+3-4
arch/mips/kernel/smp.c
···322322 cpumask_set_cpu(cpu, &cpu_coherent_mask);323323 notify_cpu_starting(cpu);324324325325+ cpumask_set_cpu(cpu, &cpu_callin_map);326326+ synchronise_count_slave(cpu);327327+325328 set_cpu_online(cpu, true);326329327330 set_cpu_sibling_map(cpu);328331 set_cpu_core_map(cpu);329332330333 calculate_cpu_foreign_map();331331-332332- cpumask_set_cpu(cpu, &cpu_callin_map);333333-334334- synchronise_count_slave(cpu);335334336335 /*337336 * irq will be enabled in ->smp_finish(), enabling it too early
+4-23
arch/mips/kernel/uprobes.c
···157157int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)158158{159159 struct uprobe_task *utask = current->utask;160160- union mips_instruction insn;161160162161 /*163162 * Now find the EPC where to resume after the breakpoint has been···167168 unsigned long epc;168169169170 epc = regs->cp0_epc;170170- __compute_return_epc_for_insn(regs, insn);171171+ __compute_return_epc_for_insn(regs,172172+ (union mips_instruction) aup->insn[0]);171173 aup->resume_epc = regs->cp0_epc;172174 }173173-174175 utask->autask.saved_trap_nr = current->thread.trap_nr;175176 current->thread.trap_nr = UPROBE_TRAP_NR;176177 regs->cp0_epc = current->utask->xol_vaddr;···221222 return NOTIFY_DONE;222223223224 switch (val) {224224- case DIE_BREAK:225225+ case DIE_UPROBE:225226 if (uprobe_pre_sstep_notifier(regs))226227 return NOTIFY_STOP;227228 break;···256257 ra = regs->regs[31];257258258259 /* Replace the return address with the trampoline address */259259- regs->regs[31] = ra;260260+ regs->regs[31] = trampoline_vaddr;260261261262 return ra;262263}···277278 unsigned long vaddr)278279{279280 return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);280280-}281281-282282-/**283283- * set_orig_insn - Restore the original instruction.284284- * @mm: the probed process address space.285285- * @auprobe: arch specific probepoint information.286286- * @vaddr: the virtual address to insert the opcode.287287- *288288- * For mm @mm, restore the original opcode (opcode) at @vaddr.289289- * Return 0 (success) or a negative errno.290290- *291291- * This overrides the weak version in kernel/events/uprobes.c.292292- */293293-int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,294294- unsigned long vaddr)295295-{296296- return uprobe_write_opcode(mm, vaddr,297297- *(uprobe_opcode_t *)&auprobe->orig_inst[0].word);298281}299282300283void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+4-4
arch/mips/kernel/vdso.c
···3939static void __init init_vdso_image(struct mips_vdso_image *image)4040{4141 unsigned long num_pages, i;4242+ unsigned long data_pfn;42434344 BUG_ON(!PAGE_ALIGNED(image->data));4445 BUG_ON(!PAGE_ALIGNED(image->size));45464647 num_pages = image->size / PAGE_SIZE;47484848- for (i = 0; i < num_pages; i++) {4949- image->mapping.pages[i] =5050- virt_to_page(image->data + (i * PAGE_SIZE));5151- }4949+ data_pfn = __phys_to_pfn(__pa_symbol(image->data));5050+ for (i = 0; i < num_pages; i++)5151+ image->mapping.pages[i] = pfn_to_page(data_pfn + i);5252}53535454static int __init init_vdso(void)
+1
arch/mips/math-emu/dsemul.c
···298298 /* Set EPC to return to post-branch instruction */299299 xcp->cp0_epc = current->thread.bd_emu_cont_pc;300300 pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);301301+ MIPS_FPU_EMU_INC_STATS(ds_emul);301302 return true;302303}
+1-1
arch/mips/mm/c-r4k.c
···800800 * If address-based cache ops don't require an SMP call, then801801 * use them exclusively for small flushes.802802 */803803- size = start - end;803803+ size = end - start;804804 cache_size = icache_size;805805 if (!cpu_has_ic_fills_f_dc) {806806 size *= 2;
···4343int hard_smp_processor_id(void);4444#define raw_smp_processor_id() (current_thread_info()->cpu)45454646+void smp_fill_in_cpu_possible_map(void);4647void smp_fill_in_sib_core_maps(void);4748void cpu_play_dead(void);4849···7372#define smp_fill_in_sib_core_maps() do { } while (0)7473#define smp_fetch_global_regs() do { } while (0)7574#define smp_fetch_global_pmu() do { } while (0)7575+#define smp_fill_in_cpu_possible_map() do { } while (0)76767777#endif /* !(CONFIG_SMP) */7878
+26
arch/sparc/kernel/setup_64.c
···3131#include <linux/initrd.h>3232#include <linux/module.h>3333#include <linux/start_kernel.h>3434+#include <linux/bootmem.h>34353536#include <asm/io.h>3637#include <asm/processor.h>···5150#include <asm/elf.h>5251#include <asm/mdesc.h>5352#include <asm/cacheflush.h>5353+#include <asm/dma.h>5454+#include <asm/irq.h>54555556#ifdef CONFIG_IP_PNP5657#include <net/ipconfig.h>···593590 pause_patch();594591}595592593593+void __init alloc_irqstack_bootmem(void)594594+{595595+ unsigned int i, node;596596+597597+ for_each_possible_cpu(i) {598598+ node = cpu_to_node(i);599599+600600+ softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),601601+ THREAD_SIZE,602602+ THREAD_SIZE, 0);603603+ hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),604604+ THREAD_SIZE,605605+ THREAD_SIZE, 0);606606+ }607607+}608608+596609void __init setup_arch(char **cmdline_p)597610{598611 /* Initialize PROM console and command line. */···669650670651 paging_init();671652 init_sparc64_elf_hwcap();653653+ smp_fill_in_cpu_possible_map();654654+ /*655655+ * Once the OF device tree and MDESC have been setup and nr_cpus has656656+ * been parsed, we know the list of possible cpus. Therefore we can657657+ * allocate the IRQ stacks.658658+ */659659+ alloc_irqstack_bootmem();672660}673661674662extern int stop_a_enabled;
+14
arch/sparc/kernel/smp_64.c
···12271227 xcall_deliver_impl = hypervisor_xcall_deliver;12281228}1229122912301230+void __init smp_fill_in_cpu_possible_map(void)12311231+{12321232+ int possible_cpus = num_possible_cpus();12331233+ int i;12341234+12351235+ if (possible_cpus > nr_cpu_ids)12361236+ possible_cpus = nr_cpu_ids;12371237+12381238+ for (i = 0; i < possible_cpus; i++)12391239+ set_cpu_possible(i, true);12401240+ for (; i < NR_CPUS; i++)12411241+ set_cpu_possible(i, false);12421242+}12431243+12301244void smp_fill_in_sib_core_maps(void)12311245{12321246 unsigned int i;
···11601160 return numa_latency[from][to];11611161}1162116211631163-static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)11631163+static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)11641164{11651165 int i;11661166···11731173 return i;11741174}1175117511761176-static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp,11771177- int index)11761176+static void __init find_numa_latencies_for_group(struct mdesc_handle *md,11771177+ u64 grp, int index)11781178{11791179 u64 arc;11801180···20812081{20822082 unsigned long end_pfn, shift, phys_base;20832083 unsigned long real_end, i;20842084- int node;2085208420862085 setup_page_offset();20872086···2248224922492250 /* Setup bootmem... */22502251 last_valid_pfn = end_pfn = bootmem_init(phys_base);22512251-22522252- /* Once the OF device tree and MDESC have been setup, we know22532253- * the list of possible cpus. Therefore we can allocate the22542254- * IRQ stacks.22552255- */22562256- for_each_possible_cpu(i) {22572257- node = cpu_to_node(i);22582258-22592259- softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),22602260- THREAD_SIZE,22612261- THREAD_SIZE, 0);22622262- hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),22632263- THREAD_SIZE,22642264- THREAD_SIZE, 0);22652265- }2266225222672253 kernel_physical_mapping_init();22682254
+31-4
arch/sparc/mm/tlb.c
···174174 return;175175176176 if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {177177- if (pmd_val(pmd) & _PAGE_PMD_HUGE)178178- mm->context.thp_pte_count++;179179- else180180- mm->context.thp_pte_count--;177177+ /*178178+ * Note that this routine only sets pmds for THP pages.179179+ * Hugetlb pages are handled elsewhere. We need to check180180+ * for huge zero page. Huge zero pages are like hugetlb181181+ * pages in that there is no RSS, but there is the need182182+ * for TSB entries. So, huge zero page counts go into183183+ * hugetlb_pte_count.184184+ */185185+ if (pmd_val(pmd) & _PAGE_PMD_HUGE) {186186+ if (is_huge_zero_page(pmd_page(pmd)))187187+ mm->context.hugetlb_pte_count++;188188+ else189189+ mm->context.thp_pte_count++;190190+ } else {191191+ if (is_huge_zero_page(pmd_page(orig)))192192+ mm->context.hugetlb_pte_count--;193193+ else194194+ mm->context.thp_pte_count--;195195+ }181196182197 /* Do not try to allocate the TSB hash table if we183198 * don't have one already. We have various locks held···219204 }220205}221206207207+/*208208+ * This routine is only called when splitting a THP209209+ */222210void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,223211 pmd_t *pmdp)224212{···231213232214 set_pmd_at(vma->vm_mm, address, pmdp, entry);233215 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);216216+217217+ /*218218+ * set_pmd_at() will not be called in a way to decrement219219+ * thp_pte_count when splitting a THP, so do it now.220220+ * Sanity check pmd before doing the actual decrement.221221+ */222222+ if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&223223+ !is_huge_zero_page(pmd_page(entry)))224224+ (vma->vm_mm)->context.thp_pte_count--;234225}235226236227void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+12-6
arch/sparc/mm/tsb.c
···469469470470int init_new_context(struct task_struct *tsk, struct mm_struct *mm)471471{472472+ unsigned long mm_rss = get_mm_rss(mm);472473#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)473473- unsigned long total_huge_pte_count;474474+ unsigned long saved_hugetlb_pte_count;475475+ unsigned long saved_thp_pte_count;474476#endif475477 unsigned int i;476478···485483 * will re-increment the counters as the parent PTEs are486484 * copied into the child address space.487485 */488488- total_huge_pte_count = mm->context.hugetlb_pte_count +489489- mm->context.thp_pte_count;486486+ saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;487487+ saved_thp_pte_count = mm->context.thp_pte_count;490488 mm->context.hugetlb_pte_count = 0;491489 mm->context.thp_pte_count = 0;490490+491491+ mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);492492#endif493493494494 /* copy_mm() copies over the parent's mm_struct before calling···503499 /* If this is fork, inherit the parent's TSB size. We would504500 * grow it to that size on the first page fault anyways.505501 */506506- tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));502502+ tsb_grow(mm, MM_TSB_BASE, mm_rss);507503508504#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)509509- if (unlikely(total_huge_pte_count))510510- tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);505505+ if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))506506+ tsb_grow(mm, MM_TSB_HUGE,507507+ (saved_hugetlb_pte_count + saved_thp_pte_count) *508508+ REAL_HPAGE_PER_HPAGE);511509#endif512510513511 if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
+2-2
arch/x86/entry/entry_64.S
···10021002 testb $3, CS+8(%rsp)10031003 jz .Lerror_kernelspace1004100410051005-.Lerror_entry_from_usermode_swapgs:10061005 /*10071006 * We entered from user mode or we're pretending to have entered10081007 * from user mode due to an IRET fault.···10441045 * gsbase and proceed. We'll fix up the exception and land in10451046 * .Lgs_change's error handler with kernel gsbase.10461047 */10471047- jmp .Lerror_entry_from_usermode_swapgs10481048+ SWAPGS10491049+ jmp .Lerror_entry_done1048105010491051.Lbstep_iret:10501052 /* Fix truncated RIP */
+1-1
arch/x86/entry/vdso/vdso2c.h
···22222323 ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));24242525- if (hdr->e_type != ET_DYN)2525+ if (GET_LE(&hdr->e_type) != ET_DYN)2626 fail("input is not a shared object\n");27272828 /* Walk the segment table. */
+3-2
arch/x86/events/intel/bts.c
···455455 * The only surefire way of knowing if this NMI is ours is by checking456456 * the write ptr against the PMI threshold.457457 */458458- if (ds->bts_index >= ds->bts_interrupt_threshold)458458+ if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))459459 handled = 1;460460461461 /*···584584 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)585585 return -ENODEV;586586587587- bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE;587587+ bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |588588+ PERF_PMU_CAP_EXCLUSIVE;588589 bts_pmu.task_ctx_nr = perf_sw_context;589590 bts_pmu.event_init = bts_event_init;590591 bts_pmu.add = bts_event_add;
+1-1
arch/x86/include/asm/tlbflush.h
···8181/* Initialize cr4 shadow for this CPU. */8282static inline void cr4_init_shadow(void)8383{8484- this_cpu_write(cpu_tlbstate.cr4, __read_cr4());8484+ this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());8585}86868787/* Set in this cpu's CR4. */
+11-12
arch/x86/kernel/cpu/common.c
···804804 identify_cpu_without_cpuid(c);805805806806 /* cyrix could have cpuid enabled via c_identify()*/807807- if (!have_cpuid_p())808808- return;807807+ if (have_cpuid_p()) {808808+ cpu_detect(c);809809+ get_cpu_vendor(c);810810+ get_cpu_cap(c);809811810810- cpu_detect(c);811811- get_cpu_vendor(c);812812- get_cpu_cap(c);812812+ if (this_cpu->c_early_init)813813+ this_cpu->c_early_init(c);813814814814- if (this_cpu->c_early_init)815815- this_cpu->c_early_init(c);815815+ c->cpu_index = 0;816816+ filter_cpuid_features(c, false);816817817817- c->cpu_index = 0;818818- filter_cpuid_features(c, false);819819-820820- if (this_cpu->c_bsp_init)821821- this_cpu->c_bsp_init(c);818818+ if (this_cpu->c_bsp_init)819819+ this_cpu->c_bsp_init(c);820820+ }822821823822 setup_force_cpu_cap(X86_FEATURE_ALWAYS);824823 fpu__init_system(c);
+1-3
arch/x86/kernel/setup.c
···11371137 * auditing all the early-boot CR4 manipulation would be needed to11381138 * rule it out.11391139 */11401140- if (boot_cpu_data.cpuid_level >= 0)11411141- /* A CPU has %cr4 if and only if it has CPUID. */11421142- mmu_cr4_features = __read_cr4();11401140+ mmu_cr4_features = __read_cr4_safe();1143114111441142 memblock_set_current_limit(get_max_mapped());11451143
+11-10
arch/x86/mm/pageattr.c
···917917 }918918}919919920920-static int populate_pmd(struct cpa_data *cpa,921921- unsigned long start, unsigned long end,922922- unsigned num_pages, pud_t *pud, pgprot_t pgprot)920920+static long populate_pmd(struct cpa_data *cpa,921921+ unsigned long start, unsigned long end,922922+ unsigned num_pages, pud_t *pud, pgprot_t pgprot)923923{924924- unsigned int cur_pages = 0;924924+ long cur_pages = 0;925925 pmd_t *pmd;926926 pgprot_t pmd_pgprot;927927···991991 return num_pages;992992}993993994994-static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,995995- pgprot_t pgprot)994994+static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,995995+ pgprot_t pgprot)996996{997997 pud_t *pud;998998 unsigned long end;999999- int cur_pages = 0;999999+ long cur_pages = 0;10001000 pgprot_t pud_pgprot;1001100110021002 end = start + (cpa->numpages << PAGE_SHIFT);···1052105210531053 /* Map trailing leftover */10541054 if (start < end) {10551055- int tmp;10551055+ long tmp;1056105610571057 pud = pud_offset(pgd, start);10581058 if (pud_none(*pud))···10781078 pgprot_t pgprot = __pgprot(_KERNPG_TABLE);10791079 pud_t *pud = NULL; /* shut up gcc */10801080 pgd_t *pgd_entry;10811081- int ret;10811081+ long ret;1082108210831083 pgd_entry = cpa->pgd + pgd_index(addr);10841084···1327132713281328static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)13291329{13301330- int ret, numpages = cpa->numpages;13301330+ unsigned long numpages = cpa->numpages;13311331+ int ret;1331133213321333 while (numpages) {13331334 /*
+1-1
arch/x86/platform/efi/efi_64.c
···245245 * text and allocate a new stack because we can't rely on the246246 * stack pointer being < 4GB.247247 */248248- if (!IS_ENABLED(CONFIG_EFI_MIXED))248248+ if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())249249 return 0;250250251251 /*
+14-2
block/blk-mq.c
···296296 if (ret)297297 return ERR_PTR(ret);298298299299+ /*300300+ * Check if the hardware context is actually mapped to anything.301301+ * If not tell the caller that it should skip this queue.302302+ */299303 hctx = q->queue_hw_ctx[hctx_idx];304304+ if (!blk_mq_hw_queue_mapped(hctx)) {305305+ ret = -EXDEV;306306+ goto out_queue_exit;307307+ }300308 ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));301309302310 blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);303311 rq = __blk_mq_alloc_request(&alloc_data, rw, 0);304312 if (!rq) {305305- blk_queue_exit(q);306306- return ERR_PTR(-EWOULDBLOCK);313313+ ret = -EWOULDBLOCK;314314+ goto out_queue_exit;307315 }308316309317 return rq;318318+319319+out_queue_exit:320320+ blk_queue_exit(q);321321+ return ERR_PTR(ret);310322}311323EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);312324
+4-2
block/blk-throttle.c
···780780 /*781781 * If previous slice expired, start a new one otherwise renew/extend782782 * existing slice to make sure it is at least throtl_slice interval783783- * long since now.783783+ * long since now. New slice is started only for empty throttle group.784784+ * If there is queued bio, that means there should be an active785785+ * slice and it should be extended instead.784786 */785785- if (throtl_slice_used(tg, rw))787787+ if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))786788 throtl_start_new_slice(tg, rw);787789 else {788790 if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
+24-17
crypto/rsa-pkcs1pad.c
···298298 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);299299 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);300300 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);301301+ unsigned int dst_len;301302 unsigned int pos;302302-303303- if (err == -EOVERFLOW)304304- /* Decrypted value had no leading 0 byte */305305- err = -EINVAL;303303+ u8 *out_buf;306304307305 if (err)308306 goto done;309307310310- if (req_ctx->child_req.dst_len != ctx->key_size - 1) {311311- err = -EINVAL;308308+ err = -EINVAL;309309+ dst_len = req_ctx->child_req.dst_len;310310+ if (dst_len < ctx->key_size - 1)312311 goto done;312312+313313+ out_buf = req_ctx->out_buf;314314+ if (dst_len == ctx->key_size) {315315+ if (out_buf[0] != 0x00)316316+ /* Decrypted value had no leading 0 byte */317317+ goto done;318318+319319+ dst_len--;320320+ out_buf++;313321 }314322315315- if (req_ctx->out_buf[0] != 0x02) {316316- err = -EINVAL;323323+ if (out_buf[0] != 0x02)317324 goto done;318318- }319319- for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)320320- if (req_ctx->out_buf[pos] == 0x00)325325+326326+ for (pos = 1; pos < dst_len; pos++)327327+ if (out_buf[pos] == 0x00)321328 break;322322- if (pos < 9 || pos == req_ctx->child_req.dst_len) {323323- err = -EINVAL;329329+ if (pos < 9 || pos == dst_len)324330 goto done;325325- }326331 pos++;327332328328- if (req->dst_len < req_ctx->child_req.dst_len - pos)333333+ err = 0;334334+335335+ if (req->dst_len < dst_len - pos)329336 err = -EOVERFLOW;330330- req->dst_len = req_ctx->child_req.dst_len - pos;337337+ req->dst_len = dst_len - pos;331338332339 if (!err)333340 sg_copy_from_buffer(req->dst,334341 sg_nents_for_len(req->dst, req->dst_len),335335- req_ctx->out_buf + pos, req->dst_len);342342+ out_buf + pos, req->dst_len);336343337344done:338345 kzfree(req_ctx->out_buf);
+28-20
drivers/acpi/nfit/core.c
···9494 return to_acpi_device(acpi_desc->dev);9595}96969797-static int xlat_status(void *buf, unsigned int cmd)9797+static int xlat_status(void *buf, unsigned int cmd, u32 status)9898{9999 struct nd_cmd_clear_error *clear_err;100100 struct nd_cmd_ars_status *ars_status;101101- struct nd_cmd_ars_start *ars_start;102102- struct nd_cmd_ars_cap *ars_cap;103101 u16 flags;104102105103 switch (cmd) {106104 case ND_CMD_ARS_CAP:107107- ars_cap = buf;108108- if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)105105+ if ((status & 0xffff) == NFIT_ARS_CAP_NONE)109106 return -ENOTTY;110107111108 /* Command failed */112112- if (ars_cap->status & 0xffff)109109+ if (status & 0xffff)113110 return -EIO;114111115112 /* No supported scan types for this range */116113 flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;117117- if ((ars_cap->status >> 16 & flags) == 0)114114+ if ((status >> 16 & flags) == 0)118115 return -ENOTTY;119116 break;120117 case ND_CMD_ARS_START:121121- ars_start = buf;122118 /* ARS is in progress */123123- if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)119119+ if ((status & 0xffff) == NFIT_ARS_START_BUSY)124120 return -EBUSY;125121126122 /* Command failed */127127- if (ars_start->status & 0xffff)123123+ if (status & 0xffff)128124 return -EIO;129125 break;130126 case ND_CMD_ARS_STATUS:131127 ars_status = buf;132128 /* Command failed */133133- if (ars_status->status & 0xffff)129129+ if (status & 0xffff)134130 return -EIO;135131 /* Check extended status (Upper two bytes) */136136- if (ars_status->status == NFIT_ARS_STATUS_DONE)132132+ if (status == NFIT_ARS_STATUS_DONE)137133 return 0;138134139135 /* ARS is in progress */140140- if (ars_status->status == NFIT_ARS_STATUS_BUSY)136136+ if (status == NFIT_ARS_STATUS_BUSY)141137 return -EBUSY;142138143139 /* No ARS performed for the current boot */144144- if (ars_status->status == NFIT_ARS_STATUS_NONE)140140+ if (status == NFIT_ARS_STATUS_NONE)145141 return -EAGAIN;146142147143 /*···145149 * agent wants the scan to 
stop. If we didn't overflow146150 * then just continue with the returned results.147151 */148148- if (ars_status->status == NFIT_ARS_STATUS_INTR) {152152+ if (status == NFIT_ARS_STATUS_INTR) {149153 if (ars_status->flags & NFIT_ARS_F_OVERFLOW)150154 return -ENOSPC;151155 return 0;152156 }153157154158 /* Unknown status */155155- if (ars_status->status >> 16)159159+ if (status >> 16)156160 return -EIO;157161 break;158162 case ND_CMD_CLEAR_ERROR:159163 clear_err = buf;160160- if (clear_err->status & 0xffff)164164+ if (status & 0xffff)161165 return -EIO;162166 if (!clear_err->cleared)163167 return -EIO;···168172 break;169173 }170174175175+ /* all other non-zero status results in an error */176176+ if (status)177177+ return -EIO;171178 return 0;172179}173180···185186 struct nd_cmd_pkg *call_pkg = NULL;186187 const char *cmd_name, *dimm_name;187188 unsigned long cmd_mask, dsm_mask;189189+ u32 offset, fw_status = 0;188190 acpi_handle handle;189191 unsigned int func;190192 const u8 *uuid;191191- u32 offset;192193 int rc, i;193194194195 func = cmd;···316317 out_obj->buffer.pointer + offset, out_size);317318 offset += out_size;318319 }320320+321321+ /*322322+ * Set fw_status for all the commands with a known format to be323323+ * later interpreted by xlat_status().324324+ */325325+ if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)326326+ || (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))327327+ fw_status = *(u32 *) out_obj->buffer.pointer;328328+319329 if (offset + in_buf.buffer.length < buf_len) {320330 if (i >= 1) {321331 /*···333325 */334326 rc = buf_len - offset - in_buf.buffer.length;335327 if (cmd_rc)336336- *cmd_rc = xlat_status(buf, cmd);328328+ *cmd_rc = xlat_status(buf, cmd, fw_status);337329 } else {338330 dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",339331 __func__, dimm_name, cmd_name, buf_len,···343335 } else {344336 rc = 0;345337 if (cmd_rc)346346- *cmd_rc = xlat_status(buf, cmd);338338+ *cmd_rc = xlat_status(buf, cmd, 
fw_status);347339 }348340349341 out:
···30153015 if (rdev->pdev->device == 0x6811 &&30163016 rdev->pdev->revision == 0x81)30173017 max_mclk = 120000;30183018+ /* limit sclk/mclk on Jet parts for stability */30193019+ if (rdev->pdev->device == 0x6665 &&30203020+ rdev->pdev->revision == 0xc3) {30213021+ max_sclk = 75000;30223022+ max_mclk = 80000;30233023+ }3018302430193025 if (rps->vce_active) {30203026 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
+1-1
drivers/gpu/drm/udl/udl_fb.c
···122122 return 0;123123 cmd = urb->transfer_buffer;124124125125- for (i = y; i < height ; i++) {125125+ for (i = y; i < y + height ; i++) {126126 const int line_offset = fb->base.pitches[0] * i;127127 const int byte_offset = line_offset + (x * bpp);128128 const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
+11-7
drivers/i2c/busses/i2c-eg20t.c
···773773 /* Set the number of I2C channel instance */774774 adap_info->ch_num = id->driver_data;775775776776- ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,777777- KBUILD_MODNAME, adap_info);778778- if (ret) {779779- pch_pci_err(pdev, "request_irq FAILED\n");780780- goto err_request_irq;781781- }782782-783776 for (i = 0; i < adap_info->ch_num; i++) {784777 pch_adap = &adap_info->pch_data[i].pch_adapter;785778 adap_info->pch_i2c_suspended = false;···790797791798 pch_adap->dev.of_node = pdev->dev.of_node;792799 pch_adap->dev.parent = &pdev->dev;800800+ }801801+802802+ ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,803803+ KBUILD_MODNAME, adap_info);804804+ if (ret) {805805+ pch_pci_err(pdev, "request_irq FAILED\n");806806+ goto err_request_irq;807807+ }808808+809809+ for (i = 0; i < adap_info->ch_num; i++) {810810+ pch_adap = &adap_info->pch_data[i].pch_adapter;793811794812 pch_i2c_init(&adap_info->pch_data[i]);795813
+2-1
drivers/i2c/busses/i2c-qup.c
···15991599#ifdef CONFIG_PM_SLEEP16001600static int qup_i2c_suspend(struct device *device)16011601{16021602- qup_i2c_pm_suspend_runtime(device);16021602+ if (!pm_runtime_suspended(device))16031603+ return qup_i2c_pm_suspend_runtime(device);16031604 return 0;16041605}16051606
+1-1
drivers/i2c/muxes/i2c-mux-pca954x.c
···164164 /* Only select the channel if its different from the last channel */165165 if (data->last_chan != regval) {166166 ret = pca954x_reg_write(muxc->parent, client, regval);167167- data->last_chan = regval;167167+ data->last_chan = ret ? 0 : regval;168168 }169169170170 return ret;
···548548static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,549549 unsigned long cluster_id)550550{551551- int cpu = *base_cpu;551551+ int next_cpu, cpu = *base_cpu;552552 unsigned long mpidr = cpu_logical_map(cpu);553553 u16 tlist = 0;554554···562562563563 tlist |= 1 << (mpidr & 0xf);564564565565- cpu = cpumask_next(cpu, mask);566566- if (cpu >= nr_cpu_ids)565565+ next_cpu = cpumask_next(cpu, mask);566566+ if (next_cpu >= nr_cpu_ids)567567 goto out;568568+ cpu = next_cpu;568569569570 mpidr = cpu_logical_map(cpu);570571
+50-55
drivers/irqchip/irq-mips-gic.c
···638638 if (!gic_local_irq_is_routable(intr))639639 return -EPERM;640640641641- /*642642- * HACK: These are all really percpu interrupts, but the rest643643- * of the MIPS kernel code does not use the percpu IRQ API for644644- * the CP0 timer and performance counter interrupts.645645- */646646- switch (intr) {647647- case GIC_LOCAL_INT_TIMER:648648- case GIC_LOCAL_INT_PERFCTR:649649- case GIC_LOCAL_INT_FDC:650650- irq_set_chip_and_handler(virq,651651- &gic_all_vpes_local_irq_controller,652652- handle_percpu_irq);653653- break;654654- default:655655- irq_set_chip_and_handler(virq,656656- &gic_local_irq_controller,657657- handle_percpu_devid_irq);658658- irq_set_percpu_devid(virq);659659- break;660660- }661661-662641 spin_lock_irqsave(&gic_lock, flags);663642 for (i = 0; i < gic_vpes; i++) {664643 u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;···703724 return 0;704725}705726706706-static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,707707- irq_hw_number_t hw)727727+static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq,728728+ unsigned int hwirq)708729{709709- if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)710710- return gic_local_irq_domain_map(d, virq, hw);730730+ struct irq_chip *chip;731731+ int err;711732712712- irq_set_chip_and_handler(virq, &gic_level_irq_controller,713713- handle_level_irq);733733+ if (hwirq >= GIC_SHARED_HWIRQ_BASE) {734734+ err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,735735+ &gic_level_irq_controller,736736+ NULL);737737+ } else {738738+ switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {739739+ case GIC_LOCAL_INT_TIMER:740740+ case GIC_LOCAL_INT_PERFCTR:741741+ case GIC_LOCAL_INT_FDC:742742+ /*743743+ * HACK: These are all really percpu interrupts, but744744+ * the rest of the MIPS kernel code does not use the745745+ * percpu IRQ API for them.746746+ */747747+ chip = &gic_all_vpes_local_irq_controller;748748+ irq_set_handler(virq, handle_percpu_irq);749749+ break;714750715715- return gic_shared_irq_domain_map(d, 
virq, hw, 0);751751+ default:752752+ chip = &gic_local_irq_controller;753753+ irq_set_handler(virq, handle_percpu_devid_irq);754754+ irq_set_percpu_devid(virq);755755+ break;756756+ }757757+758758+ err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,759759+ chip, NULL);760760+ }761761+762762+ return err;716763}717764718765static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,···749744 int cpu, ret, i;750745751746 if (spec->type == GIC_DEVICE) {752752- /* verify that it doesn't conflict with an IPI irq */753753- if (test_bit(spec->hwirq, ipi_resrv))747747+ /* verify that shared irqs don't conflict with an IPI irq */748748+ if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) &&749749+ test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv))754750 return -EBUSY;755751756756- hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);757757-758758- return irq_domain_set_hwirq_and_chip(d, virq, hwirq,759759- &gic_level_irq_controller,760760- NULL);752752+ return gic_setup_dev_chip(d, virq, spec->hwirq);761753 } else {762754 base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);763755 if (base_hwirq == gic_shared_intrs) {···823821}824822825823static const struct irq_domain_ops gic_irq_domain_ops = {826826- .map = gic_irq_domain_map,827824 .alloc = gic_irq_domain_alloc,828825 .free = gic_irq_domain_free,829826 .match = gic_irq_domain_match,···853852 struct irq_fwspec *fwspec = arg;854853 struct gic_irq_spec spec = {855854 .type = GIC_DEVICE,856856- .hwirq = fwspec->param[1],857855 };858856 int i, ret;859859- bool is_shared = fwspec->param[0] == GIC_SHARED;860857861861- if (is_shared) {862862- ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);863863- if (ret)864864- return ret;865865- }858858+ if (fwspec->param[0] == GIC_SHARED)859859+ spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);860860+ else861861+ spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);862862+863863+ ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);864864+ if (ret)865865+ return 
ret;866866867867 for (i = 0; i < nr_irqs; i++) {868868- irq_hw_number_t hwirq;869869-870870- if (is_shared)871871- hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);872872- else873873- hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);874874-875875- ret = irq_domain_set_hwirq_and_chip(d, virq + i,876876- hwirq,877877- &gic_level_irq_controller,878878- NULL);868868+ ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i);879869 if (ret)880870 goto error;881871 }···888896static void gic_dev_domain_activate(struct irq_domain *domain,889897 struct irq_data *d)890898{891891- gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);899899+ if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS)900900+ gic_local_irq_domain_map(domain, d->irq, d->hwirq);901901+ else902902+ gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);892903}893904894905static struct irq_domain_ops gic_dev_domain_ops = {
+9-5
drivers/mmc/host/dw_mmc.c
···1112111211131113 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;1114111411151115- dev_info(&slot->mmc->class_dev,11161116- "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",11171117- slot->id, host->bus_hz, clock,11181118- div ? ((host->bus_hz / div) >> 1) :11191119- host->bus_hz, div);11151115+ if (clock != slot->__clk_old || force_clkinit)11161116+ dev_info(&slot->mmc->class_dev,11171117+ "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",11181118+ slot->id, host->bus_hz, clock,11191119+ div ? ((host->bus_hz / div) >> 1) :11201120+ host->bus_hz, div);1120112111211122 /* disable clock */11221123 mci_writel(host, CLKENA, 0);···1140113911411140 /* inform CIU */11421141 mci_send_cmd(slot, sdmmc_cmd_bits, 0);11421142+11431143+ /* keep the last clock value that was requested from core */11441144+ slot->__clk_old = clock;11431145 }1144114611451147 host->current_speed = clock;
+3
drivers/mmc/host/dw_mmc.h
···249249 * @queue_node: List node for placing this node in the @queue list of250250 * &struct dw_mci.251251 * @clock: Clock rate configured by set_ios(). Protected by host->lock.252252+ * @__clk_old: The last clock value that was requested from core.253253+ * Keeping track of this helps us to avoid spamming the console.252254 * @flags: Random state bits associated with the slot.253255 * @id: Number of this slot.254256 * @sdio_id: Number of this slot in the SDIO interrupt registers.···265263 struct list_head queue_node;266264267265 unsigned int clock;266266+ unsigned int __clk_old;268267269268 unsigned long flags;270269#define DW_MMC_CARD_PRESENT 0
···943943 struct nand_chip *nand_chip = mtd_to_nand(mtd);944944 int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;945945946946- if (section > nand_chip->ecc.steps)946946+ if (section >= nand_chip->ecc.steps)947947 return -ERANGE;948948949949 if (!section) {
+1-1
drivers/mtd/nand/omap2.c
···21692169 return 0;2170217021712171return_error:21722172- if (info->dma)21722172+ if (!IS_ERR_OR_NULL(info->dma))21732173 dma_release_channel(info->dma);21742174 if (nand_chip->ecc.priv) {21752175 nand_bch_free(nand_chip->ecc.priv);
···8989 .driver_data = 0,9090 }, {9191 .name = "imx25-fec",9292- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,9292+ .driver_data = FEC_QUIRK_USE_GASKET,9393 }, {9494 .name = "imx27-fec",9595- .driver_data = FEC_QUIRK_HAS_RACC,9595+ .driver_data = 0,9696 }, {9797 .name = "imx28-fec",9898 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |···180180/* FEC receive acceleration */181181#define FEC_RACC_IPDIS (1 << 1)182182#define FEC_RACC_PRODIS (1 << 2)183183+#define FEC_RACC_SHIFT16 BIT(7)183184#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)184185185186/*···946945947946#if !defined(CONFIG_M5272)948947 if (fep->quirks & FEC_QUIRK_HAS_RACC) {949949- /* set RX checksum */950948 val = readl(fep->hwp + FEC_RACC);949949+ /* align IP header */950950+ val |= FEC_RACC_SHIFT16;951951 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)952952+ /* set RX checksum */952953 val |= FEC_RACC_OPTIONS;953954 else954955 val &= ~FEC_RACC_OPTIONS;···14311428 prefetch(skb->data - NET_IP_ALIGN);14321429 skb_put(skb, pkt_len - 4);14331430 data = skb->data;14311431+14321432+#if !defined(CONFIG_M5272)14331433+ if (fep->quirks & FEC_QUIRK_HAS_RACC)14341434+ data = skb_pull_inline(skb, 2);14351435+#endif14361436+14341437 if (!is_copybreak && need_swap)14351438 swap_buffer(data, pkt_len);14361439
+7-1
drivers/nvdimm/core.c
···9999 nvdimm_map->size = size;100100 kref_init(&nvdimm_map->kref);101101102102- if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev)))102102+ if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {103103+ dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",104104+ &offset, size, dev_name(dev));103105 goto err_request_region;106106+ }104107105108 if (flags)106109 nvdimm_map->mem = memremap(offset, size, flags);···173170 else174171 kref_get(&nvdimm_map->kref);175172 nvdimm_bus_unlock(dev);173173+174174+ if (!nvdimm_map)175175+ return NULL;176176177177 if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))178178 return NULL;
+20-2
drivers/nvdimm/nd.h
···5252struct nd_region_data {5353 int ns_count;5454 int ns_active;5555- unsigned int flush_mask;5656- void __iomem *flush_wpq[0][0];5555+ unsigned int hints_shift;5656+ void __iomem *flush_wpq[0];5757};5858+5959+static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,6060+ int dimm, int hint)6161+{6262+ unsigned int num = 1 << ndrd->hints_shift;6363+ unsigned int mask = num - 1;6464+6565+ return ndrd->flush_wpq[dimm * num + (hint & mask)];6666+}6767+6868+static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,6969+ int hint, void __iomem *flush)7070+{7171+ unsigned int num = 1 << ndrd->hints_shift;7272+ unsigned int mask = num - 1;7373+7474+ ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;7575+}58765977static inline struct nd_namespace_index *to_namespace_index(6078 struct nvdimm_drvdata *ndd, int i)
+13-9
drivers/nvdimm/region_devs.c
···38383939 dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),4040 nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");4141- for (i = 0; i < nvdimm->num_flush; i++) {4141+ for (i = 0; i < (1 << ndrd->hints_shift); i++) {4242 struct resource *res = &nvdimm->flush_wpq[i];4343 unsigned long pfn = PHYS_PFN(res->start);4444 void __iomem *flush_page;···54545555 if (j < i)5656 flush_page = (void __iomem *) ((unsigned long)5757- ndrd->flush_wpq[dimm][j] & PAGE_MASK);5757+ ndrd_get_flush_wpq(ndrd, dimm, j)5858+ & PAGE_MASK);5859 else5960 flush_page = devm_nvdimm_ioremap(dev,6060- PHYS_PFN(pfn), PAGE_SIZE);6161+ PFN_PHYS(pfn), PAGE_SIZE);6162 if (!flush_page)6263 return -ENXIO;6363- ndrd->flush_wpq[dimm][i] = flush_page6464- + (res->start & ~PAGE_MASK);6464+ ndrd_set_flush_wpq(ndrd, dimm, i, flush_page6565+ + (res->start & ~PAGE_MASK));6566 }66676768 return 0;···9493 return -ENOMEM;9594 dev_set_drvdata(dev, ndrd);96959797- ndrd->flush_mask = (1 << ilog2(num_flush)) - 1;9696+ if (!num_flush)9797+ return 0;9898+9999+ ndrd->hints_shift = ilog2(num_flush);98100 for (i = 0; i < nd_region->ndr_mappings; i++) {99101 struct nd_mapping *nd_mapping = &nd_region->mapping[i];100102 struct nvdimm *nvdimm = nd_mapping->nvdimm;···904900 */905901 wmb();906902 for (i = 0; i < nd_region->ndr_mappings; i++)907907- if (ndrd->flush_wpq[i][0])908908- writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]);903903+ if (ndrd_get_flush_wpq(ndrd, i, 0))904904+ writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));909905 wmb();910906}911907EXPORT_SYMBOL_GPL(nvdimm_flush);···929925930926 for (i = 0; i < nd_region->ndr_mappings; i++)931927 /* flush hints present, flushing required */932932- if (ndrd->flush_wpq[i][0])928928+ if (ndrd_get_flush_wpq(ndrd, i, 0))933929 return 1;934930935931 /*
···42714271 if (ret < 0)42724272 return ret;4273427342744274- /*42754275- * Use new btrfs_qgroup_reserve_data to reserve precious data space42764276- *42774277- * TODO: Find a good method to avoid reserve data space for NOCOW42784278- * range, but don't impact performance on quota disable case.42794279- */42744274+ /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */42804275 ret = btrfs_qgroup_reserve_data(inode, start, len);42764276+ if (ret)42774277+ btrfs_free_reserved_data_space_noquota(inode, start, len);42814278 return ret;42824279}42834280
+12
fs/btrfs/ioctl.c
···16341634 int namelen;16351635 int ret = 0;1636163616371637+ if (!S_ISDIR(file_inode(file)->i_mode))16381638+ return -ENOTDIR;16391639+16371640 ret = mnt_want_write_file(file);16381641 if (ret)16391642 goto out;···16941691 struct btrfs_ioctl_vol_args *vol_args;16951692 int ret;1696169316941694+ if (!S_ISDIR(file_inode(file)->i_mode))16951695+ return -ENOTDIR;16961696+16971697 vol_args = memdup_user(arg, sizeof(*vol_args));16981698 if (IS_ERR(vol_args))16991699 return PTR_ERR(vol_args);···17191713 u64 *ptr = NULL;17201714 bool readonly = false;17211715 struct btrfs_qgroup_inherit *inherit = NULL;17161716+17171717+ if (!S_ISDIR(file_inode(file)->i_mode))17181718+ return -ENOTDIR;1722171917231720 vol_args = memdup_user(arg, sizeof(*vol_args));17241721 if (IS_ERR(vol_args))···23652356 int namelen;23662357 int ret;23672358 int err = 0;23592359+23602360+ if (!S_ISDIR(dir->i_mode))23612361+ return -ENOTDIR;2368236223692363 vol_args = memdup_user(arg, sizeof(*vol_args));23702364 if (IS_ERR(vol_args))
···18421842 ocfs2_commit_trans(osb, handle);1843184318441844out:18451845+ /*18461846+ * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),18471847+ * even in case of error here like ENOSPC and ENOMEM. So, we need18481848+ * to unlock the target page manually to prevent deadlocks when18491849+ * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED18501850+ * to VM code.18511851+ */18521852+ if (wc->w_target_locked)18531853+ unlock_page(mmap_page);18541854+18451855 ocfs2_free_write_ctxt(inode, wc);1846185618471857 if (data_ac) {
+2-1
include/linux/can/dev.h
···3232 * CAN common private data3333 */3434struct can_priv {3535+ struct net_device *dev;3536 struct can_device_stats can_stats;36373738 struct can_bittiming bittiming, data_bittiming;···4847 u32 ctrlmode_static; /* static enabled options for driver/hardware */49485049 int restart_ms;5151- struct timer_list restart_timer;5050+ struct delayed_work restart_work;52515352 int (*do_set_bittiming)(struct net_device *dev);5453 int (*do_set_data_bittiming)(struct net_device *dev);
···555555556556 atomic_t refcnt;557557558558+ /* How many times this chunk have been sent, for prsctp RTX policy */559559+ int sent_count;560560+558561 /* This is our link to the per-transport transmitted list. */559562 struct list_head transmitted_list;560563···606603607604 /* This needs to be recoverable for SCTP_SEND_FAILED events. */608605 struct sctp_sndrcvinfo sinfo;609609-610610- /* We use this field to record param for prsctp policies,611611- * for TTL policy, it is the time_to_drop of this chunk,612612- * for RTX policy, it is the max_sent_count of this chunk,613613- * for PRIO policy, it is the priority of this chunk.614614- */615615- unsigned long prsctp_param;616616-617617- /* How many times this chunk have been sent, for prsctp RTX policy */618618- int sent_count;619606620607 /* Which association does this belong to? */621608 struct sctp_association *asoc;
···34463446 * Except for the root, subtree_control must be zero for a cgroup34473447 * with tasks so that child cgroups don't compete against tasks.34483448 */34493449- if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {34503450- ret = -EBUSY;34513451- goto out_unlock;34493449+ if (enable && cgroup_parent(cgrp)) {34503450+ struct cgrp_cset_link *link;34513451+34523452+ /*34533453+ * Because namespaces pin csets too, @cgrp->cset_links34543454+ * might not be empty even when @cgrp is empty. Walk and34553455+ * verify each cset.34563456+ */34573457+ spin_lock_irq(&css_set_lock);34583458+34593459+ ret = 0;34603460+ list_for_each_entry(link, &cgrp->cset_links, cset_link) {34613461+ if (css_set_populated(link->cset)) {34623462+ ret = -EBUSY;34633463+ break;34643464+ }34653465+ }34663466+34673467+ spin_unlock_irq(&css_set_lock);34683468+34693469+ if (ret)34703470+ goto out_unlock;34523471 }3453347234543473 /* save and update control masks and prepare csses */···39183899 * cgroup_task_count - count the number of tasks in a cgroup.39193900 * @cgrp: the cgroup in question39203901 *39213921- * Return the number of tasks in the cgroup.39023902+ * Return the number of tasks in the cgroup. The returned number can be39033903+ * higher than the actual number of tasks due to css_set references from39043904+ * namespace roots and temporary usages.39223905 */39233906static int cgroup_task_count(const struct cgroup *cgrp)39243907{
+15-4
kernel/cpuset.c
···325325/*326326 * Return in pmask the portion of a cpusets's cpus_allowed that327327 * are online. If none are online, walk up the cpuset hierarchy328328- * until we find one that does have some online cpus. The top329329- * cpuset always has some cpus online.328328+ * until we find one that does have some online cpus.330329 *331330 * One way or another, we guarantee to return some non-empty subset332331 * of cpu_online_mask.···334335 */335336static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)336337{337337- while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))338338+ while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {338339 cs = parent_cs(cs);340340+ if (unlikely(!cs)) {341341+ /*342342+ * The top cpuset doesn't have any online cpu as a343343+ * consequence of a race between cpuset_hotplug_work344344+ * and cpu hotplug notifier. But we know the top345345+ * cpuset's effective_cpus is on its way to be346346+ * identical to cpu_online_mask.347347+ */348348+ cpumask_copy(pmask, cpu_online_mask);349349+ return;350350+ }351351+ }339352 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);340353}341354···20852074 * which could have been changed by cpuset just after it inherits the20862075 * state from the parent and before it sits on the cgroup's task list.20872076 */20882088-void cpuset_fork(struct task_struct *task)20772077+static void cpuset_fork(struct task_struct *task)20892078{20902079 if (task_css_is_root(task, cpuset_cgrp_id))20912080 return;
···820820 desc->name = name;821821822822 if (handle != handle_bad_irq && is_chained) {823823+ unsigned int type = irqd_get_trigger_type(&desc->irq_data);824824+823825 /*824826 * We're about to start this interrupt immediately,825827 * hence the need to set the trigger configuration.···830828 * chained interrupt. Reset it immediately because we831829 * do know better.832830 */833833- __irq_set_trigger(desc, irqd_get_trigger_type(&desc->irq_data));834834- desc->handle_irq = handle;831831+ if (type != IRQ_TYPE_NONE) {832832+ __irq_set_trigger(desc, type);833833+ desc->handle_irq = handle;834834+ }835835836836 irq_settings_set_noprobe(desc);837837 irq_settings_set_norequest(desc);
+16-13
kernel/trace/trace.c
···51245124 struct trace_iterator *iter = filp->private_data;51255125 ssize_t sret;5126512651275127- /* return any leftover data */51285128- sret = trace_seq_to_user(&iter->seq, ubuf, cnt);51295129- if (sret != -EBUSY)51305130- return sret;51315131-51325132- trace_seq_init(&iter->seq);51335133-51345127 /*51355128 * Avoid more than one consumer on a single file descriptor51365129 * This is just a matter of traces coherency, the ring buffer itself51375130 * is protected.51385131 */51395132 mutex_lock(&iter->mutex);51335133+51345134+ /* return any leftover data */51355135+ sret = trace_seq_to_user(&iter->seq, ubuf, cnt);51365136+ if (sret != -EBUSY)51375137+ goto out;51385138+51395139+ trace_seq_init(&iter->seq);51405140+51405141 if (iter->trace->read) {51415142 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);51425143 if (sret)···61646163 return -EBUSY;61656164#endif6166616561676167- if (splice_grow_spd(pipe, &spd))61686168- return -ENOMEM;61696169-61706166 if (*ppos & (PAGE_SIZE - 1))61716167 return -EINVAL;61726168···61726174 return -EINVAL;61736175 len &= PAGE_MASK;61746176 }61776177+61786178+ if (splice_grow_spd(pipe, &spd))61796179+ return -ENOMEM;6175618061766181 again:61776182 trace_access_lock(iter->cpu_file);···62336232 /* did we read anything? */62346233 if (!spd.nr_pages) {62356234 if (ret)62366236- return ret;62356235+ goto out;6237623662376237+ ret = -EAGAIN;62386238 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))62396239- return -EAGAIN;62396239+ goto out;6240624062416241 ret = wait_on_pipe(iter, true);62426242 if (ret)62436243- return ret;62436243+ goto out;6244624462456245 goto again;62466246 }6247624762486248 ret = splice_to_pipe(pipe, &spd);62496249+out:62496250 splice_shrink_spd(&spd);6250625162516252 return ret;
+1-1
lib/Kconfig.debug
···821821 help822822 Say Y here to enable the kernel to detect "hung tasks",823823 which are bugs that cause the task to be stuck in824824- uninterruptible "D" state indefinitiley.824824+ uninterruptible "D" state indefinitely.825825826826 When a hung task is detected, the kernel will print the827827 current stack trace (which you should report), but the
···110110 * ->tasklist_lock (memory_failure, collect_procs_ao)111111 */112112113113+static int page_cache_tree_insert(struct address_space *mapping,114114+ struct page *page, void **shadowp)115115+{116116+ struct radix_tree_node *node;117117+ void **slot;118118+ int error;119119+120120+ error = __radix_tree_create(&mapping->page_tree, page->index, 0,121121+ &node, &slot);122122+ if (error)123123+ return error;124124+ if (*slot) {125125+ void *p;126126+127127+ p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);128128+ if (!radix_tree_exceptional_entry(p))129129+ return -EEXIST;130130+131131+ mapping->nrexceptional--;132132+ if (!dax_mapping(mapping)) {133133+ if (shadowp)134134+ *shadowp = p;135135+ if (node)136136+ workingset_node_shadows_dec(node);137137+ } else {138138+ /* DAX can replace empty locked entry with a hole */139139+ WARN_ON_ONCE(p !=140140+ (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |141141+ RADIX_DAX_ENTRY_LOCK));142142+ /* DAX accounts exceptional entries as normal pages */143143+ if (node)144144+ workingset_node_pages_dec(node);145145+ /* Wakeup waiters for exceptional entry lock */146146+ dax_wake_mapping_entry_waiter(mapping, page->index,147147+ false);148148+ }149149+ }150150+ radix_tree_replace_slot(slot, page);151151+ mapping->nrpages++;152152+ if (node) {153153+ workingset_node_pages_inc(node);154154+ /*155155+ * Don't track node that contains actual pages.156156+ *157157+ * Avoid acquiring the list_lru lock if already158158+ * untracked. 
The list_empty() test is safe as159159+ * node->private_list is protected by160160+ * mapping->tree_lock.161161+ */162162+ if (!list_empty(&node->private_list))163163+ list_lru_del(&workingset_shadow_nodes,164164+ &node->private_list);165165+ }166166+ return 0;167167+}168168+113169static void page_cache_tree_delete(struct address_space *mapping,114170 struct page *page, void *shadow)115171{···617561618562 spin_lock_irqsave(&mapping->tree_lock, flags);619563 __delete_from_page_cache(old, NULL);620620- error = radix_tree_insert(&mapping->page_tree, offset, new);564564+ error = page_cache_tree_insert(mapping, new, NULL);621565 BUG_ON(error);622566 mapping->nrpages++;623567···639583 return error;640584}641585EXPORT_SYMBOL_GPL(replace_page_cache_page);642642-643643-static int page_cache_tree_insert(struct address_space *mapping,644644- struct page *page, void **shadowp)645645-{646646- struct radix_tree_node *node;647647- void **slot;648648- int error;649649-650650- error = __radix_tree_create(&mapping->page_tree, page->index, 0,651651- &node, &slot);652652- if (error)653653- return error;654654- if (*slot) {655655- void *p;656656-657657- p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);658658- if (!radix_tree_exceptional_entry(p))659659- return -EEXIST;660660-661661- mapping->nrexceptional--;662662- if (!dax_mapping(mapping)) {663663- if (shadowp)664664- *shadowp = p;665665- if (node)666666- workingset_node_shadows_dec(node);667667- } else {668668- /* DAX can replace empty locked entry with a hole */669669- WARN_ON_ONCE(p !=670670- (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |671671- RADIX_DAX_ENTRY_LOCK));672672- /* DAX accounts exceptional entries as normal pages */673673- if (node)674674- workingset_node_pages_dec(node);675675- /* Wakeup waiters for exceptional entry lock */676676- dax_wake_mapping_entry_waiter(mapping, page->index,677677- false);678678- }679679- }680680- radix_tree_replace_slot(slot, page);681681- mapping->nrpages++;682682- if (node) 
{683683- workingset_node_pages_inc(node);684684- /*685685- * Don't track node that contains actual pages.686686- *687687- * Avoid acquiring the list_lru lock if already688688- * untracked. The list_empty() test is safe as689689- * node->private_list is protected by690690- * mapping->tree_lock.691691- */692692- if (!list_empty(&node->private_list))693693- list_lru_del(&workingset_shadow_nodes,694694- &node->private_list);695695- }696696- return 0;697697-}698586699587static int __add_to_page_cache_locked(struct page *page,700588 struct address_space *mapping,
-3
mm/huge_memory.c
···11381138 bool was_writable;11391139 int flags = 0;1140114011411141- /* A PROT_NONE fault should not end up here */11421142- BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));11431143-11441141 fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);11451142 if (unlikely(!pmd_same(pmd, *fe->pmd)))11461143 goto out_unlock;
···33513351 bool was_writable = pte_write(pte);33523352 int flags = 0;3353335333543354- /* A PROT_NONE fault should not end up here */33553355- BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));33563356-33573354 /*33583355 * The "pte" at this point cannot be used safely without33593356 * validation through pte_unmap_same(). It's of NUMA type but···34553458 return VM_FAULT_FALLBACK;34563459}3457346034613461+static inline bool vma_is_accessible(struct vm_area_struct *vma)34623462+{34633463+ return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);34643464+}34653465+34583466/*34593467 * These routines also need to handle stuff like marking pages dirty34603468 * and/or accessed for architectures that don't do it in hardware (most···35263524 if (!pte_present(entry))35273525 return do_swap_page(fe, entry);3528352635293529- if (pte_protnone(entry))35273527+ if (pte_protnone(entry) && vma_is_accessible(fe->vma))35303528 return do_numa_page(fe, entry);3531352935323530 fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd);···3592359035933591 barrier();35943592 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {35953595- if (pmd_protnone(orig_pmd))35933593+ if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))35963594 return do_huge_pmd_numa_page(&fe, orig_pmd);3597359535983596 if ((fe.flags & FAULT_FLAG_WRITE) &&
+5-5
mm/memory_hotplug.c
···15551555{15561556 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;15571557 int nid = page_to_nid(page);15581558- nodemask_t nmask = node_online_map;15591559- struct page *new_page;15581558+ nodemask_t nmask = node_states[N_MEMORY];15591559+ struct page *new_page = NULL;1560156015611561 /*15621562 * TODO: allocate a destination hugepage from a nearest neighbor node,···15671567 return alloc_huge_page_node(page_hstate(compound_head(page)),15681568 next_node_in(nid, nmask));1569156915701570- if (nid != next_node_in(nid, nmask))15711571- node_clear(nid, nmask);15701570+ node_clear(nid, nmask);1572157115731572 if (PageHighMem(page)15741573 || (zone_idx(page_zone(page)) == ZONE_MOVABLE))15751574 gfp_mask |= __GFP_HIGHMEM;1576157515771577- new_page = __alloc_pages_nodemask(gfp_mask, 0,15761576+ if (!nodes_empty(nmask))15771577+ new_page = __alloc_pages_nodemask(gfp_mask, 0,15781578 node_zonelist(nid, gfp_mask), &nmask);15791579 if (!new_page)15801580 new_page = __alloc_pages(gfp_mask, 0,
···23032303 }23042304}2305230523062306-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH23072307-static void init_tlb_ubc(void)23082308-{23092309- /*23102310- * This deliberately does not clear the cpumask as it's expensive23112311- * and unnecessary. If there happens to be data in there then the23122312- * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and23132313- * then will be cleared.23142314- */23152315- current->tlb_ubc.flush_required = false;23162316-}23172317-#else23182318-static inline void init_tlb_ubc(void)23192319-{23202320-}23212321-#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */23222322-23232306/*23242307 * This is a basic per-node page freer. Used by both kswapd and direct reclaim.23252308 */···23372354 */23382355 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&23392356 sc->priority == DEF_PRIORITY);23402340-23412341- init_tlb_ubc();2342235723432358 blk_start_plug(&plug);23442359 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
+4-6
mm/workingset.c
···418418 * no pages, so we expect to be able to remove them all and419419 * delete and free the empty node afterwards.420420 */421421-422422- BUG_ON(!node->count);423423- BUG_ON(node->count & RADIX_TREE_COUNT_MASK);421421+ BUG_ON(!workingset_node_shadows(node));422422+ BUG_ON(workingset_node_pages(node));424423425424 for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {426425 if (node->slots[i]) {427426 BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));428427 node->slots[i] = NULL;429429- BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));430430- node->count -= 1U << RADIX_TREE_COUNT_SHIFT;428428+ workingset_node_shadows_dec(node);431429 BUG_ON(!mapping->nrexceptional);432430 mapping->nrexceptional--;433431 }434432 }435435- BUG_ON(node->count);433433+ BUG_ON(workingset_node_shadows(node));436434 inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);437435 if (!__radix_tree_delete_node(&mapping->page_tree, node))438436 BUG();
···22852285 return 1;22862286}2287228722882288-int ip6mr_get_route(struct net *net,22892289- struct sk_buff *skb, struct rtmsg *rtm, int nowait)22882288+int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,22892289+ int nowait, u32 portid)22902290{22912291 int err;22922292 struct mr6_table *mrt;···23312331 return -ENOMEM;23322332 }2333233323342334+ NETLINK_CB(skb2).portid = portid;23342335 skb_reset_transport_header(skb2);2335233623362337 skb_put(skb2, sizeof(struct ipv6hdr));
+3-1
net/ipv6/route.c
···32163216 if (iif) {32173217#ifdef CONFIG_IPV6_MROUTE32183218 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {32193219- int err = ip6mr_get_route(net, skb, rtm, nowait);32193219+ int err = ip6mr_get_route(net, skb, rtm, nowait,32203220+ portid);32213221+32203222 if (err <= 0) {32213223 if (!nowait) {32223224 if (err == 0)
···192192 msg, msg->expires_at, jiffies);193193 }194194195195+ if (asoc->peer.prsctp_capable &&196196+ SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags))197197+ msg->expires_at =198198+ jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive);199199+195200 /* This is the biggest possible DATA chunk that can fit into196201 * the packet197202 */···354349/* Check whether this message has expired. */355350int sctp_chunk_abandoned(struct sctp_chunk *chunk)356351{357357- if (!chunk->asoc->prsctp_enable ||352352+ if (!chunk->asoc->peer.prsctp_capable ||358353 !SCTP_PR_POLICY(chunk->sinfo.sinfo_flags)) {359354 struct sctp_datamsg *msg = chunk->msg;360355···368363 }369364370365 if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&371371- time_after(jiffies, chunk->prsctp_param)) {366366+ time_after(jiffies, chunk->msg->expires_at)) {372367 if (chunk->sent_count)373368 chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;374369 else375370 chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;376371 return 1;377372 } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&378378- chunk->sent_count > chunk->prsctp_param) {373373+ chunk->sent_count > chunk->sinfo.sinfo_timetolive) {379374 chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;380375 return 1;381376 }
+6-6
net/sctp/outqueue.c
···304304 "illegal chunk");305305306306 sctp_outq_tail_data(q, chunk);307307- if (chunk->asoc->prsctp_enable &&307307+ if (chunk->asoc->peer.prsctp_capable &&308308 SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))309309 chunk->asoc->sent_cnt_removable++;310310 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)···354354355355 list_for_each_entry_safe(chk, temp, queue, transmitted_list) {356356 if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||357357- chk->prsctp_param <= sinfo->sinfo_timetolive)357357+ chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)358358 continue;359359360360 list_del_init(&chk->transmitted_list);···389389390390 list_for_each_entry_safe(chk, temp, queue, list) {391391 if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||392392- chk->prsctp_param <= sinfo->sinfo_timetolive)392392+ chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)393393 continue;394394395395 list_del_init(&chk->list);···413413{414414 struct sctp_transport *transport;415415416416- if (!asoc->prsctp_enable || !asoc->sent_cnt_removable)416416+ if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)417417 return;418418419419 msg_len = sctp_prsctp_prune_sent(asoc, sinfo,···1026102610271027 /* Mark as failed send. */10281028 sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);10291029- if (asoc->prsctp_enable &&10291029+ if (asoc->peer.prsctp_capable &&10301030 SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))10311031 asoc->sent_cnt_removable--;10321032 sctp_chunk_free(chunk);···13191319 tsn = ntohl(tchunk->subh.data_hdr->tsn);13201320 if (TSN_lte(tsn, ctsn)) {13211321 list_del_init(&tchunk->transmitted_list);13221322- if (asoc->prsctp_enable &&13221322+ if (asoc->peer.prsctp_capable &&13231323 SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))13241324 asoc->sent_cnt_removable--;13251325 sctp_chunk_free(tchunk);
+40-18
net/sctp/sctp_diag.c
···276276 return err;277277}278278279279-static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)279279+static int sctp_sock_dump(struct sock *sk, void *p)280280{281281- struct sctp_endpoint *ep = tsp->asoc->ep;281281+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;282282 struct sctp_comm_param *commp = p;283283- struct sock *sk = ep->base.sk;284283 struct sk_buff *skb = commp->skb;285284 struct netlink_callback *cb = commp->cb;286285 const struct inet_diag_req_v2 *r = commp->r;287287- struct sctp_association *assoc =288288- list_entry(ep->asocs.next, struct sctp_association, asocs);286286+ struct sctp_association *assoc;289287 int err = 0;290288291291- /* find the ep only once through the transports by this condition */292292- if (tsp->asoc != assoc)293293- goto out;294294-295295- if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)296296- goto out;297297-298289 lock_sock(sk);299299- if (sk != assoc->base.sk)300300- goto release;301290 list_for_each_entry(assoc, &ep->asocs, asocs) {302291 if (cb->args[4] < cb->args[1])303292 goto next;···306317 NLM_F_MULTI, cb->nlh,307318 commp->net_admin) < 0) {308319 cb->args[3] = 1;309309- err = 2;320320+ err = 1;310321 goto release;311322 }312323 cb->args[3] = 1;···316327 NETLINK_CB(cb->skb).portid,317328 cb->nlh->nlmsg_seq, 0, cb->nlh,318329 commp->net_admin) < 0) {319319- err = 2;330330+ err = 1;320331 goto release;321332 }322333next:···328339 cb->args[4] = 0;329340release:330341 release_sock(sk);342342+ sock_put(sk);331343 return err;344344+}345345+346346+static int sctp_get_sock(struct sctp_transport *tsp, void *p)347347+{348348+ struct sctp_endpoint *ep = tsp->asoc->ep;349349+ struct sctp_comm_param *commp = p;350350+ struct sock *sk = ep->base.sk;351351+ struct netlink_callback *cb = commp->cb;352352+ const struct inet_diag_req_v2 *r = commp->r;353353+ struct sctp_association *assoc =354354+ list_entry(ep->asocs.next, struct sctp_association, asocs);355355+356356+ /* find the ep only once through the 
transports by this condition */357357+ if (tsp->asoc != assoc)358358+ goto out;359359+360360+ if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)361361+ goto out;362362+363363+ sock_hold(sk);364364+ cb->args[5] = (long)sk;365365+366366+ return 1;367367+332368out:333369 cb->args[2]++;334334- return err;370370+ return 0;335371}336372337373static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)···494480 * 2 : to record the transport pos of this time's traversal495481 * 3 : to mark if we have dumped the ep info of the current asoc496482 * 4 : to work as a temporary variable to traversal list483483+ * 5 : to save the sk we get from travelsing the tsp list.497484 */498485 if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))499486 goto done;500500- sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);487487+488488+next:489489+ cb->args[5] = 0;490490+ sctp_for_each_transport(sctp_get_sock, net, cb->args[2], &commp);491491+492492+ if (cb->args[5] && !sctp_sock_dump((struct sock *)cb->args[5], &commp))493493+ goto next;494494+501495done:502496 cb->args[1] = cb->args[4];503497 cb->args[4] = 0;
-15
net/sctp/sm_make_chunk.c
···706706 return retval;707707}708708709709-static void sctp_set_prsctp_policy(struct sctp_chunk *chunk,710710- const struct sctp_sndrcvinfo *sinfo)711711-{712712- if (!chunk->asoc->prsctp_enable)713713- return;714714-715715- if (SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags))716716- chunk->prsctp_param =717717- jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive);718718- else if (SCTP_PR_RTX_ENABLED(sinfo->sinfo_flags) ||719719- SCTP_PR_PRIO_ENABLED(sinfo->sinfo_flags))720720- chunk->prsctp_param = sinfo->sinfo_timetolive;721721-}722722-723709/* Make a DATA chunk for the given association from the provided724710 * parameters. However, do not populate the data payload.725711 */···739753740754 retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);741755 memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));742742- sctp_set_prsctp_policy(retval, sinfo);743756744757nodata:745758 return retval;
+7-3
net/sctp/socket.c
···44734473 const union sctp_addr *paddr, void *p)44744474{44754475 struct sctp_transport *transport;44764476- int err = 0;44764476+ int err = -ENOENT;4477447744784478 rcu_read_lock();44794479 transport = sctp_addrs_lookup_transport(net, laddr, paddr);44804480 if (!transport || !sctp_transport_hold(transport))44814481 goto out;44824482- err = cb(transport, p);44824482+44834483+ sctp_association_hold(transport->asoc);44834484 sctp_transport_put(transport);4484448544854485-out:44864486 rcu_read_unlock();44874487+ err = cb(transport, p);44884488+ sctp_association_put(transport->asoc);44894489+44904490+out:44874491 return err;44884492}44894493EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
+3-3
net/vmw_vsock/af_vsock.c
···465465466466 if (vsock_is_pending(sk)) {467467 vsock_remove_pending(listener, sk);468468+469469+ listener->sk_ack_backlog--;468470 } else if (!vsk->rejected) {469471 /* We are not on the pending list and accept() did not reject470472 * us, so we must have been accepted by our user process. We···476474 cleanup = false;477475 goto out;478476 }479479-480480- listener->sk_ack_backlog--;481477482478 /* We need to remove ourself from the global connected sockets list so483479 * incoming packets can't find this socket, and to reduce the reference···2010201020112011MODULE_AUTHOR("VMware, Inc.");20122012MODULE_DESCRIPTION("VMware Virtual Socket Family");20132013-MODULE_VERSION("1.0.1.0-k");20132013+MODULE_VERSION("1.0.2.0-k");20142014MODULE_LICENSE("GPL v2");