Merge our fixes branch into next. This brings in a number of commits that fix bugs we don't want to hit in next, in particular the fix for CVE-2019-12817.
···884884 return false;885885}886886887887+static inline int pmd_is_serializing(pmd_t pmd)888888+{889889+ /*890890+ * If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear891891+ * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate).892892+ *893893+ * This condition may also occur when flushing a pmd while flushing894894+ * it (see ptep_modify_prot_start), so callers must ensure this895895+ * case is fine as well.896896+ */897897+ if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==898898+ cpu_to_be64(_PAGE_INVALID))899899+ return true;900900+901901+ return false;902902+}903903+887904static inline int pmd_bad(pmd_t pmd)888905{889906 if (radix_enabled())···11171100#define pmd_access_permitted pmd_access_permitted11181101static inline bool pmd_access_permitted(pmd_t pmd, bool write)11191102{11031103+ /*11041104+ * pmdp_invalidate sets this combination (which is not caught by11051105+ * !pte_present() check in pte_access_permitted), to prevent11061106+ * lock-free lookups, as part of the serialize_against_pte_lookup()11071107+ * synchronisation.11081108+ *11091109+ * This also catches the case where the PTE's hardware PRESENT bit is11101110+ * cleared while TLB is flushed, which is suboptimal but should not11111111+ * be frequent.11121112+ */11131113+ if (pmd_is_serializing(pmd))11141114+ return false;11151115+11201116 return pte_access_permitted(pmd_pte(pmd), write);11211117}11221118
+4
arch/powerpc/include/asm/btext.h
···1313 int depth, int pitch);1414extern void btext_setup_display(int width, int height, int depth, int pitch,1515 unsigned long address);1616+#ifdef CONFIG_PPC321617extern void btext_prepare_BAT(void);1818+#else1919+static inline void btext_prepare_BAT(void) { }2020+#endif1721extern void btext_map(void);1822extern void btext_unmap(void);1923
+3
arch/powerpc/include/asm/kexec.h
···9494 return crashing_cpu >= 0;9595}96969797+void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,9898+ unsigned long start_address) __noreturn;9999+97100#ifdef CONFIG_KEXEC_FILE98101extern const struct kexec_file_ops kexec_elf64_ops;99102
+7
arch/powerpc/include/asm/page.h
···323323#endif /* __ASSEMBLY__ */324324#include <asm/slice.h>325325326326+/*327327+ * Allow 30-bit DMA for very limited Broadcom wifi chips on many powerbooks.328328+ */329329+#ifdef CONFIG_PPC32330330+#define ARCH_ZONE_DMA_BITS 30331331+#else326332#define ARCH_ZONE_DMA_BITS 31333333+#endif327334328335#endif /* _ASM_POWERPC_PAGE_H */
+1-1
arch/powerpc/kernel/exceptions-64s.S
···315315 mfspr r11,SPRN_DSISR /* Save DSISR */316316 std r11,_DSISR(r1)317317 std r9,_CCR(r1) /* Save CR in stackframe */318318- kuap_save_amr_and_lock r9, r10, cr1318318+ /* We don't touch AMR here, we never go to virtual mode */319319 /* Save r9 through r13 from EXMC save area to stack frame. */320320 EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)321321 mfmsr r11 /* get MSR value */
···25072507 LOAD_REG_ADDR(r11, dawr_force_enable)25082508 lbz r11, 0(r11)25092509 cmpdi r11, 025102510+ bne 3f25102511 li r3, H_HARDWARE25112511- beqlr25122512+ blr25132513+3:25122514 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */25132515 rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW25142516 rlwimi r5, r4, 2, DAWRX_WT25152517 clrrdi r4, r4, 325162518 std r4, VCPU_DAWR(r3)25172519 std r5, VCPU_DAWRX(r3)25202520+ /*25212521+ * If came in through the real mode hcall handler then it is necessary25222522+ * to write the registers since the return path won't. Otherwise it is25232523+ * sufficient to store then in the vcpu struct as they will be loaded25242524+ * next time the vcpu is run.25252525+ */25262526+ mfmsr r625272527+ andi. r6, r6, MSR_DR /* in real mode? */25282528+ bne 4f25182529 mtspr SPRN_DAWR, r425192530 mtspr SPRN_DAWRX, r525202520- li r3, 025312531+4: li r3, 025212532 blr2522253325232534_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
+47-8
arch/powerpc/mm/book3s64/mmu_context.c
···55555656void slb_setup_new_exec(void);57575858+static int realloc_context_ids(mm_context_t *ctx)5959+{6060+ int i, id;6161+6262+ /*6363+ * id 0 (aka. ctx->id) is special, we always allocate a new one, even if6464+ * there wasn't one allocated previously (which happens in the exec6565+ * case where ctx is newly allocated).6666+ *6767+ * We have to be a bit careful here. We must keep the existing ids in6868+ * the array, so that we can test if they're non-zero to decide if we6969+ * need to allocate a new one. However in case of error we must free the7070+ * ids we've allocated but *not* any of the existing ones (or risk a7171+ * UAF). That's why we decrement i at the start of the error handling7272+ * loop, to skip the id that we just tested but couldn't reallocate.7373+ */7474+ for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {7575+ if (i == 0 || ctx->extended_id[i]) {7676+ id = hash__alloc_context_id();7777+ if (id < 0)7878+ goto error;7979+8080+ ctx->extended_id[i] = id;8181+ }8282+ }8383+8484+ /* The caller expects us to return id */8585+ return ctx->id;8686+8787+error:8888+ for (i--; i >= 0; i--) {8989+ if (ctx->extended_id[i])9090+ ida_free(&mmu_context_ida, ctx->extended_id[i]);9191+ }9292+9393+ return id;9494+}9595+5896static int hash__init_new_context(struct mm_struct *mm)5997{6098 int index;61996262- index = hash__alloc_context_id();6363- if (index < 0)6464- return index;6565-66100 mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),67101 GFP_KERNEL);6868- if (!mm->context.hash_context) {6969- ida_free(&mmu_context_ida, index);102102+ if (!mm->context.hash_context)70103 return -ENOMEM;7171- }7210473105 /*74106 * The old code would re-promote on fork, we don't do that when using···12896 mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),12997 GFP_KERNEL);13098 if (!mm->context.hash_context->spt) {131131- ida_free(&mmu_context_ida, index);13299 kfree(mm->context.hash_context);133100 return -ENOMEM;134101 }135102 
}136103#endif104104+ }137105106106+ index = realloc_context_ids(&mm->context);107107+ if (index < 0) {108108+#ifdef CONFIG_PPC_SUBPAGE_PROT109109+ kfree(mm->context.hash_context->spt);110110+#endif111111+ kfree(mm->context.hash_context);112112+ return index;138113 }139114140115 pkey_mm_init(mm);
+3
arch/powerpc/mm/book3s64/pgtable.c
···116116 /*117117 * This ensures that generic code that rely on IRQ disabling118118 * to prevent a parallel THP split work as expected.119119+ *120120+ * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires121121+ * a special case check in pmd_access_permitted.119122 */120123 serialize_against_pte_lookup(vma->vm_mm);121124 return __pmd(old_pmd);
···372372 pdshift = PMD_SHIFT;373373 pmdp = pmd_offset(&pud, ea);374374 pmd = READ_ONCE(*pmdp);375375+375376 /*376376- * A hugepage collapse is captured by pmd_none, because377377- * it mark the pmd none and do a hpte invalidate.377377+ * A hugepage collapse is captured by this condition, see378378+ * pmdp_collapse_flush.378379 */379380 if (pmd_none(pmd))380381 return NULL;382382+383383+#ifdef CONFIG_PPC_BOOK3S_64384384+ /*385385+ * A hugepage split is captured by this condition, see386386+ * pmdp_invalidate.387387+ *388388+ * Huge page modification can be caught here too.389389+ */390390+ if (pmd_is_serializing(pmd))391391+ return NULL;392392+#endif381393382394 if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {383395 if (is_thp)
+1
arch/powerpc/platforms/powermac/Kconfig
···77 select PPC_INDIRECT_PCI if PPC3288 select PPC_MPC106 if PPC3299 select PPC_NATIVE1010+ select ZONE_DMA if PPC321011 default y11121213config PPC_PMAC64