Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[S390] fix tlb flushing for page table pages

Git commit 36409f6353fc2d7b6516e631415f938eadd92ffa "use generic RCU
page-table freeing code" introduced a tlb flushing bug. Partially revert
the above git commit and go back to s390 specific page table flush code.

For s390 the TLB can contain three types of entries, "normal" TLB
page-table entries, TLB combined region-and-segment-table (CRST) entries
and real-space entries. Linux does not use real-space entries which
leaves normal TLB entries and CRST entries. The CRST entries are
intermediate steps in the page-table translation called translation paths.
For example a 4K page access in a three-level page table setup will
create two CRST TLB entries and one page-table TLB entry. The advantage
of that approach is that a page access next to the previous one can reuse
the CRST entries and needs just a single read from memory to create the
page-table TLB entry. The disadvantage is that the TLB flushing rules are
more complicated: before any page-table may be freed, the TLB needs to be
flushed.

In short: the generic RCU page-table freeing code is incorrect for the
CRST entries, in particular the check for mm_users < 2 is troublesome.

This is applicable to 3.0+ kernels.

Cc: <stable@vger.kernel.org>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

+61 -28
-1
arch/s390/Kconfig
··· 90 90 select HAVE_KERNEL_XZ 91 91 select HAVE_ARCH_MUTEX_CPU_RELAX 92 92 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 93 - select HAVE_RCU_TABLE_FREE if SMP 94 93 select ARCH_SAVE_PAGE_KEYS if HIBERNATION 95 94 select HAVE_MEMBLOCK 96 95 select HAVE_MEMBLOCK_NODE_MAP
-3
arch/s390/include/asm/pgalloc.h
··· 22 22 23 23 unsigned long *page_table_alloc(struct mm_struct *, unsigned long); 24 24 void page_table_free(struct mm_struct *, unsigned long *); 25 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 26 25 void page_table_free_rcu(struct mmu_gather *, unsigned long *); 27 - void __tlb_remove_table(void *_table); 28 - #endif 29 26 30 27 static inline void clear_table(unsigned long *s, unsigned long val, size_t n) 31 28 {
+1 -21
arch/s390/include/asm/tlb.h
··· 30 30 31 31 struct mmu_gather { 32 32 struct mm_struct *mm; 33 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 34 33 struct mmu_table_batch *batch; 35 - #endif 36 34 unsigned int fullmm; 37 - unsigned int need_flush; 38 35 }; 39 36 40 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 41 37 struct mmu_table_batch { 42 38 struct rcu_head rcu; 43 39 unsigned int nr; ··· 45 49 46 50 extern void tlb_table_flush(struct mmu_gather *tlb); 47 51 extern void tlb_remove_table(struct mmu_gather *tlb, void *table); 48 - #endif 49 52 50 53 static inline void tlb_gather_mmu(struct mmu_gather *tlb, 51 54 struct mm_struct *mm, ··· 52 57 { 53 58 tlb->mm = mm; 54 59 tlb->fullmm = full_mm_flush; 55 - tlb->need_flush = 0; 56 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 57 60 tlb->batch = NULL; 58 - #endif 59 61 if (tlb->fullmm) 60 62 __tlb_flush_mm(mm); 61 63 } 62 64 63 65 static inline void tlb_flush_mmu(struct mmu_gather *tlb) 64 66 { 65 - if (!tlb->need_flush) 66 - return; 67 - tlb->need_flush = 0; 68 - __tlb_flush_mm(tlb->mm); 69 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 70 67 tlb_table_flush(tlb); 71 - #endif 72 68 } 73 69 74 70 static inline void tlb_finish_mmu(struct mmu_gather *tlb, 75 71 unsigned long start, unsigned long end) 76 72 { 77 - tlb_flush_mmu(tlb); 73 + tlb_table_flush(tlb); 78 74 } 79 75 80 76 /* ··· 91 105 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, 92 106 unsigned long address) 93 107 { 94 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 95 108 if (!tlb->fullmm) 96 109 return page_table_free_rcu(tlb, (unsigned long *) pte); 97 - #endif 98 110 page_table_free(tlb->mm, (unsigned long *) pte); 99 111 } 100 112 ··· 109 125 #ifdef __s390x__ 110 126 if (tlb->mm->context.asce_limit <= (1UL << 31)) 111 127 return; 112 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 113 128 if (!tlb->fullmm) 114 129 return tlb_remove_table(tlb, pmd); 115 - #endif 116 130 crst_table_free(tlb->mm, (unsigned long *) pmd); 117 131 #endif 118 132 } ··· 128 146 #ifdef __s390x__ 129 147 if (tlb->mm->context.asce_limit <= (1UL << 42)) 130 148 return; 131 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 132 149 if (!tlb->fullmm) 133 150 return tlb_remove_table(tlb, pud); 134 - #endif 135 151 crst_table_free(tlb->mm, (unsigned long *) pud); 136 152 #endif 137 153 }
+60 -3
arch/s390/mm/pgtable.c
··· 678 678 } 679 679 } 680 680 681 - #ifdef CONFIG_HAVE_RCU_TABLE_FREE 682 - 683 681 static void __page_table_free_rcu(void *table, unsigned bit) 684 682 { 685 683 struct page *page; ··· 731 733 free_pages((unsigned long) table, ALLOC_ORDER); 732 734 } 733 735 734 - #endif 736 + static void tlb_remove_table_smp_sync(void *arg) 737 + { 738 + /* Simply deliver the interrupt */ 739 + } 740 + 741 + static void tlb_remove_table_one(void *table) 742 + { 743 + /* 744 + * This isn't an RCU grace period and hence the page-tables cannot be 745 + * assumed to be actually RCU-freed. 746 + * 747 + * It is however sufficient for software page-table walkers that rely 748 + * on IRQ disabling. See the comment near struct mmu_table_batch. 749 + */ 750 + smp_call_function(tlb_remove_table_smp_sync, NULL, 1); 751 + __tlb_remove_table(table); 752 + } 753 + 754 + static void tlb_remove_table_rcu(struct rcu_head *head) 755 + { 756 + struct mmu_table_batch *batch; 757 + int i; 758 + 759 + batch = container_of(head, struct mmu_table_batch, rcu); 760 + 761 + for (i = 0; i < batch->nr; i++) 762 + __tlb_remove_table(batch->tables[i]); 763 + 764 + free_page((unsigned long)batch); 765 + } 766 + 767 + void tlb_table_flush(struct mmu_gather *tlb) 768 + { 769 + struct mmu_table_batch **batch = &tlb->batch; 770 + 771 + if (*batch) { 772 + __tlb_flush_mm(tlb->mm); 773 + call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); 774 + *batch = NULL; 775 + } 776 + } 777 + 778 + void tlb_remove_table(struct mmu_gather *tlb, void *table) 779 + { 780 + struct mmu_table_batch **batch = &tlb->batch; 781 + 782 + if (*batch == NULL) { 783 + *batch = (struct mmu_table_batch *) 784 + __get_free_page(GFP_NOWAIT | __GFP_NOWARN); 785 + if (*batch == NULL) { 786 + __tlb_flush_mm(tlb->mm); 787 + tlb_remove_table_one(table); 788 + return; 789 + } 790 + (*batch)->nr = 0; 791 + } 792 + (*batch)->tables[(*batch)->nr++] = table; 793 + if ((*batch)->nr == MAX_TABLE_BATCH) 794 + tlb_table_flush(tlb); 795 + } 735 796 736 797 /* 737 798 * switch on pgstes for its userspace process (for kvm)