arch/s390/include/asm/tlb.h, at v6.18, from the Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */

static inline void tlb_flush(struct mmu_gather *tlb);
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
                struct page *page, bool delay_rmap, int page_size);
static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
                struct page *page, unsigned int nr_pages, bool delay_rmap);

#define tlb_flush tlb_flush
#define pte_free_tlb pte_free_tlb
#define pmd_free_tlb pmd_free_tlb
#define p4d_free_tlb p4d_free_tlb
#define pud_free_tlb pud_free_tlb

#include <asm/tlbflush.h>
#include <asm-generic/tlb.h>
#include <asm/gmap.h>

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been freed, so just do free_folio_and_swap_cache.
 *
 * s390 doesn't delay rmap removal.
 */
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
                struct page *page, bool delay_rmap, int page_size)
{
        VM_WARN_ON_ONCE(delay_rmap);

        free_folio_and_swap_cache(page_folio(page));
        return false;
}

static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
                struct page *page, unsigned int nr_pages, bool delay_rmap)
{
        struct encoded_page *encoded_pages[] = {
                encode_page(page, ENCODED_PAGE_BIT_NR_PAGES_NEXT),
                encode_nr_pages(nr_pages),
        };

        VM_WARN_ON_ONCE(delay_rmap);
        VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));

        free_pages_and_swap_cache(encoded_pages, ARRAY_SIZE(encoded_pages));
        return false;
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
        __tlb_flush_mm_lazy(tlb->mm);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
                                unsigned long address)
{
        __tlb_adjust_range(tlb, address, PAGE_SIZE);
        tlb->mm->context.flush_mm = 1;
        tlb->freed_tables = 1;
        tlb->cleared_pmds = 1;
        if (mm_has_pgste(tlb->mm))
                gmap_unlink(tlb->mm, (unsigned long *)pte, address);
        tlb_remove_ptdesc(tlb, virt_to_ptdesc(pte));
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                unsigned long address)
{
        if (mm_pmd_folded(tlb->mm))
                return;
        __tlb_adjust_range(tlb, address, PAGE_SIZE);
        tlb->mm->context.flush_mm = 1;
        tlb->freed_tables = 1;
        tlb->cleared_puds = 1;
        tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}

/*
 * p4d_free_tlb frees a p4d table and clears the CRSTE for the
 * region second table entry from the tlb.
 * If the mm uses a four level page table the single p4d is freed
 * as the pgd. p4d_free_tlb checks the asce_limit against 8PB
 * to avoid the double free of the p4d in this case.
 */
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
                                unsigned long address)
{
        if (mm_p4d_folded(tlb->mm))
                return;
        __tlb_adjust_range(tlb, address, PAGE_SIZE);
        tlb->mm->context.flush_mm = 1;
        tlb->freed_tables = 1;
        tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                unsigned long address)
{
        if (mm_pud_folded(tlb->mm))
                return;
        __tlb_adjust_range(tlb, address, PAGE_SIZE);
        tlb->mm->context.flush_mm = 1;
        tlb->freed_tables = 1;
        tlb->cleared_p4ds = 1;
        tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}

#endif /* _S390_TLB_H */
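
The two-step rule described in the header comment (first invalidate the pte, then store the new value) can be illustrated outside the kernel. The following is a minimal user-space sketch, not kernel code: the pte is modeled as a plain 64-bit word, model_ipte() merely stands in for the INVALIDATE PAGE TABLE ENTRY instruction, and the invalid-bit position is made up for illustration.

/*
 * Minimal user-space model of the s390 two-step pte update described
 * in the header comment above. All names and bit positions here are
 * illustrative only; this is not the real s390 pte layout or
 * instruction interface.
 */
#include <stdint.h>
#include <stdio.h>

#define PTE_INVALID (1ULL << 0)   /* made-up invalid bit for the model */

/* Stands in for INVALIDATE PAGE TABLE ENTRY: mark the entry invalid
 * and (conceptually) purge any TLB copies before anything else. */
static void model_ipte(uint64_t *entry)
{
        *entry |= PTE_INVALID;
        /* real hardware would purge the stale TLB entry here */
}

/* The rule from the principles of operation: never rewrite a valid,
 * attached entry directly. Invalidate first, then store the new pte. */
static void model_set_pte(uint64_t *entry, uint64_t new_val)
{
        model_ipte(entry);        /* step i: invalidate */
        *entry = new_val;         /* step ii: store the new pte */
}

int main(void)
{
        uint64_t pte = 0x2000;            /* some valid translation */

        model_set_pte(&pte, 0x3000);      /* two-step replacement */
        printf("pte = %#llx\n", (unsigned long long)pte);
        return 0;
}

The point of the ordering is that a CPU walking the tables concurrently can only ever observe the old valid entry or an invalid entry, never a half-updated one.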
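
The 2GB, 4TB and 8PB limits cited in the pmd_free_tlb, pud_free_tlb and p4d_free_tlb comments determine when a table level is folded, i.e. when the would-be table really is the pgd and must not be freed twice. The sketch below is a user-space model of those checks; the *_folded() helpers are hypothetical stand-ins for the real mm_pmd_folded()/mm_pud_folded()/mm_p4d_folded() tests, and only the limits named in the comments above are assumed.

/*
 * User-space model of the table-folding checks referenced in the
 * comments above. The real helpers live in the s390 kernel headers;
 * the limits (2GB, 4TB, 8PB) are taken from the comments in this file.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LIMIT_2GB (1ULL << 31)
#define LIMIT_4TB (1ULL << 42)
#define LIMIT_8PB (1ULL << 53)

/* With asce_limit <= 2GB the pmd doubles as the pgd, so pmd_free_tlb
 * must not free it again. Analogous for pud (4TB) and p4d (8PB). */
static bool pmd_folded(uint64_t asce_limit) { return asce_limit <= LIMIT_2GB; }
static bool pud_folded(uint64_t asce_limit) { return asce_limit <= LIMIT_4TB; }
static bool p4d_folded(uint64_t asce_limit) { return asce_limit <= LIMIT_8PB; }

int main(void)
{
        const uint64_t limits[] = { LIMIT_2GB, LIMIT_4TB, LIMIT_8PB, 1ULL << 57 };

        for (unsigned int i = 0; i < sizeof(limits) / sizeof(limits[0]); i++) {
                uint64_t l = limits[i];

                printf("asce_limit %#llx: pmd %s, pud %s, p4d %s\n",
                       (unsigned long long)l,
                       pmd_folded(l) ? "folded" : "freed",
                       pud_folded(l) ? "folded" : "freed",
                       p4d_folded(l) ? "folded" : "freed");
        }
        return 0;
}

For a three level (4TB) address space, for instance, the model reports the pud as folded: pud_free_tlb returns early, exactly as the comment above describes.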