Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[S390] zero page cache synonyms

If the zero page is mapped to virtual user space addresses that differ
only in bit 2^12 or 2^13 we get L1 cache synonyms which can affect
performance. Follow the mips model and use multiple zero pages to avoid
the synonyms.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Martin Schwidefsky and committed by Martin Schwidefsky
238ec4ef 229aebb8

+64 -7
+19 -3
arch/s390/include/asm/pgtable.h
··· 46 46 #define update_mmu_cache(vma, address, ptep) do { } while (0) 47 47 48 48 /* 49 - * ZERO_PAGE is a global shared page that is always zero: used 49 + * ZERO_PAGE is a global shared page that is always zero; used 50 50 * for zero-mapped memory areas etc.. 51 51 */ 52 - extern char empty_zero_page[PAGE_SIZE]; 53 - #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 52 + 53 + extern unsigned long empty_zero_page; 54 + extern unsigned long zero_page_mask; 55 + 56 + #define ZERO_PAGE(vaddr) \ 57 + (virt_to_page((void *)(empty_zero_page + \ 58 + (((unsigned long)(vaddr)) & zero_page_mask)))) 59 + 60 + #define is_zero_pfn is_zero_pfn 61 + static inline int is_zero_pfn(unsigned long pfn) 62 + { 63 + extern unsigned long zero_pfn; 64 + unsigned long offset_from_zero_pfn = pfn - zero_pfn; 65 + return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); 66 + } 67 + 68 + #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) 69 + 54 70 #endif /* !__ASSEMBLY__ */ 55 71 56 72 /*
+45 -4
arch/s390/mm/init.c
··· 42 42 43 43 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); 44 44 45 - char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); 45 + unsigned long empty_zero_page, zero_page_mask; 46 46 EXPORT_SYMBOL(empty_zero_page); 47 + 48 + static unsigned long setup_zero_pages(void) 49 + { 50 + struct cpuid cpu_id; 51 + unsigned int order; 52 + unsigned long size; 53 + struct page *page; 54 + int i; 55 + 56 + get_cpu_id(&cpu_id); 57 + switch (cpu_id.machine) { 58 + case 0x9672: /* g5 */ 59 + case 0x2064: /* z900 */ 60 + case 0x2066: /* z900 */ 61 + case 0x2084: /* z990 */ 62 + case 0x2086: /* z990 */ 63 + case 0x2094: /* z9-109 */ 64 + case 0x2096: /* z9-109 */ 65 + order = 0; 66 + break; 67 + case 0x2097: /* z10 */ 68 + case 0x2098: /* z10 */ 69 + default: 70 + order = 2; 71 + break; 72 + } 73 + 74 + empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 75 + if (!empty_zero_page) 76 + panic("Out of memory in setup_zero_pages"); 77 + 78 + page = virt_to_page((void *) empty_zero_page); 79 + split_page(page, order); 80 + for (i = 1 << order; i > 0; i--) { 81 + SetPageReserved(page); 82 + page++; 83 + } 84 + 85 + size = PAGE_SIZE << order; 86 + zero_page_mask = (size - 1) & PAGE_MASK; 87 + 88 + return 1UL << order; 89 + } 47 90 48 91 /* 49 92 * paging_init() sets up the page tables ··· 135 92 max_mapnr = num_physpages = max_low_pfn; 136 93 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 137 94 138 - /* clear the zero-page */ 139 - memset(empty_zero_page, 0, PAGE_SIZE); 140 - 141 95 /* Setup guest page hinting */ 142 96 cmma_init(); 143 97 144 98 /* this will put all low memory onto the freelists */ 145 99 totalram_pages += free_all_bootmem(); 100 + totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ 146 101 147 102 reservedpages = 0; 148 103