Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/mm: force swiotlb for protected virtualization

On s390, protected virtualization guests have to use bounced I/O
buffers. That requires some plumbing.

Let us make sure that any device that uses the DMA API with direct ops
is correctly spared from the problems that a hypervisor attempting I/O
to a non-shared page would bring.

Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Reviewed-by: Michael Mueller <mimu@linux.ibm.com>
Tested-by: Michael Mueller <mimu@linux.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>

Authored by Halil Pasic and committed by Heiko Carstens
64e1f0c5 45488c48

+68
+4
arch/s390/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 + config ARCH_HAS_MEM_ENCRYPT 3 + def_bool y 4 + 2 5 config MMU 3 6 def_bool y 4 7 ··· 189 186 select VIRT_CPU_ACCOUNTING 190 187 select ARCH_HAS_SCALED_CPUTIME 191 188 select HAVE_NMI 189 + select SWIOTLB 192 190 193 191 194 192 config SCHED_OMIT_FRAME_POINTER
+17
arch/s390/include/asm/mem_encrypt.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef S390_MEM_ENCRYPT_H__ 3 + #define S390_MEM_ENCRYPT_H__ 4 + 5 + #ifndef __ASSEMBLY__ 6 + 7 + #define sme_me_mask 0ULL 8 + 9 + static inline bool sme_active(void) { return false; } 10 + extern bool sev_active(void); 11 + 12 + int set_memory_encrypted(unsigned long addr, int numpages); 13 + int set_memory_decrypted(unsigned long addr, int numpages); 14 + 15 + #endif /* __ASSEMBLY__ */ 16 + 17 + #endif /* S390_MEM_ENCRYPT_H__ */
+47
arch/s390/mm/init.c
··· 18 18 #include <linux/mman.h> 19 19 #include <linux/mm.h> 20 20 #include <linux/swap.h> 21 + #include <linux/swiotlb.h> 21 22 #include <linux/smp.h> 22 23 #include <linux/init.h> 23 24 #include <linux/pagemap.h> ··· 30 29 #include <linux/export.h> 31 30 #include <linux/cma.h> 32 31 #include <linux/gfp.h> 32 + #include <linux/dma-mapping.h> 33 33 #include <asm/processor.h> 34 34 #include <linux/uaccess.h> 35 35 #include <asm/pgtable.h> ··· 44 42 #include <asm/sclp.h> 45 43 #include <asm/set_memory.h> 46 44 #include <asm/kasan.h> 45 + #include <asm/dma-mapping.h> 46 + #include <asm/uv.h> 47 47 48 48 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir); 49 49 ··· 132 128 pr_info("Write protected read-only-after-init data: %luk\n", size >> 10); 133 129 } 134 130 131 + int set_memory_encrypted(unsigned long addr, int numpages) 132 + { 133 + int i; 134 + 135 + /* make specified pages unshared, (swiotlb, dma_free) */ 136 + for (i = 0; i < numpages; ++i) { 137 + uv_remove_shared(addr); 138 + addr += PAGE_SIZE; 139 + } 140 + return 0; 141 + } 142 + 143 + int set_memory_decrypted(unsigned long addr, int numpages) 144 + { 145 + int i; 146 + /* make specified pages shared (swiotlb, dma_alloca) */ 147 + for (i = 0; i < numpages; ++i) { 148 + uv_set_shared(addr); 149 + addr += PAGE_SIZE; 150 + } 151 + return 0; 152 + } 153 + 154 + /* are we a protected virtualization guest? */
155 + bool sev_active(void) 156 + { 157 + return is_prot_virt_guest(); 158 + } 159 + 160 + /* protected virtualization */ 161 + static void pv_init(void) 162 + { 163 + if (!is_prot_virt_guest()) 164 + return; 165 + 166 + /* make sure bounce buffers are shared */ 167 + swiotlb_init(1); 168 + swiotlb_update_mem_attributes(); 169 + swiotlb_force = SWIOTLB_FORCE; 170 + } 171 + 135 172 void __init mem_init(void) 136 173 { 137 174 cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask); ··· 180 135 181 136 set_max_mapnr(max_low_pfn); 182 137 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 138 + 139 + pv_init(); 183 140 184 141 /* Setup guest page hinting */ 185 142 cmma_init();