Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm/arm64: Get rid of KERN_TO_HYP

We have both KERN_TO_HYP and kern_hyp_va, which do the exact same
thing. Let's standardize on the latter.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>

authored by

Marc Zyngier and committed by
Christoffer Dall
6c41a413 eac378a9

+10 -13
-2
arch/arm/include/asm/kvm_hyp.h
```diff
@@ -25,8 +25,6 @@
 
 #define __hyp_text __section(.hyp.text) notrace
 
-#define kern_hyp_va(v) (v)
-
 #define __ACCESS_CP15(CRn, Op1, CRm, Op2)	\
 	"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
 #define __ACCESS_CP15_64(Op1, CRm)	\
```
+1 -1
arch/arm/include/asm/kvm_mmu.h
```diff
@@ -26,7 +26,7 @@
  * We directly use the kernel VA for the HYP, as we can directly share
  * the mapping (HTTBR "covers" TTBR1).
  */
-#define KERN_TO_HYP(kva)	(kva)
+#define kern_hyp_va(kva)	(kva)
 
 /*
  * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
```
+9 -9
arch/arm/kvm/mmu.c
```diff
@@ -506,9 +506,9 @@
 	if (hyp_pgd) {
 		unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
 		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-			unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
 		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-			unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
 
 		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
 		hyp_pgd = NULL;
@@ -670,8 +670,8 @@
 {
 	phys_addr_t phys_addr;
 	unsigned long virt_addr;
-	unsigned long start = KERN_TO_HYP((unsigned long)from);
-	unsigned long end = KERN_TO_HYP((unsigned long)to);
+	unsigned long start = kern_hyp_va((unsigned long)from);
+	unsigned long end = kern_hyp_va((unsigned long)to);
 
 	if (is_kernel_in_hyp_mode())
 		return 0;
@@ -705,8 +705,8 @@
  */
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 {
-	unsigned long start = KERN_TO_HYP((unsigned long)from);
-	unsigned long end = KERN_TO_HYP((unsigned long)to);
+	unsigned long start = kern_hyp_va((unsigned long)from);
+	unsigned long end = kern_hyp_va((unsigned long)to);
 
 	if (is_kernel_in_hyp_mode())
 		return 0;
@@ -1711,10 +1711,10 @@
 
 	kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
 	kvm_info("HYP VA range: %lx:%lx\n",
-		 KERN_TO_HYP(PAGE_OFFSET), KERN_TO_HYP(~0UL));
+		 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
 
-	if (hyp_idmap_start >= KERN_TO_HYP(PAGE_OFFSET) &&
-	    hyp_idmap_start < KERN_TO_HYP(~0UL)) {
+	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
+	    hyp_idmap_start < kern_hyp_va(~0UL)) {
 		/*
 		 * The idmap page is intersecting with the VA space,
 		 * it is not safe to continue further.
```
-1
arch/arm64/include/asm/kvm_mmu.h
```diff
@@ -133,7 +133,6 @@
 }
 
 #define kern_hyp_va(v) 	(typeof(v))(__kern_hyp_va((unsigned long)(v)))
-#define KERN_TO_HYP(v)	kern_hyp_va(v)
 
 /*
  * We currently only support a 40bit IPA.
```