Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kvm-arm: Add stage2 page table modifiers

Now that the hyp page table is handled by a different set of
routines, rename the original shared routines to stage2 handlers.
Also make explicit use of the stage2 page table helpers.

unmap_range has been merged into the existing unmap_stage2_range.

Cc: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>

authored by

Suzuki K Poulose and committed by
Christoffer Dall
7a1c831e 64f32497

+44 -53
+44 -53
arch/arm/kvm/mmu.c
··· 152 152 return p; 153 153 } 154 154 155 - static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr) 155 + static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr) 156 156 { 157 - pud_t *pud_table __maybe_unused = pud_offset(pgd, 0); 158 - pgd_clear(pgd); 157 + pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL); 158 + stage2_pgd_clear(pgd); 159 159 kvm_tlb_flush_vmid_ipa(kvm, addr); 160 - pud_free(NULL, pud_table); 160 + stage2_pud_free(pud_table); 161 161 put_page(virt_to_page(pgd)); 162 162 } 163 163 164 - static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) 164 + static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) 165 165 { 166 - pmd_t *pmd_table = pmd_offset(pud, 0); 167 - VM_BUG_ON(pud_huge(*pud)); 168 - pud_clear(pud); 166 + pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0); 167 + VM_BUG_ON(stage2_pud_huge(*pud)); 168 + stage2_pud_clear(pud); 169 169 kvm_tlb_flush_vmid_ipa(kvm, addr); 170 - pmd_free(NULL, pmd_table); 170 + stage2_pmd_free(pmd_table); 171 171 put_page(virt_to_page(pud)); 172 172 } 173 173 174 - static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) 174 + static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) 175 175 { 176 176 pte_t *pte_table = pte_offset_kernel(pmd, 0); 177 177 VM_BUG_ON(pmd_thp_or_huge(*pmd)); ··· 201 201 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure 202 202 * the IO subsystem will never hit in the cache. 
203 203 */ 204 - static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, 204 + static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd, 205 205 phys_addr_t addr, phys_addr_t end) 206 206 { 207 207 phys_addr_t start_addr = addr; ··· 223 223 } 224 224 } while (pte++, addr += PAGE_SIZE, addr != end); 225 225 226 - if (kvm_pte_table_empty(kvm, start_pte)) 227 - clear_pmd_entry(kvm, pmd, start_addr); 226 + if (stage2_pte_table_empty(start_pte)) 227 + clear_stage2_pmd_entry(kvm, pmd, start_addr); 228 228 } 229 229 230 - static void unmap_pmds(struct kvm *kvm, pud_t *pud, 230 + static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud, 231 231 phys_addr_t addr, phys_addr_t end) 232 232 { 233 233 phys_addr_t next, start_addr = addr; 234 234 pmd_t *pmd, *start_pmd; 235 235 236 - start_pmd = pmd = pmd_offset(pud, addr); 236 + start_pmd = pmd = stage2_pmd_offset(pud, addr); 237 237 do { 238 - next = kvm_pmd_addr_end(addr, end); 238 + next = stage2_pmd_addr_end(addr, end); 239 239 if (!pmd_none(*pmd)) { 240 240 if (pmd_thp_or_huge(*pmd)) { 241 241 pmd_t old_pmd = *pmd; ··· 247 247 248 248 put_page(virt_to_page(pmd)); 249 249 } else { 250 - unmap_ptes(kvm, pmd, addr, next); 250 + unmap_stage2_ptes(kvm, pmd, addr, next); 251 251 } 252 252 } 253 253 } while (pmd++, addr = next, addr != end); 254 254 255 - if (kvm_pmd_table_empty(kvm, start_pmd)) 256 - clear_pud_entry(kvm, pud, start_addr); 255 + if (stage2_pmd_table_empty(start_pmd)) 256 + clear_stage2_pud_entry(kvm, pud, start_addr); 257 257 } 258 258 259 - static void unmap_puds(struct kvm *kvm, pgd_t *pgd, 259 + static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd, 260 260 phys_addr_t addr, phys_addr_t end) 261 261 { 262 262 phys_addr_t next, start_addr = addr; 263 263 pud_t *pud, *start_pud; 264 264 265 - start_pud = pud = pud_offset(pgd, addr); 265 + start_pud = pud = stage2_pud_offset(pgd, addr); 266 266 do { 267 - next = kvm_pud_addr_end(addr, end); 268 - if (!pud_none(*pud)) { 269 - if (pud_huge(*pud)) { 267 + next = 
stage2_pud_addr_end(addr, end); 268 + if (!stage2_pud_none(*pud)) { 269 + if (stage2_pud_huge(*pud)) { 270 270 pud_t old_pud = *pud; 271 271 272 - pud_clear(pud); 272 + stage2_pud_clear(pud); 273 273 kvm_tlb_flush_vmid_ipa(kvm, addr); 274 - 275 274 kvm_flush_dcache_pud(old_pud); 276 - 277 275 put_page(virt_to_page(pud)); 278 276 } else { 279 - unmap_pmds(kvm, pud, addr, next); 277 + unmap_stage2_pmds(kvm, pud, addr, next); 280 278 } 281 279 } 282 280 } while (pud++, addr = next, addr != end); 283 281 284 - if (kvm_pud_table_empty(kvm, start_pud)) 285 - clear_pgd_entry(kvm, pgd, start_addr); 282 + if (stage2_pud_table_empty(start_pud)) 283 + clear_stage2_pgd_entry(kvm, pgd, start_addr); 286 284 } 287 285 288 - 289 - static void unmap_range(struct kvm *kvm, pgd_t *pgdp, 290 - phys_addr_t start, u64 size) 286 + /** 287 + * unmap_stage2_range -- Clear stage2 page table entries to unmap a range 288 + * @kvm: The VM pointer 289 + * @start: The intermediate physical base address of the range to unmap 290 + * @size: The size of the area to unmap 291 + * 292 + * Clear a range of stage-2 mappings, lowering the various ref-counts. Must 293 + * be called while holding mmu_lock (unless for freeing the stage2 pgd before 294 + * destroying the VM), otherwise another faulting VCPU may come in and mess 295 + * with things behind our backs. 
296 + */ 297 + static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) 291 298 { 292 299 pgd_t *pgd; 293 300 phys_addr_t addr = start, end = start + size; 294 301 phys_addr_t next; 295 302 296 - pgd = pgdp + kvm_pgd_index(addr); 303 + pgd = kvm->arch.pgd + stage2_pgd_index(addr); 297 304 do { 298 - next = kvm_pgd_addr_end(addr, end); 299 - if (!pgd_none(*pgd)) 300 - unmap_puds(kvm, pgd, addr, next); 305 + next = stage2_pgd_addr_end(addr, end); 306 + if (!stage2_pgd_none(*pgd)) 307 + unmap_stage2_puds(kvm, pgd, addr, next); 301 308 } while (pgd++, addr = next, addr != end); 302 309 } 303 310 ··· 797 790 kvm_clean_pgd(pgd); 798 791 kvm->arch.pgd = pgd; 799 792 return 0; 800 - } 801 - 802 - /** 803 - * unmap_stage2_range -- Clear stage2 page table entries to unmap a range 804 - * @kvm: The VM pointer 805 - * @start: The intermediate physical base address of the range to unmap 806 - * @size: The size of the area to unmap 807 - * 808 - * Clear a range of stage-2 mappings, lowering the various ref-counts. Must 809 - * be called while holding mmu_lock (unless for freeing the stage2 pgd before 810 - * destroying the VM), otherwise another faulting VCPU may come in and mess 811 - * with things behind our backs. 812 - */ 813 - static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) 814 - { 815 - unmap_range(kvm, kvm->arch.pgd, start, size); 816 793 } 817 794 818 795 static void stage2_unmap_memslot(struct kvm *kvm,