Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: get rid of kmap_high_l1_vipt()

Since commit 3e4d3af501 "mm: stack based kmap_atomic()", it is no longer
necessary to carry an ad hoc version of kmap_atomic() added in commit
7e5a69e83b "ARM: 6007/1: fix highmem with VIPT cache and DMA" to cope
with reentrancy.

In fact, it is now actively wrong to rely on fixed kmap type indices
(namely KM_L1_CACHE) as kmap_atomic() totally ignores them now and a
concurrent instance of it may reuse any slot for any purpose.

Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>

Authored and committed by Nicolas Pitre.
Commit: 39af22a7 (parent: b0c3844d)

+8 -96
-3
arch/arm/include/asm/highmem.h
@@ -25,9 +25,6 @@
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
-extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
-
 /*
  * The following functions are already defined by <linux/highmem.h>
  * when CONFIG_HIGHMEM is not set.
+4 -3
arch/arm/mm/dma-mapping.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/highmem.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -481,10 +480,10 @@
 			op(vaddr, len, dir);
 			kunmap_high(page);
 		} else if (cache_is_vipt()) {
-			pte_t saved_pte;
-			vaddr = kmap_high_l1_vipt(page, &saved_pte);
+			/* unmapped pages might still be cached */
+			vaddr = kmap_atomic(page);
 			op(vaddr + offset, len, dir);
-			kunmap_high_l1_vipt(page, saved_pte);
+			kunmap_atomic(vaddr);
 		}
 	} else {
 		vaddr = page_address(page) + offset;
+4 -3
arch/arm/mm/flush.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -181,10 +180,10 @@
 			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 			kunmap_high(page);
 		} else if (cache_is_vipt()) {
-			pte_t saved_pte;
-			addr = kmap_high_l1_vipt(page, &saved_pte);
+			/* unmapped pages might still be cached */
+			addr = kmap_atomic(page);
 			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-			kunmap_high_l1_vipt(page, saved_pte);
+			kunmap_atomic(addr);
 		}
 	}
 
-87
arch/arm/mm/highmem.c
@@ -140,90 +140,3 @@
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
- * The VIVT cache of a highmem page is always flushed before the page
- * is unmapped. Hence unmapped highmem pages need no cache maintenance
- * in that case.
- *
- * However unmapped pages may still be cached with a VIPT cache, and
- * it is not possible to perform cache maintenance on them using physical
- * addresses unfortunately. So we have no choice but to set up a temporary
- * virtual mapping for that purpose.
- *
- * Yet this VIPT cache maintenance may be triggered from DMA support
- * functions which are possibly called from interrupt context. As we don't
- * want to keep interrupt disabled all the time when such maintenance is
- * taking place, we therefore allow for some reentrancy by preserving and
- * restoring the previous fixmap entry before the interrupted context is
- * resumed. If the reentrancy depth is 0 then there is no need to restore
- * the previous fixmap, and leaving the current one in place allow it to
- * be reused the next time without a TLB flush (common with DMA).
- */
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
-	unsigned int idx, cpu;
-	int *depth;
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	if (!in_interrupt())
-		preempt_disable();
-
-	cpu = smp_processor_id();
-	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	raw_local_irq_save(flags);
-	(*depth)++;
-	if (pte_val(*ptep) == pte_val(pte)) {
-		*saved_pte = pte;
-	} else {
-		*saved_pte = *ptep;
-		set_pte_ext(ptep, pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
-	unsigned int idx, cpu = smp_processor_id();
-	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	BUG_ON(pte_val(*ptep) != pte_val(pte));
-	BUG_ON(*depth <= 0);
-
-	raw_local_irq_save(flags);
-	(*depth)--;
-	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
-		set_pte_ext(ptep, saved_pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	if (!in_interrupt())
-		preempt_enable();
-}
-
-#endif /* CONFIG_CPU_CACHE_VIPT */