Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch/tile: complete migration to new kmap_atomic scheme

This change makes KM_TYPE_NR independent of the actual deprecated
list of km_type values, which are no longer used in tile code anywhere.
For now we leave it set to 8, allowing that many nested mappings,
and thus reserving 32MB of address space.

A few remaining places using KM_* values were cleaned up as well.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>

+40 -24
-1
arch/tile/include/asm/highmem.h
··· 23 23 24 24 #include <linux/interrupt.h> 25 25 #include <linux/threads.h> 26 - #include <asm/kmap_types.h> 27 26 #include <asm/tlbflush.h> 28 27 #include <asm/homecache.h> 29 28
+24 -10
arch/tile/include/asm/kmap_types.h
··· 16 16 #define _ASM_TILE_KMAP_TYPES_H 17 17 18 18 /* 19 - * In TILE Linux each set of four of these uses another 16MB chunk of 20 - * address space, given 64 tiles and 64KB pages, so we only enable 21 - * ones that are required by the kernel configuration. 19 + * In 32-bit TILE Linux we have to balance the desire to have a lot of 20 + * nested atomic mappings with the fact that large page sizes and many 21 + * processors chew up address space quickly. In a typical 22 + * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger 23 + * adds 4MB of required address-space. For now we leave KM_TYPE_NR 24 + * set to depth 8. 22 25 */ 23 26 enum km_type { 27 + KM_TYPE_NR = 8 28 + }; 29 + 30 + /* 31 + * We provide dummy definitions of all the stray values that used to be 32 + * required for kmap_atomic() and no longer are. 33 + */ 34 + enum { 24 35 KM_BOUNCE_READ, 25 36 KM_SKB_SUNRPC_DATA, 26 37 KM_SKB_DATA_SOFTIRQ, 27 38 KM_USER0, 28 39 KM_USER1, 29 40 KM_BIO_SRC_IRQ, 41 + KM_BIO_DST_IRQ, 42 + KM_PTE0, 43 + KM_PTE1, 30 44 KM_IRQ0, 31 45 KM_IRQ1, 32 46 KM_SOFTIRQ0, 33 47 KM_SOFTIRQ1, 34 - KM_MEMCPY0, 35 - KM_MEMCPY1, 36 - #if defined(CONFIG_HIGHPTE) 37 - KM_PTE0, 38 - KM_PTE1, 39 - #endif 40 - KM_TYPE_NR 48 + KM_SYNC_ICACHE, 49 + KM_SYNC_DCACHE, 50 + KM_UML_USERCOPY, 51 + KM_IRQ_PTE, 52 + KM_NMI, 53 + KM_NMI_PTE, 54 + KM_KDB 41 55 }; 42 56 43 57 #endif /* _ASM_TILE_KMAP_TYPES_H */
+2 -4
arch/tile/include/asm/pgtable.h
··· 344 344 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 345 345 346 346 #if defined(CONFIG_HIGHPTE) 347 - extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type); 348 - #define pte_offset_map(dir, address) \ 349 - _pte_offset_map(dir, address, KM_PTE0) 350 - #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) 347 + extern pte_t *pte_offset_map(pmd_t *, unsigned long address); 348 + #define pte_unmap(pte) kunmap_atomic(pte) 351 349 #else 352 350 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address) 353 351 #define pte_unmap(pte) do { } while (0)
+3 -3
arch/tile/kernel/machine_kexec.c
··· 182 182 183 183 if ((entry & IND_SOURCE)) { 184 184 void *va = 185 - kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0); 185 + kmap_atomic_pfn(entry >> PAGE_SHIFT); 186 186 r = kexec_bn2cl(va); 187 187 if (r) { 188 188 command_line = r; 189 189 break; 190 190 } 191 - kunmap_atomic(va, KM_USER0); 191 + kunmap_atomic(va); 192 192 } 193 193 } 194 194 ··· 198 198 199 199 hverr = hv_set_command_line( 200 200 (HV_VirtAddr) command_line, strlen(command_line)); 201 - kunmap_atomic(command_line, KM_USER0); 201 + kunmap_atomic(command_line); 202 202 } else { 203 203 pr_info("%s: no command line found; making empty\n", 204 204 __func__);
+8 -3
arch/tile/lib/memcpy_tile64.c
··· 54 54 * we must run with interrupts disabled to avoid the risk of some 55 55 * other code seeing the incoherent data in our cache. (Recall that 56 56 * our cache is indexed by PA, so even if the other code doesn't use 57 - * our KM_MEMCPY virtual addresses, they'll still hit in cache using 57 + * our kmap_atomic virtual addresses, they'll still hit in cache using 58 58 * the normal VAs that aren't supposed to hit in cache.) 59 59 */ 60 60 static void memcpy_multicache(void *dest, const void *source, ··· 64 64 unsigned long flags, newsrc, newdst; 65 65 pmd_t *pmdp; 66 66 pte_t *ptep; 67 + int type0, type1; 67 68 int cpu = get_cpu(); 68 69 69 70 /* ··· 78 77 sim_allow_multiple_caching(1); 79 78 80 79 /* Set up the new dest mapping */ 81 - idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0; 80 + type0 = kmap_atomic_idx_push(); 81 + idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0; 82 82 newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1)); 83 83 pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst); 84 84 ptep = pte_offset_kernel(pmdp, newdst); ··· 89 87 } 90 88 91 89 /* Set up the new source mapping */ 92 - idx += (KM_MEMCPY0 - KM_MEMCPY1); 90 + type1 = kmap_atomic_idx_push(); 91 + idx += (type0 - type1); 93 92 src_pte = hv_pte_set_nc(src_pte); 94 93 src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */ 95 94 newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1)); ··· 122 119 * We're done: notify the simulator that all is back to normal, 123 120 * and re-enable interrupts and pre-emption. 124 121 */ 122 + kmap_atomic_idx_pop(); 123 + kmap_atomic_idx_pop(); 125 124 sim_allow_multiple_caching(0); 126 125 local_irq_restore(flags); 127 126 put_cpu();
+1 -1
arch/tile/mm/highmem.c
··· 227 227 void *__kmap_atomic(struct page *page) 228 228 { 229 229 /* PAGE_NONE is a magic value that tells us to check immutability. */ 230 - return kmap_atomic_prot(page, type, PAGE_NONE); 230 + return kmap_atomic_prot(page, PAGE_NONE); 231 231 } 232 232 EXPORT_SYMBOL(__kmap_atomic); 233 233
+2 -2
arch/tile/mm/pgtable.c
··· 134 134 } 135 135 136 136 #if defined(CONFIG_HIGHPTE) 137 - pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type) 137 + pte_t *pte_offset_map(pmd_t *dir, unsigned long address) 138 138 { 139 - pte_t *pte = kmap_atomic(pmd_page(*dir), type) + 139 + pte_t *pte = kmap_atomic(pmd_page(*dir)) + 140 140 (pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK; 141 141 return &pte[pte_index(address)]; 142 142 }