Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

More .set push/pop encapsulation, more eye-friendly code formatting.

Signed-off-by: Thiemo Seufer <ths@networkno.de>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

authored by

Thiemo Seufer and committed by
Ralf Baechle
2fe25f67 f8670e66

+36 -36
+36 -36
include/asm-mips/r4kcache.h
··· 21 21 * 22 22 * - The MIPS32 and MIPS64 specs permit an implementation to directly derive 23 23 * the index bits from the virtual address. This breaks with tradition 24 - * set by the R4000. To keep unpleassant surprises from happening we pick 24 + * set by the R4000. To keep unpleasant surprises from happening we pick 25 25 * an address in KSEG0 / CKSEG0. 26 26 * - We need a properly sign extended address for 64-bit code. To get away 27 27 * without ifdefs we let the compiler do it by a type cast. ··· 30 30 31 31 #define cache_op(op,addr) \ 32 32 __asm__ __volatile__( \ 33 + " .set push \n" \ 33 34 " .set noreorder \n" \ 34 35 " .set mips3\n\t \n" \ 35 36 " cache %0, %1 \n" \ 36 - " .set mips0 \n" \ 37 - " .set reorder" \ 37 + " .set pop \n" \ 38 38 : \ 39 39 : "i" (op), "m" (*(unsigned char *)(addr))) 40 40 ··· 84 84 static inline void protected_flush_icache_line(unsigned long addr) 85 85 { 86 86 __asm__ __volatile__( 87 - ".set noreorder\n\t" 88 - ".set mips3\n" 89 - "1:\tcache %0,(%1)\n" 90 - "2:\t.set mips0\n\t" 91 - ".set reorder\n\t" 92 - ".section\t__ex_table,\"a\"\n\t" 93 - STR(PTR)"\t1b,2b\n\t" 94 - ".previous" 87 + " .set push \n" 88 + " .set noreorder \n" 89 + " .set mips3 \n" 90 + "1: cache %0, (%1) \n" 91 + "2: .set pop \n" 92 + " .section __ex_table,\"a\" \n" 93 + " "STR(PTR)" 1b, 2b \n" 94 + " .previous" 95 95 : 96 96 : "i" (Hit_Invalidate_I), "r" (addr)); 97 97 } ··· 100 100 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D 101 101 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style 102 102 * caches. We're talking about one cacheline unnecessarily getting invalidated 103 - * here so the penaltiy isn't overly hard. 103 + * here so the penalty isn't overly hard. 
104 104 */ 105 105 static inline void protected_writeback_dcache_line(unsigned long addr) 106 106 { 107 107 __asm__ __volatile__( 108 - ".set noreorder\n\t" 109 - ".set mips3\n" 110 - "1:\tcache %0,(%1)\n" 111 - "2:\t.set mips0\n\t" 112 - ".set reorder\n\t" 113 - ".section\t__ex_table,\"a\"\n\t" 114 - STR(PTR)"\t1b,2b\n\t" 115 - ".previous" 108 + " .set push \n" 109 + " .set noreorder \n" 110 + " .set mips3 \n" 111 + "1: cache %0, (%1) \n" 112 + "2: .set pop \n" 113 + " .section __ex_table,\"a\" \n" 114 + " "STR(PTR)" 1b, 2b \n" 115 + " .previous" 116 116 : 117 117 : "i" (Hit_Writeback_Inv_D), "r" (addr)); 118 118 } ··· 120 120 static inline void protected_writeback_scache_line(unsigned long addr) 121 121 { 122 122 __asm__ __volatile__( 123 - ".set noreorder\n\t" 124 - ".set mips3\n" 125 - "1:\tcache %0,(%1)\n" 126 - "2:\t.set mips0\n\t" 127 - ".set reorder\n\t" 128 - ".section\t__ex_table,\"a\"\n\t" 129 - STR(PTR)"\t1b,2b\n\t" 130 - ".previous" 123 + " .set push \n" 124 + " .set noreorder \n" 125 + " .set mips3 \n" 126 + "1: cache %0, (%1) \n" 127 + "2: .set pop \n" 128 + " .section __ex_table,\"a\" \n" 129 + " "STR(PTR)" 1b, 2b \n" 130 + " .previous" 131 131 : 132 132 : "i" (Hit_Writeback_Inv_SD), "r" (addr)); 133 133 } ··· 142 142 143 143 #define cache16_unroll32(base,op) \ 144 144 __asm__ __volatile__( \ 145 + " .set push \n" \ 145 146 " .set noreorder \n" \ 146 147 " .set mips3 \n" \ 147 148 " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \ ··· 161 160 " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \ 162 161 " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \ 163 162 " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \ 164 - " .set mips0 \n" \ 165 - " .set reorder \n" \ 163 + " .set pop \n" \ 166 164 : \ 167 165 : "r" (base), \ 168 166 "i" (op)); ··· 285 285 286 286 #define cache32_unroll32(base,op) \ 287 287 __asm__ __volatile__( \ 288 + " .set push \n" \ 288 289 " .set noreorder \n" \ 289 290 " .set mips3 \n" \ 290 291 " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" 
\ ··· 304 303 " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" \ 305 304 " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" \ 306 305 " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" \ 307 - " .set mips0 \n" \ 308 - " .set reorder \n" \ 306 + " .set pop \n" \ 309 307 : \ 310 308 : "r" (base), \ 311 309 "i" (op)); ··· 428 428 429 429 #define cache64_unroll32(base,op) \ 430 430 __asm__ __volatile__( \ 431 + " .set push \n" \ 431 432 " .set noreorder \n" \ 432 433 " .set mips3 \n" \ 433 434 " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \ ··· 447 446 " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" \ 448 447 " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" \ 449 448 " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" \ 450 - " .set mips0 \n" \ 451 - " .set reorder \n" \ 449 + " .set pop \n" \ 452 450 : \ 453 451 : "r" (base), \ 454 452 "i" (op)); ··· 532 532 533 533 #define cache128_unroll32(base,op) \ 534 534 __asm__ __volatile__( \ 535 + " .set push \n" \ 535 536 " .set noreorder \n" \ 536 537 " .set mips3 \n" \ 537 538 " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \ ··· 551 550 " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" \ 552 551 " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" \ 553 552 " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" \ 554 - " .set mips0 \n" \ 555 - " .set reorder \n" \ 553 + " .set pop \n" \ 556 554 : \ 557 555 : "r" (base), \ 558 556 "i" (op));