Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: tlbex: Use GPR number macros

Use GPR number macros in uasm code generation parts to
reduce code duplication.

No functional change.

Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>

Authored by Jiaxun Yang and committed by Thomas Bogendoerfer
(commit 8cc461b8, parent 6aec8e05)

+97 -99
arch/mips/mm/tlbex.c
··· 34 34 #include <asm/cpu-type.h> 35 35 #include <asm/mipsregs.h> 36 36 #include <asm/mmu_context.h> 37 + #include <asm/regdef.h> 37 38 #include <asm/uasm.h> 38 39 #include <asm/setup.h> 39 40 #include <asm/tlbex.h> ··· 278 277 pr_debug("\tEND(%s)\n", symbol); 279 278 } 280 279 281 - /* The only general purpose registers allowed in TLB handlers. */ 282 - #define K0 26 283 - #define K1 27 284 - 285 280 #ifdef CONFIG_64BIT 286 281 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT) 287 282 #else ··· 337 340 if (scratch_reg >= 0) { 338 341 /* Save in CPU local C0_KScratch? */ 339 342 UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg); 340 - r.r1 = K0; 341 - r.r2 = K1; 342 - r.r3 = 1; 343 + r.r1 = GPR_K0; 344 + r.r2 = GPR_K1; 345 + r.r3 = GPR_AT; 343 346 return r; 344 347 } 345 348 346 349 if (num_possible_cpus() > 1) { 347 350 /* Get smp_processor_id */ 348 - UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG); 349 - UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT); 351 + UASM_i_CPUID_MFC0(p, GPR_K0, SMP_CPUID_REG); 352 + UASM_i_SRL_SAFE(p, GPR_K0, GPR_K0, SMP_CPUID_REGSHIFT); 350 353 351 - /* handler_reg_save index in K0 */ 352 - UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save))); 354 + /* handler_reg_save index in GPR_K0 */ 355 + UASM_i_SLL(p, GPR_K0, GPR_K0, ilog2(sizeof(struct tlb_reg_save))); 353 356 354 - UASM_i_LA(p, K1, (long)&handler_reg_save); 355 - UASM_i_ADDU(p, K0, K0, K1); 357 + UASM_i_LA(p, GPR_K1, (long)&handler_reg_save); 358 + UASM_i_ADDU(p, GPR_K0, GPR_K0, GPR_K1); 356 359 } else { 357 - UASM_i_LA(p, K0, (long)&handler_reg_save); 360 + UASM_i_LA(p, GPR_K0, (long)&handler_reg_save); 358 361 } 359 - /* K0 now points to save area, save $1 and $2 */ 360 - UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0); 361 - UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0); 362 + /* GPR_K0 now points to save area, save $1 and $2 */ 363 + UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0); 364 + UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), 
GPR_K0); 362 365 363 - r.r1 = K1; 366 + r.r1 = GPR_K1; 364 367 r.r2 = 1; 365 368 r.r3 = 2; 366 369 return r; ··· 373 376 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 374 377 return; 375 378 } 376 - /* K0 already points to save area, restore $1 and $2 */ 377 - UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0); 378 - UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0); 379 + /* GPR_K0 already points to save area, restore $1 and $2 */ 380 + UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0); 381 + UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), GPR_K0); 379 382 } 380 383 381 384 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT ··· 394 397 memset(tlb_handler, 0, sizeof(tlb_handler)); 395 398 p = tlb_handler; 396 399 397 - uasm_i_mfc0(&p, K0, C0_BADVADDR); 398 - uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */ 399 - uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1); 400 - uasm_i_srl(&p, K0, K0, 22); /* load delay */ 401 - uasm_i_sll(&p, K0, K0, 2); 402 - uasm_i_addu(&p, K1, K1, K0); 403 - uasm_i_mfc0(&p, K0, C0_CONTEXT); 404 - uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */ 405 - uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ 406 - uasm_i_addu(&p, K1, K1, K0); 407 - uasm_i_lw(&p, K0, 0, K1); 400 + uasm_i_mfc0(&p, GPR_K0, C0_BADVADDR); 401 + uasm_i_lui(&p, GPR_K1, uasm_rel_hi(pgdc)); /* cp0 delay */ 402 + uasm_i_lw(&p, GPR_K1, uasm_rel_lo(pgdc), GPR_K1); 403 + uasm_i_srl(&p, GPR_K0, GPR_K0, 22); /* load delay */ 404 + uasm_i_sll(&p, GPR_K0, GPR_K0, 2); 405 + uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0); 406 + uasm_i_mfc0(&p, GPR_K0, C0_CONTEXT); 407 + uasm_i_lw(&p, GPR_K1, 0, GPR_K1); /* cp0 delay */ 408 + uasm_i_andi(&p, GPR_K0, GPR_K0, 0xffc); /* load delay */ 409 + uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0); 410 + uasm_i_lw(&p, GPR_K0, 0, GPR_K1); 408 411 uasm_i_nop(&p); /* load delay */ 409 - uasm_i_mtc0(&p, K0, C0_ENTRYLO0); 410 - uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ 412 + uasm_i_mtc0(&p, GPR_K0, C0_ENTRYLO0); 413 + uasm_i_mfc0(&p, GPR_K1, C0_EPC); /* cp0 delay */ 411 
414 uasm_i_tlbwr(&p); /* cp0 delay */ 412 - uasm_i_jr(&p, K1); 415 + uasm_i_jr(&p, GPR_K1); 413 416 uasm_i_rfe(&p); /* branch delay */ 414 417 415 418 if (p > tlb_handler + 32) ··· 1257 1260 memset(final_handler, 0, sizeof(final_handler)); 1258 1261 1259 1262 if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) { 1260 - htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, 1263 + htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, GPR_K0, GPR_K1, 1261 1264 scratch_reg); 1262 1265 vmalloc_mode = refill_scratch; 1263 1266 } else { 1264 - htlb_info.huge_pte = K0; 1267 + htlb_info.huge_pte = GPR_K0; 1265 1268 htlb_info.restore_scratch = 0; 1266 1269 htlb_info.need_reload_pte = true; 1267 1270 vmalloc_mode = refill_noscratch; ··· 1271 1274 if (bcm1250_m3_war()) { 1272 1275 unsigned int segbits = 44; 1273 1276 1274 - uasm_i_dmfc0(&p, K0, C0_BADVADDR); 1275 - uasm_i_dmfc0(&p, K1, C0_ENTRYHI); 1276 - uasm_i_xor(&p, K0, K0, K1); 1277 - uasm_i_dsrl_safe(&p, K1, K0, 62); 1278 - uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); 1279 - uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); 1280 - uasm_i_or(&p, K0, K0, K1); 1281 - uasm_il_bnez(&p, &r, K0, label_leave); 1277 + uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR); 1278 + uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI); 1279 + uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1); 1280 + uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62); 1281 + uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1); 1282 + uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 12 + 1 - segbits); 1283 + uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1); 1284 + uasm_il_bnez(&p, &r, GPR_K0, label_leave); 1282 1285 /* No need for uasm_i_nop */ 1283 1286 } 1284 1287 1285 1288 #ifdef CONFIG_64BIT 1286 - build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ 1289 + build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */ 1287 1290 #else 1288 - build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ 1291 + build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */ 1289 
1292 #endif 1290 1293 1291 1294 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1292 - build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); 1295 + build_is_huge_pte(&p, &r, GPR_K0, GPR_K1, label_tlb_huge_update); 1293 1296 #endif 1294 1297 1295 - build_get_ptep(&p, K0, K1); 1296 - build_update_entries(&p, K0, K1); 1298 + build_get_ptep(&p, GPR_K0, GPR_K1); 1299 + build_update_entries(&p, GPR_K0, GPR_K1); 1297 1300 build_tlb_write_entry(&p, &l, &r, tlb_random); 1298 1301 uasm_l_leave(&l, p); 1299 1302 uasm_i_eret(&p); /* return from trap */ ··· 1301 1304 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1302 1305 uasm_l_tlb_huge_update(&l, p); 1303 1306 if (htlb_info.need_reload_pte) 1304 - UASM_i_LW(&p, htlb_info.huge_pte, 0, K1); 1305 - build_huge_update_entries(&p, htlb_info.huge_pte, K1); 1306 - build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, 1307 + UASM_i_LW(&p, htlb_info.huge_pte, 0, GPR_K1); 1308 + build_huge_update_entries(&p, htlb_info.huge_pte, GPR_K1); 1309 + build_huge_tlb_write_entry(&p, &l, &r, GPR_K0, tlb_random, 1307 1310 htlb_info.restore_scratch); 1308 1311 #endif 1309 1312 1310 1313 #ifdef CONFIG_64BIT 1311 - build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode); 1314 + build_get_pgd_vmalloc64(&p, &l, &r, GPR_K0, GPR_K1, vmalloc_mode); 1312 1315 #endif 1313 1316 1314 1317 /* ··· 1481 1484 memset(tlb_handler, 0, sizeof(tlb_handler)); 1482 1485 1483 1486 if (check_for_high_segbits) { 1484 - uasm_i_dmfc0(&p, K0, C0_BADVADDR); 1485 - uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); 1486 - uasm_il_beqz(&p, &r, K1, label_vmalloc); 1487 + uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR); 1488 + uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 1489 + PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); 1490 + uasm_il_beqz(&p, &r, GPR_K1, label_vmalloc); 1487 1491 uasm_i_nop(&p); 1488 1492 1489 - uasm_il_bgez(&p, &r, K0, label_large_segbits_fault); 1493 + uasm_il_bgez(&p, &r, GPR_K0, label_large_segbits_fault); 1490 1494 uasm_i_nop(&p); 1491 1495 
uasm_l_vmalloc(&l, p); 1492 1496 } 1493 1497 1494 - uasm_i_dmfc0(&p, K1, C0_PGD); 1498 + uasm_i_dmfc0(&p, GPR_K1, C0_PGD); 1495 1499 1496 - uasm_i_lddir(&p, K0, K1, 3); /* global page dir */ 1500 + uasm_i_lddir(&p, GPR_K0, GPR_K1, 3); /* global page dir */ 1497 1501 #ifndef __PAGETABLE_PMD_FOLDED 1498 - uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */ 1502 + uasm_i_lddir(&p, GPR_K1, GPR_K0, 1); /* middle page dir */ 1499 1503 #endif 1500 - uasm_i_ldpte(&p, K1, 0); /* even */ 1501 - uasm_i_ldpte(&p, K1, 1); /* odd */ 1504 + uasm_i_ldpte(&p, GPR_K1, 0); /* even */ 1505 + uasm_i_ldpte(&p, GPR_K1, 1); /* odd */ 1502 1506 uasm_i_tlbwr(&p); 1503 1507 1504 1508 /* restore page mask */ 1505 1509 if (PM_DEFAULT_MASK >> 16) { 1506 - uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16); 1507 - uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff); 1508 - uasm_i_mtc0(&p, K0, C0_PAGEMASK); 1510 + uasm_i_lui(&p, GPR_K0, PM_DEFAULT_MASK >> 16); 1511 + uasm_i_ori(&p, GPR_K0, GPR_K0, PM_DEFAULT_MASK & 0xffff); 1512 + uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK); 1509 1513 } else if (PM_DEFAULT_MASK) { 1510 - uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK); 1511 - uasm_i_mtc0(&p, K0, C0_PAGEMASK); 1514 + uasm_i_ori(&p, GPR_K0, 0, PM_DEFAULT_MASK); 1515 + uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK); 1512 1516 } else { 1513 1517 uasm_i_mtc0(&p, 0, C0_PAGEMASK); 1514 1518 } ··· 1518 1520 1519 1521 if (check_for_high_segbits) { 1520 1522 uasm_l_large_segbits_fault(&l, p); 1521 - UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0); 1522 - uasm_i_jr(&p, K1); 1523 + UASM_i_LA(&p, GPR_K1, (unsigned long)tlb_do_page_fault_0); 1524 + uasm_i_jr(&p, GPR_K1); 1523 1525 uasm_i_nop(&p); 1524 1526 } 1525 1527 ··· 1885 1887 memset(labels, 0, sizeof(labels)); 1886 1888 memset(relocs, 0, sizeof(relocs)); 1887 1889 1888 - build_r3000_tlbchange_handler_head(&p, K0, K1); 1889 - build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl); 1890 + build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1); 1891 + build_pte_present(&p, &r, GPR_K0, 
GPR_K1, -1, label_nopage_tlbl); 1890 1892 uasm_i_nop(&p); /* load delay */ 1891 - build_make_valid(&p, &r, K0, K1, -1); 1892 - build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1893 + build_make_valid(&p, &r, GPR_K0, GPR_K1, -1); 1894 + build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1); 1893 1895 1894 1896 uasm_l_nopage_tlbl(&l, p); 1895 1897 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); ··· 1915 1917 memset(labels, 0, sizeof(labels)); 1916 1918 memset(relocs, 0, sizeof(relocs)); 1917 1919 1918 - build_r3000_tlbchange_handler_head(&p, K0, K1); 1919 - build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs); 1920 + build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1); 1921 + build_pte_writable(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbs); 1920 1922 uasm_i_nop(&p); /* load delay */ 1921 - build_make_write(&p, &r, K0, K1, -1); 1922 - build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1923 + build_make_write(&p, &r, GPR_K0, GPR_K1, -1); 1924 + build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1); 1923 1925 1924 1926 uasm_l_nopage_tlbs(&l, p); 1925 1927 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); ··· 1945 1947 memset(labels, 0, sizeof(labels)); 1946 1948 memset(relocs, 0, sizeof(relocs)); 1947 1949 1948 - build_r3000_tlbchange_handler_head(&p, K0, K1); 1949 - build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm); 1950 + build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1); 1951 + build_pte_modifiable(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbm); 1950 1952 uasm_i_nop(&p); /* load delay */ 1951 - build_make_write(&p, &r, K0, K1, -1); 1952 - build_r3000_pte_reload_tlbwi(&p, K0, K1); 1953 + build_make_write(&p, &r, GPR_K0, GPR_K1, -1); 1954 + build_r3000_pte_reload_tlbwi(&p, GPR_K0, GPR_K1); 1953 1955 1954 1956 uasm_l_nopage_tlbm(&l, p); 1955 1957 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); ··· 2065 2067 if (bcm1250_m3_war()) { 2066 2068 unsigned int segbits = 44; 2067 2069 2068 - 
uasm_i_dmfc0(&p, K0, C0_BADVADDR); 2069 - uasm_i_dmfc0(&p, K1, C0_ENTRYHI); 2070 - uasm_i_xor(&p, K0, K0, K1); 2071 - uasm_i_dsrl_safe(&p, K1, K0, 62); 2072 - uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); 2073 - uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); 2074 - uasm_i_or(&p, K0, K0, K1); 2075 - uasm_il_bnez(&p, &r, K0, label_leave); 2070 + uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR); 2071 + uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI); 2072 + uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1); 2073 + uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62); 2074 + uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1); 2075 + uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 12 + 1 - segbits); 2076 + uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1); 2077 + uasm_il_bnez(&p, &r, GPR_K0, label_leave); 2076 2078 /* No need for uasm_i_nop */ 2077 2079 } 2078 2080 ··· 2215 2217 build_restore_work_registers(&p); 2216 2218 #ifdef CONFIG_CPU_MICROMIPS 2217 2219 if ((unsigned long)tlb_do_page_fault_0 & 1) { 2218 - uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0)); 2219 - uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0)); 2220 - uasm_i_jr(&p, K0); 2220 + uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_0)); 2221 + uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_0)); 2222 + uasm_i_jr(&p, GPR_K0); 2221 2223 } else 2222 2224 #endif 2223 2225 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); ··· 2271 2273 build_restore_work_registers(&p); 2272 2274 #ifdef CONFIG_CPU_MICROMIPS 2273 2275 if ((unsigned long)tlb_do_page_fault_1 & 1) { 2274 - uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); 2275 - uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); 2276 - uasm_i_jr(&p, K0); 2276 + uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1)); 2277 + uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1)); 2278 + uasm_i_jr(&p, GPR_K0); 2277 2279 } else 2278 2280 #endif 2279 2281 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); ··· 
2328 2330 build_restore_work_registers(&p); 2329 2331 #ifdef CONFIG_CPU_MICROMIPS 2330 2332 if ((unsigned long)tlb_do_page_fault_1 & 1) { 2331 - uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); 2332 - uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); 2333 - uasm_i_jr(&p, K0); 2333 + uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1)); 2334 + uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1)); 2335 + uasm_i_jr(&p, GPR_K0); 2334 2336 } else 2335 2337 #endif 2336 2338 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);