[MIPS] Split the micro-assembler from tlbex.c.

This patch moves the micro-assembler into a separate file, as it is
useful for further run-time optimizations. The only change in
behaviour is cutting down printk noise at kernel startup time: the
handler-synthesis messages are now emitted with pr_debug() instead of
pr_info().
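
As an illustration (not part of the patch), a minimal sketch of how a
handler builder drives the split-out assembler. The uasm_i_*, uasm_l_*
and uasm_il_* calls, the uasm_label/uasm_reloc types and
uasm_resolve_relocs() are the ones tlbex.c uses below; the buffer, its
size, the function name and the use of K0 (tlbex.c's own scratch
register macro) are made up for the example:

	#include "uasm.h"

	static u32 buf[32] __initdata;			/* scratch code buffer */
	static struct uasm_label labels[8] __initdata;	/* zeroed: ends in label_invalid */
	static struct uasm_reloc relocs[8] __initdata;

	static void __init build_demo_handler(void)
	{
		u32 *p = buf;
		struct uasm_label *l = labels;
		struct uasm_reloc *r = relocs;

		/* Branch on a label whose address isn't known yet... */
		uasm_il_beqz(&p, &r, K0, label_leave);
		uasm_i_nop(&p);			/* branch delay slot */
		/* ...and bind the label once the target is reached. */
		uasm_l_leave(&l, p);
		uasm_i_eret(&p);

		/* Back-patch the recorded branches against the bound labels. */
		uasm_resolve_relocs(relocs, labels);
	}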

Checkpatch complains about macro parameters which aren't protected by
parentheses. I believe this is a flaw in checkpatch: the paste operator
used in those macros won't work with parenthesised parameters.
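
For reference, a reduced form of the affected pattern (the I_u*()
family in the hunk removed from tlbex.c below; __maybe_unused is
dropped here for brevity). The parameter feeds the ## paste operator
to build the function and enum names, so it has to stay bare:

	#define I_u1(op)					\
	static void __init i##op(u32 **buf, unsigned int a)	\
	{							\
		build_insn(buf, insn##op, a);			\
	}

	I_u1(_jr);	/* defines i_jr(), emitting insn_jr via build_insn() */

	/*
	 * The parenthesised spelling checkpatch asks for would expand to
	 * i##(op): pasting "i" against "(" does not form a valid
	 * preprocessing token, so the macro would not compile.
	 */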

Signed-off-by: Thiemo Seufer <ths@networkno.de>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

Authored by Thiemo Seufer and committed by Ralf Baechle (e30ec452, a055917e)

Total: +1109 -958

arch/mips/mm/Makefile (+2 -1)
···
 #
 
 obj-y				+= cache.o dma-default.o extable.o fault.o \
-				   init.o pgtable.o tlbex.o tlbex-fault.o
+				   init.o pgtable.o tlbex.o tlbex-fault.o \
+				   uasm.o
 
 obj-$(CONFIG_32BIT)		+= ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)		+= pgtable-64.o

arch/mips/mm/tlbex.c (+339 -957)
··· 5 5 * 6 6 * Synthesize TLB refill handlers at runtime. 7 7 * 8 - * Copyright (C) 2004,2005,2006 by Thiemo Seufer 8 + * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer 9 9 * Copyright (C) 2005, 2007 Maciej W. Rozycki 10 10 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) 11 11 * ··· 24 24 #include <linux/string.h> 25 25 #include <linux/init.h> 26 26 27 - #include <asm/bugs.h> 28 27 #include <asm/mmu_context.h> 29 - #include <asm/inst.h> 30 - #include <asm/elf.h> 31 28 #include <asm/war.h> 29 + 30 + #include "uasm.h" 32 31 33 32 static inline int r45k_bvahwbug(void) 34 33 { ··· 66 67 (PRID_COMP_MIPS | PRID_IMP_4KC); 67 68 } 68 69 69 - /* 70 - * A little micro-assembler, intended for TLB refill handler 71 - * synthesizing. It is intentionally kept simple, does only support 72 - * a subset of instructions, and does not try to hide pipeline effects 73 - * like branch delay slots. 74 - */ 75 - 76 - enum fields 77 - { 78 - RS = 0x001, 79 - RT = 0x002, 80 - RD = 0x004, 81 - RE = 0x008, 82 - SIMM = 0x010, 83 - UIMM = 0x020, 84 - BIMM = 0x040, 85 - JIMM = 0x080, 86 - FUNC = 0x100, 87 - SET = 0x200 88 - }; 89 - 90 - #define OP_MASK 0x3f 91 - #define OP_SH 26 92 - #define RS_MASK 0x1f 93 - #define RS_SH 21 94 - #define RT_MASK 0x1f 95 - #define RT_SH 16 96 - #define RD_MASK 0x1f 97 - #define RD_SH 11 98 - #define RE_MASK 0x1f 99 - #define RE_SH 6 100 - #define IMM_MASK 0xffff 101 - #define IMM_SH 0 102 - #define JIMM_MASK 0x3ffffff 103 - #define JIMM_SH 0 104 - #define FUNC_MASK 0x3f 105 - #define FUNC_SH 0 106 - #define SET_MASK 0x7 107 - #define SET_SH 0 108 - 109 - enum opcode { 110 - insn_invalid, 111 - insn_addu, insn_addiu, insn_and, insn_andi, insn_beq, 112 - insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, 113 - insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0, 114 - insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, 115 - insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld, 116 - insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0, 117 - insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll, 118 - insn_sra, insn_srl, insn_subu, insn_sw, insn_tlbp, insn_tlbwi, 119 - insn_tlbwr, insn_xor, insn_xori 120 - }; 121 - 122 - struct insn { 123 - enum opcode opcode; 124 - u32 match; 125 - enum fields fields; 126 - }; 127 - 128 - /* This macro sets the non-variable bits of an instruction. 
*/ 129 - #define M(a, b, c, d, e, f) \ 130 - ((a) << OP_SH \ 131 - | (b) << RS_SH \ 132 - | (c) << RT_SH \ 133 - | (d) << RD_SH \ 134 - | (e) << RE_SH \ 135 - | (f) << FUNC_SH) 136 - 137 - static struct insn insn_table[] __initdata = { 138 - { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 139 - { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, 140 - { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD }, 141 - { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 142 - { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, 143 - { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, 144 - { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM }, 145 - { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM }, 146 - { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, 147 - { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, 148 - { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, 149 - { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 150 - { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, 151 - { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, 152 - { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, 153 - { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE }, 154 - { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE }, 155 - { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE }, 156 - { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE }, 157 - { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE }, 158 - { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD }, 159 - { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 }, 160 - { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, 161 - { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, 162 - { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, 163 - { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 164 - { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 165 - { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 166 - { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, 167 - { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 168 - { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, 169 - { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, 170 - { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 171 - { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, 172 - { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 173 - { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 174 - { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 175 - { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, 176 - { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, 177 - { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, 178 - { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, 179 - { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 180 - { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, 181 - { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, 182 - { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, 183 - { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, 184 - { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 185 - { insn_invalid, 0, 0 } 186 - }; 187 - 188 - #undef M 189 - 190 - static u32 __init build_rs(u32 arg) 191 - { 192 - if (arg & ~RS_MASK) 193 - printk(KERN_WARNING "TLB synthesizer field overflow\n"); 194 - 195 - return (arg & RS_MASK) << RS_SH; 196 - } 197 - 198 - static u32 __init build_rt(u32 arg) 
199 - { 200 - if (arg & ~RT_MASK) 201 - printk(KERN_WARNING "TLB synthesizer field overflow\n"); 202 - 203 - return (arg & RT_MASK) << RT_SH; 204 - } 205 - 206 - static u32 __init build_rd(u32 arg) 207 - { 208 - if (arg & ~RD_MASK) 209 - printk(KERN_WARNING "TLB synthesizer field overflow\n"); 210 - 211 - return (arg & RD_MASK) << RD_SH; 212 - } 213 - 214 - static u32 __init build_re(u32 arg) 215 - { 216 - if (arg & ~RE_MASK) 217 - printk(KERN_WARNING "TLB synthesizer field overflow\n"); 218 - 219 - return (arg & RE_MASK) << RE_SH; 220 - } 221 - 222 - static u32 __init build_simm(s32 arg) 223 - { 224 - if (arg > 0x7fff || arg < -0x8000) 225 - printk(KERN_WARNING "TLB synthesizer field overflow\n"); 226 - 227 - return arg & 0xffff; 228 - } 229 - 230 - static u32 __init build_uimm(u32 arg) 231 - { 232 - if (arg & ~IMM_MASK) 233 - printk(KERN_WARNING "TLB synthesizer field overflow\n"); 234 - 235 - return arg & IMM_MASK; 236 - } 237 - 238 - static u32 __init build_bimm(s32 arg) 239 - { 240 - if (arg > 0x1ffff || arg < -0x20000) 241 - printk(KERN_WARNING "TLB synthesizer field overflow\n"); 242 - 243 - if (arg & 0x3) 244 - printk(KERN_WARNING "Invalid TLB synthesizer branch target\n"); 245 - 246 - return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); 247 - } 248 - 249 - static u32 __init build_jimm(u32 arg) 250 - { 251 - if (arg & ~((JIMM_MASK) << 2)) 252 - printk(KERN_WARNING "TLB synthesizer field overflow\n"); 253 - 254 - return (arg >> 2) & JIMM_MASK; 255 - } 256 - 257 - static u32 __init build_func(u32 arg) 258 - { 259 - if (arg & ~FUNC_MASK) 260 - printk(KERN_WARNING "TLB synthesizer field overflow\n"); 261 - 262 - return arg & FUNC_MASK; 263 - } 264 - 265 - static u32 __init build_set(u32 arg) 266 - { 267 - if (arg & ~SET_MASK) 268 - printk(KERN_WARNING "TLB synthesizer field overflow\n"); 269 - 270 - return arg & SET_MASK; 271 - } 272 - 273 - /* 274 - * The order of opcode arguments is implicitly left to right, 275 - * starting with RS and ending with FUNC or IMM. 276 - */ 277 - static void __init build_insn(u32 **buf, enum opcode opc, ...) 
278 - { 279 - struct insn *ip = NULL; 280 - unsigned int i; 281 - va_list ap; 282 - u32 op; 283 - 284 - for (i = 0; insn_table[i].opcode != insn_invalid; i++) 285 - if (insn_table[i].opcode == opc) { 286 - ip = &insn_table[i]; 287 - break; 288 - } 289 - 290 - if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) 291 - panic("Unsupported TLB synthesizer instruction %d", opc); 292 - 293 - op = ip->match; 294 - va_start(ap, opc); 295 - if (ip->fields & RS) op |= build_rs(va_arg(ap, u32)); 296 - if (ip->fields & RT) op |= build_rt(va_arg(ap, u32)); 297 - if (ip->fields & RD) op |= build_rd(va_arg(ap, u32)); 298 - if (ip->fields & RE) op |= build_re(va_arg(ap, u32)); 299 - if (ip->fields & SIMM) op |= build_simm(va_arg(ap, s32)); 300 - if (ip->fields & UIMM) op |= build_uimm(va_arg(ap, u32)); 301 - if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32)); 302 - if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32)); 303 - if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32)); 304 - if (ip->fields & SET) op |= build_set(va_arg(ap, u32)); 305 - va_end(ap); 306 - 307 - **buf = op; 308 - (*buf)++; 309 - } 310 - 311 - #define I_u1u2u3(op) \ 312 - static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \ 313 - unsigned int b, unsigned int c) \ 314 - { \ 315 - build_insn(buf, insn##op, a, b, c); \ 316 - } 317 - 318 - #define I_u2u1u3(op) \ 319 - static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \ 320 - unsigned int b, unsigned int c) \ 321 - { \ 322 - build_insn(buf, insn##op, b, a, c); \ 323 - } 324 - 325 - #define I_u3u1u2(op) \ 326 - static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \ 327 - unsigned int b, unsigned int c) \ 328 - { \ 329 - build_insn(buf, insn##op, b, c, a); \ 330 - } 331 - 332 - #define I_u1u2s3(op) \ 333 - static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \ 334 - unsigned int b, signed int c) \ 335 - { \ 336 - build_insn(buf, insn##op, a, b, c); \ 337 - } 338 - 339 - #define I_u2s3u1(op) \ 340 - static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \ 341 - signed int b, unsigned int c) \ 342 - { \ 343 - build_insn(buf, insn##op, c, a, b); \ 344 - } 345 - 346 - #define I_u2u1s3(op) \ 347 - static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \ 348 - unsigned int b, signed int c) \ 349 - { \ 350 - build_insn(buf, insn##op, b, a, c); \ 351 - } 352 - 353 - #define I_u1u2(op) \ 354 - static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \ 355 - unsigned int b) \ 356 - { \ 357 - build_insn(buf, insn##op, a, b); \ 358 - } 359 - 360 - #define I_u1s2(op) \ 361 - static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \ 362 - signed int b) \ 363 - { \ 364 - build_insn(buf, insn##op, a, b); \ 365 - } 366 - 367 - #define I_u1(op) \ 368 - static void __init __maybe_unused i##op(u32 **buf, unsigned int a) \ 369 - { \ 370 - build_insn(buf, insn##op, a); \ 371 - } 372 - 373 - #define I_0(op) \ 374 - static void __init __maybe_unused i##op(u32 **buf) \ 375 - { \ 376 - build_insn(buf, insn##op); \ 377 - } 378 - 379 - I_u2u1s3(_addiu); 380 - I_u3u1u2(_addu); 381 - I_u2u1u3(_andi); 382 - I_u3u1u2(_and); 383 - I_u1u2s3(_beq); 384 - I_u1u2s3(_beql); 385 - I_u1s2(_bgez); 386 - I_u1s2(_bgezl); 387 - I_u1s2(_bltz); 388 - I_u1s2(_bltzl); 389 - I_u1u2s3(_bne); 390 - I_u1u2u3(_dmfc0); 391 - I_u1u2u3(_dmtc0); 392 - I_u2u1s3(_daddiu); 393 - I_u3u1u2(_daddu); 394 - I_u2u1u3(_dsll); 395 - I_u2u1u3(_dsll32); 396 - I_u2u1u3(_dsra); 397 - I_u2u1u3(_dsrl); 398 - I_u2u1u3(_dsrl32); 399 - 
I_u3u1u2(_dsubu); 400 - I_0(_eret); 401 - I_u1(_j); 402 - I_u1(_jal); 403 - I_u1(_jr); 404 - I_u2s3u1(_ld); 405 - I_u2s3u1(_ll); 406 - I_u2s3u1(_lld); 407 - I_u1s2(_lui); 408 - I_u2s3u1(_lw); 409 - I_u1u2u3(_mfc0); 410 - I_u1u2u3(_mtc0); 411 - I_u2u1u3(_ori); 412 - I_0(_rfe); 413 - I_u2s3u1(_sc); 414 - I_u2s3u1(_scd); 415 - I_u2s3u1(_sd); 416 - I_u2u1u3(_sll); 417 - I_u2u1u3(_sra); 418 - I_u2u1u3(_srl); 419 - I_u3u1u2(_subu); 420 - I_u2s3u1(_sw); 421 - I_0(_tlbp); 422 - I_0(_tlbwi); 423 - I_0(_tlbwr); 424 - I_u3u1u2(_xor) 425 - I_u2u1u3(_xori); 426 - 427 - /* 428 - * handling labels 429 - */ 430 - 70 + /* Handle labels (which must be positive integers). */ 431 71 enum label_id { 432 - label_invalid, 433 - label_second_part, 72 + label_second_part = 1, 434 73 label_leave, 435 74 #ifdef MODULE_START 436 75 label_module_alloc, ··· 84 447 label_r3000_write_probe_fail, 85 448 }; 86 449 87 - struct label { 88 - u32 *addr; 89 - enum label_id lab; 90 - }; 91 - 92 - static void __init build_label(struct label **lab, u32 *addr, 93 - enum label_id l) 94 - { 95 - (*lab)->addr = addr; 96 - (*lab)->lab = l; 97 - (*lab)++; 98 - } 99 - 100 - #define L_LA(lb) \ 101 - static inline void __init l##lb(struct label **lab, u32 *addr) \ 102 - { \ 103 - build_label(lab, addr, label##lb); \ 104 - } 105 - 106 - L_LA(_second_part) 107 - L_LA(_leave) 450 + UASM_L_LA(_second_part) 451 + UASM_L_LA(_leave) 108 452 #ifdef MODULE_START 109 - L_LA(_module_alloc) 453 + UASM_L_LA(_module_alloc) 110 454 #endif 111 - L_LA(_vmalloc) 112 - L_LA(_vmalloc_done) 113 - L_LA(_tlbw_hazard) 114 - L_LA(_split) 115 - L_LA(_nopage_tlbl) 116 - L_LA(_nopage_tlbs) 117 - L_LA(_nopage_tlbm) 118 - L_LA(_smp_pgtable_change) 119 - L_LA(_r3000_write_probe_fail) 120 - 121 - /* convenience macros for instructions */ 122 - #ifdef CONFIG_64BIT 123 - # define i_LW(buf, rs, rt, off) i_ld(buf, rs, rt, off) 124 - # define i_SW(buf, rs, rt, off) i_sd(buf, rs, rt, off) 125 - # define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh) 126 - # define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh) 127 - # define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh) 128 - # define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd) 129 - # define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd) 130 - # define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val) 131 - # define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd) 132 - # define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd) 133 - # define i_LL(buf, rs, rt, off) i_lld(buf, rs, rt, off) 134 - # define i_SC(buf, rs, rt, off) i_scd(buf, rs, rt, off) 135 - #else 136 - # define i_LW(buf, rs, rt, off) i_lw(buf, rs, rt, off) 137 - # define i_SW(buf, rs, rt, off) i_sw(buf, rs, rt, off) 138 - # define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh) 139 - # define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh) 140 - # define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh) 141 - # define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd) 142 - # define i_MTC0(buf, rt, rd...) 
i_mtc0(buf, rt, rd) 143 - # define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val) 144 - # define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd) 145 - # define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd) 146 - # define i_LL(buf, rs, rt, off) i_ll(buf, rs, rt, off) 147 - # define i_SC(buf, rs, rt, off) i_sc(buf, rs, rt, off) 148 - #endif 149 - 150 - #define i_b(buf, off) i_beq(buf, 0, 0, off) 151 - #define i_beqz(buf, rs, off) i_beq(buf, rs, 0, off) 152 - #define i_beqzl(buf, rs, off) i_beql(buf, rs, 0, off) 153 - #define i_bnez(buf, rs, off) i_bne(buf, rs, 0, off) 154 - #define i_bnezl(buf, rs, off) i_bnel(buf, rs, 0, off) 155 - #define i_move(buf, a, b) i_ADDU(buf, a, 0, b) 156 - #define i_nop(buf) i_sll(buf, 0, 0, 0) 157 - #define i_ssnop(buf) i_sll(buf, 0, 0, 1) 158 - #define i_ehb(buf) i_sll(buf, 0, 0, 3) 159 - 160 - static int __init __maybe_unused in_compat_space_p(long addr) 161 - { 162 - /* Is this address in 32bit compat space? */ 163 - #ifdef CONFIG_64BIT 164 - return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L); 165 - #else 166 - return 1; 167 - #endif 168 - } 169 - 170 - static int __init __maybe_unused rel_highest(long val) 171 - { 172 - #ifdef CONFIG_64BIT 173 - return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; 174 - #else 175 - return 0; 176 - #endif 177 - } 178 - 179 - static int __init __maybe_unused rel_higher(long val) 180 - { 181 - #ifdef CONFIG_64BIT 182 - return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; 183 - #else 184 - return 0; 185 - #endif 186 - } 187 - 188 - static int __init rel_hi(long val) 189 - { 190 - return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; 191 - } 192 - 193 - static int __init rel_lo(long val) 194 - { 195 - return ((val & 0xffff) ^ 0x8000) - 0x8000; 196 - } 197 - 198 - static void __init i_LA_mostly(u32 **buf, unsigned int rs, long addr) 199 - { 200 - if (!in_compat_space_p(addr)) { 201 - i_lui(buf, rs, rel_highest(addr)); 202 - if (rel_higher(addr)) 203 - i_daddiu(buf, rs, rs, rel_higher(addr)); 204 - if (rel_hi(addr)) { 205 - i_dsll(buf, rs, rs, 16); 206 - i_daddiu(buf, rs, rs, rel_hi(addr)); 207 - i_dsll(buf, rs, rs, 16); 208 - } else 209 - i_dsll32(buf, rs, rs, 0); 210 - } else 211 - i_lui(buf, rs, rel_hi(addr)); 212 - } 213 - 214 - static void __init __maybe_unused i_LA(u32 **buf, unsigned int rs, long addr) 215 - { 216 - i_LA_mostly(buf, rs, addr); 217 - if (rel_lo(addr)) { 218 - if (!in_compat_space_p(addr)) 219 - i_daddiu(buf, rs, rs, rel_lo(addr)); 220 - else 221 - i_addiu(buf, rs, rs, rel_lo(addr)); 222 - } 223 - } 224 - 225 - /* 226 - * handle relocations 227 - */ 228 - 229 - struct reloc { 230 - u32 *addr; 231 - unsigned int type; 232 - enum label_id lab; 233 - }; 234 - 235 - static void __init r_mips_pc16(struct reloc **rel, u32 *addr, 236 - enum label_id l) 237 - { 238 - (*rel)->addr = addr; 239 - (*rel)->type = R_MIPS_PC16; 240 - (*rel)->lab = l; 241 - (*rel)++; 242 - } 243 - 244 - static inline void __resolve_relocs(struct reloc *rel, struct label *lab) 245 - { 246 - long laddr = (long)lab->addr; 247 - long raddr = (long)rel->addr; 248 - 249 - switch (rel->type) { 250 - case R_MIPS_PC16: 251 - *rel->addr |= build_bimm(laddr - (raddr + 4)); 252 - break; 253 - 254 - default: 255 - panic("Unsupported TLB synthesizer relocation %d", 256 - rel->type); 257 - } 258 - } 259 - 260 - static void __init resolve_relocs(struct reloc *rel, struct label *lab) 261 - { 262 - struct label *l; 263 - 264 - for (; rel->lab != label_invalid; rel++) 265 - for (l = lab; l->lab != 
label_invalid; l++) 266 - if (rel->lab == l->lab) 267 - __resolve_relocs(rel, l); 268 - } 269 - 270 - static void __init move_relocs(struct reloc *rel, u32 *first, u32 *end, 271 - long off) 272 - { 273 - for (; rel->lab != label_invalid; rel++) 274 - if (rel->addr >= first && rel->addr < end) 275 - rel->addr += off; 276 - } 277 - 278 - static void __init move_labels(struct label *lab, u32 *first, u32 *end, 279 - long off) 280 - { 281 - for (; lab->lab != label_invalid; lab++) 282 - if (lab->addr >= first && lab->addr < end) 283 - lab->addr += off; 284 - } 285 - 286 - static void __init copy_handler(struct reloc *rel, struct label *lab, 287 - u32 *first, u32 *end, u32 *target) 288 - { 289 - long off = (long)(target - first); 290 - 291 - memcpy(target, first, (end - first) * sizeof(u32)); 292 - 293 - move_relocs(rel, first, end, off); 294 - move_labels(lab, first, end, off); 295 - } 296 - 297 - static int __init __maybe_unused insn_has_bdelay(struct reloc *rel, 298 - u32 *addr) 299 - { 300 - for (; rel->lab != label_invalid; rel++) { 301 - if (rel->addr == addr 302 - && (rel->type == R_MIPS_PC16 303 - || rel->type == R_MIPS_26)) 304 - return 1; 305 - } 306 - 307 - return 0; 308 - } 309 - 310 - /* convenience functions for labeled branches */ 311 - static void __init __maybe_unused 312 - il_bltz(u32 **p, struct reloc **r, unsigned int reg, enum label_id l) 313 - { 314 - r_mips_pc16(r, *p, l); 315 - i_bltz(p, reg, 0); 316 - } 317 - 318 - static void __init __maybe_unused il_b(u32 **p, struct reloc **r, 319 - enum label_id l) 320 - { 321 - r_mips_pc16(r, *p, l); 322 - i_b(p, 0); 323 - } 324 - 325 - static void __init il_beqz(u32 **p, struct reloc **r, unsigned int reg, 326 - enum label_id l) 327 - { 328 - r_mips_pc16(r, *p, l); 329 - i_beqz(p, reg, 0); 330 - } 331 - 332 - static void __init __maybe_unused 333 - il_beqzl(u32 **p, struct reloc **r, unsigned int reg, enum label_id l) 334 - { 335 - r_mips_pc16(r, *p, l); 336 - i_beqzl(p, reg, 0); 337 - } 338 - 339 - static void __init il_bnez(u32 **p, struct reloc **r, unsigned int reg, 340 - enum label_id l) 341 - { 342 - r_mips_pc16(r, *p, l); 343 - i_bnez(p, reg, 0); 344 - } 345 - 346 - static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg, 347 - enum label_id l) 348 - { 349 - r_mips_pc16(r, *p, l); 350 - i_bgezl(p, reg, 0); 351 - } 352 - 353 - static void __init __maybe_unused 354 - il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l) 355 - { 356 - r_mips_pc16(r, *p, l); 357 - i_bgez(p, reg, 0); 358 - } 455 + UASM_L_LA(_vmalloc) 456 + UASM_L_LA(_vmalloc_done) 457 + UASM_L_LA(_tlbw_hazard) 458 + UASM_L_LA(_split) 459 + UASM_L_LA(_nopage_tlbl) 460 + UASM_L_LA(_nopage_tlbs) 461 + UASM_L_LA(_nopage_tlbm) 462 + UASM_L_LA(_smp_pgtable_change) 463 + UASM_L_LA(_r3000_write_probe_fail) 359 464 360 465 /* 361 466 * For debug purposes. 
··· 131 752 #define C0_XCONTEXT 20, 0 132 753 133 754 #ifdef CONFIG_64BIT 134 - # define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT) 755 + # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT) 135 756 #else 136 - # define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_CONTEXT) 757 + # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT) 137 758 #endif 138 759 139 760 /* The worst case length of the handler is around 18 instructions for ··· 147 768 static u32 tlb_handler[128] __initdata; 148 769 149 770 /* simply assume worst case size for labels and relocs */ 150 - static struct label labels[128] __initdata; 151 - static struct reloc relocs[128] __initdata; 771 + static struct uasm_label labels[128] __initdata; 772 + static struct uasm_reloc relocs[128] __initdata; 152 773 153 774 /* 154 775 * The R3000 TLB handler is simple. ··· 161 782 memset(tlb_handler, 0, sizeof(tlb_handler)); 162 783 p = tlb_handler; 163 784 164 - i_mfc0(&p, K0, C0_BADVADDR); 165 - i_lui(&p, K1, rel_hi(pgdc)); /* cp0 delay */ 166 - i_lw(&p, K1, rel_lo(pgdc), K1); 167 - i_srl(&p, K0, K0, 22); /* load delay */ 168 - i_sll(&p, K0, K0, 2); 169 - i_addu(&p, K1, K1, K0); 170 - i_mfc0(&p, K0, C0_CONTEXT); 171 - i_lw(&p, K1, 0, K1); /* cp0 delay */ 172 - i_andi(&p, K0, K0, 0xffc); /* load delay */ 173 - i_addu(&p, K1, K1, K0); 174 - i_lw(&p, K0, 0, K1); 175 - i_nop(&p); /* load delay */ 176 - i_mtc0(&p, K0, C0_ENTRYLO0); 177 - i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ 178 - i_tlbwr(&p); /* cp0 delay */ 179 - i_jr(&p, K1); 180 - i_rfe(&p); /* branch delay */ 785 + uasm_i_mfc0(&p, K0, C0_BADVADDR); 786 + uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */ 787 + uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1); 788 + uasm_i_srl(&p, K0, K0, 22); /* load delay */ 789 + uasm_i_sll(&p, K0, K0, 2); 790 + uasm_i_addu(&p, K1, K1, K0); 791 + uasm_i_mfc0(&p, K0, C0_CONTEXT); 792 + uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */ 793 + uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ 794 + uasm_i_addu(&p, K1, K1, K0); 795 + uasm_i_lw(&p, K0, 0, K1); 796 + uasm_i_nop(&p); /* load delay */ 797 + uasm_i_mtc0(&p, K0, C0_ENTRYLO0); 798 + uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ 799 + uasm_i_tlbwr(&p); /* cp0 delay */ 800 + uasm_i_jr(&p, K1); 801 + uasm_i_rfe(&p); /* branch delay */ 181 802 182 803 if (p > tlb_handler + 32) 183 804 panic("TLB refill handler space exceeded"); 184 805 185 - pr_info("Synthesized TLB refill handler (%u instructions).\n", 186 - (unsigned int)(p - tlb_handler)); 806 + pr_debug("Wrote TLB refill handler (%u instructions).\n", 807 + (unsigned int)(p - tlb_handler)); 187 808 188 809 memcpy((void *)ebase, tlb_handler, 0x80); 189 810 ··· 229 850 case CPU_R5000: 230 851 case CPU_R5000A: 231 852 case CPU_NEVADA: 232 - i_nop(p); 233 - i_tlbp(p); 853 + uasm_i_nop(p); 854 + uasm_i_tlbp(p); 234 855 break; 235 856 236 857 default: 237 - i_tlbp(p); 858 + uasm_i_tlbp(p); 238 859 break; 239 860 } 240 861 } ··· 245 866 */ 246 867 enum tlb_write_entry { tlb_random, tlb_indexed }; 247 868 248 - static void __init build_tlb_write_entry(u32 **p, struct label **l, 249 - struct reloc **r, 869 + static void __init build_tlb_write_entry(u32 **p, struct uasm_label **l, 870 + struct uasm_reloc **r, 250 871 enum tlb_write_entry wmode) 251 872 { 252 873 void(*tlbw)(u32 **) = NULL; 253 874 254 875 switch (wmode) { 255 - case tlb_random: tlbw = i_tlbwr; break; 256 - case tlb_indexed: tlbw = i_tlbwi; break; 876 + case tlb_random: tlbw = uasm_i_tlbwr; break; 877 + case tlb_indexed: tlbw = uasm_i_tlbwi; break; 257 878 } 258 879 259 
880 if (cpu_has_mips_r2) { 260 - i_ehb(p); 881 + uasm_i_ehb(p); 261 882 tlbw(p); 262 883 return; 263 884 } ··· 273 894 * This branch uses up a mtc0 hazard nop slot and saves 274 895 * two nops after the tlbw instruction. 275 896 */ 276 - il_bgezl(p, r, 0, label_tlbw_hazard); 897 + uasm_il_bgezl(p, r, 0, label_tlbw_hazard); 277 898 tlbw(p); 278 - l_tlbw_hazard(l, *p); 279 - i_nop(p); 899 + uasm_l_tlbw_hazard(l, *p); 900 + uasm_i_nop(p); 280 901 break; 281 902 282 903 case CPU_R4600: 283 904 case CPU_R4700: 284 905 case CPU_R5000: 285 906 case CPU_R5000A: 286 - i_nop(p); 907 + uasm_i_nop(p); 287 908 tlbw(p); 288 - i_nop(p); 909 + uasm_i_nop(p); 289 910 break; 290 911 291 912 case CPU_R4300: ··· 299 920 case CPU_AU1210: 300 921 case CPU_AU1250: 301 922 case CPU_PR4450: 302 - i_nop(p); 923 + uasm_i_nop(p); 303 924 tlbw(p); 304 925 break; 305 926 ··· 316 937 case CPU_BCM4710: 317 938 case CPU_LOONGSON2: 318 939 if (m4kc_tlbp_war()) 319 - i_nop(p); 940 + uasm_i_nop(p); 320 941 tlbw(p); 321 942 break; 322 943 323 944 case CPU_NEVADA: 324 - i_nop(p); /* QED specifies 2 nops hazard */ 945 + uasm_i_nop(p); /* QED specifies 2 nops hazard */ 325 946 /* 326 947 * This branch uses up a mtc0 hazard nop slot and saves 327 948 * a nop after the tlbw instruction. 328 949 */ 329 - il_bgezl(p, r, 0, label_tlbw_hazard); 950 + uasm_il_bgezl(p, r, 0, label_tlbw_hazard); 330 951 tlbw(p); 331 - l_tlbw_hazard(l, *p); 952 + uasm_l_tlbw_hazard(l, *p); 332 953 break; 333 954 334 955 case CPU_RM7000: 335 - i_nop(p); 336 - i_nop(p); 337 - i_nop(p); 338 - i_nop(p); 956 + uasm_i_nop(p); 957 + uasm_i_nop(p); 958 + uasm_i_nop(p); 959 + uasm_i_nop(p); 339 960 tlbw(p); 340 961 break; 341 962 ··· 346 967 * cpu cycles and use for data translations should not occur 347 968 * for 3 cpu cycles. 348 969 */ 349 - i_ssnop(p); 350 - i_ssnop(p); 351 - i_ssnop(p); 352 - i_ssnop(p); 970 + uasm_i_ssnop(p); 971 + uasm_i_ssnop(p); 972 + uasm_i_ssnop(p); 973 + uasm_i_ssnop(p); 353 974 tlbw(p); 354 - i_ssnop(p); 355 - i_ssnop(p); 356 - i_ssnop(p); 357 - i_ssnop(p); 975 + uasm_i_ssnop(p); 976 + uasm_i_ssnop(p); 977 + uasm_i_ssnop(p); 978 + uasm_i_ssnop(p); 358 979 break; 359 980 360 981 case CPU_VR4111: ··· 362 983 case CPU_VR4122: 363 984 case CPU_VR4181: 364 985 case CPU_VR4181A: 365 - i_nop(p); 366 - i_nop(p); 986 + uasm_i_nop(p); 987 + uasm_i_nop(p); 367 988 tlbw(p); 368 - i_nop(p); 369 - i_nop(p); 989 + uasm_i_nop(p); 990 + uasm_i_nop(p); 370 991 break; 371 992 372 993 case CPU_VR4131: 373 994 case CPU_VR4133: 374 995 case CPU_R5432: 375 - i_nop(p); 376 - i_nop(p); 996 + uasm_i_nop(p); 997 + uasm_i_nop(p); 377 998 tlbw(p); 378 999 break; 379 1000 ··· 390 1011 * TMP will be clobbered, PTR will hold the pmd entry. 391 1012 */ 392 1013 static void __init 393 - build_get_pmde64(u32 **p, struct label **l, struct reloc **r, 1014 + build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 394 1015 unsigned int tmp, unsigned int ptr) 395 1016 { 396 1017 long pgdc = (long)pgd_current; ··· 398 1019 /* 399 1020 * The vmalloc handling is not in the hotpath. 400 1021 */ 401 - i_dmfc0(p, tmp, C0_BADVADDR); 1022 + uasm_i_dmfc0(p, tmp, C0_BADVADDR); 402 1023 #ifdef MODULE_START 403 - il_bltz(p, r, tmp, label_module_alloc); 1024 + uasm_il_bltz(p, r, tmp, label_module_alloc); 404 1025 #else 405 - il_bltz(p, r, tmp, label_vmalloc); 1026 + uasm_il_bltz(p, r, tmp, label_vmalloc); 406 1027 #endif 407 - /* No i_nop needed here, since the next insn doesn't touch TMP. */ 1028 + /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. 
*/ 408 1029 409 1030 #ifdef CONFIG_SMP 410 1031 # ifdef CONFIG_MIPS_MT_SMTC 411 1032 /* 412 1033 * SMTC uses TCBind value as "CPU" index 413 1034 */ 414 - i_mfc0(p, ptr, C0_TCBIND); 415 - i_dsrl(p, ptr, ptr, 19); 1035 + uasm_i_mfc0(p, ptr, C0_TCBIND); 1036 + uasm_i_dsrl(p, ptr, ptr, 19); 416 1037 # else 417 1038 /* 418 1039 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3 419 1040 * stored in CONTEXT. 420 1041 */ 421 - i_dmfc0(p, ptr, C0_CONTEXT); 422 - i_dsrl(p, ptr, ptr, 23); 1042 + uasm_i_dmfc0(p, ptr, C0_CONTEXT); 1043 + uasm_i_dsrl(p, ptr, ptr, 23); 423 1044 #endif 424 - i_LA_mostly(p, tmp, pgdc); 425 - i_daddu(p, ptr, ptr, tmp); 426 - i_dmfc0(p, tmp, C0_BADVADDR); 427 - i_ld(p, ptr, rel_lo(pgdc), ptr); 1045 + UASM_i_LA_mostly(p, tmp, pgdc); 1046 + uasm_i_daddu(p, ptr, ptr, tmp); 1047 + uasm_i_dmfc0(p, tmp, C0_BADVADDR); 1048 + uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); 428 1049 #else 429 - i_LA_mostly(p, ptr, pgdc); 430 - i_ld(p, ptr, rel_lo(pgdc), ptr); 1050 + UASM_i_LA_mostly(p, ptr, pgdc); 1051 + uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); 431 1052 #endif 432 1053 433 - l_vmalloc_done(l, *p); 1054 + uasm_l_vmalloc_done(l, *p); 434 1055 435 1056 if (PGDIR_SHIFT - 3 < 32) /* get pgd offset in bytes */ 436 - i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3); 1057 + uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3); 437 1058 else 438 - i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32); 1059 + uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32); 439 1060 440 - i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3); 441 - i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */ 442 - i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 443 - i_ld(p, ptr, 0, ptr); /* get pmd pointer */ 444 - i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */ 445 - i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3); 446 - i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */ 1061 + uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3); 1062 + uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */ 1063 + uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 1064 + uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */ 1065 + uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */ 1066 + uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3); 1067 + uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */ 447 1068 } 448 1069 449 1070 /* ··· 451 1072 * PTR will hold the pgd for vmalloc. 
452 1073 */ 453 1074 static void __init 454 - build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r, 1075 + build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 455 1076 unsigned int bvaddr, unsigned int ptr) 456 1077 { 457 1078 long swpd = (long)swapper_pg_dir; ··· 459 1080 #ifdef MODULE_START 460 1081 long modd = (long)module_pg_dir; 461 1082 462 - l_module_alloc(l, *p); 1083 + uasm_l_module_alloc(l, *p); 463 1084 /* 464 1085 * Assumption: 465 1086 * VMALLOC_START >= 0xc000000000000000UL 466 1087 * MODULE_START >= 0xe000000000000000UL 467 1088 */ 468 - i_SLL(p, ptr, bvaddr, 2); 469 - il_bgez(p, r, ptr, label_vmalloc); 1089 + UASM_i_SLL(p, ptr, bvaddr, 2); 1090 + uasm_il_bgez(p, r, ptr, label_vmalloc); 470 1091 471 - if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START)) { 472 - i_lui(p, ptr, rel_hi(MODULE_START)); /* delay slot */ 1092 + if (uasm_in_compat_space_p(MODULE_START) && 1093 + !uasm_rel_lo(MODULE_START)) { 1094 + uasm_i_lui(p, ptr, uasm_rel_hi(MODULE_START)); /* delay slot */ 473 1095 } else { 474 1096 /* unlikely configuration */ 475 - i_nop(p); /* delay slot */ 476 - i_LA(p, ptr, MODULE_START); 1097 + uasm_i_nop(p); /* delay slot */ 1098 + UASM_i_LA(p, ptr, MODULE_START); 477 1099 } 478 - i_dsubu(p, bvaddr, bvaddr, ptr); 1100 + uasm_i_dsubu(p, bvaddr, bvaddr, ptr); 479 1101 480 - if (in_compat_space_p(modd) && !rel_lo(modd)) { 481 - il_b(p, r, label_vmalloc_done); 482 - i_lui(p, ptr, rel_hi(modd)); 1102 + if (uasm_in_compat_space_p(modd) && !uasm_rel_lo(modd)) { 1103 + uasm_il_b(p, r, label_vmalloc_done); 1104 + uasm_i_lui(p, ptr, uasm_rel_hi(modd)); 483 1105 } else { 484 - i_LA_mostly(p, ptr, modd); 485 - il_b(p, r, label_vmalloc_done); 486 - if (in_compat_space_p(modd)) 487 - i_addiu(p, ptr, ptr, rel_lo(modd)); 1106 + UASM_i_LA_mostly(p, ptr, modd); 1107 + uasm_il_b(p, r, label_vmalloc_done); 1108 + if (uasm_in_compat_space_p(modd)) 1109 + uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(modd)); 488 1110 else 489 - i_daddiu(p, ptr, ptr, rel_lo(modd)); 1111 + uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(modd)); 490 1112 } 491 1113 492 - l_vmalloc(l, *p); 493 - if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START) && 1114 + uasm_l_vmalloc(l, *p); 1115 + if (uasm_in_compat_space_p(MODULE_START) && 1116 + !uasm_rel_lo(MODULE_START) && 494 1117 MODULE_START << 32 == VMALLOC_START) 495 - i_dsll32(p, ptr, ptr, 0); /* typical case */ 1118 + uasm_i_dsll32(p, ptr, ptr, 0); /* typical case */ 496 1119 else 497 - i_LA(p, ptr, VMALLOC_START); 1120 + UASM_i_LA(p, ptr, VMALLOC_START); 498 1121 #else 499 - l_vmalloc(l, *p); 500 - i_LA(p, ptr, VMALLOC_START); 1122 + uasm_l_vmalloc(l, *p); 1123 + UASM_i_LA(p, ptr, VMALLOC_START); 501 1124 #endif 502 - i_dsubu(p, bvaddr, bvaddr, ptr); 1125 + uasm_i_dsubu(p, bvaddr, bvaddr, ptr); 503 1126 504 - if (in_compat_space_p(swpd) && !rel_lo(swpd)) { 505 - il_b(p, r, label_vmalloc_done); 506 - i_lui(p, ptr, rel_hi(swpd)); 1127 + if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) { 1128 + uasm_il_b(p, r, label_vmalloc_done); 1129 + uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); 507 1130 } else { 508 - i_LA_mostly(p, ptr, swpd); 509 - il_b(p, r, label_vmalloc_done); 510 - if (in_compat_space_p(swpd)) 511 - i_addiu(p, ptr, ptr, rel_lo(swpd)); 1131 + UASM_i_LA_mostly(p, ptr, swpd); 1132 + uasm_il_b(p, r, label_vmalloc_done); 1133 + if (uasm_in_compat_space_p(swpd)) 1134 + uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd)); 512 1135 else 513 - i_daddiu(p, ptr, ptr, rel_lo(swpd)); 1136 + uasm_i_daddiu(p, ptr, ptr, 
uasm_rel_lo(swpd)); 514 1137 } 515 1138 } 516 1139 ··· 533 1152 /* 534 1153 * SMTC uses TCBind value as "CPU" index 535 1154 */ 536 - i_mfc0(p, ptr, C0_TCBIND); 537 - i_LA_mostly(p, tmp, pgdc); 538 - i_srl(p, ptr, ptr, 19); 1155 + uasm_i_mfc0(p, ptr, C0_TCBIND); 1156 + UASM_i_LA_mostly(p, tmp, pgdc); 1157 + uasm_i_srl(p, ptr, ptr, 19); 539 1158 #else 540 1159 /* 541 1160 * smp_processor_id() << 3 is stored in CONTEXT. 542 1161 */ 543 - i_mfc0(p, ptr, C0_CONTEXT); 544 - i_LA_mostly(p, tmp, pgdc); 545 - i_srl(p, ptr, ptr, 23); 1162 + uasm_i_mfc0(p, ptr, C0_CONTEXT); 1163 + UASM_i_LA_mostly(p, tmp, pgdc); 1164 + uasm_i_srl(p, ptr, ptr, 23); 546 1165 #endif 547 - i_addu(p, ptr, tmp, ptr); 1166 + uasm_i_addu(p, ptr, tmp, ptr); 548 1167 #else 549 - i_LA_mostly(p, ptr, pgdc); 1168 + UASM_i_LA_mostly(p, ptr, pgdc); 550 1169 #endif 551 - i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 552 - i_lw(p, ptr, rel_lo(pgdc), ptr); 553 - i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */ 554 - i_sll(p, tmp, tmp, PGD_T_LOG2); 555 - i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ 1170 + uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ 1171 + uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); 1172 + uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */ 1173 + uasm_i_sll(p, tmp, tmp, PGD_T_LOG2); 1174 + uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ 556 1175 } 557 1176 558 1177 #endif /* !CONFIG_64BIT */ ··· 579 1198 } 580 1199 581 1200 if (shift) 582 - i_SRL(p, ctx, ctx, shift); 583 - i_andi(p, ctx, ctx, mask); 1201 + UASM_i_SRL(p, ctx, ctx, shift); 1202 + uasm_i_andi(p, ctx, ctx, mask); 584 1203 } 585 1204 586 1205 static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) ··· 594 1213 */ 595 1214 switch (current_cpu_type()) { 596 1215 case CPU_NEVADA: 597 - i_LW(p, ptr, 0, ptr); 1216 + UASM_i_LW(p, ptr, 0, ptr); 598 1217 GET_CONTEXT(p, tmp); /* get context reg */ 599 1218 break; 600 1219 601 1220 default: 602 1221 GET_CONTEXT(p, tmp); /* get context reg */ 603 - i_LW(p, ptr, 0, ptr); 1222 + UASM_i_LW(p, ptr, 0, ptr); 604 1223 break; 605 1224 } 606 1225 607 1226 build_adjust_context(p, tmp); 608 - i_ADDU(p, ptr, ptr, tmp); /* add in offset */ 1227 + UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ 609 1228 } 610 1229 611 1230 static void __init build_update_entries(u32 **p, unsigned int tmp, ··· 617 1236 */ 618 1237 #ifdef CONFIG_64BIT_PHYS_ADDR 619 1238 if (cpu_has_64bits) { 620 - i_ld(p, tmp, 0, ptep); /* get even pte */ 621 - i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ 622 - i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */ 623 - i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */ 624 - i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */ 625 - i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */ 1239 + uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ 1240 + uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ 1241 + uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */ 1242 + uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */ 1243 + uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */ 1244 + uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */ 626 1245 } else { 627 1246 int pte_off_even = sizeof(pte_t) / 2; 628 1247 int pte_off_odd = pte_off_even + sizeof(pte_t); 629 1248 630 1249 /* The pte entries are pre-shifted */ 631 - i_lw(p, tmp, pte_off_even, ptep); /* get even pte */ 632 - i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */ 633 - i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */ 634 - i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */ 1250 + uasm_i_lw(p, tmp, 
pte_off_even, ptep); /* get even pte */ 1251 + uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */ 1252 + uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */ 1253 + uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */ 635 1254 } 636 1255 #else 637 - i_LW(p, tmp, 0, ptep); /* get even pte */ 638 - i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ 1256 + UASM_i_LW(p, tmp, 0, ptep); /* get even pte */ 1257 + UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ 639 1258 if (r45k_bvahwbug()) 640 1259 build_tlb_probe_entry(p); 641 - i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */ 1260 + UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */ 642 1261 if (r4k_250MHZhwbug()) 643 - i_mtc0(p, 0, C0_ENTRYLO0); 644 - i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */ 645 - i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */ 1262 + uasm_i_mtc0(p, 0, C0_ENTRYLO0); 1263 + uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */ 1264 + UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */ 646 1265 if (r45k_bvahwbug()) 647 - i_mfc0(p, tmp, C0_INDEX); 1266 + uasm_i_mfc0(p, tmp, C0_INDEX); 648 1267 if (r4k_250MHZhwbug()) 649 - i_mtc0(p, 0, C0_ENTRYLO1); 650 - i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */ 1268 + uasm_i_mtc0(p, 0, C0_ENTRYLO1); 1269 + uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */ 651 1270 #endif 652 1271 } 653 1272 654 1273 static void __init build_r4000_tlb_refill_handler(void) 655 1274 { 656 1275 u32 *p = tlb_handler; 657 - struct label *l = labels; 658 - struct reloc *r = relocs; 1276 + struct uasm_label *l = labels; 1277 + struct uasm_reloc *r = relocs; 659 1278 u32 *f; 660 1279 unsigned int final_len; 661 1280 ··· 668 1287 * create the plain linear handler 669 1288 */ 670 1289 if (bcm1250_m3_war()) { 671 - i_MFC0(&p, K0, C0_BADVADDR); 672 - i_MFC0(&p, K1, C0_ENTRYHI); 673 - i_xor(&p, K0, K0, K1); 674 - i_SRL(&p, K0, K0, PAGE_SHIFT + 1); 675 - il_bnez(&p, &r, K0, label_leave); 676 - /* No need for i_nop */ 1290 + UASM_i_MFC0(&p, K0, C0_BADVADDR); 1291 + UASM_i_MFC0(&p, K1, C0_ENTRYHI); 1292 + uasm_i_xor(&p, K0, K0, K1); 1293 + UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); 1294 + uasm_il_bnez(&p, &r, K0, label_leave); 1295 + /* No need for uasm_i_nop */ 677 1296 } 678 1297 679 1298 #ifdef CONFIG_64BIT ··· 685 1304 build_get_ptep(&p, K0, K1); 686 1305 build_update_entries(&p, K0, K1); 687 1306 build_tlb_write_entry(&p, &l, &r, tlb_random); 688 - l_leave(&l, p); 689 - i_eret(&p); /* return from trap */ 1307 + uasm_l_leave(&l, p); 1308 + uasm_i_eret(&p); /* return from trap */ 690 1309 691 1310 #ifdef CONFIG_64BIT 692 1311 build_get_pgd_vmalloc64(&p, &l, &r, K0, K1); ··· 706 1325 #else 707 1326 if (((p - tlb_handler) > 63) 708 1327 || (((p - tlb_handler) > 61) 709 - && insn_has_bdelay(relocs, tlb_handler + 29))) 1328 + && uasm_insn_has_bdelay(relocs, tlb_handler + 29))) 710 1329 panic("TLB refill handler space exceeded"); 711 1330 #endif 712 1331 ··· 716 1335 #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) 717 1336 f = final_handler; 718 1337 /* Simplest case, just copy the handler. */ 719 - copy_handler(relocs, labels, tlb_handler, p, f); 1338 + uasm_copy_handler(relocs, labels, tlb_handler, p, f); 720 1339 final_len = p - tlb_handler; 721 1340 #else /* CONFIG_64BIT */ 722 1341 f = final_handler + 32; 723 1342 if ((p - tlb_handler) <= 32) { 724 1343 /* Just copy the handler. 
*/ 725 - copy_handler(relocs, labels, tlb_handler, p, f); 1344 + uasm_copy_handler(relocs, labels, tlb_handler, p, f); 726 1345 final_len = p - tlb_handler; 727 1346 } else { 728 1347 u32 *split = tlb_handler + 30; ··· 730 1349 /* 731 1350 * Find the split point. 732 1351 */ 733 - if (insn_has_bdelay(relocs, split - 1)) 1352 + if (uasm_insn_has_bdelay(relocs, split - 1)) 734 1353 split--; 735 1354 736 1355 /* Copy first part of the handler. */ 737 - copy_handler(relocs, labels, tlb_handler, split, f); 1356 + uasm_copy_handler(relocs, labels, tlb_handler, split, f); 738 1357 f += split - tlb_handler; 739 1358 740 1359 /* Insert branch. */ 741 - l_split(&l, final_handler); 742 - il_b(&f, &r, label_split); 743 - if (insn_has_bdelay(relocs, split)) 744 - i_nop(&f); 1360 + uasm_l_split(&l, final_handler); 1361 + uasm_il_b(&f, &r, label_split); 1362 + if (uasm_insn_has_bdelay(relocs, split)) 1363 + uasm_i_nop(&f); 745 1364 else { 746 - copy_handler(relocs, labels, split, split + 1, f); 747 - move_labels(labels, f, f + 1, -1); 1365 + uasm_copy_handler(relocs, labels, split, split + 1, f); 1366 + uasm_move_labels(labels, f, f + 1, -1); 748 1367 f++; 749 1368 split++; 750 1369 } 751 1370 752 1371 /* Copy the rest of the handler. */ 753 - copy_handler(relocs, labels, split, p, final_handler); 1372 + uasm_copy_handler(relocs, labels, split, p, final_handler); 754 1373 final_len = (f - (final_handler + 32)) + (p - split); 755 1374 } 756 1375 #endif /* CONFIG_64BIT */ 757 1376 758 - resolve_relocs(relocs, labels); 759 - pr_info("Synthesized TLB refill handler (%u instructions).\n", 760 - final_len); 1377 + uasm_resolve_relocs(relocs, labels); 1378 + pr_debug("Wrote TLB refill handler (%u instructions).\n", 1379 + final_len); 761 1380 762 1381 memcpy((void *)ebase, final_handler, 0x100); 763 1382 ··· 784 1403 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; 785 1404 786 1405 static void __init 787 - iPTE_LW(u32 **p, struct label **l, unsigned int pte, unsigned int ptr) 1406 + iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr) 788 1407 { 789 1408 #ifdef CONFIG_SMP 790 1409 # ifdef CONFIG_64BIT_PHYS_ADDR 791 1410 if (cpu_has_64bits) 792 - i_lld(p, pte, 0, ptr); 1411 + uasm_i_lld(p, pte, 0, ptr); 793 1412 else 794 1413 # endif 795 - i_LL(p, pte, 0, ptr); 1414 + UASM_i_LL(p, pte, 0, ptr); 796 1415 #else 797 1416 # ifdef CONFIG_64BIT_PHYS_ADDR 798 1417 if (cpu_has_64bits) 799 - i_ld(p, pte, 0, ptr); 1418 + uasm_i_ld(p, pte, 0, ptr); 800 1419 else 801 1420 # endif 802 - i_LW(p, pte, 0, ptr); 1421 + UASM_i_LW(p, pte, 0, ptr); 803 1422 #endif 804 1423 } 805 1424 806 1425 static void __init 807 - iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, unsigned int ptr, 1426 + iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, 808 1427 unsigned int mode) 809 1428 { 810 1429 #ifdef CONFIG_64BIT_PHYS_ADDR 811 1430 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); 812 1431 #endif 813 1432 814 - i_ori(p, pte, pte, mode); 1433 + uasm_i_ori(p, pte, pte, mode); 815 1434 #ifdef CONFIG_SMP 816 1435 # ifdef CONFIG_64BIT_PHYS_ADDR 817 1436 if (cpu_has_64bits) 818 - i_scd(p, pte, 0, ptr); 1437 + uasm_i_scd(p, pte, 0, ptr); 819 1438 else 820 1439 # endif 821 - i_SC(p, pte, 0, ptr); 1440 + UASM_i_SC(p, pte, 0, ptr); 822 1441 823 1442 if (r10000_llsc_war()) 824 - il_beqzl(p, r, pte, label_smp_pgtable_change); 1443 + uasm_il_beqzl(p, r, pte, label_smp_pgtable_change); 825 1444 else 826 - il_beqz(p, r, pte, label_smp_pgtable_change); 1445 + uasm_il_beqz(p, r, pte, 
label_smp_pgtable_change); 827 1446 828 1447 # ifdef CONFIG_64BIT_PHYS_ADDR 829 1448 if (!cpu_has_64bits) { 830 - /* no i_nop needed */ 831 - i_ll(p, pte, sizeof(pte_t) / 2, ptr); 832 - i_ori(p, pte, pte, hwmode); 833 - i_sc(p, pte, sizeof(pte_t) / 2, ptr); 834 - il_beqz(p, r, pte, label_smp_pgtable_change); 835 - /* no i_nop needed */ 836 - i_lw(p, pte, 0, ptr); 1449 + /* no uasm_i_nop needed */ 1450 + uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); 1451 + uasm_i_ori(p, pte, pte, hwmode); 1452 + uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr); 1453 + uasm_il_beqz(p, r, pte, label_smp_pgtable_change); 1454 + /* no uasm_i_nop needed */ 1455 + uasm_i_lw(p, pte, 0, ptr); 837 1456 } else 838 - i_nop(p); 1457 + uasm_i_nop(p); 839 1458 # else 840 - i_nop(p); 1459 + uasm_i_nop(p); 841 1460 # endif 842 1461 #else 843 1462 # ifdef CONFIG_64BIT_PHYS_ADDR 844 1463 if (cpu_has_64bits) 845 - i_sd(p, pte, 0, ptr); 1464 + uasm_i_sd(p, pte, 0, ptr); 846 1465 else 847 1466 # endif 848 - i_SW(p, pte, 0, ptr); 1467 + UASM_i_SW(p, pte, 0, ptr); 849 1468 850 1469 # ifdef CONFIG_64BIT_PHYS_ADDR 851 1470 if (!cpu_has_64bits) { 852 - i_lw(p, pte, sizeof(pte_t) / 2, ptr); 853 - i_ori(p, pte, pte, hwmode); 854 - i_sw(p, pte, sizeof(pte_t) / 2, ptr); 855 - i_lw(p, pte, 0, ptr); 1471 + uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr); 1472 + uasm_i_ori(p, pte, pte, hwmode); 1473 + uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr); 1474 + uasm_i_lw(p, pte, 0, ptr); 856 1475 } 857 1476 # endif 858 1477 #endif ··· 864 1483 * with it's original value. 865 1484 */ 866 1485 static void __init 867 - build_pte_present(u32 **p, struct label **l, struct reloc **r, 1486 + build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 868 1487 unsigned int pte, unsigned int ptr, enum label_id lid) 869 1488 { 870 - i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); 871 - i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); 872 - il_bnez(p, r, pte, lid); 1489 + uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); 1490 + uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); 1491 + uasm_il_bnez(p, r, pte, lid); 873 1492 iPTE_LW(p, l, pte, ptr); 874 1493 } 875 1494 876 1495 /* Make PTE valid, store result in PTR. */ 877 1496 static void __init 878 - build_make_valid(u32 **p, struct reloc **r, unsigned int pte, 1497 + build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, 879 1498 unsigned int ptr) 880 1499 { 881 1500 unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED; ··· 888 1507 * restore PTE with value from PTR when done. 889 1508 */ 890 1509 static void __init 891 - build_pte_writable(u32 **p, struct label **l, struct reloc **r, 1510 + build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 892 1511 unsigned int pte, unsigned int ptr, enum label_id lid) 893 1512 { 894 - i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); 895 - i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); 896 - il_bnez(p, r, pte, lid); 1513 + uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); 1514 + uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); 1515 + uasm_il_bnez(p, r, pte, lid); 897 1516 iPTE_LW(p, l, pte, ptr); 898 1517 } 899 1518 ··· 901 1520 * at PTR. 902 1521 */ 903 1522 static void __init 904 - build_make_write(u32 **p, struct reloc **r, unsigned int pte, 1523 + build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, 905 1524 unsigned int ptr) 906 1525 { 907 1526 unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID ··· 915 1534 * restore PTE with value from PTR when done. 
916 1535 */ 917 1536 static void __init 918 - build_pte_modifiable(u32 **p, struct label **l, struct reloc **r, 1537 + build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 919 1538 unsigned int pte, unsigned int ptr, enum label_id lid) 920 1539 { 921 - i_andi(p, pte, pte, _PAGE_WRITE); 922 - il_beqz(p, r, pte, lid); 1540 + uasm_i_andi(p, pte, pte, _PAGE_WRITE); 1541 + uasm_il_beqz(p, r, pte, lid); 923 1542 iPTE_LW(p, l, pte, ptr); 924 1543 } 925 1544 ··· 934 1553 static void __init 935 1554 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) 936 1555 { 937 - i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 938 - i_mfc0(p, tmp, C0_EPC); /* cp0 delay */ 939 - i_tlbwi(p); 940 - i_jr(p, tmp); 941 - i_rfe(p); /* branch delay */ 1556 + uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 1557 + uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */ 1558 + uasm_i_tlbwi(p); 1559 + uasm_i_jr(p, tmp); 1560 + uasm_i_rfe(p); /* branch delay */ 942 1561 } 943 1562 944 1563 /* ··· 948 1567 * kseg2 access, i.e. without refill. Then it returns. 949 1568 */ 950 1569 static void __init 951 - build_r3000_tlb_reload_write(u32 **p, struct label **l, struct reloc **r, 952 - unsigned int pte, unsigned int tmp) 1570 + build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, 1571 + struct uasm_reloc **r, unsigned int pte, 1572 + unsigned int tmp) 953 1573 { 954 - i_mfc0(p, tmp, C0_INDEX); 955 - i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 956 - il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */ 957 - i_mfc0(p, tmp, C0_EPC); /* branch delay */ 958 - i_tlbwi(p); /* cp0 delay */ 959 - i_jr(p, tmp); 960 - i_rfe(p); /* branch delay */ 961 - l_r3000_write_probe_fail(l, *p); 962 - i_tlbwr(p); /* cp0 delay */ 963 - i_jr(p, tmp); 964 - i_rfe(p); /* branch delay */ 1574 + uasm_i_mfc0(p, tmp, C0_INDEX); 1575 + uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 1576 + uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */ 1577 + uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */ 1578 + uasm_i_tlbwi(p); /* cp0 delay */ 1579 + uasm_i_jr(p, tmp); 1580 + uasm_i_rfe(p); /* branch delay */ 1581 + uasm_l_r3000_write_probe_fail(l, *p); 1582 + uasm_i_tlbwr(p); /* cp0 delay */ 1583 + uasm_i_jr(p, tmp); 1584 + uasm_i_rfe(p); /* branch delay */ 965 1585 } 966 1586 967 1587 static void __init ··· 971 1589 { 972 1590 long pgdc = (long)pgd_current; 973 1591 974 - i_mfc0(p, pte, C0_BADVADDR); 975 - i_lui(p, ptr, rel_hi(pgdc)); /* cp0 delay */ 976 - i_lw(p, ptr, rel_lo(pgdc), ptr); 977 - i_srl(p, pte, pte, 22); /* load delay */ 978 - i_sll(p, pte, pte, 2); 979 - i_addu(p, ptr, ptr, pte); 980 - i_mfc0(p, pte, C0_CONTEXT); 981 - i_lw(p, ptr, 0, ptr); /* cp0 delay */ 982 - i_andi(p, pte, pte, 0xffc); /* load delay */ 983 - i_addu(p, ptr, ptr, pte); 984 - i_lw(p, pte, 0, ptr); 985 - i_tlbp(p); /* load delay */ 1592 + uasm_i_mfc0(p, pte, C0_BADVADDR); 1593 + uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */ 1594 + uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); 1595 + uasm_i_srl(p, pte, pte, 22); /* load delay */ 1596 + uasm_i_sll(p, pte, pte, 2); 1597 + uasm_i_addu(p, ptr, ptr, pte); 1598 + uasm_i_mfc0(p, pte, C0_CONTEXT); 1599 + uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */ 1600 + uasm_i_andi(p, pte, pte, 0xffc); /* load delay */ 1601 + uasm_i_addu(p, ptr, ptr, pte); 1602 + uasm_i_lw(p, pte, 0, ptr); 1603 + uasm_i_tlbp(p); /* load delay */ 986 1604 } 987 1605 988 1606 static void __init build_r3000_tlb_load_handler(void) 989 1607 { 990 1608 u32 *p = handle_tlbl; 991 - struct label *l = 
labels; 992 - struct reloc *r = relocs; 1609 + struct uasm_label *l = labels; 1610 + struct uasm_reloc *r = relocs; 993 1611
994 1612 memset(handle_tlbl, 0, sizeof(handle_tlbl)); 995 1613 memset(labels, 0, sizeof(labels)); ··· 997 1615
998 1616 build_r3000_tlbchange_handler_head(&p, K0, K1); 999 1617 build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl); 1000 - i_nop(&p); /* load delay */ 1618 + uasm_i_nop(&p); /* load delay */ 1001 1619 build_make_valid(&p, &r, K0, K1); 1002 1620 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1003 1621
1004 - l_nopage_tlbl(&l, p); 1005 - i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); 1006 - i_nop(&p); 1622 + uasm_l_nopage_tlbl(&l, p); 1623 + uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); 1624 + uasm_i_nop(&p); 1007 1625
1008 1626 if ((p - handle_tlbl) > FASTPATH_SIZE) 1009 1627 panic("TLB load handler fastpath space exceeded"); 1010 1628
1011 - resolve_relocs(relocs, labels); 1012 - pr_info("Synthesized TLB load handler fastpath (%u instructions).\n", 1013 - (unsigned int)(p - handle_tlbl)); 1629 + uasm_resolve_relocs(relocs, labels); 1630 + pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", 1631 + (unsigned int)(p - handle_tlbl)); 1014 1632
1015 1633 dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); 1016 1634 }
··· 1018 1636 static void __init build_r3000_tlb_store_handler(void) 1019 1637 { 1020 1638 u32 *p = handle_tlbs; 1021 - struct label *l = labels; 1022 - struct reloc *r = relocs; 1639 + struct uasm_label *l = labels; 1640 + struct uasm_reloc *r = relocs; 1023 1641
1024 1642 memset(handle_tlbs, 0, sizeof(handle_tlbs)); 1025 1643 memset(labels, 0, sizeof(labels)); ··· 1027 1645
1028 1646 build_r3000_tlbchange_handler_head(&p, K0, K1); 1029 1647 build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs); 1030 - i_nop(&p); /* load delay */ 1648 + uasm_i_nop(&p); /* load delay */ 1031 1649 build_make_write(&p, &r, K0, K1); 1032 1650 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1033 1651
1034 - l_nopage_tlbs(&l, p); 1035 - i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1036 - i_nop(&p); 1652 + uasm_l_nopage_tlbs(&l, p); 1653 + uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1654 + uasm_i_nop(&p); 1037 1655
1038 1656 if ((p - handle_tlbs) > FASTPATH_SIZE) 1039 1657 panic("TLB store handler fastpath space exceeded"); 1040 1658
1041 - resolve_relocs(relocs, labels); 1042 - pr_info("Synthesized TLB store handler fastpath (%u instructions).\n", 1043 - (unsigned int)(p - handle_tlbs)); 1659 + uasm_resolve_relocs(relocs, labels); 1660 + pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", 1661 + (unsigned int)(p - handle_tlbs)); 1044 1662
1045 1663 dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); 1046 1664 }
··· 1048 1666 static void __init build_r3000_tlb_modify_handler(void) 1049 1667 { 1050 1668 u32 *p = handle_tlbm; 1051 - struct label *l = labels; 1052 - struct reloc *r = relocs; 1669 + struct uasm_label *l = labels; 1670 + struct uasm_reloc *r = relocs; 1053 1671
1054 1672 memset(handle_tlbm, 0, sizeof(handle_tlbm)); 1055 1673 memset(labels, 0, sizeof(labels)); ··· 1057 1675
1058 1676 build_r3000_tlbchange_handler_head(&p, K0, K1); 1059 1677 build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm); 1060 - i_nop(&p); /* load delay */ 1678 + uasm_i_nop(&p); /* load delay */ 1061 1679 build_make_write(&p, &r, K0, K1); 1062 1680 build_r3000_pte_reload_tlbwi(&p, K0, K1); 1063 1681
1064 - l_nopage_tlbm(&l, p); 1065 - i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1066 - i_nop(&p); 1682 + uasm_l_nopage_tlbm(&l, p); 1683 + uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1684 + uasm_i_nop(&p); 1067 1685
1068 1686 if ((p - handle_tlbm) > FASTPATH_SIZE) 1069 1687 panic("TLB modify handler fastpath space exceeded"); 1070 1688
1071 - resolve_relocs(relocs, labels); 1072 - pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n", 1073 - (unsigned int)(p - handle_tlbm)); 1689 + uasm_resolve_relocs(relocs, labels); 1690 + pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", 1691 + (unsigned int)(p - handle_tlbm)); 1074 1692
1075 1693 dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); 1076 1694 }
··· 1079 1697 * R4000 style TLB load/store/modify handlers. 1080 1698 */ 1081 1699 static void __init 1082 - build_r4000_tlbchange_handler_head(u32 **p, struct label **l, 1083 - struct reloc **r, unsigned int pte, 1700 + build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, 1701 + struct uasm_reloc **r, unsigned int pte, 1084 1702 unsigned int ptr) 1085 1703 { 1086 1704 #ifdef CONFIG_64BIT
··· 1089 1707 build_get_pgde32(p, pte, ptr); /* get pgd in ptr */ 1090 1708 #endif
1092 - i_MFC0(p, pte, C0_BADVADDR); 1093 - i_LW(p, ptr, 0, ptr); 1094 - i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); 1095 - i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2); 1096 - i_ADDU(p, ptr, ptr, pte); 1710 + UASM_i_MFC0(p, pte, C0_BADVADDR); 1711 + UASM_i_LW(p, ptr, 0, ptr); 1712 + UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); 1713 + uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2); 1714 + UASM_i_ADDU(p, ptr, ptr, pte); 1097 1715
1098 1716 #ifdef CONFIG_SMP 1099 - l_smp_pgtable_change(l, *p); 1100 - # endif 1717 + uasm_l_smp_pgtable_change(l, *p); 1718 + #endif 1101 1719 iPTE_LW(p, l, pte, ptr); /* get even pte */ 1102 1720 if (!m4kc_tlbp_war()) 1103 1721 build_tlb_probe_entry(p); 1104 1722 }
1106 1724 static void __init 1107 - build_r4000_tlbchange_handler_tail(u32 **p, struct label **l, 1108 - struct reloc **r, unsigned int tmp, 1725 + build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, 1726 + struct uasm_reloc **r, unsigned int tmp, 1109 1727 unsigned int ptr) 1110 1728 { 1111 - i_ori(p, ptr, ptr, sizeof(pte_t)); 1112 - i_xori(p, ptr, ptr, sizeof(pte_t)); 1729 + uasm_i_ori(p, ptr, ptr, sizeof(pte_t)); 1730 + uasm_i_xori(p, ptr, ptr, sizeof(pte_t)); 1113 1731 build_update_entries(p, tmp, ptr); 1114 1732 build_tlb_write_entry(p, l, r, tlb_indexed); 1115 - l_leave(l, *p); 1116 - i_eret(p); /* return from trap */ 1733 + uasm_l_leave(l, *p); 1734 + uasm_i_eret(p); /* return from trap */ 1117 1735
1118 1736 #ifdef CONFIG_64BIT 1119 1737 build_get_pgd_vmalloc64(p, l, r, tmp, ptr); ··· 1123 1741 static void __init build_r4000_tlb_load_handler(void) 1124 1742 { 1125 1743 u32 *p = handle_tlbl; 1126 - struct label *l = labels; 1127 - struct reloc *r = relocs; 1744 + struct uasm_label *l = labels; 1745 + struct uasm_reloc *r = relocs; 1128 1746
1129 1747 memset(handle_tlbl, 0, sizeof(handle_tlbl)); 1130 1748 memset(labels, 0, sizeof(labels)); 1131 1749 memset(relocs, 0, sizeof(relocs)); 1132 1750
1133 1751 if (bcm1250_m3_war()) { 1134 - i_MFC0(&p, K0, C0_BADVADDR); 1135 - i_MFC0(&p, K1, C0_ENTRYHI); 1136 - i_xor(&p, K0, K0, K1); 1137 - i_SRL(&p, K0, K0, PAGE_SHIFT + 1); 1138 - il_bnez(&p, &r, K0, label_leave); 1139 - /* No need for i_nop */ 1752 + UASM_i_MFC0(&p, K0, C0_BADVADDR); 1753 + UASM_i_MFC0(&p, K1, C0_ENTRYHI); 1754 + uasm_i_xor(&p, K0, K0, K1); 1755 + UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); 1756 + uasm_il_bnez(&p, &r, K0, label_leave); 1757 + /* No need for uasm_i_nop */ 1140 1758 }
1141 1759
1142 1760 build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); ··· 1146 1764 build_make_valid(&p, &r, K0, K1); 1147 1765 build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); 1148 1766
1149 - l_nopage_tlbl(&l, p); 1150 - i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); 1151 - i_nop(&p); 1767 + uasm_l_nopage_tlbl(&l, p); 1768 + uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); 1769 + uasm_i_nop(&p); 1152 1770
1153 1771 if ((p - handle_tlbl) > FASTPATH_SIZE) 1154 1772 panic("TLB load handler fastpath space exceeded"); 1155 1773
1156 - resolve_relocs(relocs, labels); 1157 - pr_info("Synthesized TLB load handler fastpath (%u instructions).\n", 1158 - (unsigned int)(p - handle_tlbl)); 1774 + uasm_resolve_relocs(relocs, labels); 1775 + pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", 1776 + (unsigned int)(p - handle_tlbl)); 1159 1777
1160 1778 dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); 1161 1779 }
··· 1163 1781 static void __init build_r4000_tlb_store_handler(void) 1164 1782 { 1165 1783 u32 *p = handle_tlbs; 1166 - struct label *l = labels; 1167 - struct reloc *r = relocs; 1784 + struct uasm_label *l = labels; 1785 + struct uasm_reloc *r = relocs; 1168 1786
1169 1787 memset(handle_tlbs, 0, sizeof(handle_tlbs)); 1170 1788 memset(labels, 0, sizeof(labels)); ··· 1177 1795 build_make_write(&p, &r, K0, K1); 1178 1796 build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); 1179 1797
1180 - l_nopage_tlbs(&l, p); 1181 - i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1182 - i_nop(&p); 1798 + uasm_l_nopage_tlbs(&l, p); 1799 + uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1800 + uasm_i_nop(&p); 1183 1801
1184 1802 if ((p - handle_tlbs) > FASTPATH_SIZE) 1185 1803 panic("TLB store handler fastpath space exceeded"); 1186 1804
1187 - resolve_relocs(relocs, labels); 1188 - pr_info("Synthesized TLB store handler fastpath (%u instructions).\n", 1189 - (unsigned int)(p - handle_tlbs)); 1805 + uasm_resolve_relocs(relocs, labels); 1806 + pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", 1807 + (unsigned int)(p - handle_tlbs)); 1190 1808
1191 1809 dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); 1192 1810 }
··· 1194 1812 static void __init build_r4000_tlb_modify_handler(void) 1195 1813 { 1196 1814 u32 *p = handle_tlbm; 1197 - struct label *l = labels; 1198 - struct reloc *r = relocs; 1815 + struct uasm_label *l = labels; 1816 + struct uasm_reloc *r = relocs; 1199 1817
1200 1818 memset(handle_tlbm, 0, sizeof(handle_tlbm)); 1201 1819 memset(labels, 0, sizeof(labels)); ··· 1209 1827 build_make_write(&p, &r, K0, K1); 1210 1828 build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); 1211 1829
1212 - l_nopage_tlbm(&l, p); 1213 - i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1214 - i_nop(&p); 1830 + uasm_l_nopage_tlbm(&l, p); 1831 + uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1832 + uasm_i_nop(&p); 1215 1833
1216 1834 if ((p - handle_tlbm) > FASTPATH_SIZE) 1217 1835 panic("TLB modify handler fastpath space exceeded"); 1218 1836
1219 - resolve_relocs(relocs, labels); 1220 - pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n", 1221 - (unsigned int)(p - handle_tlbm)); 1837 + uasm_resolve_relocs(relocs, labels); 1838 + pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", 1839 + (unsigned int)(p - handle_tlbm)); 1222 1840
1223 1841 dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); 1224 1842 }
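
All of the handler builders above follow the same uasm pattern: emit a branch with a zero offset while recording a relocation against a label id, place the label once its address is known, then let uasm_resolve_relocs() patch the offsets. A minimal sketch of that flow, using names from the patch but with the buffer chosen purely for illustration:

	u32 *p = handle;			/* cursor into the instruction buffer */
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	uasm_il_bnez(&p, &r, K0, label_leave);	/* target not yet known */
	uasm_i_nop(&p);				/* branch delay slot */
	/* ... emit the fastpath body ... */
	uasm_l_leave(&l, p);			/* the label now points here */
	uasm_i_eret(&p);			/* return from trap */

	uasm_resolve_relocs(relocs, labels);	/* patch the recorded branch */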
+576
arch/mips/mm/uasm.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * A small micro-assembler. It is intentionally kept simple, does only 7 + * support a subset of instructions, and does not try to hide pipeline 8 + * effects like branch delay slots. 9 + * 10 + * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer 11 + * Copyright (C) 2005, 2007 Maciej W. Rozycki 12 + * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) 13 + */ 14 +
15 + #include <linux/kernel.h> 16 + #include <linux/types.h> 17 + #include <linux/init.h> 18 +
19 + #include <asm/inst.h> 20 + #include <asm/elf.h> 21 + #include <asm/bugs.h> 22 +
23 + #include "uasm.h" 24 +
25 + enum fields { 26 + RS = 0x001, 27 + RT = 0x002, 28 + RD = 0x004, 29 + RE = 0x008, 30 + SIMM = 0x010, 31 + UIMM = 0x020, 32 + BIMM = 0x040, 33 + JIMM = 0x080, 34 + FUNC = 0x100, 35 + SET = 0x200 36 + }; 37 +
38 + #define OP_MASK 0x3f 39 + #define OP_SH 26 40 + #define RS_MASK 0x1f 41 + #define RS_SH 21 42 + #define RT_MASK 0x1f 43 + #define RT_SH 16 44 + #define RD_MASK 0x1f 45 + #define RD_SH 11 46 + #define RE_MASK 0x1f 47 + #define RE_SH 6 48 + #define IMM_MASK 0xffff 49 + #define IMM_SH 0 50 + #define JIMM_MASK 0x3ffffff 51 + #define JIMM_SH 0 52 + #define FUNC_MASK 0x3f 53 + #define FUNC_SH 0 54 + #define SET_MASK 0x7 55 + #define SET_SH 0 56 +
57 + enum opcode { 58 + insn_invalid, 59 + insn_addu, insn_addiu, insn_and, insn_andi, insn_beq, 60 + insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, 61 + insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0, 62 + insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, 63 + insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld, 64 + insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0, 65 + insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll, 66 + insn_sra, insn_srl, insn_subu, insn_sw, insn_tlbp, insn_tlbwi, 67 + insn_tlbwr, insn_xor, insn_xori 68 + }; 69 +
70 + struct insn { 71 + enum opcode opcode; 72 + u32 match; 73 + enum fields fields; 74 + }; 75 +
76 + /* This macro sets the non-variable bits of an instruction. */ 77 + #define M(a, b, c, d, e, f) \ 78 + ((a) << OP_SH \ 79 + | (b) << RS_SH \ 80 + | (c) << RT_SH \ 81 + | (d) << RD_SH \ 82 + | (e) << RE_SH \ 83 + | (f) << FUNC_SH) 84 +
85 + static struct insn insn_table[] __initdata = { 86 + { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 87 + { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, 88 + { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD }, 89 + { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 90 + { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, 91 + { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, 92 + { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM }, 93 + { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM }, 94 + { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, 95 + { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, 96 + { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, 97 + { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 98 + { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, 99 + { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, 100 + { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, 101 + { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE }, 102 + { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE }, 103 + { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE }, 104 + { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE }, 105 + { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE }, 106 + { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD }, 107 + { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 }, 108 + { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, 109 + { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, 110 + { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, 111 + { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 112 + { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 113 + { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 114 + { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, 115 + { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 116 + { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, 117 + { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, 118 + { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 119 + { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, 120 + { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 121 + { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 122 + { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 123 + { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, 124 + { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, 125 + { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, 126 + { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, 127 + { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 128 + { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, 129 + { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, 130 + { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, 131 + { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, 132 + { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 133 + { insn_invalid, 0, 0 } 134 + }; 135 +
136 + #undef M 137 +
138 + static inline __init u32 build_rs(u32 arg) 139 + { 140 + if (arg & ~RS_MASK) 141 + printk(KERN_WARNING "Micro-assembler field overflow\n"); 142 +
143 + return (arg & RS_MASK) << RS_SH; 144 + } 145 +
146 + static inline __init u32 build_rt(u32 arg) 147 + { 148 + if (arg & ~RT_MASK) 149 + printk(KERN_WARNING "Micro-assembler field overflow\n"); 150 +
151 + return (arg & RT_MASK) << RT_SH; 152 + } 153 +
154 + static inline __init u32 build_rd(u32 arg) 155 + { 156 + if (arg & ~RD_MASK) 157 + printk(KERN_WARNING "Micro-assembler field overflow\n"); 158 +
159 + return (arg & RD_MASK) << RD_SH; 160 + } 161 +
162 + static inline __init u32 build_re(u32 arg) 163 + { 164 + if (arg & ~RE_MASK) 165 + printk(KERN_WARNING "Micro-assembler field overflow\n"); 166 +
167 + return (arg & RE_MASK) << RE_SH; 168 + } 169 +
170 + static inline __init u32 build_simm(s32 arg) 171 + { 172 + if (arg > 0x7fff || arg < -0x8000) 173 + printk(KERN_WARNING "Micro-assembler field overflow\n"); 174 +
175 + return arg & 0xffff; 176 + } 177 +
178 + static inline __init u32 build_uimm(u32 arg) 179 + { 180 + if (arg & ~IMM_MASK) 181 + printk(KERN_WARNING "Micro-assembler field overflow\n"); 182 +
183 + return arg & IMM_MASK; 184 + } 185 +
186 + static inline __init u32 build_bimm(s32 arg) 187 + { 188 + if (arg > 0x1ffff || arg < -0x20000) 189 + printk(KERN_WARNING "Micro-assembler field overflow\n"); 190 +
191 + if (arg & 0x3) 192 + printk(KERN_WARNING "Invalid micro-assembler branch target\n"); 193 +
194 + return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); 195 + } 196 +
197 + static inline __init u32 build_jimm(u32 arg) 198 + { 199 + if (arg & ~((JIMM_MASK) << 2)) 200 + printk(KERN_WARNING "Micro-assembler field overflow\n"); 201 +
202 + return (arg >> 2) & JIMM_MASK; 203 + } 204 +
205 + static inline __init u32 build_func(u32 arg) 206 + { 207 + if (arg & ~FUNC_MASK) 208 + printk(KERN_WARNING "Micro-assembler field overflow\n"); 209 +
210 + return arg & FUNC_MASK; 211 + } 212 +
213 + static inline __init u32 build_set(u32 arg) 214 + { 215 + if (arg & ~SET_MASK) 216 + printk(KERN_WARNING "Micro-assembler field overflow\n"); 217 +
218 + return arg & SET_MASK; 219 + } 220 +
221 + /* 222 + * The order of opcode arguments is implicitly left to right, 223 + * starting with RS and ending with FUNC or IMM. 224 + */ 225 + static void __init build_insn(u32 **buf, enum opcode opc, ...) 226 + { 227 + struct insn *ip = NULL; 228 + unsigned int i; 229 + va_list ap; 230 + u32 op; 231 +
232 + for (i = 0; insn_table[i].opcode != insn_invalid; i++) 233 + if (insn_table[i].opcode == opc) { 234 + ip = &insn_table[i]; 235 + break; 236 + } 237 +
238 + if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) 239 + panic("Unsupported Micro-assembler instruction %d", opc); 240 +
241 + op = ip->match; 242 + va_start(ap, opc); 243 + if (ip->fields & RS) 244 + op |= build_rs(va_arg(ap, u32)); 245 + if (ip->fields & RT) 246 + op |= build_rt(va_arg(ap, u32)); 247 + if (ip->fields & RD) 248 + op |= build_rd(va_arg(ap, u32)); 249 + if (ip->fields & RE) 250 + op |= build_re(va_arg(ap, u32)); 251 + if (ip->fields & SIMM) 252 + op |= build_simm(va_arg(ap, s32)); 253 + if (ip->fields & UIMM) 254 + op |= build_uimm(va_arg(ap, u32)); 255 + if (ip->fields & BIMM) 256 + op |= build_bimm(va_arg(ap, s32)); 257 + if (ip->fields & JIMM) 258 + op |= build_jimm(va_arg(ap, u32)); 259 + if (ip->fields & FUNC) 260 + op |= build_func(va_arg(ap, u32)); 261 + if (ip->fields & SET) 262 + op |= build_set(va_arg(ap, u32)); 263 + va_end(ap); 264 +
265 + **buf = op; 266 + (*buf)++; 267 + } 268 +
269 + #define I_u1u2u3(op) \ 270 + Ip_u1u2u3(op) \ 271 + { \ 272 + build_insn(buf, insn##op, a, b, c); \ 273 + } 274 +
275 + #define I_u2u1u3(op) \ 276 + Ip_u2u1u3(op) \ 277 + { \ 278 + build_insn(buf, insn##op, b, a, c); \ 279 + } 280 +
281 + #define I_u3u1u2(op) \ 282 + Ip_u3u1u2(op) \ 283 + { \ 284 + build_insn(buf, insn##op, b, c, a); \ 285 + } 286 +
287 + #define I_u1u2s3(op) \ 288 + Ip_u1u2s3(op) \ 289 + { \ 290 + build_insn(buf, insn##op, a, b, c); \ 291 + } 292 +
293 + #define I_u2s3u1(op) \ 294 + Ip_u2s3u1(op) \ 295 + { \ 296 + build_insn(buf, insn##op, c, a, b); \ 297 + } 298 +
299 + #define I_u2u1s3(op) \ 300 + Ip_u2u1s3(op) \ 301 + { \ 302 + build_insn(buf, insn##op, b, a, c); \ 303 + } 304 +
305 + #define I_u1u2(op) \ 306 + Ip_u1u2(op) \ 307 + { \ 308 + build_insn(buf, insn##op, a, b); \ 309 + } 310 +
311 + #define I_u1s2(op) \ 312 + Ip_u1s2(op) \ 313 + { \ 314 + build_insn(buf, insn##op, a, b); \ 315 + } 316 +
317 + #define I_u1(op) \ 318 + Ip_u1(op) \ 319 + { \ 320 + build_insn(buf, insn##op, a); \ 321 + } 322 +
323 + #define I_0(op) \ 324 + Ip_0(op) \ 325 + { \ 326 + build_insn(buf, insn##op); \ 327 + } 328 +
329 + I_u2u1s3(_addiu) 330 + I_u3u1u2(_addu) 331 + I_u2u1u3(_andi) 332 + I_u3u1u2(_and) 333 + I_u1u2s3(_beq) 334 + I_u1u2s3(_beql) 335 + I_u1s2(_bgez) 336 + I_u1s2(_bgezl) 337 + I_u1s2(_bltz) 338 + I_u1s2(_bltzl) 339 + I_u1u2s3(_bne) 340 + I_u1u2u3(_dmfc0) 341 + I_u1u2u3(_dmtc0) 342 + I_u2u1s3(_daddiu) 343 + I_u3u1u2(_daddu) 344 + I_u2u1u3(_dsll) 345 + I_u2u1u3(_dsll32) 346 + I_u2u1u3(_dsra) 347 + I_u2u1u3(_dsrl) 348 + I_u2u1u3(_dsrl32) 349 + I_u3u1u2(_dsubu) 350 + I_0(_eret) 351 + I_u1(_j) 352 + I_u1(_jal) 353 + I_u1(_jr) 354 + I_u2s3u1(_ld) 355 + I_u2s3u1(_ll) 356 + I_u2s3u1(_lld) 357 + I_u1s2(_lui) 358 + I_u2s3u1(_lw) 359 + I_u1u2u3(_mfc0) 360 + I_u1u2u3(_mtc0) 361 + I_u2u1u3(_ori) 362 + I_0(_rfe) 363 + I_u2s3u1(_sc) 364 + I_u2s3u1(_scd) 365 + I_u2s3u1(_sd) 366 + I_u2u1u3(_sll) 367 + I_u2u1u3(_sra) 368 + I_u2u1u3(_srl) 369 + I_u3u1u2(_subu) 370 + I_u2s3u1(_sw) 371 + I_0(_tlbp) 372 + I_0(_tlbwi) 373 + I_0(_tlbwr) 374 + I_u3u1u2(_xor) 375 + I_u2u1u3(_xori) 376 +
377 + /* Handle labels. */ 378 + void __init uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) 379 + { 380 + (*lab)->addr = addr; 381 + (*lab)->lab = lid; 382 + (*lab)++; 383 + } 384 +
385 + int __init uasm_in_compat_space_p(long addr) 386 + { 387 + /* Is this address in 32bit compat space? */ 388 + #ifdef CONFIG_64BIT 389 + return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L); 390 + #else 391 + return 1; 392 + #endif 393 + } 394 +
395 + int __init uasm_rel_highest(long val) 396 + { 397 + #ifdef CONFIG_64BIT 398 + return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; 399 + #else 400 + return 0; 401 + #endif 402 + } 403 +
404 + int __init uasm_rel_higher(long val) 405 + { 406 + #ifdef CONFIG_64BIT 407 + return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; 408 + #else 409 + return 0; 410 + #endif 411 + } 412 +
413 + int __init uasm_rel_hi(long val) 414 + { 415 + return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; 416 + } 417 +
418 + int __init uasm_rel_lo(long val) 419 + { 420 + return ((val & 0xffff) ^ 0x8000) - 0x8000; 421 + } 422 +
423 + void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr) 424 + { 425 + if (!uasm_in_compat_space_p(addr)) { 426 + uasm_i_lui(buf, rs, uasm_rel_highest(addr)); 427 + if (uasm_rel_higher(addr)) 428 + uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr)); 429 + if (uasm_rel_hi(addr)) { 430 + uasm_i_dsll(buf, rs, rs, 16); 431 + uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr)); 432 + uasm_i_dsll(buf, rs, rs, 16); 433 + } else 434 + uasm_i_dsll32(buf, rs, rs, 0); 435 + } else 436 + uasm_i_lui(buf, rs, uasm_rel_hi(addr)); 437 + } 438 +
439 + void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr) 440 + { 441 + UASM_i_LA_mostly(buf, rs, addr); 442 + if (uasm_rel_lo(addr)) { 443 + if (!uasm_in_compat_space_p(addr)) 444 + uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr)); 445 + else 446 + uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr)); 447 + } 448 + } 449 +
450 + /* Handle relocations. */ 451 + void __init 452 + uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid) 453 + { 454 + (*rel)->addr = addr; 455 + (*rel)->type = R_MIPS_PC16; 456 + (*rel)->lab = lid; 457 + (*rel)++; 458 + } 459 +
460 + static inline void __init 461 + __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) 462 + { 463 + long laddr = (long)lab->addr; 464 + long raddr = (long)rel->addr; 465 +
466 + switch (rel->type) { 467 + case R_MIPS_PC16: 468 + *rel->addr |= build_bimm(laddr - (raddr + 4)); 469 + break; 470 +
471 + default: 472 + panic("Unsupported Micro-assembler relocation %d", 473 + rel->type); 474 + } 475 + } 476 +
477 + void __init 478 + uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) 479 + { 480 + struct uasm_label *l; 481 +
482 + for (; rel->lab != UASM_LABEL_INVALID; rel++) 483 + for (l = lab; l->lab != UASM_LABEL_INVALID; l++) 484 + if (rel->lab == l->lab) 485 + __resolve_relocs(rel, l); 486 + } 487 +
488 + void __init 489 + uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off) 490 + { 491 + for (; rel->lab != UASM_LABEL_INVALID; rel++) 492 + if (rel->addr >= first && rel->addr < end) 493 + rel->addr += off; 494 + } 495 +
496 + void __init 497 + uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off) 498 + { 499 + for (; lab->lab != UASM_LABEL_INVALID; lab++) 500 + if (lab->addr >= first && lab->addr < end) 501 + lab->addr += off; 502 + } 503 +
504 + void __init 505 + uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, 506 + u32 *end, u32 *target) 507 + { 508 + long off = (long)(target - first); 509 +
510 + memcpy(target, first, (end - first) * sizeof(u32)); 511 +
512 + uasm_move_relocs(rel, first, end, off); 513 + uasm_move_labels(lab, first, end, off); 514 + } 515 +
516 + int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr) 517 + { 518 + for (; rel->lab != UASM_LABEL_INVALID; rel++) { 519 + if (rel->addr == addr 520 + && (rel->type == R_MIPS_PC16 521 + || rel->type == R_MIPS_26)) 522 + return 1; 523 + } 524 +
525 + return 0; 526 + } 527 +
528 + /* Convenience functions for labeled branches. */ 529 + void __init 530 + uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 531 + { 532 + uasm_r_mips_pc16(r, *p, lid); 533 + uasm_i_bltz(p, reg, 0); 534 + } 535 +
536 + void __init 537 + uasm_il_b(u32 **p, struct uasm_reloc **r, int lid) 538 + { 539 + uasm_r_mips_pc16(r, *p, lid); 540 + uasm_i_b(p, 0); 541 + } 542 +
543 + void __init 544 + uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 545 + { 546 + uasm_r_mips_pc16(r, *p, lid); 547 + uasm_i_beqz(p, reg, 0); 548 + } 549 +
550 + void __init 551 + uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 552 + { 553 + uasm_r_mips_pc16(r, *p, lid); 554 + uasm_i_beqzl(p, reg, 0); 555 + } 556 +
557 + void __init 558 + uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 559 + { 560 + uasm_r_mips_pc16(r, *p, lid); 561 + uasm_i_bnez(p, reg, 0); 562 + } 563 +
564 + void __init 565 + uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 566 + { 567 + uasm_r_mips_pc16(r, *p, lid); 568 + uasm_i_bgezl(p, reg, 0); 569 + } 570 +
571 + void __init 572 + uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 573 + { 574 + uasm_r_mips_pc16(r, *p, lid); 575 + uasm_i_bgez(p, reg, 0); 576 + }
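
A note on the paste-operator macros above (the ones checkpatch flags): the I_u1u2u3/I_u2u1u3/... variants exist because the uasm_i_* entry points take operands in assembly order, while build_insn() consumes its variadic arguments in encoding-field order (RS, RT, RD, RE, then the immediates). For example, I_u2u1s3(_addiu) expands to roughly:

void __init uasm_i_addiu(u32 **buf, unsigned int a, unsigned int b, signed int c)
{
	build_insn(buf, insn_addiu, b, a, c);	/* rs = b, rt = a, simm = c */
}

so that uasm_i_addiu(&p, rt, rs, imm) matches the "addiu rt, rs, imm" operand order of MIPS assembly.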
+192
arch/mips/mm/uasm.h
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer 7 + * Copyright (C) 2005 Maciej W. Rozycki 8 + * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) 9 + */ 10 +
11 + #include <linux/types.h> 12 +
13 + #define Ip_u1u2u3(op) \ 14 + void __init \ 15 + uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 16 +
17 + #define Ip_u2u1u3(op) \ 18 + void __init \ 19 + uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 20 +
21 + #define Ip_u3u1u2(op) \ 22 + void __init \ 23 + uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 24 +
25 + #define Ip_u1u2s3(op) \ 26 + void __init \ 27 + uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) 28 +
29 + #define Ip_u2s3u1(op) \ 30 + void __init \ 31 + uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c) 32 +
33 + #define Ip_u2u1s3(op) \ 34 + void __init \ 35 + uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) 36 +
37 + #define Ip_u1u2(op) \ 38 + void __init uasm_i##op(u32 **buf, unsigned int a, unsigned int b) 39 +
40 + #define Ip_u1s2(op) \ 41 + void __init uasm_i##op(u32 **buf, unsigned int a, signed int b) 42 +
43 + #define Ip_u1(op) void __init uasm_i##op(u32 **buf, unsigned int a) 44 +
45 + #define Ip_0(op) void __init uasm_i##op(u32 **buf) 46 +
47 + Ip_u2u1s3(_addiu); 48 + Ip_u3u1u2(_addu); 49 + Ip_u2u1u3(_andi); 50 + Ip_u3u1u2(_and); 51 + Ip_u1u2s3(_beq); 52 + Ip_u1u2s3(_beql); 53 + Ip_u1s2(_bgez); 54 + Ip_u1s2(_bgezl); 55 + Ip_u1s2(_bltz); 56 + Ip_u1s2(_bltzl); 57 + Ip_u1u2s3(_bne); 58 + Ip_u1u2u3(_dmfc0); 59 + Ip_u1u2u3(_dmtc0); 60 + Ip_u2u1s3(_daddiu); 61 + Ip_u3u1u2(_daddu); 62 + Ip_u2u1u3(_dsll); 63 + Ip_u2u1u3(_dsll32); 64 + Ip_u2u1u3(_dsra); 65 + Ip_u2u1u3(_dsrl); 66 + Ip_u2u1u3(_dsrl32); 67 + Ip_u3u1u2(_dsubu); 68 + Ip_0(_eret); 69 + Ip_u1(_j); 70 + Ip_u1(_jal); 71 + Ip_u1(_jr); 72 + Ip_u2s3u1(_ld); 73 + Ip_u2s3u1(_ll); 74 + Ip_u2s3u1(_lld); 75 + Ip_u1s2(_lui); 76 + Ip_u2s3u1(_lw); 77 + Ip_u1u2u3(_mfc0); 78 + Ip_u1u2u3(_mtc0); 79 + Ip_u2u1u3(_ori); 80 + Ip_0(_rfe); 81 + Ip_u2s3u1(_sc); 82 + Ip_u2s3u1(_scd); 83 + Ip_u2s3u1(_sd); 84 + Ip_u2u1u3(_sll); 85 + Ip_u2u1u3(_sra); 86 + Ip_u2u1u3(_srl); 87 + Ip_u3u1u2(_subu); 88 + Ip_u2s3u1(_sw); 89 + Ip_0(_tlbp); 90 + Ip_0(_tlbwi); 91 + Ip_0(_tlbwr); 92 + Ip_u3u1u2(_xor); 93 + Ip_u2u1u3(_xori); 94 +
95 + /* Handle labels. */ 96 + struct uasm_label { 97 + u32 *addr; 98 + int lab; 99 + }; 100 +
101 + void __init uasm_build_label(struct uasm_label **lab, u32 *addr, int lid); 102 + #ifdef CONFIG_64BIT 103 + int __init uasm_in_compat_space_p(long addr); 104 + int __init uasm_rel_highest(long val); 105 + int __init uasm_rel_higher(long val); 106 + #endif 107 + int __init uasm_rel_hi(long val); 108 + int __init uasm_rel_lo(long val); 109 + void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr); 110 + void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr); 111 +
112 + #define UASM_L_LA(lb) \ 113 + static inline void __init uasm_l##lb(struct uasm_label **lab, u32 *addr) \ 114 + { \ 115 + uasm_build_label(lab, addr, label##lb); \ 116 + } 117 +
118 + /* convenience macros for instructions */ 119 + #ifdef CONFIG_64BIT 120 + # define UASM_i_LW(buf, rs, rt, off) uasm_i_ld(buf, rs, rt, off) 121 + # define UASM_i_SW(buf, rs, rt, off) uasm_i_sd(buf, rs, rt, off) 122 + # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh) 123 + # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh) 124 + # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh) 125 + # define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd) 126 + # define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd) 127 + # define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_daddiu(buf, rs, rt, val) 128 + # define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_daddu(buf, rs, rt, rd) 129 + # define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_dsubu(buf, rs, rt, rd) 130 + # define UASM_i_LL(buf, rs, rt, off) uasm_i_lld(buf, rs, rt, off) 131 + # define UASM_i_SC(buf, rs, rt, off) uasm_i_scd(buf, rs, rt, off) 132 + #else 133 + # define UASM_i_LW(buf, rs, rt, off) uasm_i_lw(buf, rs, rt, off) 134 + # define UASM_i_SW(buf, rs, rt, off) uasm_i_sw(buf, rs, rt, off) 135 + # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh) 136 + # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh) 137 + # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh) 138 + # define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd) 139 + # define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd) 140 + # define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_addiu(buf, rs, rt, val) 141 + # define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_addu(buf, rs, rt, rd) 142 + # define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_subu(buf, rs, rt, rd) 143 + # define UASM_i_LL(buf, rs, rt, off) uasm_i_ll(buf, rs, rt, off) 144 + # define UASM_i_SC(buf, rs, rt, off) uasm_i_sc(buf, rs, rt, off) 145 + #endif 146 +
147 + #define uasm_i_b(buf, off) uasm_i_beq(buf, 0, 0, off) 148 + #define uasm_i_beqz(buf, rs, off) uasm_i_beq(buf, rs, 0, off) 149 + #define uasm_i_beqzl(buf, rs, off) uasm_i_beql(buf, rs, 0, off) 150 + #define uasm_i_bnez(buf, rs, off) uasm_i_bne(buf, rs, 0, off) 151 + #define uasm_i_bnezl(buf, rs, off) uasm_i_bnel(buf, rs, 0, off) 152 + #define uasm_i_move(buf, a, b) UASM_i_ADDU(buf, a, 0, b) 153 + #define uasm_i_nop(buf) uasm_i_sll(buf, 0, 0, 0) 154 + #define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1) 155 + #define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3) 156 +
157 + /* Handle relocations. */ 158 + struct uasm_reloc { 159 + u32 *addr; 160 + unsigned int type; 161 + int lab; 162 + }; 163 +
164 + /* This is zero so we can use zeroed label arrays. */ 165 + #define UASM_LABEL_INVALID 0 166 +
167 + void __init uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid); 168 + void __init 169 + uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab); 170 + void __init 171 + uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off); 172 + void __init 173 + uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off); 174 + void __init 175 + uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, 176 + u32 *end, u32 *target); 177 + int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr); 178 +
179 + /* Convenience functions for labeled branches. */ 180 + void __init
181 + uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); 182 + void __init uasm_il_b(u32 **p, struct uasm_reloc **r, int lid); 183 + void __init
184 + uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); 185 + void __init
186 + uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); 187 + void __init
188 + uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); 189 + void __init
190 + uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); 191 + void __init
192 + uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
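
Putting the pieces of the interface together: a client allocates an instruction buffer plus zeroed label and relocation arrays (UASM_LABEL_INVALID is 0, so memset() suffices to invalidate them), declares its label ids starting from 1, emits code, and resolves relocations once emission is done. A minimal, hypothetical example; the buffer, array sizes, register numbers and the label_exit id are made up for illustration and are not part of the patch:

#include <linux/string.h>

#include "uasm.h"

enum { label_exit = 1 };		/* ids must be non-zero */
UASM_L_LA(_exit)			/* defines uasm_l_exit() */

static u32 buf[32] __initdata;
static struct uasm_label labels[8] __initdata;
static struct uasm_reloc relocs[8] __initdata;

static void __init build_example(void)
{
	u32 *p = buf;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));	/* mark all entries invalid */
	memset(relocs, 0, sizeof(relocs));

	uasm_il_beqz(&p, &r, 2, label_exit);	/* if $v0 == 0, skip the decrement */
	uasm_i_nop(&p);				/* branch delay slot */
	UASM_i_ADDIU(&p, 2, 2, -1);		/* $v0--, width-correct on 32/64-bit */
	uasm_l_exit(&l, p);			/* place the label */
	uasm_i_jr(&p, 31);			/* return through $ra */
	uasm_i_nop(&p);				/* jump delay slot */

	uasm_resolve_relocs(relocs, labels);	/* patch the beqz offset */
}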