Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

EDAC, MCE: Overhaul error fields extraction macros

Make macro names shorter, thus making the code shorter and clearer.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>

+43 -54
+2 -2
drivers/edac/amd64_edac.c
··· 2055 2055 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, 2056 2056 struct err_regs *info) 2057 2057 { 2058 - u32 ec = ERROR_CODE(info->nbsl); 2059 - u32 xec = EXT_ERROR_CODE(info->nbsl); 2058 + u16 ec = EC(info->nbsl); 2059 + u8 xec = XEC(info->nbsl, 0x1f); 2060 2060 int ecc_type = (info->nbsh >> 13) & 0x3; 2061 2061 2062 2062 /* Bail early out if this was an 'observed' error */
+36 -47
drivers/edac/mce_amd.c
··· 133 133 bool ret = false; 134 134 135 135 if (MEM_ERROR(ec)) { 136 - u8 ll = ec & 0x3; 136 + u8 ll = LL(ec); 137 137 ret = true; 138 138 139 139 if (ll == LL_L2) 140 140 pr_cont("during L1 linefill from L2.\n"); 141 141 else if (ll == LL_L1) 142 - pr_cont("Data/Tag %s error.\n", RRRR_MSG(ec)); 142 + pr_cont("Data/Tag %s error.\n", R4_MSG(ec)); 143 143 else 144 144 ret = false; 145 145 } ··· 148 148 149 149 static bool f10h_dc_mce(u16 ec, u8 xec) 150 150 { 151 - u8 r4 = (ec >> 4) & 0xf; 152 - u8 ll = ec & 0x3; 153 - 154 - if (r4 == R4_GEN && ll == LL_L1) { 151 + if (R4(ec) == R4_GEN && LL(ec) == LL_L1) { 155 152 pr_cont("during data scrub.\n"); 156 153 return true; 157 154 } ··· 167 170 168 171 static bool f14h_dc_mce(u16 ec, u8 xec) 169 172 { 170 - u8 r4 = (ec >> 4) & 0xf; 171 - u8 ll = ec & 0x3; 172 - u8 tt = (ec >> 2) & 0x3; 173 - u8 ii = tt; 173 + u8 r4 = R4(ec); 174 174 bool ret = true; 175 175 176 176 if (MEM_ERROR(ec)) { 177 177 178 - if (tt != TT_DATA || ll != LL_L1) 178 + if (TT(ec) != TT_DATA || LL(ec) != LL_L1) 179 179 return false; 180 180 181 181 switch (r4) { ··· 192 198 } 193 199 } else if (BUS_ERROR(ec)) { 194 200 195 - if ((ii != II_MEM && ii != II_IO) || ll != LL_LG) 201 + if ((II(ec) != II_MEM && II(ec) != II_IO) || LL(ec) != LL_LG) 196 202 return false; 197 203 198 204 pr_cont("System read data error on a "); ··· 267 273 268 274 static void amd_decode_dc_mce(struct mce *m) 269 275 { 270 - u16 ec = m->status & 0xffff; 271 - u8 xec = (m->status >> 16) & xec_mask; 276 + u16 ec = EC(m->status); 277 + u8 xec = XEC(m->status, xec_mask); 272 278 273 279 pr_emerg(HW_ERR "Data Cache Error: "); 274 280 275 281 /* TLB error signatures are the same across families */ 276 282 if (TLB_ERROR(ec)) { 277 - u8 tt = (ec >> 2) & 0x3; 278 - 279 - if (tt == TT_DATA) { 283 + if (TT(ec) == TT_DATA) { 280 284 pr_cont("%s TLB %s.\n", LL_MSG(ec), 281 285 ((xec == 2) ? "locked miss" 282 286 : (xec ? 
"multimatch" : "parity"))); ··· 288 296 289 297 static bool k8_ic_mce(u16 ec, u8 xec) 290 298 { 291 - u8 ll = ec & 0x3; 292 - u8 r4 = (ec >> 4) & 0xf; 299 + u8 ll = LL(ec); 293 300 bool ret = true; 294 301 295 302 if (!MEM_ERROR(ec)) ··· 297 306 if (ll == 0x2) 298 307 pr_cont("during a linefill from L2.\n"); 299 308 else if (ll == 0x1) { 300 - switch (r4) { 309 + switch (R4(ec)) { 301 310 case R4_IRD: 302 311 pr_cont("Parity error during data load.\n"); 303 312 break; ··· 322 331 323 332 static bool f14h_ic_mce(u16 ec, u8 xec) 324 333 { 325 - u8 ll = ec & 0x3; 326 - u8 tt = (ec >> 2) & 0x3; 327 - u8 r4 = (ec >> 4) & 0xf; 334 + u8 r4 = R4(ec); 328 335 bool ret = true; 329 336 330 337 if (MEM_ERROR(ec)) { 331 - if (tt != 0 || ll != 1) 338 + if (TT(ec) != 0 || LL(ec) != 1) 332 339 ret = false; 333 340 334 341 if (r4 == R4_IRD) ··· 367 378 368 379 static void amd_decode_ic_mce(struct mce *m) 369 380 { 370 - u16 ec = m->status & 0xffff; 371 - u8 xec = (m->status >> 16) & xec_mask; 381 + u16 ec = EC(m->status); 382 + u8 xec = XEC(m->status, xec_mask); 372 383 373 384 pr_emerg(HW_ERR "Instruction Cache Error: "); 374 385 ··· 387 398 388 399 static void amd_decode_bu_mce(struct mce *m) 389 400 { 390 - u32 ec = m->status & 0xffff; 391 - u32 xec = (m->status >> 16) & xec_mask; 401 + u16 ec = EC(m->status); 402 + u8 xec = XEC(m->status, xec_mask); 392 403 393 404 pr_emerg(HW_ERR "Bus Unit Error"); 394 405 ··· 397 408 else if (xec == 0x3) 398 409 pr_cont(" in the victim data buffers.\n"); 399 410 else if (xec == 0x2 && MEM_ERROR(ec)) 400 - pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec)); 411 + pr_cont(": %s error in the L2 cache tags.\n", R4_MSG(ec)); 401 412 else if (xec == 0x0) { 402 413 if (TLB_ERROR(ec)) 403 414 pr_cont(": %s error in a Page Descriptor Cache or " 404 415 "Guest TLB.\n", TT_MSG(ec)); 405 416 else if (BUS_ERROR(ec)) 406 417 pr_cont(": %s/ECC error in data read from NB: %s.\n", 407 - RRRR_MSG(ec), PP_MSG(ec)); 418 + R4_MSG(ec), PP_MSG(ec)); 408 
419 else if (MEM_ERROR(ec)) { 409 - u8 rrrr = (ec >> 4) & 0xf; 420 + u8 r4 = R4(ec); 410 421 411 - if (rrrr >= 0x7) 422 + if (r4 >= 0x7) 412 423 pr_cont(": %s error during data copyback.\n", 413 - RRRR_MSG(ec)); 414 - else if (rrrr <= 0x1) 424 + R4_MSG(ec)); 425 + else if (r4 <= 0x1) 415 426 pr_cont(": %s parity/ECC error during data " 416 - "access from L2.\n", RRRR_MSG(ec)); 427 + "access from L2.\n", R4_MSG(ec)); 417 428 else 418 429 goto wrong_bu_mce; 419 430 } else ··· 429 440 430 441 static void amd_decode_cu_mce(struct mce *m) 431 442 { 432 - u16 ec = m->status & 0xffff; 433 - u8 xec = (m->status >> 16) & xec_mask; 443 + u16 ec = EC(m->status); 444 + u8 xec = XEC(m->status, xec_mask); 434 445 435 446 pr_emerg(HW_ERR "Combined Unit Error: "); 436 447 ··· 469 480 470 481 static void amd_decode_ls_mce(struct mce *m) 471 482 { 472 - u16 ec = m->status & 0xffff; 473 - u8 xec = (m->status >> 16) & xec_mask; 483 + u16 ec = EC(m->status); 484 + u8 xec = XEC(m->status, xec_mask); 474 485 475 486 if (boot_cpu_data.x86 >= 0x14) { 476 487 pr_emerg("You shouldn't be seeing an LS MCE on this cpu family," ··· 481 492 pr_emerg(HW_ERR "Load Store Error"); 482 493 483 494 if (xec == 0x0) { 484 - u8 r4 = (ec >> 4) & 0xf; 495 + u8 r4 = R4(ec); 485 496 486 497 if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR)) 487 498 goto wrong_ls_mce; 488 499 489 - pr_cont(" during %s.\n", RRRR_MSG(ec)); 500 + pr_cont(" during %s.\n", R4_MSG(ec)); 490 501 } else 491 502 goto wrong_ls_mce; 492 503 ··· 594 605 595 606 void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg) 596 607 { 597 - u8 xec = (m->status >> 16) & 0x1f; 598 - u16 ec = m->status & 0xffff; 608 + u16 ec = EC(m->status); 609 + u8 xec = XEC(m->status, 0x1f); 599 610 u32 nbsh = (u32)(m->status >> 32); 600 611 601 612 pr_emerg(HW_ERR "Northbridge Error, node %d: ", node_id); ··· 657 668 static void amd_decode_fr_mce(struct mce *m) 658 669 { 659 670 struct cpuinfo_x86 *c = &boot_cpu_data; 660 - u8 xec = (m->status >> 16) & 
xec_mask; 671 + u8 xec = XEC(m->status, xec_mask); 661 672 662 673 if (c->x86 == 0xf || c->x86 == 0x11) 663 674 goto wrong_fr_mce; ··· 683 694 684 695 static void amd_decode_fp_mce(struct mce *m) 685 696 { 686 - u8 xec = (m->status >> 16) & xec_mask; 697 + u8 xec = XEC(m->status, xec_mask); 687 698 688 699 pr_emerg(HW_ERR "Floating Point Unit Error: "); 689 700 ··· 728 739 TT_MSG(ec), LL_MSG(ec)); 729 740 } else if (MEM_ERROR(ec)) { 730 741 pr_emerg(HW_ERR "Transaction: %s, Type: %s, Cache Level: %s\n", 731 - RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec)); 742 + R4_MSG(ec), TT_MSG(ec), LL_MSG(ec)); 732 743 } else if (BUS_ERROR(ec)) { 733 744 pr_emerg(HW_ERR "Transaction: %s (%s), %s, Cache Level: %s, " 734 745 "Participating Processor: %s\n", 735 - RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec), 746 + R4_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec), 736 747 PP_MSG(ec)); 737 748 } else 738 749 pr_emerg(HW_ERR "Huh? Unknown MCE error 0x%x\n", ec);
+5 -5
drivers/edac/mce_amd.h
··· 7 7 8 8 #define BIT_64(n) (U64_C(1) << (n)) 9 9 10 - #define ERROR_CODE(x) ((x) & 0xffff) 11 - #define EXT_ERROR_CODE(x) (((x) >> 16) & 0x1f) 10 + #define EC(x) ((x) & 0xffff) 11 + #define XEC(x, mask) (((x) >> 16) & mask) 12 12 13 13 #define LOW_SYNDROME(x) (((x) >> 15) & 0xff) 14 14 #define HIGH_SYNDROME(x) (((x) >> 24) & 0xff) ··· 21 21 #define TT_MSG(x) tt_msgs[TT(x)] 22 22 #define II(x) (((x) >> 2) & 0x3) 23 23 #define II_MSG(x) ii_msgs[II(x)] 24 - #define LL(x) (((x) >> 0) & 0x3) 24 + #define LL(x) ((x) & 0x3) 25 25 #define LL_MSG(x) ll_msgs[LL(x)] 26 26 #define TO(x) (((x) >> 8) & 0x1) 27 27 #define TO_MSG(x) to_msgs[TO(x)] 28 28 #define PP(x) (((x) >> 9) & 0x3) 29 29 #define PP_MSG(x) pp_msgs[PP(x)] 30 30 31 - #define RRRR(x) (((x) >> 4) & 0xf) 32 - #define RRRR_MSG(x) ((RRRR(x) < 9) ? rrrr_msgs[RRRR(x)] : "Wrong R4!") 31 + #define R4(x) (((x) >> 4) & 0xf) 32 + #define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!") 33 33 34 34 #define K8_NBSH 0x4C 35 35