Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] bfa: kdump fix on 815 and 825 adapters

Root cause: When the kernel crashes on Brocade 815/825 adapters, the
bfa IOC state machine and FW do not get a notification and
hence are not cleanly shut down. So registers holding driver/IOC
state information are not reset back to valid disabled/parking
values. This causes subsequent driver initialization to fail
during kdump kernel boot.

Fix description: During the initialization of the first PCI function, reset
the corresponding register when an unclean shutdown is detected by reading chip
registers. This makes sure that the ioc/fw gets a clean re-initialization.

Signed-off-by: Vijaya Mohan Guvva <vmohan@brocade.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>

authored by

Vijaya Mohan Guvva and committed by
James Bottomley
c679b599 f2a0cc3f

+150 -24
+25 -17
drivers/scsi/bfa/bfa_ioc.c
··· 67 67 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) 68 68 #define bfa_ioc_sync_complete(__ioc) \ 69 69 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) 70 + #define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \ 71 + ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate)) 72 + #define bfa_ioc_get_cur_ioc_fwstate(__ioc) \ 73 + ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc)) 74 + #define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \ 75 + ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate)) 76 + #define bfa_ioc_get_alt_ioc_fwstate(__ioc) \ 77 + ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc)) 70 78 71 79 #define bfa_ioc_mbox_cmd_pending(__ioc) \ 72 80 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ ··· 706 698 } 707 699 708 700 /* h/w sem init */ 709 - fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate); 701 + fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc); 710 702 if (fwstate == BFI_IOC_UNINIT) { 711 703 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); 712 704 goto sem_get; ··· 733 725 734 726 bfa_trc(iocpf->ioc, fwstate); 735 727 bfa_trc(iocpf->ioc, swab32(fwhdr.exec)); 736 - writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate); 737 - writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate); 728 + bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT); 729 + bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT); 738 730 739 731 /* 740 732 * Unlock the hw semaphore. Should be here only once per boot. 
··· 1045 1037 */ 1046 1038 1047 1039 case IOCPF_E_TIMEOUT: 1048 - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 1040 + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); 1049 1041 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 1050 1042 break; 1051 1043 ··· 1146 1138 case IOCPF_E_SEMLOCKED: 1147 1139 bfa_ioc_notify_fail(ioc); 1148 1140 bfa_ioc_sync_leave(ioc); 1149 - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 1141 + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); 1150 1142 writel(1, ioc->ioc_regs.ioc_sem_reg); 1151 1143 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); 1152 1144 break; ··· 1235 1227 bfa_ioc_notify_fail(ioc); 1236 1228 if (!iocpf->auto_recover) { 1237 1229 bfa_ioc_sync_leave(ioc); 1238 - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 1230 + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); 1239 1231 writel(1, ioc->ioc_regs.ioc_sem_reg); 1240 1232 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); 1241 1233 } else { ··· 1527 1519 u32 boot_type; 1528 1520 u32 boot_env; 1529 1521 1530 - ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); 1522 + ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); 1531 1523 1532 1524 if (force) 1533 1525 ioc_fwstate = BFI_IOC_UNINIT; ··· 2014 2006 * Initialize IOC state of all functions on a chip reset. 
2015 2007 */ 2016 2008 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { 2017 - writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate); 2018 - writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate); 2009 + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST); 2010 + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST); 2019 2011 } else { 2020 - writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate); 2021 - writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate); 2012 + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING); 2013 + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING); 2022 2014 } 2023 2015 2024 2016 bfa_ioc_msgflush(ioc); ··· 2046 2038 bfa_boolean_t 2047 2039 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) 2048 2040 { 2049 - u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); 2041 + u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc); 2050 2042 2051 2043 return ((r32 != BFI_IOC_UNINIT) && 2052 2044 (r32 != BFI_IOC_INITING) && ··· 2438 2430 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) 2439 2431 return BFA_FALSE; 2440 2432 2441 - ioc_state = readl(ioc->ioc_regs.ioc_fwstate); 2433 + ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc); 2442 2434 if (!bfa_ioc_state_disabled(ioc_state)) 2443 2435 return BFA_FALSE; 2444 2436 2445 2437 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { 2446 - ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate); 2438 + ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc); 2447 2439 if (!bfa_ioc_state_disabled(ioc_state)) 2448 2440 return BFA_FALSE; 2449 2441 } ··· 2457 2449 void 2458 2450 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc) 2459 2451 { 2460 - writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); 2461 - writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); 2452 + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT); 2453 + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT); 2462 2454 } 2463 2455 2464 2456 #define BFA_MFG_NAME "Brocade" ··· 2925 2917 static void 2926 2918 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc) 2927 2919 { 2928 - u32 fwstate = 
readl(ioc->ioc_regs.ioc_fwstate); 2920 + u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); 2929 2921 2930 2922 bfa_trc(ioc, fwstate); 2931 2923
+6
drivers/scsi/bfa/bfa_ioc.h
··· 346 346 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc); 347 347 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc); 348 348 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc); 349 + void (*ioc_set_fwstate) (struct bfa_ioc_s *ioc, 350 + enum bfi_ioc_state fwstate); 351 + enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc_s *ioc); 352 + void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc, 353 + enum bfi_ioc_state fwstate); 354 + enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc); 349 355 }; 350 356 351 357 /*
+79 -7
drivers/scsi/bfa/bfa_ioc_cb.c
··· 22 22 23 23 BFA_TRC_FILE(CNA, IOC_CB); 24 24 25 + #define bfa_ioc_cb_join_pos(__ioc) ((u32) (1 << BFA_IOC_CB_JOIN_SH)) 26 + 25 27 /* 26 28 * forward declarations 27 29 */ ··· 39 37 static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc); 40 38 static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc); 41 39 static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc); 40 + static void bfa_ioc_cb_set_cur_ioc_fwstate( 41 + struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); 42 + static enum bfi_ioc_state bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc); 43 + static void bfa_ioc_cb_set_alt_ioc_fwstate( 44 + struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); 45 + static enum bfi_ioc_state bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc); 42 46 43 47 static struct bfa_ioc_hwif_s hwif_cb; 44 48 ··· 67 59 hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave; 68 60 hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack; 69 61 hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete; 62 + hwif_cb.ioc_set_fwstate = bfa_ioc_cb_set_cur_ioc_fwstate; 63 + hwif_cb.ioc_get_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate; 64 + hwif_cb.ioc_set_alt_fwstate = bfa_ioc_cb_set_alt_ioc_fwstate; 65 + hwif_cb.ioc_get_alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate; 70 66 71 67 ioc->ioc_hwif = &hwif_cb; 72 68 } ··· 199 187 static bfa_boolean_t 200 188 bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc) 201 189 { 190 + u32 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); 191 + 192 + /** 193 + * Driver load time. If the join bit is set, 194 + * it is due to an unclean exit by the driver for this 195 + * PCI fn in the previous incarnation. Whoever comes here first 196 + * should clean it up, no matter which PCI fn. 
197 + */ 198 + if (ioc_fwstate & BFA_IOC_CB_JOIN_MASK) { 199 + writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); 200 + writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); 201 + return BFA_TRUE; 202 + } 203 + 202 204 return bfa_ioc_cb_sync_complete(ioc); 203 205 } 204 206 ··· 238 212 static void 239 213 bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc) 240 214 { 215 + u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); 216 + u32 join_pos = bfa_ioc_cb_join_pos(ioc); 217 + 218 + writel((r32 | join_pos), ioc->ioc_regs.ioc_fwstate); 241 219 } 242 220 243 221 static void 244 222 bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc) 245 223 { 224 + u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); 225 + u32 join_pos = bfa_ioc_cb_join_pos(ioc); 226 + 227 + writel((r32 & ~join_pos), ioc->ioc_regs.ioc_fwstate); 228 + } 229 + 230 + static void 231 + bfa_ioc_cb_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc, 232 + enum bfi_ioc_state fwstate) 233 + { 234 + u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); 235 + 236 + writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)), 237 + ioc->ioc_regs.ioc_fwstate); 238 + } 239 + 240 + static enum bfi_ioc_state 241 + bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc) 242 + { 243 + return (enum bfi_ioc_state)(readl(ioc->ioc_regs.ioc_fwstate) & 244 + BFA_IOC_CB_FWSTATE_MASK); 245 + } 246 + 247 + static void 248 + bfa_ioc_cb_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc, 249 + enum bfi_ioc_state fwstate) 250 + { 251 + u32 r32 = readl(ioc->ioc_regs.alt_ioc_fwstate); 252 + 253 + writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)), 254 + ioc->ioc_regs.alt_ioc_fwstate); 255 + } 256 + 257 + static enum bfi_ioc_state 258 + bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc) 259 + { 260 + return (enum bfi_ioc_state)(readl(ioc->ioc_regs.alt_ioc_fwstate) & 261 + BFA_IOC_CB_FWSTATE_MASK); 246 262 } 247 263 248 264 static void 249 265 bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc) 250 266 { 251 - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 267 + bfa_ioc_cb_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); 
252 268 } 253 269 254 270 static bfa_boolean_t 255 271 bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc) 256 272 { 257 - uint32_t fwstate, alt_fwstate; 258 - fwstate = readl(ioc->ioc_regs.ioc_fwstate); 273 + u32 fwstate, alt_fwstate; 274 + fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc); 259 275 260 276 /* 261 277 * At this point, this IOC is hoding the hw sem in the ··· 325 257 fwstate == BFI_IOC_OP) 326 258 return BFA_TRUE; 327 259 else { 328 - alt_fwstate = readl(ioc->ioc_regs.alt_ioc_fwstate); 260 + alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc); 329 261 if (alt_fwstate == BFI_IOC_FAIL || 330 262 alt_fwstate == BFI_IOC_DISABLED || 331 263 alt_fwstate == BFI_IOC_UNINIT || ··· 340 272 bfa_status_t 341 273 bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode) 342 274 { 343 - u32 pll_sclk, pll_fclk; 275 + u32 pll_sclk, pll_fclk, join_bits; 344 276 345 277 pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN | 346 278 __APP_PLL_SCLK_P0_1(3U) | ··· 350 282 __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) | 351 283 __APP_PLL_LCLK_JITLMT0_1(3U) | 352 284 __APP_PLL_LCLK_CNTLMT0_1(3U); 353 - writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); 354 - writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); 285 + join_bits = readl(rb + BFA_IOC0_STATE_REG) & 286 + BFA_IOC_CB_JOIN_MASK; 287 + writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC0_STATE_REG)); 288 + join_bits = readl(rb + BFA_IOC1_STATE_REG) & 289 + BFA_IOC_CB_JOIN_MASK; 290 + writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC1_STATE_REG)); 355 291 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); 356 292 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); 357 293 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+36
drivers/scsi/bfa/bfa_ioc_ct.c
··· 43 43 static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc); 44 44 static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc); 45 45 static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc); 46 + static void bfa_ioc_ct_set_cur_ioc_fwstate( 47 + struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); 48 + static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc); 49 + static void bfa_ioc_ct_set_alt_ioc_fwstate( 50 + struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); 51 + static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc); 46 52 47 53 static struct bfa_ioc_hwif_s hwif_ct; 48 54 static struct bfa_ioc_hwif_s hwif_ct2; ··· 518 512 hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave; 519 513 hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack; 520 514 hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete; 515 + hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate; 516 + hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate; 517 + hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate; 518 + hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate; 521 519 } 522 520 523 521 /** ··· 968 958 writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); 969 959 970 960 return BFA_STATUS_OK; 961 + } 962 + 963 + static void 964 + bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc, 965 + enum bfi_ioc_state fwstate) 966 + { 967 + writel(fwstate, ioc->ioc_regs.ioc_fwstate); 968 + } 969 + 970 + static enum bfi_ioc_state 971 + bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc) 972 + { 973 + return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate); 974 + } 975 + 976 + static void 977 + bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc, 978 + enum bfi_ioc_state fwstate) 979 + { 980 + writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate); 981 + } 982 + 983 + static enum bfi_ioc_state 984 + bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc) 985 + { 986 + return (enum bfi_ioc_state) readl(ioc->ioc_regs.alt_ioc_fwstate); 971 987 }
+4
drivers/scsi/bfa/bfi.h
··· 374 374 BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */ 375 375 }; 376 376 377 + #define BFA_IOC_CB_JOIN_SH 16 378 + #define BFA_IOC_CB_FWSTATE_MASK 0x0000ffff 379 + #define BFA_IOC_CB_JOIN_MASK 0xffff0000 380 + 377 381 #define BFI_IOC_ENDIAN_SIG 0x12345678 378 382 379 383 enum {