Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bna: IOC failure auto recovery fix

Change Details:
- Made IOC auto_recovery synchronized and not timer based.
- Only one PCI function will attempt to recover and reinitialize
the ASIC on a failure, and only after all of the active PCI
functions have acknowledged the IOC failure.

Signed-off-by: Debashis Dutt <ddutt@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by: Rasesh Mody
Committed by: David S. Miller
1d32f769 aad75b66

+1159 -448
+12 -10
drivers/net/bna/bfa_defs.h
··· 112 112 * IOC states 113 113 */ 114 114 enum bfa_ioc_state { 115 - BFA_IOC_RESET = 1, /*!< IOC is in reset state */ 116 - BFA_IOC_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ 117 - BFA_IOC_HWINIT = 3, /*!< IOC h/w is being initialized */ 118 - BFA_IOC_GETATTR = 4, /*!< IOC is being configured */ 119 - BFA_IOC_OPERATIONAL = 5, /*!< IOC is operational */ 120 - BFA_IOC_INITFAIL = 6, /*!< IOC hardware failure */ 121 - BFA_IOC_HBFAIL = 7, /*!< IOC heart-beat failure */ 122 - BFA_IOC_DISABLING = 8, /*!< IOC is being disabled */ 123 - BFA_IOC_DISABLED = 9, /*!< IOC is disabled */ 124 - BFA_IOC_FWMISMATCH = 10, /*!< IOC f/w different from drivers */ 115 + BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */ 116 + BFA_IOC_RESET = 2, /*!< IOC is in reset state */ 117 + BFA_IOC_SEMWAIT = 3, /*!< Waiting for IOC h/w semaphore */ 118 + BFA_IOC_HWINIT = 4, /*!< IOC h/w is being initialized */ 119 + BFA_IOC_GETATTR = 5, /*!< IOC is being configured */ 120 + BFA_IOC_OPERATIONAL = 6, /*!< IOC is operational */ 121 + BFA_IOC_INITFAIL = 7, /*!< IOC hardware failure */ 122 + BFA_IOC_FAIL = 8, /*!< IOC heart-beat failure */ 123 + BFA_IOC_DISABLING = 9, /*!< IOC is being disabled */ 124 + BFA_IOC_DISABLED = 10, /*!< IOC is disabled */ 125 + BFA_IOC_FWMISMATCH = 11, /*!< IOC f/w different from drivers */ 126 + BFA_IOC_ENABLING = 12, /*!< IOC is being enabled */ 125 127 }; 126 128 127 129 /**
+986 -384
drivers/net/bna/bfa_ioc.c
··· 26 26 * IOC local definitions 27 27 */ 28 28 29 - #define bfa_ioc_timer_start(__ioc) \ 30 - mod_timer(&(__ioc)->ioc_timer, jiffies + \ 31 - msecs_to_jiffies(BFA_IOC_TOV)) 32 - #define bfa_ioc_timer_stop(__ioc) del_timer(&(__ioc)->ioc_timer) 33 - 34 - #define bfa_ioc_recovery_timer_start(__ioc) \ 35 - mod_timer(&(__ioc)->ioc_timer, jiffies + \ 36 - msecs_to_jiffies(BFA_IOC_TOV_RECOVER)) 37 - 38 - #define bfa_sem_timer_start(__ioc) \ 39 - mod_timer(&(__ioc)->sem_timer, jiffies + \ 40 - msecs_to_jiffies(BFA_IOC_HWSEM_TOV)) 41 - #define bfa_sem_timer_stop(__ioc) del_timer(&(__ioc)->sem_timer) 42 - 43 - #define bfa_hb_timer_start(__ioc) \ 44 - mod_timer(&(__ioc)->hb_timer, jiffies + \ 45 - msecs_to_jiffies(BFA_IOC_HB_TOV)) 46 - #define bfa_hb_timer_stop(__ioc) del_timer(&(__ioc)->hb_timer) 47 - 48 29 /** 49 30 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 50 31 */ ··· 36 55 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) 37 56 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) 38 57 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 39 - #define bfa_ioc_notify_hbfail(__ioc) \ 40 - ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) 58 + #define bfa_ioc_notify_fail(__ioc) \ 59 + ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) 60 + #define bfa_ioc_sync_join(__ioc) \ 61 + ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) 62 + #define bfa_ioc_sync_leave(__ioc) \ 63 + ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc)) 64 + #define bfa_ioc_sync_ack(__ioc) \ 65 + ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) 66 + #define bfa_ioc_sync_complete(__ioc) \ 67 + ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) 41 68 42 69 #define bfa_ioc_mbox_cmd_pending(__ioc) \ 43 70 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ ··· 71 82 static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc); 72 83 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); 73 84 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); 85 + static void bfa_ioc_fail_notify(struct 
bfa_ioc *ioc); 86 + static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc); 87 + static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc); 88 + static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc); 89 + static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); 90 + static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); 74 91 static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, 75 92 u32 boot_param); 76 93 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); ··· 95 100 static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); 96 101 97 102 /** 98 - * IOC state machine events 103 + * IOC state machine definitions/declarations 99 104 */ 100 105 enum ioc_event { 101 - IOC_E_ENABLE = 1, /*!< IOC enable request */ 102 - IOC_E_DISABLE = 2, /*!< IOC disable request */ 103 - IOC_E_TIMEOUT = 3, /*!< f/w response timeout */ 104 - IOC_E_FWREADY = 4, /*!< f/w initialization done */ 105 - IOC_E_FWRSP_GETATTR = 5, /*!< IOC get attribute response */ 106 - IOC_E_FWRSP_ENABLE = 6, /*!< enable f/w response */ 107 - IOC_E_FWRSP_DISABLE = 7, /*!< disable f/w response */ 108 - IOC_E_HBFAIL = 8, /*!< heartbeat failure */ 109 - IOC_E_HWERROR = 9, /*!< hardware error interrupt */ 110 - IOC_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */ 111 - IOC_E_DETACH = 11, /*!< driver detach cleanup */ 106 + IOC_E_RESET = 1, /*!< IOC reset request */ 107 + IOC_E_ENABLE = 2, /*!< IOC enable request */ 108 + IOC_E_DISABLE = 3, /*!< IOC disable request */ 109 + IOC_E_DETACH = 4, /*!< driver detach cleanup */ 110 + IOC_E_ENABLED = 5, /*!< f/w enabled */ 111 + IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */ 112 + IOC_E_DISABLED = 7, /*!< f/w disabled */ 113 + IOC_E_INITFAILED = 8, /*!< failure notice by iocpf sm */ 114 + IOC_E_PFAILED = 9, /*!< failure notice by iocpf sm */ 115 + IOC_E_HBFAIL = 10, /*!< heartbeat failure */ 116 + IOC_E_HWERROR = 11, /*!< hardware error interrupt */ 117 + IOC_E_TIMEOUT = 12, /*!< timeout */ 112 118 }; 113 119 120 + bfa_fsm_state_decl(bfa_ioc, uninit, struct 
bfa_ioc, enum ioc_event); 114 121 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event); 115 - bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event); 116 - bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event); 117 - bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event); 118 - bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event); 119 122 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event); 120 123 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event); 121 124 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event); 122 - bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event); 123 - bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event); 125 + bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event); 126 + bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event); 124 127 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event); 125 128 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event); 126 129 127 130 static struct bfa_sm_table ioc_sm_table[] = { 131 + {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, 128 132 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, 129 - {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH}, 130 - {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH}, 131 - {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT}, 132 - {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT}, 133 - {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT}, 133 + {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, 134 134 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, 135 135 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, 136 - {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, 137 - {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL}, 136 + {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL}, 137 + {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, 138 138 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, 139 139 
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 140 140 }; 141 + 142 + /** 143 + * IOCPF state machine definitions/declarations 144 + */ 145 + 146 + /* 147 + * Forward declareations for iocpf state machine 148 + */ 149 + static void bfa_iocpf_enable(struct bfa_ioc *ioc); 150 + static void bfa_iocpf_disable(struct bfa_ioc *ioc); 151 + static void bfa_iocpf_fail(struct bfa_ioc *ioc); 152 + static void bfa_iocpf_initfail(struct bfa_ioc *ioc); 153 + static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc); 154 + static void bfa_iocpf_stop(struct bfa_ioc *ioc); 155 + 156 + /** 157 + * IOCPF state machine events 158 + */ 159 + enum iocpf_event { 160 + IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */ 161 + IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */ 162 + IOCPF_E_STOP = 3, /*!< stop on driver detach */ 163 + IOCPF_E_FWREADY = 4, /*!< f/w initialization done */ 164 + IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */ 165 + IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */ 166 + IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */ 167 + IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */ 168 + IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */ 169 + IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */ 170 + IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */ 171 + }; 172 + 173 + /** 174 + * IOCPF states 175 + */ 176 + enum bfa_iocpf_state { 177 + BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */ 178 + BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ 179 + BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */ 180 + BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */ 181 + BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */ 182 + BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */ 183 + BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */ 184 + BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */ 185 + BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */ 186 + }; 187 + 188 + bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum 
iocpf_event); 189 + bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event); 190 + bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event); 191 + bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event); 192 + bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event); 193 + bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event); 194 + bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event); 195 + bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf, 196 + enum iocpf_event); 197 + bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event); 198 + bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event); 199 + bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event); 200 + bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event); 201 + bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf, 202 + enum iocpf_event); 203 + bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event); 204 + 205 + static struct bfa_sm_table iocpf_sm_table[] = { 206 + {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET}, 207 + {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH}, 208 + {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH}, 209 + {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT}, 210 + {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, 211 + {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, 212 + {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, 213 + {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL}, 214 + {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, 215 + {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL}, 216 + {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, 217 + {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, 218 + {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING}, 219 + {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, 220 + }; 221 + 222 + 
/** 223 + * IOC State Machine 224 + */ 225 + 226 + /** 227 + * Beginning state. IOC uninit state. 228 + */ 229 + static void 230 + bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc) 231 + { 232 + } 233 + 234 + /** 235 + * IOC is in uninit state. 236 + */ 237 + static void 238 + bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) 239 + { 240 + switch (event) { 241 + case IOC_E_RESET: 242 + bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 243 + break; 244 + 245 + default: 246 + bfa_sm_fault(ioc, event); 247 + } 248 + } 141 249 142 250 /** 143 251 * Reset entry actions -- initialize state machine ··· 248 150 static void 249 151 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) 250 152 { 251 - ioc->retry_count = 0; 252 - ioc->auto_recover = bfa_nw_auto_recover; 153 + bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); 253 154 } 254 155 255 156 /** 256 - * Beginning state. IOC is in reset state. 157 + * IOC is in reset state. 257 158 */ 258 159 static void 259 160 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) 260 161 { 261 162 switch (event) { 262 163 case IOC_E_ENABLE: 263 - bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); 164 + bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); 264 165 break; 265 166 266 167 case IOC_E_DISABLE: ··· 267 170 break; 268 171 269 172 case IOC_E_DETACH: 173 + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 174 + break; 175 + 176 + default: 177 + bfa_sm_fault(ioc, event); 178 + } 179 + } 180 + 181 + static void 182 + bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) 183 + { 184 + bfa_iocpf_enable(ioc); 185 + } 186 + 187 + /** 188 + * Host IOC function is being enabled, awaiting response from firmware. 189 + * Semaphore is acquired. 190 + */ 191 + static void 192 + bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) 193 + { 194 + switch (event) { 195 + case IOC_E_ENABLED: 196 + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); 197 + break; 198 + 199 + case IOC_E_PFAILED: 200 + /* !!! fall through !!! 
*/ 201 + case IOC_E_HWERROR: 202 + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 203 + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); 204 + if (event != IOC_E_PFAILED) 205 + bfa_iocpf_initfail(ioc); 206 + break; 207 + 208 + case IOC_E_DISABLE: 209 + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 210 + break; 211 + 212 + case IOC_E_DETACH: 213 + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 214 + bfa_iocpf_stop(ioc); 215 + break; 216 + 217 + case IOC_E_ENABLE: 270 218 break; 271 219 272 220 default: ··· 323 181 * Semaphore should be acquired for version check. 324 182 */ 325 183 static void 326 - bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc) 327 - { 328 - bfa_ioc_hw_sem_get(ioc); 329 - } 330 - 331 - /** 332 - * Awaiting h/w semaphore to continue with version check. 333 - */ 334 - static void 335 - bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event) 336 - { 337 - switch (event) { 338 - case IOC_E_SEMLOCKED: 339 - if (bfa_ioc_firmware_lock(ioc)) { 340 - ioc->retry_count = 0; 341 - bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 342 - } else { 343 - bfa_nw_ioc_hw_sem_release(ioc); 344 - bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch); 345 - } 346 - break; 347 - 348 - case IOC_E_DISABLE: 349 - bfa_ioc_disable_comp(ioc); 350 - /* fall through */ 351 - 352 - case IOC_E_DETACH: 353 - bfa_ioc_hw_sem_get_cancel(ioc); 354 - bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 355 - break; 356 - 357 - case IOC_E_FWREADY: 358 - break; 359 - 360 - default: 361 - bfa_sm_fault(ioc, event); 362 - } 363 - } 364 - 365 - /** 366 - * Notify enable completion callback and generate mismatch AEN. 367 - */ 368 - static void 369 - bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc) 370 - { 371 - /** 372 - * Provide enable completion callback and AEN notification only once. 373 - */ 374 - if (ioc->retry_count == 0) 375 - ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 376 - ioc->retry_count++; 377 - bfa_ioc_timer_start(ioc); 378 - } 379 - 380 - /** 381 - * Awaiting firmware version match. 
382 - */ 383 - static void 384 - bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event) 385 - { 386 - switch (event) { 387 - case IOC_E_TIMEOUT: 388 - bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); 389 - break; 390 - 391 - case IOC_E_DISABLE: 392 - bfa_ioc_disable_comp(ioc); 393 - /* fall through */ 394 - 395 - case IOC_E_DETACH: 396 - bfa_ioc_timer_stop(ioc); 397 - bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 398 - break; 399 - 400 - case IOC_E_FWREADY: 401 - break; 402 - 403 - default: 404 - bfa_sm_fault(ioc, event); 405 - } 406 - } 407 - 408 - /** 409 - * Request for semaphore. 410 - */ 411 - static void 412 - bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc) 413 - { 414 - bfa_ioc_hw_sem_get(ioc); 415 - } 416 - 417 - /** 418 - * Awaiting semaphore for h/w initialzation. 419 - */ 420 - static void 421 - bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event) 422 - { 423 - switch (event) { 424 - case IOC_E_SEMLOCKED: 425 - ioc->retry_count = 0; 426 - bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 427 - break; 428 - 429 - case IOC_E_DISABLE: 430 - bfa_ioc_hw_sem_get_cancel(ioc); 431 - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 432 - break; 433 - 434 - default: 435 - bfa_sm_fault(ioc, event); 436 - } 437 - } 438 - 439 - static void 440 - bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc) 441 - { 442 - bfa_ioc_timer_start(ioc); 443 - bfa_ioc_reset(ioc, false); 444 - } 445 - 446 - /** 447 - * @brief 448 - * Hardware is being initialized. Interrupts are enabled. 449 - * Holding hardware semaphore lock. 
450 - */ 451 - static void 452 - bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event) 453 - { 454 - switch (event) { 455 - case IOC_E_FWREADY: 456 - bfa_ioc_timer_stop(ioc); 457 - bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); 458 - break; 459 - 460 - case IOC_E_HWERROR: 461 - bfa_ioc_timer_stop(ioc); 462 - /* fall through */ 463 - 464 - case IOC_E_TIMEOUT: 465 - ioc->retry_count++; 466 - if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { 467 - bfa_ioc_timer_start(ioc); 468 - bfa_ioc_reset(ioc, true); 469 - break; 470 - } 471 - 472 - bfa_nw_ioc_hw_sem_release(ioc); 473 - bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 474 - break; 475 - 476 - case IOC_E_DISABLE: 477 - bfa_nw_ioc_hw_sem_release(ioc); 478 - bfa_ioc_timer_stop(ioc); 479 - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 480 - break; 481 - 482 - default: 483 - bfa_sm_fault(ioc, event); 484 - } 485 - } 486 - 487 - static void 488 - bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) 489 - { 490 - bfa_ioc_timer_start(ioc); 491 - bfa_ioc_send_enable(ioc); 492 - } 493 - 494 - /** 495 - * Host IOC function is being enabled, awaiting response from firmware. 496 - * Semaphore is acquired. 
497 - */ 498 - static void 499 - bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) 500 - { 501 - switch (event) { 502 - case IOC_E_FWRSP_ENABLE: 503 - bfa_ioc_timer_stop(ioc); 504 - bfa_nw_ioc_hw_sem_release(ioc); 505 - bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); 506 - break; 507 - 508 - case IOC_E_HWERROR: 509 - bfa_ioc_timer_stop(ioc); 510 - /* fall through */ 511 - 512 - case IOC_E_TIMEOUT: 513 - ioc->retry_count++; 514 - if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { 515 - writel(BFI_IOC_UNINIT, 516 - ioc->ioc_regs.ioc_fwstate); 517 - bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 518 - break; 519 - } 520 - 521 - bfa_nw_ioc_hw_sem_release(ioc); 522 - bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 523 - break; 524 - 525 - case IOC_E_DISABLE: 526 - bfa_ioc_timer_stop(ioc); 527 - bfa_nw_ioc_hw_sem_release(ioc); 528 - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 529 - break; 530 - 531 - case IOC_E_FWREADY: 532 - bfa_ioc_send_enable(ioc); 533 - break; 534 - 535 - default: 536 - bfa_sm_fault(ioc, event); 537 - } 538 - } 539 - 540 - static void 541 184 bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) 542 185 { 543 - bfa_ioc_timer_start(ioc); 186 + mod_timer(&ioc->ioc_timer, jiffies + 187 + msecs_to_jiffies(BFA_IOC_TOV)); 544 188 bfa_ioc_send_getattr(ioc); 545 189 } 546 190 547 191 /** 548 - * @brief 549 192 * IOC configuration in progress. Timer is active. 
550 193 */ 551 194 static void ··· 338 411 { 339 412 switch (event) { 340 413 case IOC_E_FWRSP_GETATTR: 341 - bfa_ioc_timer_stop(ioc); 414 + del_timer(&ioc->ioc_timer); 342 415 bfa_ioc_check_attr_wwns(ioc); 343 416 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 344 417 break; 345 418 419 + case IOC_E_PFAILED: 346 420 case IOC_E_HWERROR: 347 - bfa_ioc_timer_stop(ioc); 421 + del_timer(&ioc->ioc_timer); 348 422 /* fall through */ 349 - 350 423 case IOC_E_TIMEOUT: 351 - bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 424 + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 425 + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); 426 + if (event != IOC_E_PFAILED) 427 + bfa_iocpf_getattrfail(ioc); 352 428 break; 353 429 354 430 case IOC_E_DISABLE: 355 - bfa_ioc_timer_stop(ioc); 356 - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 431 + del_timer(&ioc->ioc_timer); 432 + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 433 + break; 434 + 435 + case IOC_E_ENABLE: 357 436 break; 358 437 359 438 default: ··· 386 453 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 387 454 break; 388 455 456 + case IOC_E_PFAILED: 389 457 case IOC_E_HWERROR: 390 - case IOC_E_FWREADY: 391 - /** 392 - * Hard error or IOC recovery by other function. 393 - * Treat it same as heartbeat failure. 394 - */ 395 458 bfa_ioc_hb_stop(ioc); 396 459 /* !!! fall through !!! 
*/ 397 - 398 460 case IOC_E_HBFAIL: 399 - bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail); 461 + bfa_ioc_fail_notify(ioc); 462 + if (ioc->iocpf.auto_recover) 463 + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); 464 + else 465 + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); 466 + 467 + if (event != IOC_E_PFAILED) 468 + bfa_iocpf_fail(ioc); 400 469 break; 401 470 402 471 default: ··· 409 474 static void 410 475 bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc) 411 476 { 412 - bfa_ioc_timer_start(ioc); 413 - bfa_ioc_send_disable(ioc); 477 + bfa_iocpf_disable(ioc); 414 478 } 415 479 416 480 /** 417 - * IOC is being disabled 481 + * IOC is being desabled 418 482 */ 419 483 static void 420 484 bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) 421 485 { 422 486 switch (event) { 423 - case IOC_E_FWRSP_DISABLE: 424 - bfa_ioc_timer_stop(ioc); 487 + case IOC_E_DISABLED: 425 488 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 426 489 break; 427 490 428 491 case IOC_E_HWERROR: 429 - bfa_ioc_timer_stop(ioc); 430 492 /* 431 - * !!! fall through !!! 493 + * No state change. Will move to disabled state 494 + * after iocpf sm completes failure processing and 495 + * moves to disabled state. 432 496 */ 433 - 434 - case IOC_E_TIMEOUT: 435 - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 436 - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 497 + bfa_iocpf_fail(ioc); 437 498 break; 438 499 439 500 default: ··· 438 507 } 439 508 440 509 /** 441 - * IOC disable completion entry. 510 + * IOC desable completion entry. 
442 511 */ 443 512 static void 444 513 bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) ··· 451 520 { 452 521 switch (event) { 453 522 case IOC_E_ENABLE: 454 - bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 523 + bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); 455 524 break; 456 525 457 526 case IOC_E_DISABLE: 458 527 ioc->cbfn->disable_cbfn(ioc->bfa); 459 528 break; 460 529 461 - case IOC_E_FWREADY: 462 - break; 463 - 464 530 case IOC_E_DETACH: 465 - bfa_ioc_firmware_unlock(ioc); 466 - bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 531 + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 532 + bfa_iocpf_stop(ioc); 467 533 break; 468 534 469 535 default: ··· 469 541 } 470 542 471 543 static void 472 - bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc) 544 + bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc) 473 545 { 474 - ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 475 - bfa_ioc_timer_start(ioc); 476 546 } 477 547 478 548 /** 479 - * @brief 480 - * Hardware initialization failed. 549 + * Hardware initialization retry. 481 550 */ 482 551 static void 483 - bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event) 552 + bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event) 484 553 { 485 554 switch (event) { 555 + case IOC_E_ENABLED: 556 + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); 557 + break; 558 + 559 + case IOC_E_PFAILED: 560 + case IOC_E_HWERROR: 561 + /** 562 + * Initialization retry failed. 
563 + */ 564 + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 565 + if (event != IOC_E_PFAILED) 566 + bfa_iocpf_initfail(ioc); 567 + break; 568 + 569 + case IOC_E_INITFAILED: 570 + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); 571 + break; 572 + 573 + case IOC_E_ENABLE: 574 + break; 575 + 486 576 case IOC_E_DISABLE: 487 - bfa_ioc_timer_stop(ioc); 488 - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 577 + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 489 578 break; 490 579 491 580 case IOC_E_DETACH: 492 - bfa_ioc_timer_stop(ioc); 493 - bfa_ioc_firmware_unlock(ioc); 494 - bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 495 - break; 496 - 497 - case IOC_E_TIMEOUT: 498 - bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 581 + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 582 + bfa_iocpf_stop(ioc); 499 583 break; 500 584 501 585 default: ··· 516 576 } 517 577 518 578 static void 519 - bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc) 579 + bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc) 520 580 { 521 - struct list_head *qe; 522 - struct bfa_ioc_hbfail_notify *notify; 523 - 524 - /** 525 - * Mark IOC as failed in hardware and stop firmware. 526 - */ 527 - bfa_ioc_lpu_stop(ioc); 528 - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 529 - 530 - /** 531 - * Notify other functions on HB failure. 532 - */ 533 - bfa_ioc_notify_hbfail(ioc); 534 - 535 - /** 536 - * Notify driver and common modules registered for notification. 537 - */ 538 - ioc->cbfn->hbfail_cbfn(ioc->bfa); 539 - list_for_each(qe, &ioc->hb_notify_q) { 540 - notify = (struct bfa_ioc_hbfail_notify *) qe; 541 - notify->cbfn(notify->cbarg); 542 - } 543 - 544 - /** 545 - * Flush any queued up mailbox requests. 546 - */ 547 - bfa_ioc_mbox_hbfail(ioc); 548 - 549 - /** 550 - * Trigger auto-recovery after a delay. 551 - */ 552 - if (ioc->auto_recover) 553 - mod_timer(&ioc->ioc_timer, jiffies + 554 - msecs_to_jiffies(BFA_IOC_TOV_RECOVER)); 555 581 } 556 582 557 583 /** 558 - * @brief 559 - * IOC heartbeat failure. 584 + * IOC failure. 
560 585 */ 561 586 static void 562 - bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event) 587 + bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event) 563 588 { 564 589 switch (event) { 565 - 566 590 case IOC_E_ENABLE: 567 591 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 568 592 break; 569 593 570 594 case IOC_E_DISABLE: 571 - if (ioc->auto_recover) 572 - bfa_ioc_timer_stop(ioc); 573 - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 595 + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 574 596 break; 575 597 576 - case IOC_E_TIMEOUT: 577 - bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 578 - break; 579 - 580 - case IOC_E_FWREADY: 581 - /** 582 - * Recovery is already initiated by other function. 583 - */ 598 + case IOC_E_DETACH: 599 + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 600 + bfa_iocpf_stop(ioc); 584 601 break; 585 602 586 603 case IOC_E_HWERROR: 587 - /* 588 - * HB failure notification, ignore. 589 - */ 604 + /* HB failure notification, ignore. */ 590 605 break; 606 + 591 607 default: 592 608 bfa_sm_fault(ioc, event); 609 + } 610 + } 611 + 612 + /** 613 + * IOCPF State Machine 614 + */ 615 + 616 + /** 617 + * Reset entry actions -- initialize state machine 618 + */ 619 + static void 620 + bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf) 621 + { 622 + iocpf->retry_count = 0; 623 + iocpf->auto_recover = bfa_nw_auto_recover; 624 + } 625 + 626 + /** 627 + * Beginning state. IOC is in reset state. 628 + */ 629 + static void 630 + bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) 631 + { 632 + switch (event) { 633 + case IOCPF_E_ENABLE: 634 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); 635 + break; 636 + 637 + case IOCPF_E_STOP: 638 + break; 639 + 640 + default: 641 + bfa_sm_fault(iocpf->ioc, event); 642 + } 643 + } 644 + 645 + /** 646 + * Semaphore should be acquired for version check. 
647 + */ 648 + static void 649 + bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf) 650 + { 651 + bfa_ioc_hw_sem_get(iocpf->ioc); 652 + } 653 + 654 + /** 655 + * Awaiting h/w semaphore to continue with version check. 656 + */ 657 + static void 658 + bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) 659 + { 660 + struct bfa_ioc *ioc = iocpf->ioc; 661 + 662 + switch (event) { 663 + case IOCPF_E_SEMLOCKED: 664 + if (bfa_ioc_firmware_lock(ioc)) { 665 + if (bfa_ioc_sync_complete(ioc)) { 666 + iocpf->retry_count = 0; 667 + bfa_ioc_sync_join(ioc); 668 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 669 + } else { 670 + bfa_ioc_firmware_unlock(ioc); 671 + bfa_nw_ioc_hw_sem_release(ioc); 672 + mod_timer(&ioc->sem_timer, jiffies + 673 + msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); 674 + } 675 + } else { 676 + bfa_nw_ioc_hw_sem_release(ioc); 677 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); 678 + } 679 + break; 680 + 681 + case IOCPF_E_DISABLE: 682 + bfa_ioc_hw_sem_get_cancel(ioc); 683 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 684 + bfa_ioc_pf_disabled(ioc); 685 + break; 686 + 687 + case IOCPF_E_STOP: 688 + bfa_ioc_hw_sem_get_cancel(ioc); 689 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 690 + break; 691 + 692 + default: 693 + bfa_sm_fault(ioc, event); 694 + } 695 + } 696 + 697 + /** 698 + * Notify enable completion callback 699 + */ 700 + static void 701 + bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf) 702 + { 703 + /* Call only the first time sm enters fwmismatch state. */ 704 + if (iocpf->retry_count == 0) 705 + bfa_ioc_pf_fwmismatch(iocpf->ioc); 706 + 707 + iocpf->retry_count++; 708 + mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + 709 + msecs_to_jiffies(BFA_IOC_TOV)); 710 + } 711 + 712 + /** 713 + * Awaiting firmware version match. 
714 + */ 715 + static void 716 + bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) 717 + { 718 + struct bfa_ioc *ioc = iocpf->ioc; 719 + 720 + switch (event) { 721 + case IOCPF_E_TIMEOUT: 722 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); 723 + break; 724 + 725 + case IOCPF_E_DISABLE: 726 + del_timer(&ioc->iocpf_timer); 727 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 728 + bfa_ioc_pf_disabled(ioc); 729 + break; 730 + 731 + case IOCPF_E_STOP: 732 + del_timer(&ioc->iocpf_timer); 733 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 734 + break; 735 + 736 + default: 737 + bfa_sm_fault(ioc, event); 738 + } 739 + } 740 + 741 + /** 742 + * Request for semaphore. 743 + */ 744 + static void 745 + bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf) 746 + { 747 + bfa_ioc_hw_sem_get(iocpf->ioc); 748 + } 749 + 750 + /** 751 + * Awaiting semaphore for h/w initialzation. 752 + */ 753 + static void 754 + bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event) 755 + { 756 + struct bfa_ioc *ioc = iocpf->ioc; 757 + 758 + switch (event) { 759 + case IOCPF_E_SEMLOCKED: 760 + if (bfa_ioc_sync_complete(ioc)) { 761 + bfa_ioc_sync_join(ioc); 762 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 763 + } else { 764 + bfa_nw_ioc_hw_sem_release(ioc); 765 + mod_timer(&ioc->sem_timer, jiffies + 766 + msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); 767 + } 768 + break; 769 + 770 + case IOCPF_E_DISABLE: 771 + bfa_ioc_hw_sem_get_cancel(ioc); 772 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 773 + break; 774 + 775 + default: 776 + bfa_sm_fault(ioc, event); 777 + } 778 + } 779 + 780 + static void 781 + bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf) 782 + { 783 + mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + 784 + msecs_to_jiffies(BFA_IOC_TOV)); 785 + bfa_ioc_reset(iocpf->ioc, 0); 786 + } 787 + 788 + /** 789 + * Hardware is being initialized. Interrupts are enabled. 790 + * Holding hardware semaphore lock. 
791 + */ 792 + static void 793 + bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event) 794 + { 795 + struct bfa_ioc *ioc = iocpf->ioc; 796 + 797 + switch (event) { 798 + case IOCPF_E_FWREADY: 799 + del_timer(&ioc->iocpf_timer); 800 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling); 801 + break; 802 + 803 + case IOCPF_E_INITFAIL: 804 + del_timer(&ioc->iocpf_timer); 805 + /* 806 + * !!! fall through !!! 807 + */ 808 + 809 + case IOCPF_E_TIMEOUT: 810 + bfa_nw_ioc_hw_sem_release(ioc); 811 + if (event == IOCPF_E_TIMEOUT) 812 + bfa_ioc_pf_failed(ioc); 813 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); 814 + break; 815 + 816 + case IOCPF_E_DISABLE: 817 + del_timer(&ioc->iocpf_timer); 818 + bfa_ioc_sync_leave(ioc); 819 + bfa_nw_ioc_hw_sem_release(ioc); 820 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 821 + break; 822 + 823 + default: 824 + bfa_sm_fault(ioc, event); 825 + } 826 + } 827 + 828 + static void 829 + bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf) 830 + { 831 + mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + 832 + msecs_to_jiffies(BFA_IOC_TOV)); 833 + bfa_ioc_send_enable(iocpf->ioc); 834 + } 835 + 836 + /** 837 + * Host IOC function is being enabled, awaiting response from firmware. 838 + * Semaphore is acquired. 839 + */ 840 + static void 841 + bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event) 842 + { 843 + struct bfa_ioc *ioc = iocpf->ioc; 844 + 845 + switch (event) { 846 + case IOCPF_E_FWRSP_ENABLE: 847 + del_timer(&ioc->iocpf_timer); 848 + bfa_nw_ioc_hw_sem_release(ioc); 849 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); 850 + break; 851 + 852 + case IOCPF_E_INITFAIL: 853 + del_timer(&ioc->iocpf_timer); 854 + /* 855 + * !!! fall through !!! 
856 + */ 857 + case IOCPF_E_TIMEOUT: 858 + bfa_nw_ioc_hw_sem_release(ioc); 859 + if (event == IOCPF_E_TIMEOUT) 860 + bfa_ioc_pf_failed(ioc); 861 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); 862 + break; 863 + 864 + case IOCPF_E_DISABLE: 865 + del_timer(&ioc->iocpf_timer); 866 + bfa_nw_ioc_hw_sem_release(ioc); 867 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); 868 + break; 869 + 870 + case IOCPF_E_FWREADY: 871 + bfa_ioc_send_enable(ioc); 872 + break; 873 + 874 + default: 875 + bfa_sm_fault(ioc, event); 876 + } 877 + } 878 + 879 + static bool 880 + bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) 881 + { 882 + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); 883 + } 884 + 885 + static void 886 + bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf) 887 + { 888 + bfa_ioc_pf_enabled(iocpf->ioc); 889 + } 890 + 891 + static void 892 + bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event) 893 + { 894 + struct bfa_ioc *ioc = iocpf->ioc; 895 + 896 + switch (event) { 897 + case IOCPF_E_DISABLE: 898 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); 899 + break; 900 + 901 + case IOCPF_E_GETATTRFAIL: 902 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); 903 + break; 904 + 905 + case IOCPF_E_FAIL: 906 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); 907 + break; 908 + 909 + case IOCPF_E_FWREADY: 910 + bfa_ioc_pf_failed(ioc); 911 + if (bfa_nw_ioc_is_operational(ioc)) 912 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); 913 + else 914 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); 915 + break; 916 + 917 + default: 918 + bfa_sm_fault(ioc, event); 919 + } 920 + } 921 + 922 + static void 923 + bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf) 924 + { 925 + mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + 926 + msecs_to_jiffies(BFA_IOC_TOV)); 927 + bfa_ioc_send_disable(iocpf->ioc); 928 + } 929 + 930 + /** 931 + * IOC is being disabled 932 + */ 933 + static void 934 + bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) 
935 + { 936 + struct bfa_ioc *ioc = iocpf->ioc; 937 + 938 + switch (event) { 939 + case IOCPF_E_FWRSP_DISABLE: 940 + case IOCPF_E_FWREADY: 941 + del_timer(&ioc->iocpf_timer); 942 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 943 + break; 944 + 945 + case IOCPF_E_FAIL: 946 + del_timer(&ioc->iocpf_timer); 947 + /* 948 + * !!! fall through !!! 949 + */ 950 + 951 + case IOCPF_E_TIMEOUT: 952 + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 953 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 954 + break; 955 + 956 + case IOCPF_E_FWRSP_ENABLE: 957 + break; 958 + 959 + default: 960 + bfa_sm_fault(ioc, event); 961 + } 962 + } 963 + 964 + static void 965 + bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf) 966 + { 967 + bfa_ioc_hw_sem_get(iocpf->ioc); 968 + } 969 + 970 + /** 971 + * IOC hb ack request is being removed. 972 + */ 973 + static void 974 + bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) 975 + { 976 + struct bfa_ioc *ioc = iocpf->ioc; 977 + 978 + switch (event) { 979 + case IOCPF_E_SEMLOCKED: 980 + bfa_ioc_sync_leave(ioc); 981 + bfa_nw_ioc_hw_sem_release(ioc); 982 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 983 + break; 984 + 985 + case IOCPF_E_FAIL: 986 + break; 987 + 988 + default: 989 + bfa_sm_fault(ioc, event); 990 + } 991 + } 992 + 993 + /** 994 + * IOC disable completion entry. 
995 + */ 996 + static void 997 + bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf) 998 + { 999 + bfa_ioc_pf_disabled(iocpf->ioc); 1000 + } 1001 + 1002 + static void 1003 + bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event) 1004 + { 1005 + struct bfa_ioc *ioc = iocpf->ioc; 1006 + 1007 + switch (event) { 1008 + case IOCPF_E_ENABLE: 1009 + iocpf->retry_count = 0; 1010 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); 1011 + break; 1012 + 1013 + case IOCPF_E_STOP: 1014 + bfa_ioc_firmware_unlock(ioc); 1015 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 1016 + break; 1017 + 1018 + default: 1019 + bfa_sm_fault(ioc, event); 1020 + } 1021 + } 1022 + 1023 + static void 1024 + bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf) 1025 + { 1026 + bfa_ioc_hw_sem_get(iocpf->ioc); 1027 + } 1028 + 1029 + /** 1030 + * Hardware initialization failed. 1031 + */ 1032 + static void 1033 + bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) 1034 + { 1035 + struct bfa_ioc *ioc = iocpf->ioc; 1036 + 1037 + switch (event) { 1038 + case IOCPF_E_SEMLOCKED: 1039 + bfa_ioc_notify_fail(ioc); 1040 + bfa_ioc_sync_ack(ioc); 1041 + iocpf->retry_count++; 1042 + if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) { 1043 + bfa_ioc_sync_leave(ioc); 1044 + bfa_nw_ioc_hw_sem_release(ioc); 1045 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); 1046 + } else { 1047 + if (bfa_ioc_sync_complete(ioc)) 1048 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 1049 + else { 1050 + bfa_nw_ioc_hw_sem_release(ioc); 1051 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); 1052 + } 1053 + } 1054 + break; 1055 + 1056 + case IOCPF_E_DISABLE: 1057 + bfa_ioc_hw_sem_get_cancel(ioc); 1058 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 1059 + break; 1060 + 1061 + case IOCPF_E_STOP: 1062 + bfa_ioc_hw_sem_get_cancel(ioc); 1063 + bfa_ioc_firmware_unlock(ioc); 1064 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 1065 + break; 1066 + 1067 + case IOCPF_E_FAIL: 1068 + break; 1069 
+ 1070 + default: 1071 + bfa_sm_fault(ioc, event); 1072 + } 1073 + } 1074 + 1075 + static void 1076 + bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf) 1077 + { 1078 + bfa_ioc_pf_initfailed(iocpf->ioc); 1079 + } 1080 + 1081 + /** 1082 + * Hardware initialization failed. 1083 + */ 1084 + static void 1085 + bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event) 1086 + { 1087 + struct bfa_ioc *ioc = iocpf->ioc; 1088 + 1089 + switch (event) { 1090 + case IOCPF_E_DISABLE: 1091 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 1092 + break; 1093 + 1094 + case IOCPF_E_STOP: 1095 + bfa_ioc_firmware_unlock(ioc); 1096 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 1097 + break; 1098 + 1099 + default: 1100 + bfa_sm_fault(ioc, event); 1101 + } 1102 + } 1103 + 1104 + static void 1105 + bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf) 1106 + { 1107 + /** 1108 + * Mark IOC as failed in hardware and stop firmware. 1109 + */ 1110 + bfa_ioc_lpu_stop(iocpf->ioc); 1111 + 1112 + /** 1113 + * Flush any queued up mailbox requests. 1114 + */ 1115 + bfa_ioc_mbox_hbfail(iocpf->ioc); 1116 + bfa_ioc_hw_sem_get(iocpf->ioc); 1117 + } 1118 + 1119 + /** 1120 + * IOC is in failed state. 
1121 + */ 1122 + static void 1123 + bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) 1124 + { 1125 + struct bfa_ioc *ioc = iocpf->ioc; 1126 + 1127 + switch (event) { 1128 + case IOCPF_E_SEMLOCKED: 1129 + iocpf->retry_count = 0; 1130 + bfa_ioc_sync_ack(ioc); 1131 + bfa_ioc_notify_fail(ioc); 1132 + if (!iocpf->auto_recover) { 1133 + bfa_ioc_sync_leave(ioc); 1134 + bfa_nw_ioc_hw_sem_release(ioc); 1135 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); 1136 + } else { 1137 + if (bfa_ioc_sync_complete(ioc)) 1138 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 1139 + else { 1140 + bfa_nw_ioc_hw_sem_release(ioc); 1141 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); 1142 + } 1143 + } 1144 + break; 1145 + 1146 + case IOCPF_E_DISABLE: 1147 + bfa_ioc_hw_sem_get_cancel(ioc); 1148 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 1149 + break; 1150 + 1151 + case IOCPF_E_FAIL: 1152 + break; 1153 + 1154 + default: 1155 + bfa_sm_fault(ioc, event); 1156 + } 1157 + } 1158 + 1159 + static void 1160 + bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf) 1161 + { 1162 + } 1163 + 1164 + /** 1165 + * @brief 1166 + * IOC is in failed state. 
1167 + */ 1168 + static void 1169 + bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) 1170 + { 1171 + switch (event) { 1172 + case IOCPF_E_DISABLE: 1173 + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 1174 + break; 1175 + 1176 + default: 1177 + bfa_sm_fault(iocpf->ioc, event); 593 1178 } 594 1179 } 595 1180 ··· 1137 672 notify = (struct bfa_ioc_hbfail_notify *) qe; 1138 673 notify->cbfn(notify->cbarg); 1139 674 } 1140 - } 1141 - 1142 - void 1143 - bfa_nw_ioc_sem_timeout(void *ioc_arg) 1144 - { 1145 - struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; 1146 - 1147 - bfa_ioc_hw_sem_get(ioc); 1148 675 } 1149 676 1150 677 bool ··· 1178 721 */ 1179 722 r32 = readl(ioc->ioc_regs.ioc_sem_reg); 1180 723 if (r32 == 0) { 1181 - bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED); 724 + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); 1182 725 return; 1183 726 } 1184 727 ··· 1389 932 */ 1390 933 bfa_ioc_msgflush(ioc); 1391 934 ioc->cbfn->reset_cbfn(ioc->bfa); 1392 - bfa_fsm_send_event(ioc, IOC_E_FWREADY); 935 + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); 1393 936 return; 1394 937 } 1395 938 ··· 1475 1018 1476 1019 hb_count = readl(ioc->ioc_regs.heartbeat); 1477 1020 if (ioc->hb_count == hb_count) { 1478 - pr_crit("Firmware heartbeat failure at %d", hb_count); 1479 1021 bfa_ioc_recover(ioc); 1480 1022 return; 1481 1023 } else { ··· 1645 1189 bfa_q_deq(&mod->cmd_q, &cmd); 1646 1190 } 1647 1191 1192 + static void 1193 + bfa_ioc_fail_notify(struct bfa_ioc *ioc) 1194 + { 1195 + struct list_head *qe; 1196 + struct bfa_ioc_hbfail_notify *notify; 1197 + 1198 + /** 1199 + * Notify driver and common modules registered for notification. 
1200 + */ 1201 + ioc->cbfn->hbfail_cbfn(ioc->bfa); 1202 + list_for_each(qe, &ioc->hb_notify_q) { 1203 + notify = (struct bfa_ioc_hbfail_notify *) qe; 1204 + notify->cbfn(notify->cbarg); 1205 + } 1206 + } 1207 + 1208 + static void 1209 + bfa_ioc_pf_enabled(struct bfa_ioc *ioc) 1210 + { 1211 + bfa_fsm_send_event(ioc, IOC_E_ENABLED); 1212 + } 1213 + 1214 + static void 1215 + bfa_ioc_pf_disabled(struct bfa_ioc *ioc) 1216 + { 1217 + bfa_fsm_send_event(ioc, IOC_E_DISABLED); 1218 + } 1219 + 1220 + static void 1221 + bfa_ioc_pf_initfailed(struct bfa_ioc *ioc) 1222 + { 1223 + bfa_fsm_send_event(ioc, IOC_E_INITFAILED); 1224 + } 1225 + 1226 + static void 1227 + bfa_ioc_pf_failed(struct bfa_ioc *ioc) 1228 + { 1229 + bfa_fsm_send_event(ioc, IOC_E_PFAILED); 1230 + } 1231 + 1232 + static void 1233 + bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc) 1234 + { 1235 + /** 1236 + * Provide enable completion callback and AEN notification. 1237 + */ 1238 + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 1239 + } 1240 + 1648 1241 /** 1649 1242 * IOC public 1650 1243 */ ··· 1789 1284 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) 1790 1285 { 1791 1286 union bfi_ioc_i2h_msg_u *msg; 1287 + struct bfa_iocpf *iocpf = &ioc->iocpf; 1792 1288 1793 1289 msg = (union bfi_ioc_i2h_msg_u *) m; 1794 1290 ··· 1800 1294 break; 1801 1295 1802 1296 case BFI_IOC_I2H_READY_EVENT: 1803 - bfa_fsm_send_event(ioc, IOC_E_FWREADY); 1297 + bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY); 1804 1298 break; 1805 1299 1806 1300 case BFI_IOC_I2H_ENABLE_REPLY: 1807 - bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE); 1301 + bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); 1808 1302 break; 1809 1303 1810 1304 case BFI_IOC_I2H_DISABLE_REPLY: 1811 - bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE); 1305 + bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE); 1812 1306 break; 1813 1307 1814 1308 case BFI_IOC_I2H_GETATTR_REPLY: ··· 1834 1328 ioc->fcmode = false; 1835 1329 ioc->pllinit = false; 1836 1330 ioc->dbg_fwsave_once = true; 
1331 + ioc->iocpf.ioc = ioc; 1837 1332 1838 1333 bfa_ioc_mbox_attach(ioc); 1839 1334 INIT_LIST_HEAD(&ioc->hb_notify_q); 1840 1335 1841 - bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 1336 + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 1337 + bfa_fsm_send_event(ioc, IOC_E_RESET); 1842 1338 } 1843 1339 1844 1340 /** ··· 2145 1637 static enum bfa_ioc_state 2146 1638 bfa_ioc_get_state(struct bfa_ioc *ioc) 2147 1639 { 2148 - return bfa_sm_to_state(ioc_sm_table, ioc->fsm); 1640 + enum bfa_iocpf_state iocpf_st; 1641 + enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); 1642 + 1643 + if (ioc_st == BFA_IOC_ENABLING || 1644 + ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) { 1645 + 1646 + iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); 1647 + 1648 + switch (iocpf_st) { 1649 + case BFA_IOCPF_SEMWAIT: 1650 + ioc_st = BFA_IOC_SEMWAIT; 1651 + break; 1652 + 1653 + case BFA_IOCPF_HWINIT: 1654 + ioc_st = BFA_IOC_HWINIT; 1655 + break; 1656 + 1657 + case BFA_IOCPF_FWMISMATCH: 1658 + ioc_st = BFA_IOC_FWMISMATCH; 1659 + break; 1660 + 1661 + case BFA_IOCPF_FAIL: 1662 + ioc_st = BFA_IOC_FAIL; 1663 + break; 1664 + 1665 + case BFA_IOCPF_INITFAIL: 1666 + ioc_st = BFA_IOC_INITFAIL; 1667 + break; 1668 + 1669 + default: 1670 + break; 1671 + } 1672 + } 1673 + return ioc_st; 2149 1674 } 2150 1675 2151 1676 void ··· 2219 1678 static void 2220 1679 bfa_ioc_recover(struct bfa_ioc *ioc) 2221 1680 { 2222 - bfa_ioc_stats(ioc, ioc_hbfails); 2223 - bfa_fsm_send_event(ioc, IOC_E_HBFAIL); 1681 + u16 bdf; 1682 + 1683 + bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 | 1684 + ioc->pcidev.device_id); 1685 + 1686 + pr_crit("Firmware heartbeat failure at %d", bdf); 1687 + BUG_ON(1); 2224 1688 } 2225 1689 2226 1690 static void ··· 2233 1687 { 2234 1688 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) 2235 1689 return; 1690 + } 2236 1691 1692 + /** 1693 + * @dg hal_iocpf_pvt BFA IOC PF private functions 1694 + * @{ 1695 + */ 1696 + 1697 + static void 1698 + 
bfa_iocpf_enable(struct bfa_ioc *ioc) 1699 + { 1700 + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); 1701 + } 1702 + 1703 + static void 1704 + bfa_iocpf_disable(struct bfa_ioc *ioc) 1705 + { 1706 + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); 1707 + } 1708 + 1709 + static void 1710 + bfa_iocpf_fail(struct bfa_ioc *ioc) 1711 + { 1712 + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); 1713 + } 1714 + 1715 + static void 1716 + bfa_iocpf_initfail(struct bfa_ioc *ioc) 1717 + { 1718 + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); 1719 + } 1720 + 1721 + static void 1722 + bfa_iocpf_getattrfail(struct bfa_ioc *ioc) 1723 + { 1724 + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); 1725 + } 1726 + 1727 + static void 1728 + bfa_iocpf_stop(struct bfa_ioc *ioc) 1729 + { 1730 + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); 1731 + } 1732 + 1733 + void 1734 + bfa_nw_iocpf_timeout(void *ioc_arg) 1735 + { 1736 + struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; 1737 + 1738 + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); 1739 + } 1740 + 1741 + void 1742 + bfa_nw_iocpf_sem_timeout(void *ioc_arg) 1743 + { 1744 + struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; 1745 + 1746 + bfa_ioc_hw_sem_get(ioc); 2237 1747 }
+20 -29
drivers/net/bna/bfa_ioc.h
··· 26 26 #define BFA_IOC_TOV 3000 /* msecs */ 27 27 #define BFA_IOC_HWSEM_TOV 500 /* msecs */ 28 28 #define BFA_IOC_HB_TOV 500 /* msecs */ 29 - #define BFA_IOC_HWINIT_MAX 2 30 - #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV 31 - 32 - /** 33 - * Generic Scatter Gather Element used by driver 34 - */ 35 - struct bfa_sge { 36 - u32 sg_len; 37 - void *sg_addr; 38 - }; 29 + #define BFA_IOC_HWINIT_MAX 5 39 30 40 31 /** 41 32 * PCI device information required by IOC ··· 56 65 #define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */ 57 66 58 67 /** 59 - * @brief BFA dma address assignment macro 60 - */ 61 - #define bfa_dma_addr_set(dma_addr, pa) \ 62 - __bfa_dma_addr_set(&dma_addr, (u64)pa) 63 - 64 - static inline void 65 - __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa) 66 - { 67 - dma_addr->a32.addr_lo = (u32) pa; 68 - dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa)); 69 - } 70 - 71 - /** 72 68 * @brief BFA dma address assignment macro. (big endian format) 73 69 */ 74 70 #define bfa_dma_be_addr_set(dma_addr, pa) \ ··· 83 105 void __iomem *host_page_num_fn; 84 106 void __iomem *heartbeat; 85 107 void __iomem *ioc_fwstate; 108 + void __iomem *alt_ioc_fwstate; 86 109 void __iomem *ll_halt; 110 + void __iomem *alt_ll_halt; 87 111 void __iomem *err_set; 112 + void __iomem *ioc_fail_sync; 88 113 void __iomem *shirq_isr_next; 89 114 void __iomem *shirq_msk_next; 90 115 void __iomem *smem_page_start; ··· 146 165 (__notify)->cbarg = (__cbarg); \ 147 166 } while (0) 148 167 168 + struct bfa_iocpf { 169 + bfa_fsm_t fsm; 170 + struct bfa_ioc *ioc; 171 + u32 retry_count; 172 + bool auto_recover; 173 + }; 174 + 149 175 struct bfa_ioc { 150 176 bfa_fsm_t fsm; 151 177 struct bfa *bfa; 152 178 struct bfa_pcidev pcidev; 153 - struct bfa_timer_mod *timer_mod; 154 179 struct timer_list ioc_timer; 180 + struct timer_list iocpf_timer; 155 181 struct timer_list sem_timer; 156 182 struct timer_list hb_timer; 157 183 u32 hb_count; 158 - u32 retry_count; 159 184 struct list_head 
hb_notify_q; 160 185 void *dbg_fwsave; 161 186 int dbg_fwsave_len; ··· 169 182 enum bfi_mclass ioc_mc; 170 183 struct bfa_ioc_regs ioc_regs; 171 184 struct bfa_ioc_drv_stats stats; 172 - bool auto_recover; 173 185 bool fcmode; 174 186 bool ctdev; 175 187 bool cna; ··· 181 195 struct bfa_ioc_cbfn *cbfn; 182 196 struct bfa_ioc_mbox_mod mbox_mod; 183 197 struct bfa_ioc_hwif *ioc_hwif; 198 + struct bfa_iocpf iocpf; 184 199 }; 185 200 186 201 struct bfa_ioc_hwif { ··· 192 205 void (*ioc_map_port) (struct bfa_ioc *ioc); 193 206 void (*ioc_isr_mode_set) (struct bfa_ioc *ioc, 194 207 bool msix); 195 - void (*ioc_notify_hbfail) (struct bfa_ioc *ioc); 208 + void (*ioc_notify_fail) (struct bfa_ioc *ioc); 196 209 void (*ioc_ownership_reset) (struct bfa_ioc *ioc); 210 + void (*ioc_sync_join) (struct bfa_ioc *ioc); 211 + void (*ioc_sync_leave) (struct bfa_ioc *ioc); 212 + void (*ioc_sync_ack) (struct bfa_ioc *ioc); 213 + bool (*ioc_sync_complete) (struct bfa_ioc *ioc); 197 214 }; 198 215 199 216 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) ··· 262 271 void bfa_nw_ioc_disable(struct bfa_ioc *ioc); 263 272 264 273 void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); 265 - 266 274 void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); 267 275 void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc, 268 276 struct bfa_ioc_hbfail_notify *notify); ··· 279 289 */ 280 290 void bfa_nw_ioc_timeout(void *ioc); 281 291 void bfa_nw_ioc_hb_check(void *ioc); 282 - void bfa_nw_ioc_sem_timeout(void *ioc); 292 + void bfa_nw_iocpf_timeout(void *ioc); 293 + void bfa_nw_iocpf_sem_timeout(void *ioc); 283 294 284 295 /* 285 296 * F/W Image Size & Chunk
+99 -3
drivers/net/bna/bfa_ioc_ct.c
··· 22 22 #include "bfi_ctreg.h" 23 23 #include "bfa_defs.h" 24 24 25 + #define bfa_ioc_ct_sync_pos(__ioc) \ 26 + ((u32) (1 << bfa_ioc_pcifn(__ioc))) 27 + #define BFA_IOC_SYNC_REQD_SH 16 28 + #define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff) 29 + #define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000) 30 + #define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH) 31 + #define bfa_ioc_ct_sync_reqd_pos(__ioc) \ 32 + (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH) 33 + 25 34 /* 26 35 * forward declarations 27 36 */ ··· 39 30 static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc); 40 31 static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc); 41 32 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); 42 - static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc); 33 + static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); 43 34 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); 35 + static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); 36 + static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); 37 + static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); 38 + static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); 44 39 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode); 45 40 46 41 static struct bfa_ioc_hwif nw_hwif_ct; ··· 61 48 nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; 62 49 nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; 63 50 nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; 64 - nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail; 51 + nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail; 65 52 nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; 53 + nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join; 54 + nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave; 55 + nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack; 56 + nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete; 66 57 67 58 ioc->ioc_hwif = &nw_hwif_ct; 68 59 } ··· 103 86 if (usecnt == 0) { 104 87 
writel(1, ioc->ioc_regs.ioc_usage_reg); 105 88 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 89 + writel(0, ioc->ioc_regs.ioc_fail_sync); 106 90 return true; 107 91 } 108 92 ··· 167 149 * Notify other functions on HB failure. 168 150 */ 169 151 static void 170 - bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc) 152 + bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) 171 153 { 172 154 if (ioc->cna) { 173 155 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); 156 + writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); 174 157 /* Wait for halt to take effect */ 175 158 readl(ioc->ioc_regs.ll_halt); 159 + readl(ioc->ioc_regs.alt_ll_halt); 176 160 } else { 177 161 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); 178 162 readl(ioc->ioc_regs.err_set); ··· 226 206 if (ioc->port_id == 0) { 227 207 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; 228 208 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; 209 + ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; 229 210 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; 230 211 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; 231 212 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; 213 + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; 232 214 } else { 233 215 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); 234 216 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 217 + ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; 235 218 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; 236 219 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; 237 220 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; 221 + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; 238 222 } 239 223 240 224 /* ··· 256 232 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); 257 233 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 258 234 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); 235 + ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC); 259 236 260 237 /** 261 238 * 
sram memory access ··· 340 315 */ 341 316 readl(ioc->ioc_regs.ioc_sem_reg); 342 317 bfa_nw_ioc_hw_sem_release(ioc); 318 + } 319 + 320 + /** 321 + * Synchronized IOC failure processing routines 322 + */ 323 + static void 324 + bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) 325 + { 326 + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); 327 + u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); 328 + 329 + writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); 330 + } 331 + 332 + static void 333 + bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc) 334 + { 335 + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); 336 + u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | 337 + bfa_ioc_ct_sync_pos(ioc); 338 + 339 + writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); 340 + } 341 + 342 + static void 343 + bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc) 344 + { 345 + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); 346 + 347 + writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync); 348 + } 349 + 350 + static bool 351 + bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc) 352 + { 353 + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); 354 + u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); 355 + u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32); 356 + u32 tmp_ackd; 357 + 358 + if (sync_ackd == 0) 359 + return true; 360 + 361 + /** 362 + * The check below is to see whether any other PCI fn 363 + * has reinitialized the ASIC (reset sync_ackd bits) 364 + * and failed again while this IOC was waiting for hw 365 + * semaphore (in bfa_iocpf_sm_semwait()). 
366 + */ 367 + tmp_ackd = sync_ackd; 368 + if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) && 369 + !(sync_ackd & bfa_ioc_ct_sync_pos(ioc))) 370 + sync_ackd |= bfa_ioc_ct_sync_pos(ioc); 371 + 372 + if (sync_reqd == sync_ackd) { 373 + writel(bfa_ioc_ct_clear_sync_ackd(r32), 374 + ioc->ioc_regs.ioc_fail_sync); 375 + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 376 + writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate); 377 + return true; 378 + } 379 + 380 + /** 381 + * If another PCI fn reinitialized and failed again while 382 + * this IOC was waiting for hw sem, the sync_ackd bit for 383 + * this IOC need to be set again to allow reinitialization. 384 + */ 385 + if (tmp_ackd != sync_ackd) 386 + writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync); 387 + 388 + return false; 343 389 } 344 390 345 391 static enum bfa_status
+25 -16
drivers/net/bna/bfi_ctreg.h
··· 535 535 #define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG 536 536 #define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG 537 537 #define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG 538 + #define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG 538 539 539 540 #define CPE_DEPTH_Q(__n) \ 540 541 (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0)) ··· 553 552 (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0)) 554 553 #define RME_CI_PTR_Q(__n) \ 555 554 (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0)) 556 - #define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \ 557 - * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0)) 558 - #define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \ 559 - * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0)) 560 - #define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \ 561 - * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0)) 562 - #define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \ 563 - * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0)) 564 - #define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \ 565 - * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1)) 566 - #define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \ 567 - * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1)) 568 - #define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \ 569 - * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1)) 570 - #define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \ 571 - * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1)) 555 + #define HQM_QSET_RXQ_DRBL_P0(__n) \ 556 + (HQM_QSET0_RXQ_DRBL_P0 + (__n) * \ 557 + (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0)) 558 + #define HQM_QSET_TXQ_DRBL_P0(__n) \ 559 + (HQM_QSET0_TXQ_DRBL_P0 + (__n) * \ 560 + (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0)) 561 + #define HQM_QSET_IB_DRBL_1_P0(__n) \ 562 + (HQM_QSET0_IB_DRBL_1_P0 + (__n) * \ 563 + (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0)) 564 + #define 
HQM_QSET_IB_DRBL_2_P0(__n) \ 565 + (HQM_QSET0_IB_DRBL_2_P0 + (__n) * \ 566 + (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0)) 567 + #define HQM_QSET_RXQ_DRBL_P1(__n) \ 568 + (HQM_QSET0_RXQ_DRBL_P1 + (__n) * \ 569 + (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1)) 570 + #define HQM_QSET_TXQ_DRBL_P1(__n) \ 571 + (HQM_QSET0_TXQ_DRBL_P1 + (__n) * \ 572 + (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1)) 573 + #define HQM_QSET_IB_DRBL_1_P1(__n) \ 574 + (HQM_QSET0_IB_DRBL_1_P1 + (__n) * \ 575 + (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1)) 576 + #define HQM_QSET_IB_DRBL_2_P1(__n) \ 577 + (HQM_QSET0_IB_DRBL_2_P1 + (__n) * \ 578 + (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1)) 572 579 573 580 #define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) 574 581 #define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
-2
drivers/net/bna/bna.h
··· 32 32 /* Log string size */ 33 33 #define BNA_MESSAGE_SIZE 256 34 34 35 - #define bna_device_timer(_dev) bfa_timer_beat(&((_dev)->timer_mod)) 36 - 37 35 /* MBOX API for PORT, TX, RX */ 38 36 #define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \ 39 37 do { \
+17 -4
drivers/net/bna/bnad.c
··· 1425 1425 } 1426 1426 1427 1427 static void 1428 - bnad_ioc_sem_timeout(unsigned long data) 1428 + bnad_iocpf_timeout(unsigned long data) 1429 1429 { 1430 1430 struct bnad *bnad = (struct bnad *)data; 1431 1431 unsigned long flags; 1432 1432 1433 1433 spin_lock_irqsave(&bnad->bna_lock, flags); 1434 - bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc); 1434 + bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc); 1435 + spin_unlock_irqrestore(&bnad->bna_lock, flags); 1436 + } 1437 + 1438 + static void 1439 + bnad_iocpf_sem_timeout(unsigned long data) 1440 + { 1441 + struct bnad *bnad = (struct bnad *)data; 1442 + unsigned long flags; 1443 + 1444 + spin_lock_irqsave(&bnad->bna_lock, flags); 1445 + bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc); 1435 1446 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1436 1447 } 1437 1448 ··· 3143 3132 ((unsigned long)bnad)); 3144 3133 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check, 3145 3134 ((unsigned long)bnad)); 3146 - setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout, 3135 + setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout, 3136 + ((unsigned long)bnad)); 3137 + setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout, 3147 3138 ((unsigned long)bnad)); 3148 3139 3149 3140 /* Now start the timer before calling IOC */ 3150 - mod_timer(&bnad->bna.device.ioc.ioc_timer, 3141 + mod_timer(&bnad->bna.device.ioc.iocpf_timer, 3151 3142 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); 3152 3143 3153 3144 /*