Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (70 commits)
[SCSI] pmcraid: add support for set timestamp command and other fixes
[SCSI] pmcraid: remove duplicate struct member
[SCSI] qla4xxx: Fix cmd check in qla4xxx_cmd_wait
[SCSI] megaraid_sas: Version and documentation update
[SCSI] megaraid_sas: Add three times Online controller reset
[SCSI] megaraid_sas: Add input parameter for max_sectors
[SCSI] megaraid_sas: support devices update flag
[SCSI] libosd: write/read_sg_kern API
[SCSI] libosd: Support for scatter gather write/read commands
[SCSI] libosd: Free resources in reverse order of allocation
[SCSI] libosd: Fix bug in attr_page handling
[SCSI] lpfc 8.3.18: Update lpfc driver version to 8.3.18
[SCSI] lpfc 8.3.18: Add new WQE support
[SCSI] lpfc 8.3.18: Fix critical errors
[SCSI] lpfc 8.3.18: Adapter Shutdown and Unregistration cleanup
[SCSI] lpfc 8.3.18: Add logic to detect last devloss timeout
[SCSI] lpfc 8.3.18: Add support of received ELS commands
[SCSI] lpfc 8.3.18: FC/FCoE Discovery fixes
[SCSI] ipr: add definitions for a new adapter
[SCSI] bfa: fix comments for c files
...

+4165 -3011
+47
Documentation/scsi/ChangeLog.megaraid_sas
+ 1 Release Date : Thur. May 03, 2010 09:12:45 PST 2009 -
+	(emaild-id:megaraidlinux@lsi.com)
+	Bo Yang
+
+ 2 Current Version : 00.00.04.31-rc1
+ 3 Older Version   : 00.00.04.17.1-rc1
+
+ 1. Add Online Controller Reset (OCR) support to the driver.
+    OCR is a new megaraid_sas feature that allows the FW to reset
+    the chip without affecting OS behavior.
+
+    To support OCR, the driver needs to:
+    a) Reset the controller chips -- Xscale and Gen2 -- which changes
+       the function calls and adds the reset functions for these two
+       chips.
+    b) During the reset, store the pending cmds that the FW has not
+       returned on the driver's pending queue, and re-issue them to
+       the FW after the OCR finishes.
+    c) In the driver's timeout routine, report a reset to the OS; the
+       driver's queue routine also blocks cmds until the OCR finishes.
+    d) In the driver's ISR routine, if the FW reports a state change,
+       is in the failure state, and supports OCR, start the controller
+       reset.
+    e) In the driver's IOCTL routine, application cmds wait for the
+       OCR to finish before being issued to the FW.
+    f) Before killing the adapter, attempt one last OCR to see if the
+       FW can be brought back.
+
+ 2. Add a support-update flag to tell the LSI megaraid_sas
+    application that the driver supports device update, so the
+    application no longer needs to rescan after devices are
+    added/deleted from the system.
+ 3. In the driver's timeout routine, reset the FW up to three times
+    if it is in the failed state; kill the adapter if the FW cannot
+    be brought back after these three resets.
+ 4. Add the max_sectors input parameter to enable 1MB I/O support on
+    Gen2 controllers; customers can set max_sectors to enable 1MB
+    transfers.
+
  1 Release Date : Thur. Oct 29, 2009 09:12:45 PST 2009 -
	(emaild-id:megaraidlinux@lsi.com)
	Bo Yang
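Item 4 above refers to a module parameter. A hedged sketch of how such a parameter is typically declared follows; the variable, permission bits, and description text are assumptions for illustration, not the actual megaraid_sas code:

    #include <linux/module.h>

    static int max_sectors;	/* 0 = keep the driver default */
    module_param(max_sectors, int, S_IRUGO);
    MODULE_PARM_DESC(max_sectors,
	    "Maximum transfer size in 512-byte sectors (e.g. 2048 for 1MB I/O on Gen2)");

It would then be set at load time, e.g. modprobe megaraid_sas max_sectors=2048.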
+1 -1
drivers/s390/scsi/zfcp_fc.h
@@ -270,7 +270,7 @@
 	if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
 		sense = (char *) &fcp_rsp[1];
 		if (rsp_flags & FCP_RSP_LEN_VAL)
-			sense += fcp_rsp->ext.fr_sns_len;
+			sense += fcp_rsp->ext.fr_rsp_len;
 		sense_len = min(fcp_rsp->ext.fr_sns_len,
 				(u32) SCSI_SENSE_BUFFERSIZE);
 		memcpy(scsi->sense_buffer, sense, sense_len);
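This one-liner fixes a real parsing bug: in an FCP response, an optional response-info block precedes the sense data, so the sense pointer must be advanced by the response length (fr_rsp_len), not the sense length. A sketch of the layout logic, with simplified types that are not the exact zfcp definitions:

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct fcp_resp_ext_demo {
	    u32 fr_rsp_len;		/* length of response-info block */
	    u32 fr_sns_len;		/* length of sense data */
	    /* in the frame: response info (if any), then sense data */
    };

    static void copy_sense_demo(const struct fcp_resp_ext_demo *ext,
				const char *payload, bool rsp_len_valid,
				char *buf, u32 bufsz)
    {
	    const char *sense = payload;
	    u32 len = min(ext->fr_sns_len, bufsz);

	    if (rsp_len_valid)
		    sense += ext->fr_rsp_len; /* skip rsp info, not sns len */
	    memcpy(buf, sense, len);
    }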
-3
drivers/s390/scsi/zfcp_fsf.c
@@ -532,9 +532,6 @@
 		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
 		adapter->hydra_version = 0;
 
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
-				&adapter->status);
-
 		zfcp_fsf_link_down_info_eval(req,
 			&qtcb->header.fsf_status_qual.link_down_info);
 		break;
+2 -2
drivers/s390/scsi/zfcp_unit.c
@@ -142,6 +142,8 @@
 		return -ENOMEM;
 	}
 
+	get_device(&port->dev);
+
 	if (device_register(&unit->dev)) {
 		put_device(&unit->dev);
 		return -ENOMEM;
@@ -153,8 +151,6 @@
 		device_unregister(&unit->dev);
 		return -EINVAL;
 	}
-
-	get_device(&port->dev);
 
 	write_lock_irq(&port->unit_list_lock);
 	list_add_tail(&unit->list, &port->unit_list);
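The reordering here is a reference-counting rule: the reference on the parent port must be taken before device_register(), because on the error path put_device() can run the unit's release callback, which is assumed in this pattern to drop the port reference -- a reference that must therefore already exist. A generic sketch with illustrative names:

    #include <linux/device.h>

    /*
     * Pin the parent before registering the child. If device_register()
     * fails, the final put_device(child) invokes the child's release(),
     * which (in this pattern) does the matching put_device(parent).
     */
    static int child_add_demo(struct device *parent, struct device *child)
    {
	    get_device(parent);		/* take parent ref first */

	    if (device_register(child)) {
		    put_device(child);	/* release() drops the parent ref */
		    return -ENOMEM;
	    }
	    return 0;
    }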
+24 -24
drivers/scsi/bfa/bfa.h
··· 29 29 typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m); 30 30 typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete); 31 31 32 - /** 32 + /* 33 33 * Interrupt message handlers 34 34 */ 35 35 void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); 36 36 void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); 37 37 38 - /** 38 + /* 39 39 * Request and response queue related defines 40 40 */ 41 41 #define BFA_REQQ_NELEMS_MIN (4) ··· 58 58 #define bfa_reqq_produce(__bfa, __reqq) do { \ 59 59 (__bfa)->iocfc.req_cq_pi[__reqq]++; \ 60 60 (__bfa)->iocfc.req_cq_pi[__reqq] &= \ 61 - ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \ 62 - bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \ 63 - (__bfa)->iocfc.req_cq_pi[__reqq]); \ 61 + ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \ 62 + writel((__bfa)->iocfc.req_cq_pi[__reqq], \ 63 + (__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]); \ 64 64 mmiowb(); \ 65 65 } while (0) 66 66 ··· 76 76 (__index) &= ((__size) - 1); \ 77 77 } while (0) 78 78 79 - /** 79 + /* 80 80 * Queue element to wait for room in request queue. FIFO order is 81 81 * maintained when fullfilling requests. 82 82 */ ··· 86 86 void *cbarg; 87 87 }; 88 88 89 - /** 89 + /* 90 90 * Circular queue usage assignments 91 91 */ 92 92 enum { ··· 113 113 114 114 #define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq]) 115 115 116 - /** 116 + /* 117 117 * static inline void 118 118 * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe) 119 119 */ ··· 130 130 #define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe) 131 131 132 132 133 - /** 133 + /* 134 134 * Generic BFA callback element. 135 135 */ 136 136 struct bfa_cb_qe_s { ··· 163 163 } while (0) 164 164 165 165 166 - /** 166 + /* 167 167 * PCI devices supported by the current BFA 168 168 */ 169 169 struct bfa_pciid_s { ··· 173 173 174 174 extern char bfa_version[]; 175 175 176 - /** 176 + /* 177 177 * BFA memory resources 178 178 */ 179 179 enum bfa_mem_type { ··· 202 202 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp) 203 203 204 204 struct bfa_iocfc_regs_s { 205 - bfa_os_addr_t intr_status; 206 - bfa_os_addr_t intr_mask; 207 - bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS]; 208 - bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS]; 209 - bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS]; 210 - bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS]; 211 - bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS]; 212 - bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS]; 213 - bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS]; 214 - bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS]; 205 + void __iomem *intr_status; 206 + void __iomem *intr_mask; 207 + void __iomem *cpe_q_pi[BFI_IOC_MAX_CQS]; 208 + void __iomem *cpe_q_ci[BFI_IOC_MAX_CQS]; 209 + void __iomem *cpe_q_depth[BFI_IOC_MAX_CQS]; 210 + void __iomem *cpe_q_ctrl[BFI_IOC_MAX_CQS]; 211 + void __iomem *rme_q_ci[BFI_IOC_MAX_CQS]; 212 + void __iomem *rme_q_pi[BFI_IOC_MAX_CQS]; 213 + void __iomem *rme_q_depth[BFI_IOC_MAX_CQS]; 214 + void __iomem *rme_q_ctrl[BFI_IOC_MAX_CQS]; 215 215 }; 216 216 217 - /** 217 + /* 218 218 * MSIX vector handlers 219 219 */ 220 220 #define BFA_MSIX_MAX_VECTORS 22 ··· 224 224 bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS]; 225 225 }; 226 226 227 - /** 227 + /* 228 228 * Chip specific interfaces 229 229 */ 230 230 struct bfa_hwif_s { ··· 343 343 struct bfi_pbc_vport_s *pbc_vport); 344 344 345 345 346 - /** 346 + /* 347 347 *---------------------------------------------------------------------- 348 348 * BFA public interfaces 349 349 
*----------------------------------------------------------------------
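The comment churn above is mechanical but deliberate: in kernel style the double-star opener is reserved for kernel-doc, and these bfa headers are plain comments that scripts/kernel-doc would otherwise try to parse and flag as malformed. A minimal illustration (the function name is made up for the sketch):

    /**
     * bfa_demo_doc() - a real kernel-doc comment keeps the double star
     * @bfa: instance to operate on
     *
     * scripts/kernel-doc parses anything opened with a double star,
     * which is why the plain section headers in this series drop one.
     */
    static inline void bfa_demo_doc(void *bfa) { (void)bfa; }

    /* Plain block comment: not parsed as kernel-doc. */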
+11 -11
drivers/scsi/bfa/bfa_cb_ioim.h
··· 37 37 } lun; 38 38 39 39 lun.bfa_lun = 0; 40 - lun.scsi_lun[0] = bfa_os_htons(luno); 40 + lun.scsi_lun[0] = cpu_to_be16(luno); 41 41 42 42 return lun.bfa_lun; 43 43 } 44 44 45 - /** 45 + /* 46 46 * Get LUN for the I/O request 47 47 */ 48 48 #define bfa_cb_ioim_get_lun(__dio) \ 49 49 bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun) 50 50 51 - /** 51 + /* 52 52 * Get CDB for the I/O request 53 53 */ 54 54 static inline u8 * ··· 59 59 return (u8 *) cmnd->cmnd; 60 60 } 61 61 62 - /** 62 + /* 63 63 * Get I/O direction (read/write) for the I/O request 64 64 */ 65 65 static inline enum fcp_iodir ··· 77 77 return FCP_IODIR_NONE; 78 78 } 79 79 80 - /** 80 + /* 81 81 * Get IO size in bytes for the I/O request 82 82 */ 83 83 static inline u32 ··· 88 88 return scsi_bufflen(cmnd); 89 89 } 90 90 91 - /** 91 + /* 92 92 * Get timeout for the I/O request 93 93 */ 94 94 static inline u8 ··· 104 104 return 0; 105 105 } 106 106 107 - /** 107 + /* 108 108 * Get Command Reference Number for the I/O request. 0 if none. 109 109 */ 110 110 static inline u8 ··· 113 113 return 0; 114 114 } 115 115 116 - /** 116 + /* 117 117 * Get SAM-3 priority for the I/O request. 0 is default. 118 118 */ 119 119 static inline u8 ··· 122 122 return 0; 123 123 } 124 124 125 - /** 125 + /* 126 126 * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0). 127 127 */ 128 128 static inline u8 ··· 148 148 return task_attr; 149 149 } 150 150 151 - /** 151 + /* 152 152 * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16). 153 153 */ 154 154 static inline u8 ··· 159 159 return cmnd->cmd_len; 160 160 } 161 161 162 - /** 162 + /* 163 163 * Assign queue to be used for the I/O request. This value depends on whether 164 164 * the driver wants to use the queues via any specific algorithm. Currently, 165 165 * this is not supported.
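The bfa_os_htons()/bfa_os_ntohs() wrappers give way to the kernel's typed byte-order helpers, which pair with sparse's __be16 annotations so mixed-endian assignments can be caught at build time. A small sketch of the convention, with illustrative function names:

    #include <asm/byteorder.h>
    #include <linux/types.h>

    /* CPU order -> big-endian wire order; __be16 lets sparse check it */
    static __be16 lun_to_wire_demo(u16 luno)
    {
	    return cpu_to_be16(luno);	/* was bfa_os_htons(luno) */
    }

    static u16 lun_from_wire_demo(__be16 wire)
    {
	    return be16_to_cpu(wire);	/* was bfa_os_ntohs(wire) */
    }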
+89 -89
drivers/scsi/bfa/bfa_core.c
··· 21 21 22 22 BFA_TRC_FILE(HAL, CORE); 23 23 24 - /** 24 + /* 25 25 * BFA IOC FC related definitions 26 26 */ 27 27 28 - /** 28 + /* 29 29 * IOC local definitions 30 30 */ 31 31 #define BFA_IOCFC_TOV 5000 /* msecs */ ··· 54 54 #define DEF_CFG_NUM_SBOOT_TGTS 16 55 55 #define DEF_CFG_NUM_SBOOT_LUNS 16 56 56 57 - /** 57 + /* 58 58 * forward declaration for IOC FC functions 59 59 */ 60 60 static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); ··· 63 63 static void bfa_iocfc_reset_cbfn(void *bfa_arg); 64 64 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; 65 65 66 - /** 66 + /* 67 67 * BFA Interrupt handling functions 68 68 */ 69 69 static void ··· 86 86 87 87 waitq = bfa_reqq(bfa, qid); 88 88 list_for_each_safe(qe, qen, waitq) { 89 - /** 89 + /* 90 90 * Callback only as long as there is room in request queue 91 91 */ 92 92 if (bfa_reqq_full(bfa, qid)) ··· 104 104 bfa_intx(bfa); 105 105 } 106 106 107 - /** 107 + /* 108 108 * hal_intr_api 109 109 */ 110 110 bfa_boolean_t ··· 113 113 u32 intr, qintr; 114 114 int queue; 115 115 116 - intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); 116 + intr = readl(bfa->iocfc.bfa_regs.intr_status); 117 117 if (!intr) 118 118 return BFA_FALSE; 119 119 120 - /** 120 + /* 121 121 * RME completion queue interrupt 122 122 */ 123 123 qintr = intr & __HFN_INT_RME_MASK; 124 - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); 124 + writel(qintr, bfa->iocfc.bfa_regs.intr_status); 125 125 126 126 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { 127 127 if (intr & (__HFN_INT_RME_Q0 << queue)) ··· 131 131 if (!intr) 132 132 return BFA_TRUE; 133 133 134 - /** 134 + /* 135 135 * CPE completion queue interrupt 136 136 */ 137 137 qintr = intr & __HFN_INT_CPE_MASK; 138 - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); 138 + writel(qintr, bfa->iocfc.bfa_regs.intr_status); 139 139 140 140 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { 141 141 if (intr & (__HFN_INT_CPE_Q0 << queue)) ··· 153 153 void 154 154 bfa_intx_enable(struct bfa_s *bfa) 155 155 { 156 - bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask); 156 + writel(bfa->iocfc.intr_mask, bfa->iocfc.bfa_regs.intr_mask); 157 157 } 158 158 159 159 void 160 160 bfa_intx_disable(struct bfa_s *bfa) 161 161 { 162 - bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L); 162 + writel(-1L, bfa->iocfc.bfa_regs.intr_mask); 163 163 } 164 164 165 165 void ··· 188 188 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | 189 189 __HFN_INT_MBOX_LPU1); 190 190 191 - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask); 192 - bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask); 191 + writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status); 192 + writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask); 193 193 bfa->iocfc.intr_mask = ~intr_unmask; 194 194 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); 195 195 } ··· 198 198 bfa_isr_disable(struct bfa_s *bfa) 199 199 { 200 200 bfa_isr_mode_set(bfa, BFA_FALSE); 201 - bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L); 201 + writel(-1L, bfa->iocfc.bfa_regs.intr_mask); 202 202 bfa_msix_uninstall(bfa); 203 203 } 204 204 ··· 211 211 212 212 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid); 213 213 214 - /** 214 + /* 215 215 * Resume any pending requests in the corresponding reqq. 
216 216 */ 217 217 waitq = bfa_reqq(bfa, qid); ··· 259 259 } 260 260 } 261 261 262 - /** 262 + /* 263 263 * update CI 264 264 */ 265 265 bfa_rspq_ci(bfa, qid) = pi; 266 - bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi); 266 + writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]); 267 267 mmiowb(); 268 268 269 - /** 269 + /* 270 270 * Resume any pending requests in the corresponding reqq. 271 271 */ 272 272 waitq = bfa_reqq(bfa, qid); ··· 279 279 { 280 280 u32 intr, curr_value; 281 281 282 - intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); 282 + intr = readl(bfa->iocfc.bfa_regs.intr_status); 283 283 284 284 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) 285 285 bfa_msix_lpu(bfa); ··· 289 289 290 290 if (intr) { 291 291 if (intr & __HFN_INT_LL_HALT) { 292 - /** 292 + /* 293 293 * If LL_HALT bit is set then FW Init Halt LL Port 294 294 * Register needs to be cleared as well so Interrupt 295 295 * Status Register will be cleared. 296 296 */ 297 - curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt); 297 + curr_value = readl(bfa->ioc.ioc_regs.ll_halt); 298 298 curr_value &= ~__FW_INIT_HALT_P; 299 - bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value); 299 + writel(curr_value, bfa->ioc.ioc_regs.ll_halt); 300 300 } 301 301 302 302 if (intr & __HFN_INT_ERR_PSS) { 303 - /** 303 + /* 304 304 * ERR_PSS bit needs to be cleared as well in case 305 305 * interrups are shared so driver's interrupt handler is 306 306 * still called eventhough it is already masked out. 307 307 */ 308 - curr_value = bfa_reg_read( 308 + curr_value = readl( 309 309 bfa->ioc.ioc_regs.pss_err_status_reg); 310 310 curr_value &= __PSS_ERR_STATUS_SET; 311 - bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg, 312 - curr_value); 311 + writel(curr_value, 312 + bfa->ioc.ioc_regs.pss_err_status_reg); 313 313 } 314 314 315 - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr); 315 + writel(intr, bfa->iocfc.bfa_regs.intr_status); 316 316 bfa_msix_errint(bfa, intr); 317 317 } 318 318 } ··· 323 323 bfa_isrs[mc] = isr_func; 324 324 } 325 325 326 - /** 326 + /* 327 327 * BFA IOC FC related functions 328 328 */ 329 329 330 - /** 330 + /* 331 331 * hal_ioc_pvt BFA IOC private functions 332 332 */ 333 333 ··· 366 366 BFA_CACHELINE_SZ); 367 367 } 368 368 369 - /** 369 + /* 370 370 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ 371 371 */ 372 372 static void ··· 384 384 385 385 bfa_iocfc_reset_queues(bfa); 386 386 387 - /** 387 + /* 388 388 * initialize IOC configuration info 389 389 */ 390 390 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; 391 391 cfg_info->num_cqs = cfg->fwcfg.num_cqs; 392 392 393 393 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); 394 - /** 394 + /* 395 395 * dma map REQ and RSP circular queues and shadow pointers 396 396 */ 397 397 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { ··· 400 400 bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i], 401 401 iocfc->req_cq_shadow_ci[i].pa); 402 402 cfg_info->req_cq_elems[i] = 403 - bfa_os_htons(cfg->drvcfg.num_reqq_elems); 403 + cpu_to_be16(cfg->drvcfg.num_reqq_elems); 404 404 405 405 bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i], 406 406 iocfc->rsp_cq_ba[i].pa); 407 407 bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i], 408 408 iocfc->rsp_cq_shadow_pi[i].pa); 409 409 cfg_info->rsp_cq_elems[i] = 410 - bfa_os_htons(cfg->drvcfg.num_rspq_elems); 410 + cpu_to_be16(cfg->drvcfg.num_rspq_elems); 411 411 } 412 412 413 - /** 413 + /* 414 414 * Enable interrupt coalescing if it is driver init path 415 415 * and not ioc disable/enable path. 
416 416 */ ··· 419 419 420 420 iocfc->cfgdone = BFA_FALSE; 421 421 422 - /** 422 + /* 423 423 * dma map IOC configuration itself 424 424 */ 425 425 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, ··· 440 440 iocfc->bfa = bfa; 441 441 iocfc->action = BFA_IOCFC_ACT_NONE; 442 442 443 - bfa_os_assign(iocfc->cfg, *cfg); 443 + iocfc->cfg = *cfg; 444 444 445 - /** 445 + /* 446 446 * Initialize chip specific handlers. 447 447 */ 448 448 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) { ··· 503 503 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { 504 504 iocfc->req_cq_ba[i].kva = dm_kva; 505 505 iocfc->req_cq_ba[i].pa = dm_pa; 506 - bfa_os_memset(dm_kva, 0, per_reqq_sz); 506 + memset(dm_kva, 0, per_reqq_sz); 507 507 dm_kva += per_reqq_sz; 508 508 dm_pa += per_reqq_sz; 509 509 510 510 iocfc->rsp_cq_ba[i].kva = dm_kva; 511 511 iocfc->rsp_cq_ba[i].pa = dm_pa; 512 - bfa_os_memset(dm_kva, 0, per_rspq_sz); 512 + memset(dm_kva, 0, per_rspq_sz); 513 513 dm_kva += per_rspq_sz; 514 514 dm_pa += per_rspq_sz; 515 515 } ··· 559 559 } 560 560 } 561 561 562 - /** 562 + /* 563 563 * Start BFA submodules. 564 564 */ 565 565 static void ··· 573 573 hal_mods[i]->start(bfa); 574 574 } 575 575 576 - /** 576 + /* 577 577 * Disable BFA submodules. 578 578 */ 579 579 static void ··· 623 623 complete(&bfad->disable_comp); 624 624 } 625 625 626 - /** 626 + /* 627 627 * Update BFA configuration from firmware configuration. 628 628 */ 629 629 static void ··· 634 634 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg; 635 635 636 636 fwcfg->num_cqs = fwcfg->num_cqs; 637 - fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs); 638 - fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs); 639 - fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs); 640 - fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs); 641 - fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports); 637 + fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs); 638 + fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs); 639 + fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs); 640 + fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs); 641 + fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports); 642 642 643 643 iocfc->cfgdone = BFA_TRUE; 644 644 645 - /** 645 + /* 646 646 * Configuration is complete - initialize/start submodules 647 647 */ 648 648 bfa_fcport_init(bfa); ··· 665 665 } 666 666 } 667 667 668 - /** 668 + /* 669 669 * IOC enable request is complete 670 670 */ 671 671 static void ··· 684 684 bfa_iocfc_send_cfg(bfa); 685 685 } 686 686 687 - /** 687 + /* 688 688 * IOC disable request is complete 689 689 */ 690 690 static void ··· 705 705 } 706 706 } 707 707 708 - /** 708 + /* 709 709 * Notify sub-modules of hardware failure. 710 710 */ 711 711 static void ··· 723 723 bfa); 724 724 } 725 725 726 - /** 726 + /* 727 727 * Actions on chip-reset completion. 728 728 */ 729 729 static void ··· 735 735 bfa_isr_enable(bfa); 736 736 } 737 737 738 - /** 738 + /* 739 739 * hal_ioc_public 740 740 */ 741 741 742 - /** 742 + /* 743 743 * Query IOC memory requirement information. 744 744 */ 745 745 void ··· 754 754 *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover); 755 755 } 756 756 757 - /** 757 + /* 758 758 * Query IOC memory requirement information. 759 759 */ 760 760 void ··· 772 772 ioc->trcmod = bfa->trcmod; 773 773 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); 774 774 775 - /** 775 + /* 776 776 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC. 
777 777 */ 778 778 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC) ··· 790 790 INIT_LIST_HEAD(&bfa->reqq_waitq[i]); 791 791 } 792 792 793 - /** 793 + /* 794 794 * Query IOC memory requirement information. 795 795 */ 796 796 void ··· 799 799 bfa_ioc_detach(&bfa->ioc); 800 800 } 801 801 802 - /** 802 + /* 803 803 * Query IOC memory requirement information. 804 804 */ 805 805 void ··· 809 809 bfa_ioc_enable(&bfa->ioc); 810 810 } 811 811 812 - /** 812 + /* 813 813 * IOC start called from bfa_start(). Called to start IOC operations 814 814 * at driver instantiation for this instance. 815 815 */ ··· 820 820 bfa_iocfc_start_submod(bfa); 821 821 } 822 822 823 - /** 823 + /* 824 824 * IOC stop called from bfa_stop(). Called only when driver is unloaded 825 825 * for this instance. 826 826 */ ··· 876 876 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce; 877 877 878 878 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ? 879 - bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) : 880 - bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay); 879 + be16_to_cpu(iocfc->cfginfo->intr_attr.delay) : 880 + be16_to_cpu(iocfc->cfgrsp->intr_attr.delay); 881 881 882 882 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ? 883 - bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) : 884 - bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency); 883 + be16_to_cpu(iocfc->cfginfo->intr_attr.latency) : 884 + be16_to_cpu(iocfc->cfgrsp->intr_attr.latency); 885 885 886 886 attr->config = iocfc->cfg; 887 887 } ··· 893 893 struct bfi_iocfc_set_intr_req_s *m; 894 894 895 895 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce; 896 - iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay); 897 - iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency); 896 + iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay); 897 + iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency); 898 898 899 899 if (!bfa_iocfc_is_operational(bfa)) 900 900 return BFA_STATUS_OK; ··· 924 924 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); 925 925 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); 926 926 } 927 - /** 927 + /* 928 928 * Enable IOC after it is disabled. 929 929 */ 930 930 void ··· 953 953 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; 954 954 } 955 955 956 - /** 956 + /* 957 957 * Return boot target port wwns -- read from boot information in flash. 958 958 */ 959 959 void ··· 998 998 return cfgrsp->pbc_cfg.nvports; 999 999 } 1000 1000 1001 - /** 1001 + /* 1002 1002 * hal_api 1003 1003 */ 1004 1004 1005 - /** 1005 + /* 1006 1006 * Use this function query the memory requirement of the BFA library. 1007 1007 * This function needs to be called before bfa_attach() to get the 1008 1008 * memory required of the BFA layer for a given driver configuration. ··· 1038 1038 1039 1039 bfa_assert((cfg != NULL) && (meminfo != NULL)); 1040 1040 1041 - bfa_os_memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); 1041 + memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); 1042 1042 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type = 1043 1043 BFA_MEM_TYPE_KVA; 1044 1044 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type = ··· 1055 1055 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; 1056 1056 } 1057 1057 1058 - /** 1058 + /* 1059 1059 * Use this function to do attach the driver instance with the BFA 1060 1060 * library. 
This function will not trigger any HW initialization 1061 1061 * process (which will be done in bfa_init() call) ··· 1092 1092 1093 1093 bfa_assert((cfg != NULL) && (meminfo != NULL)); 1094 1094 1095 - /** 1095 + /* 1096 1096 * initialize all memory pointers for iterative allocation 1097 1097 */ 1098 1098 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { ··· 1109 1109 bfa_com_port_attach(bfa, meminfo); 1110 1110 } 1111 1111 1112 - /** 1112 + /* 1113 1113 * Use this function to delete a BFA IOC. IOC should be stopped (by 1114 1114 * calling bfa_stop()) before this function call. 1115 1115 * ··· 1146 1146 bfa->plog = plog; 1147 1147 } 1148 1148 1149 - /** 1149 + /* 1150 1150 * Initialize IOC. 1151 1151 * 1152 1152 * This function will return immediately, when the IOC initialization is ··· 1169 1169 bfa_iocfc_init(bfa); 1170 1170 } 1171 1171 1172 - /** 1172 + /* 1173 1173 * Use this function initiate the IOC configuration setup. This function 1174 1174 * will return immediately. 1175 1175 * ··· 1183 1183 bfa_iocfc_start(bfa); 1184 1184 } 1185 1185 1186 - /** 1186 + /* 1187 1187 * Use this function quiese the IOC. This function will return immediately, 1188 1188 * when the IOC is actually stopped, the bfad->comp will be set. 1189 1189 * ··· 1243 1243 bfa->fcs = BFA_TRUE; 1244 1244 } 1245 1245 1246 - /** 1246 + /* 1247 1247 * Periodic timer heart beat from driver 1248 1248 */ 1249 1249 void ··· 1252 1252 bfa_timer_beat(&bfa->timer_mod); 1253 1253 } 1254 1254 1255 - /** 1255 + /* 1256 1256 * Return the list of PCI vendor/device id lists supported by this 1257 1257 * BFA instance. 1258 1258 */ ··· 1270 1270 *pciids = __pciids; 1271 1271 } 1272 1272 1273 - /** 1273 + /* 1274 1274 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled 1275 1275 * into BFA layer). The OS driver can then turn back and overwrite entries that 1276 1276 * have been configured by the user. ··· 1328 1328 bfa_ioc_get_attr(&bfa->ioc, ioc_attr); 1329 1329 } 1330 1330 1331 - /** 1331 + /* 1332 1332 * Retrieve firmware trace information on IOC failure. 1333 1333 */ 1334 1334 bfa_status_t ··· 1337 1337 return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen); 1338 1338 } 1339 1339 1340 - /** 1340 + /* 1341 1341 * Clear the saved firmware trace information of an IOC. 1342 1342 */ 1343 1343 void ··· 1346 1346 bfa_ioc_debug_fwsave_clear(&bfa->ioc); 1347 1347 } 1348 1348 1349 - /** 1349 + /* 1350 1350 * Fetch firmware trace data. 1351 1351 * 1352 1352 * @param[in] bfa BFA instance ··· 1362 1362 return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen); 1363 1363 } 1364 1364 1365 - /** 1365 + /* 1366 1366 * Dump firmware memory. 1367 1367 * 1368 1368 * @param[in] bfa BFA instance ··· 1378 1378 { 1379 1379 return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen); 1380 1380 } 1381 - /** 1381 + /* 1382 1382 * Reset hw semaphore & usage cnt regs and initialize. 1383 1383 */ 1384 1384 void ··· 1388 1388 bfa_ioc_pll_init(&bfa->ioc); 1389 1389 } 1390 1390 1391 - /** 1391 + /* 1392 1392 * Fetch firmware statistics data. 1393 1393 * 1394 1394 * @param[in] bfa BFA instance
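Most of the bfa_core.c churn is the bfa_reg_read()/bfa_reg_write() to readl()/writel() conversion. Note that the kernel helpers take the value first and the __iomem address second, the reverse of the old wrapper's argument order. A hedged sketch, assuming bfa_reg_write() was a thin MMIO wrapper and using made-up names:

    #include <linux/io.h>
    #include <linux/types.h>

    static u32 ack_and_ring_demo(void __iomem *intr_status,
				 void __iomem *q_pi, u32 pi)
    {
	    u32 intr = readl(intr_status); /* was bfa_reg_read(intr_status) */

	    /* was bfa_reg_write(q_pi, pi): value and address swap places */
	    writel(pi, q_pi);
	    mmiowb();	/* MMIO ordering barrier used by this era's code */
	    return intr;
    }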
+12 -12
drivers/scsi/bfa/bfa_cs.h
··· 15 15 * General Public License for more details. 16 16 */ 17 17 18 - /** 18 + /* 19 19 * bfa_cs.h BFA common services 20 20 */ 21 21 ··· 24 24 25 25 #include "bfa_os_inc.h" 26 26 27 - /** 27 + /* 28 28 * BFA TRC 29 29 */ 30 30 ··· 73 73 #define BFA_TRC_MOD_SH 10 74 74 #define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH) 75 75 76 - /** 76 + /* 77 77 * Define a new tracing file (module). Module should match one defined above. 78 78 */ 79 79 #define BFA_TRC_FILE(__mod, __submod) \ ··· 155 155 #define bfa_trc_fp(_trcp, _data) 156 156 #endif 157 157 158 - /** 158 + /* 159 159 * @ BFA LOG interfaces 160 160 */ 161 161 #define bfa_assert(__cond) do { \ ··· 249 249 #define bfa_q_is_on_q(_q, _qe) \ 250 250 bfa_q_is_on_q_func(_q, (struct list_head *)(_qe)) 251 251 252 - /** 252 + /* 253 253 * @ BFA state machine interfaces 254 254 */ 255 255 256 256 typedef void (*bfa_sm_t)(void *sm, int event); 257 257 258 - /** 258 + /* 259 259 * oc - object class eg. bfa_ioc 260 260 * st - state, eg. reset 261 261 * otype - object type, eg. struct bfa_ioc_s ··· 269 269 #define bfa_sm_get_state(_sm) ((_sm)->sm) 270 270 #define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) 271 271 272 - /** 272 + /* 273 273 * For converting from state machine function to state encoding. 274 274 */ 275 275 struct bfa_sm_table_s { ··· 279 279 }; 280 280 #define BFA_SM(_sm) ((bfa_sm_t)(_sm)) 281 281 282 - /** 282 + /* 283 283 * State machine with entry actions. 284 284 */ 285 285 typedef void (*bfa_fsm_t)(void *fsm, int event); 286 286 287 - /** 287 + /* 288 288 * oc - object class eg. bfa_ioc 289 289 * st - state, eg. reset 290 290 * otype - object type, eg. struct bfa_ioc_s ··· 314 314 return smt[i].state; 315 315 } 316 316 317 - /** 317 + /* 318 318 * @ Generic wait counter. 319 319 */ 320 320 ··· 340 340 wc->wc_resume(wc->wc_cbarg); 341 341 } 342 342 343 - /** 343 + /* 344 344 * Initialize a waiting counter. 345 345 */ 346 346 static inline void ··· 352 352 bfa_wc_up(wc); 353 353 } 354 354 355 - /** 355 + /* 356 356 * Wait for counter to reach zero 357 357 */ 358 358 static inline void
+27 -27
drivers/scsi/bfa/bfa_defs.h
··· 24 24 #define BFA_MFG_SERIALNUM_SIZE 11 25 25 #define STRSZ(_n) (((_n) + 4) & ~3) 26 26 27 - /** 27 + /* 28 28 * Manufacturing card type 29 29 */ 30 30 enum { ··· 45 45 46 46 #pragma pack(1) 47 47 48 - /** 48 + /* 49 49 * Check if Mezz card 50 50 */ 51 51 #define bfa_mfg_is_mezz(type) (( \ ··· 55 55 (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \ 56 56 (type) == BFA_MFG_TYPE_LIGHTNING)) 57 57 58 - /** 58 + /* 59 59 * Check if the card having old wwn/mac handling 60 60 */ 61 61 #define bfa_mfg_is_old_wwn_mac_model(type) (( \ ··· 78 78 (m)[2] = t & 0xFF; \ 79 79 } while (0) 80 80 81 - /** 81 + /* 82 82 * VPD data length 83 83 */ 84 84 #define BFA_MFG_VPD_LEN 512 85 85 86 - /** 86 + /* 87 87 * VPD vendor tag 88 88 */ 89 89 enum { ··· 97 97 BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */ 98 98 }; 99 99 100 - /** 100 + /* 101 101 * All numerical fields are in big-endian format. 102 102 */ 103 103 struct bfa_mfg_vpd_s { ··· 112 112 113 113 #pragma pack() 114 114 115 - /** 115 + /* 116 116 * Status return values 117 117 */ 118 118 enum bfa_status { ··· 167 167 #define BFA_STRING_32 32 168 168 #define BFA_VERSION_LEN 64 169 169 170 - /** 170 + /* 171 171 * ---------------------- adapter definitions ------------ 172 172 */ 173 173 174 - /** 174 + /* 175 175 * BFA adapter level attributes. 176 176 */ 177 177 enum { ··· 215 215 u8 trunk_capable; 216 216 }; 217 217 218 - /** 218 + /* 219 219 * ---------------------- IOC definitions ------------ 220 220 */ 221 221 ··· 224 224 BFA_IOC_CHIP_REV_LEN = 8, 225 225 }; 226 226 227 - /** 227 + /* 228 228 * Driver and firmware versions. 229 229 */ 230 230 struct bfa_ioc_driver_attr_s { ··· 236 236 char ob_ver[BFA_VERSION_LEN]; /* openboot version */ 237 237 }; 238 238 239 - /** 239 + /* 240 240 * IOC PCI device attributes 241 241 */ 242 242 struct bfa_ioc_pci_attr_s { ··· 249 249 char chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */ 250 250 }; 251 251 252 - /** 252 + /* 253 253 * IOC states 254 254 */ 255 255 enum bfa_ioc_state { ··· 267 267 BFA_IOC_ENABLING = 12, /* IOC is being enabled */ 268 268 }; 269 269 270 - /** 270 + /* 271 271 * IOC firmware stats 272 272 */ 273 273 struct bfa_fw_ioc_stats_s { ··· 279 279 u32 unknown_reqs; 280 280 }; 281 281 282 - /** 282 + /* 283 283 * IOC driver stats 284 284 */ 285 285 struct bfa_ioc_drv_stats_s { ··· 296 296 u32 enable_replies; 297 297 }; 298 298 299 - /** 299 + /* 300 300 * IOC statistics 301 301 */ 302 302 struct bfa_ioc_stats_s { ··· 310 310 BFA_IOC_TYPE_LL = 3, 311 311 }; 312 312 313 - /** 313 + /* 314 314 * IOC attributes returned in queries 315 315 */ 316 316 struct bfa_ioc_attr_s { ··· 323 323 u8 rsvd[7]; /* 64bit align */ 324 324 }; 325 325 326 - /** 326 + /* 327 327 * ---------------------- mfg definitions ------------ 328 328 */ 329 329 330 - /** 330 + /* 331 331 * Checksum size 332 332 */ 333 333 #define BFA_MFG_CHKSUM_SIZE 16 ··· 340 340 341 341 #pragma pack(1) 342 342 343 - /** 343 + /* 344 344 * All numerical fields are in big-endian format. 
345 345 */ 346 346 struct bfa_mfg_block_s { ··· 373 373 374 374 #pragma pack() 375 375 376 - /** 376 + /* 377 377 * ---------------------- pci definitions ------------ 378 378 */ 379 379 380 - /** 380 + /* 381 381 * PCI device and vendor ID information 382 382 */ 383 383 enum { ··· 392 392 ((devid) == BFA_PCI_DEVICE_ID_CT || \ 393 393 (devid) == BFA_PCI_DEVICE_ID_CT_FC) 394 394 395 - /** 395 + /* 396 396 * PCI sub-system device and vendor ID information 397 397 */ 398 398 enum { 399 399 BFA_PCI_FCOE_SSDEVICE_ID = 0x14, 400 400 }; 401 401 402 - /** 402 + /* 403 403 * Maximum number of device address ranges mapped through different BAR(s) 404 404 */ 405 405 #define BFA_PCI_ACCESS_RANGES 1 ··· 430 430 #define BOOT_CFG_REV1 1 431 431 #define BOOT_CFG_VLAN 1 432 432 433 - /** 433 + /* 434 434 * Boot options setting. Boot options setting determines from where 435 435 * to get the boot lun information 436 436 */ ··· 442 442 }; 443 443 444 444 #pragma pack(1) 445 - /** 445 + /* 446 446 * Boot lun information. 447 447 */ 448 448 struct bfa_boot_bootlun_s { ··· 451 451 }; 452 452 #pragma pack() 453 453 454 - /** 454 + /* 455 455 * BOOT boot configuraton 456 456 */ 457 457 struct bfa_boot_pbc_s {
+24 -24
drivers/scsi/bfa/bfa_defs_fcs.h
··· 21 21 #include "bfa_fc.h" 22 22 #include "bfa_defs_svc.h" 23 23 24 - /** 24 + /* 25 25 * VF states 26 26 */ 27 27 enum bfa_vf_state { ··· 35 35 BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */ 36 36 }; 37 37 38 - /** 38 + /* 39 39 * VF statistics 40 40 */ 41 41 struct bfa_vf_stats_s { ··· 55 55 u32 resvd; /* padding for 64 bit alignment */ 56 56 }; 57 57 58 - /** 58 + /* 59 59 * VF attributes returned in queries 60 60 */ 61 61 struct bfa_vf_attr_s { ··· 67 67 #define BFA_FCS_MAX_LPORTS 256 68 68 #define BFA_FCS_FABRIC_IPADDR_SZ 16 69 69 70 - /** 70 + /* 71 71 * symbolic names for base port/virtual port 72 72 */ 73 73 #define BFA_SYMNAME_MAXLEN 128 /* 128 bytes */ ··· 75 75 char symname[BFA_SYMNAME_MAXLEN]; 76 76 }; 77 77 78 - /** 78 + /* 79 79 * Roles of FCS port: 80 80 * - FCP IM and FCP TM roles cannot be enabled together for a FCS port 81 81 * - Create multiple ports if both IM and TM functions required. ··· 86 86 BFA_LPORT_ROLE_FCP_MAX = BFA_LPORT_ROLE_FCP_IM, 87 87 }; 88 88 89 - /** 89 + /* 90 90 * FCS port configuration. 91 91 */ 92 92 struct bfa_lport_cfg_s { 93 93 wwn_t pwwn; /* port wwn */ 94 94 wwn_t nwwn; /* node wwn */ 95 95 struct bfa_lport_symname_s sym_name; /* vm port symbolic name */ 96 - bfa_boolean_t preboot_vp; /* vport created from PBC */ 96 + bfa_boolean_t preboot_vp; /* vport created from PBC */ 97 97 enum bfa_lport_role roles; /* FCS port roles */ 98 98 u8 tag[16]; /* opaque tag from application */ 99 99 }; 100 100 101 - /** 101 + /* 102 102 * FCS port states 103 103 */ 104 104 enum bfa_lport_state { ··· 108 108 BFA_LPORT_OFFLINE = 3, /* No login to fabric */ 109 109 }; 110 110 111 - /** 111 + /* 112 112 * FCS port type. 113 113 */ 114 114 enum bfa_lport_type { ··· 116 116 BFA_LPORT_TYPE_VIRTUAL, 117 117 }; 118 118 119 - /** 119 + /* 120 120 * FCS port offline reason. 121 121 */ 122 122 enum bfa_lport_offline_reason { ··· 128 128 BFA_LPORT_OFFLINE_FAB_LOGOUT, 129 129 }; 130 130 131 - /** 131 + /* 132 132 * FCS lport info. 133 133 */ 134 134 struct bfa_lport_info_s { ··· 150 150 151 151 }; 152 152 153 - /** 153 + /* 154 154 * FCS port statistics 155 155 */ 156 156 struct bfa_lport_stats_s { ··· 222 222 * (max retry of plogi) */ 223 223 }; 224 224 225 - /** 225 + /* 226 226 * BFA port attribute returned in queries 227 227 */ 228 228 struct bfa_lport_attr_s { ··· 239 239 }; 240 240 241 241 242 - /** 242 + /* 243 243 * VPORT states 244 244 */ 245 245 enum bfa_vport_state { ··· 258 258 BFA_FCS_VPORT_MAX_STATE, 259 259 }; 260 260 261 - /** 261 + /* 262 262 * vport statistics 263 263 */ 264 264 struct bfa_vport_stats_s { ··· 296 296 u32 rsvd; 297 297 }; 298 298 299 - /** 299 + /* 300 300 * BFA vport attribute returned in queries 301 301 */ 302 302 struct bfa_vport_attr_s { ··· 305 305 u32 rsvd; 306 306 }; 307 307 308 - /** 308 + /* 309 309 * FCS remote port states 310 310 */ 311 311 enum bfa_rport_state { ··· 321 321 BFA_RPORT_NSDISC = 9, /* re-discover rport */ 322 322 }; 323 323 324 - /** 324 + /* 325 325 * Rport Scsi Function : Initiator/Target. 
326 326 */ 327 327 enum bfa_rport_function { ··· 329 329 BFA_RPORT_TARGET = 0x02, /* SCSI Target */ 330 330 }; 331 331 332 - /** 332 + /* 333 333 * port/node symbolic names for rport 334 334 */ 335 335 #define BFA_RPORT_SYMNAME_MAXLEN 255 ··· 337 337 char symname[BFA_RPORT_SYMNAME_MAXLEN]; 338 338 }; 339 339 340 - /** 340 + /* 341 341 * FCS remote port statistics 342 342 */ 343 343 struct bfa_rport_stats_s { ··· 374 374 struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */ 375 375 }; 376 376 377 - /** 377 + /* 378 378 * FCS remote port attributes returned in queries 379 379 */ 380 380 struct bfa_rport_attr_s { ··· 411 411 #define BFA_MAX_IO_INDEX 7 412 412 #define BFA_NO_IO_INDEX 9 413 413 414 - /** 414 + /* 415 415 * FCS itnim states 416 416 */ 417 417 enum bfa_itnim_state { ··· 425 425 BFA_ITNIM_INITIATIOR = 7, /* initiator */ 426 426 }; 427 427 428 - /** 428 + /* 429 429 * FCS remote port statistics 430 430 */ 431 431 struct bfa_itnim_stats_s { ··· 443 443 u32 rsvd; /* padding for 64 bit alignment */ 444 444 }; 445 445 446 - /** 446 + /* 447 447 * FCS itnim attributes returned in queries 448 448 */ 449 449 struct bfa_itnim_attr_s {
+41 -41
drivers/scsi/bfa/bfa_defs_svc.h
··· 27 27 #define BFA_IOCFCOE_INTR_DELAY 25 28 28 #define BFA_IOCFCOE_INTR_LATENCY 5 29 29 30 - /** 30 + /* 31 31 * Interrupt coalescing configuration. 32 32 */ 33 33 #pragma pack(1) ··· 38 38 u16 delay; /* delay in microseconds */ 39 39 }; 40 40 41 - /** 41 + /* 42 42 * IOC firmware configuraton 43 43 */ 44 44 struct bfa_iocfc_fwcfg_s { ··· 71 71 u32 rsvd; 72 72 }; 73 73 74 - /** 74 + /* 75 75 * IOC configuration 76 76 */ 77 77 struct bfa_iocfc_cfg_s { ··· 79 79 struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */ 80 80 }; 81 81 82 - /** 82 + /* 83 83 * IOC firmware IO stats 84 84 */ 85 85 struct bfa_fw_io_stats_s { ··· 152 152 */ 153 153 }; 154 154 155 - /** 155 + /* 156 156 * IOC port firmware stats 157 157 */ 158 158 ··· 262 262 u32 mac_invalids; /* Invalid mac assigned */ 263 263 }; 264 264 265 - /** 265 + /* 266 266 * IOC firmware FCoE port stats 267 267 */ 268 268 struct bfa_fw_fcoe_port_stats_s { ··· 270 270 struct bfa_fw_fip_stats_s fip_stats; 271 271 }; 272 272 273 - /** 273 + /* 274 274 * IOC firmware FC uport stats 275 275 */ 276 276 struct bfa_fw_fc_uport_stats_s { ··· 278 278 struct bfa_fw_port_lksm_stats_s lksm_stats; 279 279 }; 280 280 281 - /** 281 + /* 282 282 * IOC firmware FC port stats 283 283 */ 284 284 union bfa_fw_fc_port_stats_s { ··· 286 286 struct bfa_fw_fcoe_port_stats_s fcoe_stats; 287 287 }; 288 288 289 - /** 289 + /* 290 290 * IOC firmware port stats 291 291 */ 292 292 struct bfa_fw_port_stats_s { ··· 295 295 union bfa_fw_fc_port_stats_s fc_port; 296 296 }; 297 297 298 - /** 298 + /* 299 299 * fcxchg module statistics 300 300 */ 301 301 struct bfa_fw_fcxchg_stats_s { ··· 308 308 u32 cls_tx; 309 309 }; 310 310 311 - /** 311 + /* 312 312 * Trunk statistics 313 313 */ 314 314 struct bfa_fw_trunk_stats_s { ··· 334 334 u32 elp_dropped; /* ELP dropped */ 335 335 }; 336 336 337 - /** 337 + /* 338 338 * IOCFC firmware stats 339 339 */ 340 340 struct bfa_fw_iocfc_stats_s { ··· 345 345 u32 set_intr_reqs; /* set interrupt reqs */ 346 346 }; 347 347 348 - /** 348 + /* 349 349 * IOC attributes returned in queries 350 350 */ 351 351 struct bfa_iocfc_attr_s { ··· 353 353 struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */ 354 354 }; 355 355 356 - /** 356 + /* 357 357 * Eth_sndrcv mod stats 358 358 */ 359 359 struct bfa_fw_eth_sndrcv_stats_s { ··· 361 361 u32 rsvd; /* 64bit align */ 362 362 }; 363 363 364 - /** 364 + /* 365 365 * CT MAC mod stats 366 366 */ 367 367 struct bfa_fw_mac_mod_stats_s { ··· 379 379 u32 rsvd; /* 64bit align */ 380 380 }; 381 381 382 - /** 382 + /* 383 383 * CT MOD stats 384 384 */ 385 385 struct bfa_fw_ct_mod_stats_s { ··· 391 391 u32 rsvd; /* 64bit align */ 392 392 }; 393 393 394 - /** 394 + /* 395 395 * IOC firmware stats 396 396 */ 397 397 struct bfa_fw_stats_s { ··· 412 412 #define BFA_IOCFC_PATHTOV_MAX 60 413 413 #define BFA_IOCFC_QDEPTH_MAX 2000 414 414 415 - /** 415 + /* 416 416 * QoS states 417 417 */ 418 418 enum bfa_qos_state { ··· 420 420 BFA_QOS_OFFLINE = 2, /* QoS is offline */ 421 421 }; 422 422 423 - /** 423 + /* 424 424 * QoS Priority levels. 
425 425 */ 426 426 enum bfa_qos_priority { ··· 430 430 BFA_QOS_LOW = 3, /* QoS Priority Level Low */ 431 431 }; 432 432 433 - /** 433 + /* 434 434 * QoS bandwidth allocation for each priority level 435 435 */ 436 436 enum bfa_qos_bw_alloc { ··· 439 439 BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */ 440 440 }; 441 441 #pragma pack(1) 442 - /** 442 + /* 443 443 * QoS attribute returned in QoS Query 444 444 */ 445 445 struct bfa_qos_attr_s { ··· 448 448 u32 total_bb_cr; /* Total BB Credits */ 449 449 }; 450 450 451 - /** 451 + /* 452 452 * These fields should be displayed only from the CLI. 453 453 * There will be a separate BFAL API (get_qos_vc_attr ?) 454 454 * to retrieve this. ··· 471 471 * total_vc_count */ 472 472 }; 473 473 474 - /** 474 + /* 475 475 * QoS statistics 476 476 */ 477 477 struct bfa_qos_stats_s { ··· 489 489 u32 rsvd; /* padding for 64 bit alignment */ 490 490 }; 491 491 492 - /** 492 + /* 493 493 * FCoE statistics 494 494 */ 495 495 struct bfa_fcoe_stats_s { ··· 540 540 u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */ 541 541 }; 542 542 543 - /** 543 + /* 544 544 * QoS or FCoE stats (fcport stats excluding physical FC port stats) 545 545 */ 546 546 union bfa_fcport_stats_u { ··· 639 639 BFA_PORT_ST_MAX_STATE, 640 640 }; 641 641 642 - /** 642 + /* 643 643 * Port operational type (in sync with SNIA port type). 644 644 */ 645 645 enum bfa_port_type { ··· 651 651 BFA_PORT_TYPE_VPORT = 22, /* NPIV - virtual port */ 652 652 }; 653 653 654 - /** 654 + /* 655 655 * Port topology setting. A port's topology and fabric login status 656 656 * determine its operational type. 657 657 */ ··· 662 662 BFA_PORT_TOPOLOGY_AUTO = 3, /* auto topology selection */ 663 663 }; 664 664 665 - /** 665 + /* 666 666 * Physical port loopback types. 667 667 */ 668 668 enum bfa_port_opmode { ··· 679 679 (_mode == BFA_PORT_OPMODE_LB_SLW) || \ 680 680 (_mode == BFA_PORT_OPMODE_LB_EXT)) 681 681 682 - /** 682 + /* 683 683 * Port link state 684 684 */ 685 685 enum bfa_port_linkstate { ··· 687 687 BFA_PORT_LINKDOWN = 2, /* Physical port/Trunk link down */ 688 688 }; 689 689 690 - /** 690 + /* 691 691 * Port link state reason code 692 692 */ 693 693 enum bfa_port_linkstate_rsn { ··· 733 733 CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43 734 734 }; 735 735 #pragma pack(1) 736 - /** 736 + /* 737 737 * Physical port configuration 738 738 */ 739 739 struct bfa_port_cfg_s { ··· 753 753 }; 754 754 #pragma pack() 755 755 756 - /** 756 + /* 757 757 * Port attribute values. 758 758 */ 759 759 struct bfa_port_attr_s { ··· 800 800 u8 rsvd1[6]; 801 801 }; 802 802 803 - /** 803 + /* 804 804 * Port FCP mappings. 805 805 */ 806 806 struct bfa_port_fcpmap_s { ··· 815 815 char luid[256]; 816 816 }; 817 817 818 - /** 818 + /* 819 819 * Port RNID info. 
820 820 */ 821 821 struct bfa_port_rnid_s { ··· 848 848 mac_t mac; /* FCF mac */ 849 849 }; 850 850 851 - /** 851 + /* 852 852 * Trunk states for BCU/BFAL 853 853 */ 854 854 enum bfa_trunk_state { ··· 857 857 BFA_TRUNK_OFFLINE = 2, /* Trunk is offline */ 858 858 }; 859 859 860 - /** 860 + /* 861 861 * VC attributes for trunked link 862 862 */ 863 863 struct bfa_trunk_vc_attr_s { ··· 867 867 u16 vc_credits[8]; 868 868 }; 869 869 870 - /** 870 + /* 871 871 * Link state information 872 872 */ 873 873 struct bfa_port_link_s { ··· 959 959 u32 rsvd; 960 960 }; 961 961 #pragma pack(1) 962 - /** 962 + /* 963 963 * Rport's QoS attributes 964 964 */ 965 965 struct bfa_rport_qos_attr_s { ··· 987 987 struct bfa_itnim_latency_s io_latency; 988 988 }; 989 989 990 - /** 990 + /* 991 991 * FC physical port statistics. 992 992 */ 993 993 struct bfa_port_fc_stats_s { ··· 1022 1022 u64 err_enc; /* Encoding err frame_8b10b */ 1023 1023 }; 1024 1024 1025 - /** 1025 + /* 1026 1026 * Eth Physical Port statistics. 1027 1027 */ 1028 1028 struct bfa_port_eth_stats_s { ··· 1070 1070 u64 tx_iscsi_zero_pause; /* Tx iSCSI zero pause */ 1071 1071 }; 1072 1072 1073 - /** 1073 + /* 1074 1074 * Port statistics. 1075 1075 */ 1076 1076 union bfa_port_stats_u {
+3 -3
drivers/scsi/bfa/bfa_drv.c
··· 17 17 18 18 #include "bfa_modules.h" 19 19 20 - /** 20 + /* 21 21 * BFA module list terminated by NULL 22 22 */ 23 23 struct bfa_module_s *hal_mods[] = { ··· 31 31 NULL 32 32 }; 33 33 34 - /** 34 + /* 35 35 * Message handlers for various modules. 36 36 */ 37 37 bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { ··· 70 70 }; 71 71 72 72 73 - /** 73 + /* 74 74 * Message handlers for mailbox command classes 75 75 */ 76 76 bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
+15 -15
drivers/scsi/bfa/bfa_fc.h
··· 1029 1029 struct link_e2e_beacon_param_s beacon_parm; 1030 1030 }; 1031 1031 1032 - /** 1032 + /* 1033 1033 * If RPSC request is sent to the Domain Controller, the request is for 1034 1034 * all the ports within that domain (TODO - I don't think FOS implements 1035 1035 * this...). ··· 1049 1049 struct fc_rpsc_speed_info_s speed_info[1]; 1050 1050 }; 1051 1051 1052 - /** 1052 + /* 1053 1053 * If RPSC2 request is sent to the Domain Controller, 1054 1054 */ 1055 1055 #define FC_BRCD_TOKEN 0x42524344 ··· 1094 1094 struct fc_rpsc2_port_info_s port_info[1]; /* port information */ 1095 1095 }; 1096 1096 1097 - /** 1097 + /* 1098 1098 * bit fields so that multiple classes can be specified 1099 1099 */ 1100 1100 enum fc_cos { ··· 1131 1131 #define FC_VF_ID_MAX 0xEFF 1132 1132 #define FC_VF_ID_CTL 0xFEF /* control VF_ID */ 1133 1133 1134 - /** 1134 + /* 1135 1135 * Virtual Fabric Tagging header format 1136 1136 * @caution This is defined only in BIG ENDIAN format. 1137 1137 */ ··· 1463 1463 u32 dap:24; /* port identifier */ 1464 1464 }; 1465 1465 1466 - /** 1466 + /* 1467 1467 * RFT_ID 1468 1468 */ 1469 1469 struct fcgs_rftid_req_s { ··· 1472 1472 u32 fc4_type[8]; /* fc4 types */ 1473 1473 }; 1474 1474 1475 - /** 1475 + /* 1476 1476 * RFF_ID : Register FC4 features. 1477 1477 */ 1478 1478 ··· 1487 1487 u32 fc4_type:8; /* corresponding FC4 Type */ 1488 1488 }; 1489 1489 1490 - /** 1490 + /* 1491 1491 * GID_FT Request 1492 1492 */ 1493 1493 struct fcgs_gidft_req_s { ··· 1497 1497 u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */ 1498 1498 }; /* GID_FT Request */ 1499 1499 1500 - /** 1500 + /* 1501 1501 * GID_FT Response 1502 1502 */ 1503 1503 struct fcgs_gidft_resp_s { ··· 1506 1506 u32 pid:24; /* port identifier */ 1507 1507 }; /* GID_FT Response */ 1508 1508 1509 - /** 1509 + /* 1510 1510 * RSPN_ID 1511 1511 */ 1512 1512 struct fcgs_rspnid_req_s { ··· 1516 1516 u8 spn[256]; /* symbolic port name */ 1517 1517 }; 1518 1518 1519 - /** 1519 + /* 1520 1520 * RPN_ID 1521 1521 */ 1522 1522 struct fcgs_rpnid_req_s { ··· 1525 1525 wwn_t port_name; 1526 1526 }; 1527 1527 1528 - /** 1528 + /* 1529 1529 * RNN_ID 1530 1530 */ 1531 1531 struct fcgs_rnnid_req_s { ··· 1534 1534 wwn_t node_name; 1535 1535 }; 1536 1536 1537 - /** 1537 + /* 1538 1538 * RCS_ID 1539 1539 */ 1540 1540 struct fcgs_rcsid_req_s { ··· 1543 1543 u32 cos; 1544 1544 }; 1545 1545 1546 - /** 1546 + /* 1547 1547 * RPT_ID 1548 1548 */ 1549 1549 struct fcgs_rptid_req_s { ··· 1553 1553 u32 rsvd1:24; 1554 1554 }; 1555 1555 1556 - /** 1556 + /* 1557 1557 * GA_NXT Request 1558 1558 */ 1559 1559 struct fcgs_ganxt_req_s { ··· 1561 1561 u32 port_id:24; 1562 1562 }; 1563 1563 1564 - /** 1564 + /* 1565 1565 * GA_NXT Response 1566 1566 */ 1567 1567 struct fcgs_ganxt_rsp_s {
+98 -98
drivers/scsi/bfa/bfa_fcbuild.c
··· 94 94 */ 95 95 plogi_tmpl.csp.verhi = FC_PH_VER_PH_3; 96 96 plogi_tmpl.csp.verlo = FC_PH_VER_4_3; 97 - plogi_tmpl.csp.bbcred = bfa_os_htons(0x0004); 97 + plogi_tmpl.csp.bbcred = cpu_to_be16(0x0004); 98 98 plogi_tmpl.csp.ciro = 0x1; 99 99 plogi_tmpl.csp.cisc = 0x0; 100 100 plogi_tmpl.csp.altbbcred = 0x0; 101 - plogi_tmpl.csp.conseq = bfa_os_htons(0x00FF); 102 - plogi_tmpl.csp.ro_bitmap = bfa_os_htons(0x0002); 103 - plogi_tmpl.csp.e_d_tov = bfa_os_htonl(2000); 101 + plogi_tmpl.csp.conseq = cpu_to_be16(0x00FF); 102 + plogi_tmpl.csp.ro_bitmap = cpu_to_be16(0x0002); 103 + plogi_tmpl.csp.e_d_tov = cpu_to_be32(2000); 104 104 105 105 plogi_tmpl.class3.class_valid = 1; 106 106 plogi_tmpl.class3.sequential = 1; ··· 112 112 */ 113 113 prli_tmpl.command = FC_ELS_PRLI; 114 114 prli_tmpl.pglen = 0x10; 115 - prli_tmpl.pagebytes = bfa_os_htons(0x0014); 115 + prli_tmpl.pagebytes = cpu_to_be16(0x0014); 116 116 prli_tmpl.parampage.type = FC_TYPE_FCP; 117 117 prli_tmpl.parampage.imagepair = 1; 118 118 prli_tmpl.parampage.servparams.rxrdisab = 1; ··· 137 137 static void 138 138 fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id) 139 139 { 140 - bfa_os_memset(fchs, 0, sizeof(struct fchs_s)); 140 + memset(fchs, 0, sizeof(struct fchs_s)); 141 141 142 142 fchs->routing = FC_RTG_FC4_DEV_DATA; 143 143 fchs->cat_info = FC_CAT_UNSOLICIT_CTRL; ··· 148 148 fchs->rx_id = FC_RXID_ANY; 149 149 fchs->d_id = (d_id); 150 150 fchs->s_id = (s_id); 151 - fchs->ox_id = bfa_os_htons(ox_id); 151 + fchs->ox_id = cpu_to_be16(ox_id); 152 152 153 - /** 153 + /* 154 154 * @todo no need to set ox_id for request 155 155 * no need to set rx_id for response 156 156 */ ··· 159 159 void 160 160 fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) 161 161 { 162 - bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); 162 + memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); 163 163 fchs->d_id = (d_id); 164 164 fchs->s_id = (s_id); 165 - fchs->ox_id = bfa_os_htons(ox_id); 165 + fchs->ox_id = cpu_to_be16(ox_id); 166 166 } 167 167 168 168 static void 169 169 fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) 170 170 { 171 - bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); 171 + memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); 172 172 fchs->d_id = d_id; 173 173 fchs->s_id = s_id; 174 174 fchs->ox_id = ox_id; ··· 198 198 static void 199 199 fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) 200 200 { 201 - bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); 201 + memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); 202 202 fchs->d_id = d_id; 203 203 fchs->s_id = s_id; 204 204 fchs->ox_id = ox_id; ··· 211 211 { 212 212 struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); 213 213 214 - bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 214 + memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 215 215 216 216 plogi->els_cmd.els_code = els_code; 217 217 if (els_code == FC_ELS_PLOGI) ··· 219 219 else 220 220 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 221 221 222 - plogi->csp.rxsz = plogi->class3.rxsz = bfa_os_htons(pdu_size); 222 + plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size); 223 223 224 - bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); 225 - bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); 224 + memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); 225 + memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); 226 226 227 227 return sizeof(struct fc_logi_s); 228 228 } ··· 235 235 u32 d_id 
= bfa_os_hton3b(FC_FABRIC_PORT); 236 236 u32 *vvl_info; 237 237 238 - bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 238 + memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 239 239 240 240 flogi->els_cmd.els_code = FC_ELS_FLOGI; 241 241 fc_els_req_build(fchs, d_id, s_id, ox_id); 242 242 243 - flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); 243 + flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); 244 244 flogi->port_name = port_name; 245 245 flogi->node_name = node_name; 246 246 ··· 253 253 /* set AUTH capability */ 254 254 flogi->csp.security = set_auth; 255 255 256 - flogi->csp.bbcred = bfa_os_htons(local_bb_credits); 256 + flogi->csp.bbcred = cpu_to_be16(local_bb_credits); 257 257 258 258 /* Set brcd token in VVL */ 259 259 vvl_info = (u32 *)&flogi->vvl[0]; 260 260 261 261 /* set the flag to indicate the presence of VVL */ 262 262 flogi->csp.npiv_supp = 1; /* @todo. field name is not correct */ 263 - vvl_info[0] = bfa_os_htonl(FLOGI_VVL_BRCD); 263 + vvl_info[0] = cpu_to_be32(FLOGI_VVL_BRCD); 264 264 265 265 return sizeof(struct fc_logi_s); 266 266 } ··· 272 272 { 273 273 u32 d_id = 0; 274 274 275 - bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 275 + memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 276 276 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 277 277 278 278 flogi->els_cmd.els_code = FC_ELS_ACC; 279 - flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); 279 + flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); 280 280 flogi->port_name = port_name; 281 281 flogi->node_name = node_name; 282 282 283 - flogi->csp.bbcred = bfa_os_htons(local_bb_credits); 283 + flogi->csp.bbcred = cpu_to_be16(local_bb_credits); 284 284 285 285 return sizeof(struct fc_logi_s); 286 286 } ··· 291 291 { 292 292 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); 293 293 294 - bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 294 + memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 295 295 296 296 flogi->els_cmd.els_code = FC_ELS_FDISC; 297 297 fc_els_req_build(fchs, d_id, s_id, ox_id); 298 298 299 - flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); 299 + flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); 300 300 flogi->port_name = port_name; 301 301 flogi->node_name = node_name; 302 302 ··· 346 346 if (!plogi->class3.class_valid) 347 347 return FC_PARSE_FAILURE; 348 348 349 - if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) 349 + if (be16_to_cpu(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) 350 350 return FC_PARSE_FAILURE; 351 351 352 352 return FC_PARSE_OK; ··· 363 363 if (plogi->class3.class_valid != 1) 364 364 return FC_PARSE_FAILURE; 365 365 366 - if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ) 367 - || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ) 366 + if ((be16_to_cpu(plogi->class3.rxsz) < FC_MIN_PDUSZ) 367 + || (be16_to_cpu(plogi->class3.rxsz) > FC_MAX_PDUSZ) 368 368 || (plogi->class3.rxsz == 0)) 369 369 return FC_PARSE_FAILURE; 370 370 ··· 378 378 struct fc_prli_s *prli = (struct fc_prli_s *) (pld); 379 379 380 380 fc_els_req_build(fchs, d_id, s_id, ox_id); 381 - bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); 381 + memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); 382 382 383 383 prli->command = FC_ELS_PRLI; 384 384 prli->parampage.servparams.initiator = 1; ··· 397 397 struct fc_prli_s *prli = (struct fc_prli_s *) (pld); 398 398 399 399 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 400 - bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); 400 + memcpy(prli, 
&prli_tmpl, sizeof(struct fc_prli_s)); 401 401 402 402 prli->command = FC_ELS_ACC; 403 403 ··· 448 448 { 449 449 fc_els_req_build(fchs, d_id, s_id, ox_id); 450 450 451 - bfa_os_memset(logo, '\0', sizeof(struct fc_logo_s)); 451 + memset(logo, '\0', sizeof(struct fc_logo_s)); 452 452 logo->els_cmd.els_code = FC_ELS_LOGO; 453 453 logo->nport_id = (s_id); 454 454 logo->orig_port_name = port_name; ··· 461 461 u32 s_id, u16 ox_id, wwn_t port_name, 462 462 wwn_t node_name, u8 els_code) 463 463 { 464 - bfa_os_memset(adisc, '\0', sizeof(struct fc_adisc_s)); 464 + memset(adisc, '\0', sizeof(struct fc_adisc_s)); 465 465 466 466 adisc->els_cmd.els_code = els_code; 467 467 ··· 537 537 if (pdisc->class3.class_valid != 1) 538 538 return FC_PARSE_FAILURE; 539 539 540 - if ((bfa_os_ntohs(pdisc->class3.rxsz) < 540 + if ((be16_to_cpu(pdisc->class3.rxsz) < 541 541 (FC_MIN_PDUSZ - sizeof(struct fchs_s))) 542 542 || (pdisc->class3.rxsz == 0)) 543 543 return FC_PARSE_FAILURE; ··· 554 554 u16 555 555 fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) 556 556 { 557 - bfa_os_memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s)); 557 + memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s)); 558 558 fchs->cat_info = FC_CAT_ABTS; 559 559 fchs->d_id = (d_id); 560 560 fchs->s_id = (s_id); 561 - fchs->ox_id = bfa_os_htons(ox_id); 561 + fchs->ox_id = cpu_to_be16(ox_id); 562 562 563 563 return sizeof(struct fchs_s); 564 564 } ··· 582 582 /* 583 583 * build rrq payload 584 584 */ 585 - bfa_os_memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s)); 585 + memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s)); 586 586 rrq->s_id = (s_id); 587 - rrq->ox_id = bfa_os_htons(rrq_oxid); 587 + rrq->ox_id = cpu_to_be16(rrq_oxid); 588 588 rrq->rx_id = FC_RXID_ANY; 589 589 590 590 return sizeof(struct fc_rrq_s); ··· 598 598 599 599 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 600 600 601 - bfa_os_memset(acc, 0, sizeof(struct fc_els_cmd_s)); 601 + memset(acc, 0, sizeof(struct fc_els_cmd_s)); 602 602 acc->els_code = FC_ELS_ACC; 603 603 604 604 return sizeof(struct fc_els_cmd_s); ··· 610 610 u8 reason_code_expl) 611 611 { 612 612 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 613 - bfa_os_memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s)); 613 + memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s)); 614 614 615 615 ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT; 616 616 ls_rjt->reason_code = reason_code; ··· 626 626 { 627 627 fc_bls_rsp_build(fchs, d_id, s_id, ox_id); 628 628 629 - bfa_os_memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s)); 629 + memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s)); 630 630 631 631 fchs->rx_id = rx_id; 632 632 ··· 641 641 u32 s_id, u16 ox_id) 642 642 { 643 643 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 644 - bfa_os_memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); 644 + memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); 645 645 els_cmd->els_code = FC_ELS_ACC; 646 646 647 647 return sizeof(struct fc_els_cmd_s); ··· 656 656 657 657 if (els_code == FC_ELS_PRLO) { 658 658 prlo = (struct fc_prlo_s *) (fc_frame + 1); 659 - num_pages = (bfa_os_ntohs(prlo->payload_len) - 4) / 16; 659 + num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16; 660 660 } else { 661 661 tprlo = (struct fc_tprlo_s *) (fc_frame + 1); 662 - num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16; 662 + num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16; 663 663 } 664 664 return num_pages; 665 665 } ··· 672 672 673 673 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 674 674 675 - bfa_os_memset(tprlo_acc, 0, (num_pages * 16) + 4); 675 + memset(tprlo_acc, 0, 
(num_pages * 16) + 4); 676 676 tprlo_acc->command = FC_ELS_ACC; 677 677 678 678 tprlo_acc->page_len = 0x10; 679 - tprlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4); 679 + tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4); 680 680 681 681 for (page = 0; page < num_pages; page++) { 682 682 tprlo_acc->tprlo_acc_params[page].opa_valid = 0; ··· 685 685 tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0; 686 686 tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0; 687 687 } 688 - return bfa_os_ntohs(tprlo_acc->payload_len); 688 + return be16_to_cpu(tprlo_acc->payload_len); 689 689 } 690 690 691 691 u16 ··· 696 696 697 697 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 698 698 699 - bfa_os_memset(prlo_acc, 0, (num_pages * 16) + 4); 699 + memset(prlo_acc, 0, (num_pages * 16) + 4); 700 700 prlo_acc->command = FC_ELS_ACC; 701 701 prlo_acc->page_len = 0x10; 702 - prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4); 702 + prlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4); 703 703 704 704 for (page = 0; page < num_pages; page++) { 705 705 prlo_acc->prlo_acc_params[page].opa_valid = 0; ··· 709 709 prlo_acc->prlo_acc_params[page].resp_process_assc = 0; 710 710 } 711 711 712 - return bfa_os_ntohs(prlo_acc->payload_len); 712 + return be16_to_cpu(prlo_acc->payload_len); 713 713 } 714 714 715 715 u16 ··· 718 718 { 719 719 fc_els_req_build(fchs, d_id, s_id, ox_id); 720 720 721 - bfa_os_memset(rnid, 0, sizeof(struct fc_rnid_cmd_s)); 721 + memset(rnid, 0, sizeof(struct fc_rnid_cmd_s)); 722 722 723 723 rnid->els_cmd.els_code = FC_ELS_RNID; 724 724 rnid->node_id_data_format = data_format; ··· 732 732 struct fc_rnid_common_id_data_s *common_id_data, 733 733 struct fc_rnid_general_topology_data_s *gen_topo_data) 734 734 { 735 - bfa_os_memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s)); 735 + memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s)); 736 736 737 737 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 738 738 ··· 745 745 if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) { 746 746 rnid_acc->specific_id_data_length = 747 747 sizeof(struct fc_rnid_general_topology_data_s); 748 - bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data); 748 + rnid_acc->gen_topology_data = *gen_topo_data; 749 749 return sizeof(struct fc_rnid_acc_s); 750 750 } else { 751 751 return sizeof(struct fc_rnid_acc_s) - ··· 760 760 { 761 761 fc_els_req_build(fchs, d_id, s_id, ox_id); 762 762 763 - bfa_os_memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); 763 + memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); 764 764 765 765 rpsc->els_cmd.els_code = FC_ELS_RPSC; 766 766 return sizeof(struct fc_rpsc_cmd_s); ··· 775 775 776 776 fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0); 777 777 778 - bfa_os_memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); 778 + memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); 779 779 780 780 rpsc2->els_cmd.els_code = FC_ELS_RPSC; 781 - rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN); 782 - rpsc2->num_pids = bfa_os_htons(npids); 781 + rpsc2->token = cpu_to_be32(FC_BRCD_TOKEN); 782 + rpsc2->num_pids = cpu_to_be16(npids); 783 783 for (i = 0; i < npids; i++) 784 784 rpsc2->pid_list[i].pid = pid_list[i]; 785 785 ··· 791 791 u32 d_id, u32 s_id, u16 ox_id, 792 792 struct fc_rpsc_speed_info_s *oper_speed) 793 793 { 794 - bfa_os_memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); 794 + memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); 795 795 796 796 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 797 797 798 798 rpsc_acc->command = FC_ELS_ACC; 799 - rpsc_acc->num_entries = bfa_os_htons(1); 799 + 
rpsc_acc->num_entries = cpu_to_be16(1); 800 800 801 801 rpsc_acc->speed_info[0].port_speed_cap = 802 - bfa_os_htons(oper_speed->port_speed_cap); 802 + cpu_to_be16(oper_speed->port_speed_cap); 803 803 804 804 rpsc_acc->speed_info[0].port_op_speed = 805 - bfa_os_htons(oper_speed->port_op_speed); 805 + cpu_to_be16(oper_speed->port_op_speed); 806 806 807 807 return sizeof(struct fc_rpsc_acc_s); 808 808 } ··· 830 830 { 831 831 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); 832 832 833 - bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s)); 833 + memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s)); 834 834 835 835 pdisc->els_cmd.els_code = FC_ELS_PDISC; 836 836 fc_els_req_build(fchs, d_id, s_id, ox_id); 837 837 838 - pdisc->csp.rxsz = pdisc->class3.rxsz = bfa_os_htons(pdu_size); 838 + pdisc->csp.rxsz = pdisc->class3.rxsz = cpu_to_be16(pdu_size); 839 839 pdisc->port_name = port_name; 840 840 pdisc->node_name = node_name; 841 841 ··· 859 859 if (!pdisc->class3.class_valid) 860 860 return FC_PARSE_NWWN_NOT_EQUAL; 861 861 862 - if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) 862 + if (be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) 863 863 return FC_PARSE_RXSZ_INVAL; 864 864 865 865 return FC_PARSE_OK; ··· 873 873 int page; 874 874 875 875 fc_els_req_build(fchs, d_id, s_id, ox_id); 876 - bfa_os_memset(prlo, 0, (num_pages * 16) + 4); 876 + memset(prlo, 0, (num_pages * 16) + 4); 877 877 prlo->command = FC_ELS_PRLO; 878 878 prlo->page_len = 0x10; 879 - prlo->payload_len = bfa_os_htons((num_pages * 16) + 4); 879 + prlo->payload_len = cpu_to_be16((num_pages * 16) + 4); 880 880 881 881 for (page = 0; page < num_pages; page++) { 882 882 prlo->prlo_params[page].type = FC_TYPE_FCP; ··· 886 886 prlo->prlo_params[page].resp_process_assc = 0; 887 887 } 888 888 889 - return bfa_os_ntohs(prlo->payload_len); 889 + return be16_to_cpu(prlo->payload_len); 890 890 } 891 891 892 892 u16 ··· 901 901 if (prlo->command != FC_ELS_ACC) 902 902 return FC_PARSE_FAILURE; 903 903 904 - num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16; 904 + num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16; 905 905 906 906 for (page = 0; page < num_pages; page++) { 907 907 if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP) ··· 931 931 int page; 932 932 933 933 fc_els_req_build(fchs, d_id, s_id, ox_id); 934 - bfa_os_memset(tprlo, 0, (num_pages * 16) + 4); 934 + memset(tprlo, 0, (num_pages * 16) + 4); 935 935 tprlo->command = FC_ELS_TPRLO; 936 936 tprlo->page_len = 0x10; 937 - tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4); 937 + tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4); 938 938 939 939 for (page = 0; page < num_pages; page++) { 940 940 tprlo->tprlo_params[page].type = FC_TYPE_FCP; ··· 950 950 } 951 951 } 952 952 953 - return bfa_os_ntohs(tprlo->payload_len); 953 + return be16_to_cpu(tprlo->payload_len); 954 954 } 955 955 956 956 u16 ··· 965 965 if (tprlo->command != FC_ELS_ACC) 966 966 return FC_PARSE_ACC_INVAL; 967 967 968 - num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16; 968 + num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16; 969 969 970 970 for (page = 0; page < num_pages; page++) { 971 971 if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP) ··· 1011 1011 static void 1012 1012 fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) 1013 1013 { 1014 - bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); 1014 + memset(cthdr, 0, sizeof(struct ct_hdr_s)); 1015 1015 cthdr->rev_id = CT_GS3_REVISION; 1016 1016 cthdr->gs_type = CT_GSTYPE_DIRSERVICE; 1017 1017 
cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER; 1018 - cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); 1018 + cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); 1019 1019 } 1020 1020 1021 1021 static void 1022 1022 fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) 1023 1023 { 1024 - bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); 1024 + memset(cthdr, 0, sizeof(struct ct_hdr_s)); 1025 1025 cthdr->rev_id = CT_GS3_REVISION; 1026 1026 cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; 1027 1027 cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER; 1028 - cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); 1028 + cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); 1029 1029 } 1030 1030 1031 1031 static void 1032 1032 fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code, 1033 1033 u8 sub_type) 1034 1034 { 1035 - bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); 1035 + memset(cthdr, 0, sizeof(struct ct_hdr_s)); 1036 1036 cthdr->rev_id = CT_GS3_REVISION; 1037 1037 cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; 1038 1038 cthdr->gs_sub_type = sub_type; 1039 - cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); 1039 + cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); 1040 1040 } 1041 1041 1042 1042 u16 ··· 1050 1050 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1051 1051 fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN); 1052 1052 1053 - bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s)); 1053 + memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s)); 1054 1054 gidpn->port_name = port_name; 1055 1055 return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s); 1056 1056 } ··· 1066 1066 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1067 1067 fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID); 1068 1068 1069 - bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t)); 1069 + memset(gpnid, 0, sizeof(fcgs_gpnid_req_t)); 1070 1070 gpnid->dap = port_id; 1071 1071 return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s); 1072 1072 } ··· 1082 1082 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1083 1083 fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID); 1084 1084 1085 - bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); 1085 + memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); 1086 1086 gnnid->dap = port_id; 1087 1087 return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s); 1088 1088 } ··· 1090 1090 u16 1091 1091 fc_ct_rsp_parse(struct ct_hdr_s *cthdr) 1092 1092 { 1093 - if (bfa_os_ntohs(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) { 1093 + if (be16_to_cpu(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) { 1094 1094 if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY) 1095 1095 return FC_PARSE_BUSY; 1096 1096 else ··· 1108 1108 1109 1109 fc_els_req_build(fchs, d_id, s_id, ox_id); 1110 1110 1111 - bfa_os_memset(scr, 0, sizeof(struct fc_scr_s)); 1111 + memset(scr, 0, sizeof(struct fc_scr_s)); 1112 1112 scr->command = FC_ELS_SCR; 1113 1113 scr->reg_func = FC_SCR_REG_FUNC_FULL; 1114 1114 if (set_br_reg) ··· 1129 1129 rscn->pagelen = sizeof(rscn->event[0]); 1130 1130 1131 1131 payldlen = sizeof(u32) + rscn->pagelen; 1132 - rscn->payldlen = bfa_os_htons(payldlen); 1132 + rscn->payldlen = cpu_to_be16(payldlen); 1133 1133 1134 1134 rscn->event[0].format = FC_RSCN_FORMAT_PORTID; 1135 1135 rscn->event[0].portid = s_id; ··· 1149 1149 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1150 1150 fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); 1151 1151 1152 - bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); 1152 + memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); 1153 1153 1154 1154 rftid->dap = s_id; 1155 1155 1156 1156 /* By default, FCP FC4 Type is registered */ 1157 1157 index = FC_TYPE_FCP >> 5; 
1158 1158 type_value = 1 << (FC_TYPE_FCP % 32); 1159 - rftid->fc4_type[index] = bfa_os_htonl(type_value); 1159 + rftid->fc4_type[index] = cpu_to_be32(type_value); 1160 1160 1161 1161 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); 1162 1162 } ··· 1172 1172 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1173 1173 fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); 1174 1174 1175 - bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); 1175 + memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); 1176 1176 1177 1177 rftid->dap = s_id; 1178 - bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, 1178 + memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, 1179 1179 (bitmap_size < 32 ? bitmap_size : 32)); 1180 1180 1181 1181 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); ··· 1192 1192 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1193 1193 fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID); 1194 1194 1195 - bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s)); 1195 + memset(rffid, 0, sizeof(struct fcgs_rffid_req_s)); 1196 1196 1197 1197 rffid->dap = s_id; 1198 1198 rffid->fc4ftr_bits = fc4_ftrs; ··· 1214 1214 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1215 1215 fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID); 1216 1216 1217 - bfa_os_memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s)); 1217 + memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s)); 1218 1218 1219 1219 rspnid->dap = s_id; 1220 1220 rspnid->spn_len = (u8) strlen((char *)name); ··· 1235 1235 1236 1236 fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT); 1237 1237 1238 - bfa_os_memset(gidft, 0, sizeof(struct fcgs_gidft_req_s)); 1238 + memset(gidft, 0, sizeof(struct fcgs_gidft_req_s)); 1239 1239 gidft->fc4_type = fc4_type; 1240 1240 gidft->domain_id = 0; 1241 1241 gidft->area_id = 0; ··· 1254 1254 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1255 1255 fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID); 1256 1256 1257 - bfa_os_memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s)); 1257 + memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s)); 1258 1258 rpnid->port_id = port_id; 1259 1259 rpnid->port_name = port_name; 1260 1260 ··· 1272 1272 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1273 1273 fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID); 1274 1274 1275 - bfa_os_memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s)); 1275 + memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s)); 1276 1276 rnnid->port_id = port_id; 1277 1277 rnnid->node_name = node_name; 1278 1278 ··· 1291 1291 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1292 1292 fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID); 1293 1293 1294 - bfa_os_memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s)); 1294 + memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s)); 1295 1295 rcsid->port_id = port_id; 1296 1296 rcsid->cos = cos; 1297 1297 ··· 1309 1309 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1310 1310 fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID); 1311 1311 1312 - bfa_os_memset(rptid, 0, sizeof(struct fcgs_rptid_req_s)); 1312 + memset(rptid, 0, sizeof(struct fcgs_rptid_req_s)); 1313 1313 rptid->port_id = port_id; 1314 1314 rptid->port_type = port_type; 1315 1315 ··· 1326 1326 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1327 1327 fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT); 1328 1328 1329 - bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); 1329 + memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); 1330 1330 ganxt->port_id = port_id; 1331 1331 1332 1332 return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s); ··· 1365 1365 1366 1366 index = fc4_type >> 5; 1367 1367 type_value = 1 << (fc4_type % 32); 1368 - ptr[index] = 
bfa_os_htonl(type_value); 1368 + ptr[index] = cpu_to_be32(type_value); 1369 1369 1370 1370 } 1371 1371 ··· 1383 1383 fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD, 1384 1384 CT_GSSUBTYPE_CFGSERVER); 1385 1385 1386 - bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t)); 1386 + memset(gmal, 0, sizeof(fcgs_gmal_req_t)); 1387 1387 gmal->wwn = wwn; 1388 1388 1389 1389 return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t); ··· 1403 1403 fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD, 1404 1404 CT_GSSUBTYPE_CFGSERVER); 1405 1405 1406 - bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t)); 1406 + memset(gfn, 0, sizeof(fcgs_gfn_req_t)); 1407 1407 gfn->wwn = wwn; 1408 1408 1409 1409 return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t);
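
The bfa_fcbuild.c hunks above follow one mechanical pattern: the driver-private bfa_os_htons/bfa_os_htonl/bfa_os_ntohs wrappers become the kernel's standard cpu_to_be16/cpu_to_be32/be16_to_cpu helpers, and bfa_os_memcpy/bfa_os_memset become plain memcpy/memset. A minimal sketch of the idiom, assuming a hypothetical wire structure (none of these struct or function names come from the driver):

	#include <linux/types.h>
	#include <linux/string.h>
	#include <asm/byteorder.h>

	/* Hypothetical FC-style wire header: multi-byte fields travel
	 * in big-endian (network) byte order on the wire. */
	struct example_wire_hdr {
		__be16 payload_len;	/* sparse flags host-order stores here */
		__be32 token;
	};

	/* Build side: convert host-order values once, at the boundary. */
	static void example_build(struct example_wire_hdr *hdr, u16 len, u32 tok)
	{
		memset(hdr, 0, sizeof(*hdr));		/* was bfa_os_memset() */
		hdr->payload_len = cpu_to_be16(len);	/* was bfa_os_htons()  */
		hdr->token = cpu_to_be32(tok);		/* was bfa_os_htonl()  */
	}

	/* Parse side: convert back to host order before arithmetic. */
	static u16 example_parse(const struct example_wire_hdr *hdr)
	{
		return be16_to_cpu(hdr->payload_len);	/* was bfa_os_ntohs() */
	}

Beyond dropping a wrapper layer, typed __be16/__be32 fields let sparse (make C=1) flag any assignment or comparison that skips the conversion, and the generic helpers compile to no-ops on big-endian hosts.
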
+175 -176
drivers/scsi/bfa/bfa_fcpim.c
··· 26 26 (__l->__stats += __r->__stats) 27 27 28 28 29 - /** 29 + /* 30 30 * BFA ITNIM Related definitions 31 31 */ 32 32 static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); ··· 72 72 } \ 73 73 } while (0) 74 74 75 - /** 75 + /* 76 76 * bfa_itnim_sm BFA itnim state machine 77 77 */ 78 78 ··· 89 89 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */ 90 90 }; 91 91 92 - /** 92 + /* 93 93 * BFA IOIM related definitions 94 94 */ 95 95 #define bfa_ioim_move_to_comp_q(__ioim) do { \ ··· 107 107 if ((__fcpim)->profile_start) \ 108 108 (__fcpim)->profile_start(__ioim); \ 109 109 } while (0) 110 - /** 110 + /* 111 111 * hal_ioim_sm 112 112 */ 113 113 114 - /** 114 + /* 115 115 * IO state machine events 116 116 */ 117 117 enum bfa_ioim_event { ··· 136 136 }; 137 137 138 138 139 - /** 139 + /* 140 140 * BFA TSKIM related definitions 141 141 */ 142 142 143 - /** 143 + /* 144 144 * task management completion handling 145 145 */ 146 146 #define bfa_tskim_qcomp(__tskim, __cbfn) do { \ ··· 165 165 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ 166 166 }; 167 167 168 - /** 168 + /* 169 169 * forward declaration for BFA ITNIM functions 170 170 */ 171 171 static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim); ··· 183 183 static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim); 184 184 static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim); 185 185 186 - /** 186 + /* 187 187 * forward declaration of ITNIM state machine 188 188 */ 189 189 static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, ··· 217 217 static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, 218 218 enum bfa_itnim_event event); 219 219 220 - /** 220 + /* 221 221 * forward declaration for BFA IOIM functions 222 222 */ 223 223 static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); ··· 233 233 static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); 234 234 235 235 236 - /** 236 + /* 237 237 * forward declaration of BFA IO state machine 238 238 */ 239 239 static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, ··· 261 261 static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, 262 262 enum bfa_ioim_event event); 263 263 264 - /** 264 + /* 265 265 * forward declaration for BFA TSKIM functions 266 266 */ 267 267 static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); ··· 276 276 static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); 277 277 278 278 279 - /** 279 + /* 280 280 * forward declaration of BFA TSKIM state machine 281 281 */ 282 282 static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, ··· 294 294 static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, 295 295 enum bfa_tskim_event event); 296 296 297 - /** 297 + /* 298 298 * hal_fcpim_mod BFA FCP Initiator Mode module 299 299 */ 300 300 301 - /** 301 + /* 302 302 * Compute and return memory needed by FCP(im) module. 
303 303 */ 304 304 static void ··· 307 307 { 308 308 bfa_itnim_meminfo(cfg, km_len, dm_len); 309 309 310 - /** 310 + /* 311 311 * IO memory 312 312 */ 313 313 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN) ··· 320 320 321 321 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN; 322 322 323 - /** 323 + /* 324 324 * task management command memory 325 325 */ 326 326 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN) ··· 463 463 struct bfa_itnim_s *itnim; 464 464 465 465 /* accumulate IO stats from itnim */ 466 - bfa_os_memset(stats, 0, sizeof(struct bfa_itnim_iostats_s)); 466 + memset(stats, 0, sizeof(struct bfa_itnim_iostats_s)); 467 467 list_for_each_safe(qe, qen, &fcpim->itnim_q) { 468 468 itnim = (struct bfa_itnim_s *) qe; 469 469 if (itnim->rport->rport_info.lp_tag != lp_tag) ··· 480 480 struct bfa_itnim_s *itnim; 481 481 482 482 /* accumulate IO stats from itnim */ 483 - bfa_os_memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s)); 483 + memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s)); 484 484 list_for_each_safe(qe, qen, &fcpim->itnim_q) { 485 485 itnim = (struct bfa_itnim_s *) qe; 486 486 bfa_fcpim_add_stats(modstats, &(itnim->stats)); ··· 560 560 itnim = (struct bfa_itnim_s *) qe; 561 561 bfa_itnim_clear_stats(itnim); 562 562 } 563 - bfa_os_memset(&fcpim->del_itn_stats, 0, 563 + memset(&fcpim->del_itn_stats, 0, 564 564 sizeof(struct bfa_fcpim_del_itn_stats_s)); 565 565 566 566 return BFA_STATUS_OK; ··· 604 604 605 605 606 606 607 - /** 607 + /* 608 608 * BFA ITNIM module state machine functions 609 609 */ 610 610 611 - /** 611 + /* 612 612 * Beginning/unallocated state - no events expected. 613 613 */ 614 614 static void ··· 629 629 } 630 630 } 631 631 632 - /** 632 + /* 633 633 * Beginning state, only online event expected. 634 634 */ 635 635 static void ··· 660 660 } 661 661 } 662 662 663 - /** 663 + /* 664 664 * Waiting for itnim create response from firmware. 665 665 */ 666 666 static void ··· 732 732 } 733 733 } 734 734 735 - /** 735 + /* 736 736 * Waiting for itnim create response from firmware, a delete is pending. 737 737 */ 738 738 static void ··· 760 760 } 761 761 } 762 762 763 - /** 763 + /* 764 764 * Online state - normal parking state. 765 765 */ 766 766 static void ··· 802 802 } 803 803 } 804 804 805 - /** 805 + /* 806 806 * Second level error recovery need. 807 807 */ 808 808 static void ··· 833 833 } 834 834 } 835 835 836 - /** 836 + /* 837 837 * Going offline. Waiting for active IO cleanup. 838 838 */ 839 839 static void ··· 870 870 } 871 871 } 872 872 873 - /** 873 + /* 874 874 * Deleting itnim. Waiting for active IO cleanup. 875 875 */ 876 876 static void ··· 898 898 } 899 899 } 900 900 901 - /** 901 + /* 902 902 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response. 903 903 */ 904 904 static void ··· 955 955 } 956 956 } 957 957 958 - /** 958 + /* 959 959 * Offline state. 960 960 */ 961 961 static void ··· 987 987 } 988 988 } 989 989 990 - /** 990 + /* 991 991 * IOC h/w failed state. 992 992 */ 993 993 static void ··· 1023 1023 } 1024 1024 } 1025 1025 1026 - /** 1026 + /* 1027 1027 * Itnim is deleted, waiting for firmware response to delete. 1028 1028 */ 1029 1029 static void ··· 1068 1068 } 1069 1069 } 1070 1070 1071 - /** 1071 + /* 1072 1072 * Initiate cleanup of all IOs on an IOC failure. 1073 1073 */ 1074 1074 static void ··· 1088 1088 bfa_ioim_iocdisable(ioim); 1089 1089 } 1090 1090 1091 - /** 1091 + /* 1092 1092 * For IO request in pending queue, we pretend an early timeout. 
1093 1093 */ 1094 1094 list_for_each_safe(qe, qen, &itnim->pending_q) { ··· 1102 1102 } 1103 1103 } 1104 1104 1105 - /** 1105 + /* 1106 1106 * IO cleanup completion 1107 1107 */ 1108 1108 static void ··· 1114 1114 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP); 1115 1115 } 1116 1116 1117 - /** 1117 + /* 1118 1118 * Initiate cleanup of all IOs. 1119 1119 */ 1120 1120 static void ··· 1129 1129 list_for_each_safe(qe, qen, &itnim->io_q) { 1130 1130 ioim = (struct bfa_ioim_s *) qe; 1131 1131 1132 - /** 1132 + /* 1133 1133 * Move IO to a cleanup queue from active queue so that a later 1134 1134 * TM will not pickup this IO. 1135 1135 */ ··· 1176 1176 bfa_cb_itnim_sler(itnim->ditn); 1177 1177 } 1178 1178 1179 - /** 1179 + /* 1180 1180 * Call to resume any I/O requests waiting for room in request queue. 1181 1181 */ 1182 1182 static void ··· 1190 1190 1191 1191 1192 1192 1193 - /** 1193 + /* 1194 1194 * bfa_itnim_public 1195 1195 */ 1196 1196 ··· 1210 1210 bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 1211 1211 u32 *dm_len) 1212 1212 { 1213 - /** 1213 + /* 1214 1214 * ITN memory 1215 1215 */ 1216 1216 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s); ··· 1229 1229 fcpim->itnim_arr = itnim; 1230 1230 1231 1231 for (i = 0; i < fcpim->num_itnims; i++, itnim++) { 1232 - bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s)); 1232 + memset(itnim, 0, sizeof(struct bfa_itnim_s)); 1233 1233 itnim->bfa = bfa; 1234 1234 itnim->fcpim = fcpim; 1235 1235 itnim->reqq = BFA_REQQ_QOS_LO; ··· 1264 1264 1265 1265 itnim->msg_no++; 1266 1266 1267 - /** 1267 + /* 1268 1268 * check for room in queue to send request now 1269 1269 */ 1270 1270 m = bfa_reqq_next(itnim->bfa, itnim->reqq); ··· 1281 1281 m->msg_no = itnim->msg_no; 1282 1282 bfa_stats(itnim, fw_create); 1283 1283 1284 - /** 1284 + /* 1285 1285 * queue I/O message to firmware 1286 1286 */ 1287 1287 bfa_reqq_produce(itnim->bfa, itnim->reqq); ··· 1293 1293 { 1294 1294 struct bfi_itnim_delete_req_s *m; 1295 1295 1296 - /** 1296 + /* 1297 1297 * check for room in queue to send request now 1298 1298 */ 1299 1299 m = bfa_reqq_next(itnim->bfa, itnim->reqq); ··· 1307 1307 m->fw_handle = itnim->rport->fw_handle; 1308 1308 bfa_stats(itnim, fw_delete); 1309 1309 1310 - /** 1310 + /* 1311 1311 * queue I/O message to firmware 1312 1312 */ 1313 1313 bfa_reqq_produce(itnim->bfa, itnim->reqq); 1314 1314 return BFA_TRUE; 1315 1315 } 1316 1316 1317 - /** 1317 + /* 1318 1318 * Cleanup all pending failed inflight requests. 1319 1319 */ 1320 1320 static void ··· 1329 1329 } 1330 1330 } 1331 1331 1332 - /** 1332 + /* 1333 1333 * Start all pending IO requests. 1334 1334 */ 1335 1335 static void ··· 1339 1339 1340 1340 bfa_itnim_iotov_stop(itnim); 1341 1341 1342 - /** 1342 + /* 1343 1343 * Abort all inflight IO requests in the queue 1344 1344 */ 1345 1345 bfa_itnim_delayed_comp(itnim, BFA_FALSE); 1346 1346 1347 - /** 1347 + /* 1348 1348 * Start all pending IO requests. 1349 1349 */ 1350 1350 while (!list_empty(&itnim->pending_q)) { ··· 1354 1354 } 1355 1355 } 1356 1356 1357 - /** 1357 + /* 1358 1358 * Fail all pending IO requests 1359 1359 */ 1360 1360 static void ··· 1362 1362 { 1363 1363 struct bfa_ioim_s *ioim; 1364 1364 1365 - /** 1365 + /* 1366 1366 * Fail all inflight IO requests in the queue 1367 1367 */ 1368 1368 bfa_itnim_delayed_comp(itnim, BFA_TRUE); 1369 1369 1370 - /** 1370 + /* 1371 1371 * Fail any pending IO requests. 
1372 1372 */ 1373 1373 while (!list_empty(&itnim->pending_q)) { ··· 1377 1377 } 1378 1378 } 1379 1379 1380 - /** 1380 + /* 1381 1381 * IO TOV timer callback. Fail any pending IO requests. 1382 1382 */ 1383 1383 static void ··· 1392 1392 bfa_cb_itnim_tov(itnim->ditn); 1393 1393 } 1394 1394 1395 - /** 1395 + /* 1396 1396 * Start IO TOV timer for failing back pending IO requests in offline state. 1397 1397 */ 1398 1398 static void ··· 1407 1407 } 1408 1408 } 1409 1409 1410 - /** 1410 + /* 1411 1411 * Stop IO TOV timer. 1412 1412 */ 1413 1413 static void ··· 1419 1419 } 1420 1420 } 1421 1421 1422 - /** 1422 + /* 1423 1423 * Stop IO TOV timer. 1424 1424 */ 1425 1425 static void ··· 1459 1459 1460 1460 1461 1461 1462 - /** 1462 + /* 1463 1463 * bfa_itnim_public 1464 1464 */ 1465 1465 1466 - /** 1466 + /* 1467 1467 * Itnim interrupt processing. 1468 1468 */ 1469 1469 void ··· 1509 1509 1510 1510 1511 1511 1512 - /** 1512 + /* 1513 1513 * bfa_itnim_api 1514 1514 */ 1515 1515 ··· 1552 1552 bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE); 1553 1553 } 1554 1554 1555 - /** 1555 + /* 1556 1556 * Return true if itnim is considered offline for holding off IO request. 1557 1557 * IO is not held if itnim is being deleted. 1558 1558 */ ··· 1597 1597 bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) 1598 1598 { 1599 1599 int j; 1600 - bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats)); 1601 - bfa_os_memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile)); 1600 + memset(&itnim->stats, 0, sizeof(itnim->stats)); 1601 + memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile)); 1602 1602 for (j = 0; j < BFA_IOBUCKET_MAX; j++) 1603 1603 itnim->ioprofile.io_latency.min[j] = ~0; 1604 1604 } 1605 1605 1606 - /** 1606 + /* 1607 1607 * BFA IO module state machine functions 1608 1608 */ 1609 1609 1610 - /** 1610 + /* 1611 1611 * IO is not started (unallocated). 1612 1612 */ 1613 1613 static void ··· 1657 1657 break; 1658 1658 1659 1659 case BFA_IOIM_SM_ABORT: 1660 - /** 1660 + /* 1661 1661 * IO in pending queue can get abort requests. Complete abort 1662 1662 * requests immediately. 1663 1663 */ ··· 1672 1672 } 1673 1673 } 1674 1674 1675 - /** 1675 + /* 1676 1676 * IO is waiting for SG pages. 1677 1677 */ 1678 1678 static void ··· 1719 1719 } 1720 1720 } 1721 1721 1722 - /** 1722 + /* 1723 1723 * IO is active. 1724 1724 */ 1725 1725 static void ··· 1803 1803 } 1804 1804 } 1805 1805 1806 - /** 1806 + /* 1807 1807 * IO is retried with new tag. 1808 1808 */ 1809 1809 static void ··· 1844 1844 break; 1845 1845 1846 1846 case BFA_IOIM_SM_ABORT: 1847 - /** in this state IO abort is done. 1847 + /* in this state IO abort is done. 1848 1848 * Waiting for IO tag resource free. 1849 1849 */ 1850 1850 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); ··· 1857 1857 } 1858 1858 } 1859 1859 1860 - /** 1860 + /* 1861 1861 * IO is being aborted, waiting for completion from firmware. 1862 1862 */ 1863 1863 static void ··· 1919 1919 } 1920 1920 } 1921 1921 1922 - /** 1922 + /* 1923 1923 * IO is being cleaned up (implicit abort), waiting for completion from 1924 1924 * firmware. 1925 1925 */ ··· 1937 1937 break; 1938 1938 1939 1939 case BFA_IOIM_SM_ABORT: 1940 - /** 1940 + /* 1941 1941 * IO is already being aborted implicitly 1942 1942 */ 1943 1943 ioim->io_cbfn = __bfa_cb_ioim_abort; ··· 1969 1969 break; 1970 1970 1971 1971 case BFA_IOIM_SM_CLEANUP: 1972 - /** 1972 + /* 1973 1973 * IO can be in cleanup state already due to TM command. 1974 1974 * 2nd cleanup request comes from ITN offline event. 
1975 1975 */ ··· 1980 1980 } 1981 1981 } 1982 1982 1983 - /** 1983 + /* 1984 1984 * IO is waiting for room in request CQ 1985 1985 */ 1986 1986 static void ··· 2024 2024 } 2025 2025 } 2026 2026 2027 - /** 2027 + /* 2028 2028 * Active IO is being aborted, waiting for room in request CQ. 2029 2029 */ 2030 2030 static void ··· 2075 2075 } 2076 2076 } 2077 2077 2078 - /** 2078 + /* 2079 2079 * Active IO is being cleaned up, waiting for room in request CQ. 2080 2080 */ 2081 2081 static void ··· 2091 2091 break; 2092 2092 2093 2093 case BFA_IOIM_SM_ABORT: 2094 - /** 2094 + /* 2095 2095 * IO is alraedy being cleaned up implicitly 2096 2096 */ 2097 2097 ioim->io_cbfn = __bfa_cb_ioim_abort; ··· 2125 2125 } 2126 2126 } 2127 2127 2128 - /** 2128 + /* 2129 2129 * IO bfa callback is pending. 2130 2130 */ 2131 2131 static void ··· 2152 2152 } 2153 2153 } 2154 2154 2155 - /** 2155 + /* 2156 2156 * IO bfa callback is pending. IO resource cannot be freed. 2157 2157 */ 2158 2158 static void ··· 2185 2185 } 2186 2186 } 2187 2187 2188 - /** 2188 + /* 2189 2189 * IO is completed, waiting resource free from firmware. 2190 2190 */ 2191 2191 static void ··· 2214 2214 2215 2215 2216 2216 2217 - /** 2217 + /* 2218 2218 * hal_ioim_private 2219 2219 */ 2220 2220 ··· 2247 2247 2248 2248 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg; 2249 2249 if (m->io_status == BFI_IOIM_STS_OK) { 2250 - /** 2250 + /* 2251 2251 * setup sense information, if present 2252 2252 */ 2253 2253 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) && ··· 2256 2256 snsinfo = ioim->iosp->snsinfo; 2257 2257 } 2258 2258 2259 - /** 2259 + /* 2260 2260 * setup residue value correctly for normal completions 2261 2261 */ 2262 2262 if (m->resid_flags == FCP_RESID_UNDER) { 2263 - residue = bfa_os_ntohl(m->residue); 2263 + residue = be32_to_cpu(m->residue); 2264 2264 bfa_stats(ioim->itnim, iocomp_underrun); 2265 2265 } 2266 2266 if (m->resid_flags == FCP_RESID_OVER) { 2267 - residue = bfa_os_ntohl(m->residue); 2267 + residue = be32_to_cpu(m->residue); 2268 2268 residue = -residue; 2269 2269 bfa_stats(ioim->itnim, iocomp_overrun); 2270 2270 } ··· 2327 2327 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); 2328 2328 } 2329 2329 2330 - /** 2330 + /* 2331 2331 * Send I/O request to firmware. 
2332 2332 */ 2333 2333 static bfa_boolean_t ··· 2343 2343 struct scatterlist *sg; 2344 2344 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; 2345 2345 2346 - /** 2346 + /* 2347 2347 * check for room in queue to send request now 2348 2348 */ 2349 2349 m = bfa_reqq_next(ioim->bfa, ioim->reqq); ··· 2354 2354 return BFA_FALSE; 2355 2355 } 2356 2356 2357 - /** 2357 + /* 2358 2358 * build i/o request message next 2359 2359 */ 2360 - m->io_tag = bfa_os_htons(ioim->iotag); 2360 + m->io_tag = cpu_to_be16(ioim->iotag); 2361 2361 m->rport_hdl = ioim->itnim->rport->fw_handle; 2362 2362 m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio); 2363 2363 2364 - /** 2364 + /* 2365 2365 * build inline IO SG element here 2366 2366 */ 2367 2367 sge = &m->sges[0]; ··· 2387 2387 sge->flags = BFI_SGE_PGDLEN; 2388 2388 bfa_sge_to_be(sge); 2389 2389 2390 - /** 2390 + /* 2391 2391 * set up I/O command parameters 2392 2392 */ 2393 - bfa_os_assign(m->cmnd, cmnd_z0); 2393 + m->cmnd = cmnd_z0; 2394 2394 m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio); 2395 2395 m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio); 2396 - bfa_os_assign(m->cmnd.cdb, 2397 - *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio)); 2396 + m->cmnd.cdb = *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio); 2398 2397 fcp_dl = bfa_cb_ioim_get_size(ioim->dio); 2399 - m->cmnd.fcp_dl = bfa_os_htonl(fcp_dl); 2398 + m->cmnd.fcp_dl = cpu_to_be32(fcp_dl); 2400 2399 2401 - /** 2400 + /* 2402 2401 * set up I/O message header 2403 2402 */ 2404 2403 switch (m->cmnd.iodir) { ··· 2426 2427 m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio); 2427 2428 m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio); 2428 2429 2429 - /** 2430 + /* 2430 2431 * Handle large CDB (>16 bytes). 2431 2432 */ 2432 2433 m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) - 2433 2434 FCP_CMND_CDB_LEN) / sizeof(u32); 2434 2435 if (m->cmnd.addl_cdb_len) { 2435 - bfa_os_memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *) 2436 + memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *) 2436 2437 bfa_cb_ioim_get_cdb(ioim->dio) + 1, 2437 2438 m->cmnd.addl_cdb_len * sizeof(u32)); 2438 2439 fcp_cmnd_fcpdl(&m->cmnd) = 2439 - bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio)); 2440 + cpu_to_be32(bfa_cb_ioim_get_size(ioim->dio)); 2440 2441 } 2441 2442 #endif 2442 2443 2443 - /** 2444 + /* 2444 2445 * queue I/O message to firmware 2445 2446 */ 2446 2447 bfa_reqq_produce(ioim->bfa, ioim->reqq); 2447 2448 return BFA_TRUE; 2448 2449 } 2449 2450 2450 - /** 2451 + /* 2451 2452 * Setup any additional SG pages needed.Inline SG element is setup 2452 2453 * at queuing time. 2453 2454 */ ··· 2458 2459 2459 2460 bfa_assert(ioim->nsges > BFI_SGE_INLINE); 2460 2461 2461 - /** 2462 + /* 2462 2463 * allocate SG pages needed 2463 2464 */ 2464 2465 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); ··· 2507 2508 sge->sg_len = sg_dma_len(sg); 2508 2509 pgcumsz += sge->sg_len; 2509 2510 2510 - /** 2511 + /* 2511 2512 * set flags 2512 2513 */ 2513 2514 if (i < (nsges - 1)) ··· 2522 2523 2523 2524 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg); 2524 2525 2525 - /** 2526 + /* 2526 2527 * set the link element of each page 2527 2528 */ 2528 2529 if (sgeid == ioim->nsges) { ··· 2539 2540 } while (sgeid < ioim->nsges); 2540 2541 } 2541 2542 2542 - /** 2543 + /* 2543 2544 * Send I/O abort request to firmware. 
2544 2545 */ 2545 2546 static bfa_boolean_t ··· 2548 2549 struct bfi_ioim_abort_req_s *m; 2549 2550 enum bfi_ioim_h2i msgop; 2550 2551 2551 - /** 2552 + /* 2552 2553 * check for room in queue to send request now 2553 2554 */ 2554 2555 m = bfa_reqq_next(ioim->bfa, ioim->reqq); 2555 2556 if (!m) 2556 2557 return BFA_FALSE; 2557 2558 2558 - /** 2559 + /* 2559 2560 * build i/o request message next 2560 2561 */ 2561 2562 if (ioim->iosp->abort_explicit) ··· 2564 2565 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ; 2565 2566 2566 2567 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa)); 2567 - m->io_tag = bfa_os_htons(ioim->iotag); 2568 + m->io_tag = cpu_to_be16(ioim->iotag); 2568 2569 m->abort_tag = ++ioim->abort_tag; 2569 2570 2570 - /** 2571 + /* 2571 2572 * queue I/O message to firmware 2572 2573 */ 2573 2574 bfa_reqq_produce(ioim->bfa, ioim->reqq); 2574 2575 return BFA_TRUE; 2575 2576 } 2576 2577 2577 - /** 2578 + /* 2578 2579 * Call to resume any I/O requests waiting for room in request queue. 2579 2580 */ 2580 2581 static void ··· 2590 2591 static void 2591 2592 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim) 2592 2593 { 2593 - /** 2594 + /* 2594 2595 * Move IO from itnim queue to fcpim global queue since itnim will be 2595 2596 * freed. 2596 2597 */ ··· 2623 2624 return BFA_TRUE; 2624 2625 } 2625 2626 2626 - /** 2627 + /* 2627 2628 * or after the link comes back. 2628 2629 */ 2629 2630 void 2630 2631 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) 2631 2632 { 2632 - /** 2633 + /* 2633 2634 * If path tov timer expired, failback with PATHTOV status - these 2634 2635 * IO requests are not normally retried by IO stack. 2635 2636 * ··· 2644 2645 } 2645 2646 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); 2646 2647 2647 - /** 2648 + /* 2648 2649 * Move IO to fcpim global queue since itnim will be 2649 2650 * freed. 2650 2651 */ ··· 2654 2655 2655 2656 2656 2657 2657 - /** 2658 + /* 2658 2659 * hal_ioim_friend 2659 2660 */ 2660 2661 2661 - /** 2662 + /* 2662 2663 * Memory allocation and initialization. 2663 2664 */ 2664 2665 void ··· 2670 2671 u8 *snsinfo; 2671 2672 u32 snsbufsz; 2672 2673 2673 - /** 2674 + /* 2674 2675 * claim memory first 2675 2676 */ 2676 2677 ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo); ··· 2681 2682 fcpim->ioim_sp_arr = iosp; 2682 2683 bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs); 2683 2684 2684 - /** 2685 + /* 2685 2686 * Claim DMA memory for per IO sense data. 2686 2687 */ 2687 2688 snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN; ··· 2693 2694 snsinfo = fcpim->snsbase.kva; 2694 2695 bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa); 2695 2696 2696 - /** 2697 + /* 2697 2698 * Initialize ioim free queues 2698 2699 */ 2699 2700 INIT_LIST_HEAD(&fcpim->ioim_free_q); ··· 2705 2706 /* 2706 2707 * initialize IOIM 2707 2708 */ 2708 - bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s)); 2709 + memset(ioim, 0, sizeof(struct bfa_ioim_s)); 2709 2710 ioim->iotag = i; 2710 2711 ioim->bfa = fcpim->bfa; 2711 2712 ioim->fcpim = fcpim; ··· 2722 2723 } 2723 2724 } 2724 2725 2725 - /** 2726 + /* 2726 2727 * Driver detach time call. 
2727 2728 */ 2728 2729 void ··· 2739 2740 u16 iotag; 2740 2741 enum bfa_ioim_event evt = BFA_IOIM_SM_COMP; 2741 2742 2742 - iotag = bfa_os_ntohs(rsp->io_tag); 2743 + iotag = be16_to_cpu(rsp->io_tag); 2743 2744 2744 2745 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); 2745 2746 bfa_assert(ioim->iotag == iotag); ··· 2749 2750 bfa_trc(ioim->bfa, rsp->reuse_io_tag); 2750 2751 2751 2752 if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active)) 2752 - bfa_os_assign(ioim->iosp->comp_rspmsg, *m); 2753 + ioim->iosp->comp_rspmsg = *m; 2753 2754 2754 2755 switch (rsp->io_status) { 2755 2756 case BFI_IOIM_STS_OK: ··· 2822 2823 struct bfa_ioim_s *ioim; 2823 2824 u16 iotag; 2824 2825 2825 - iotag = bfa_os_ntohs(rsp->io_tag); 2826 + iotag = be16_to_cpu(rsp->io_tag); 2826 2827 2827 2828 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); 2828 2829 bfa_assert(ioim->iotag == iotag); ··· 2836 2837 void 2837 2838 bfa_ioim_profile_start(struct bfa_ioim_s *ioim) 2838 2839 { 2839 - ioim->start_time = bfa_os_get_clock(); 2840 + ioim->start_time = jiffies; 2840 2841 } 2841 2842 2842 2843 void ··· 2844 2845 { 2845 2846 u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio); 2846 2847 u32 index = bfa_ioim_get_index(fcp_dl); 2847 - u64 end_time = bfa_os_get_clock(); 2848 + u64 end_time = jiffies; 2848 2849 struct bfa_itnim_latency_s *io_lat = 2849 2850 &(ioim->itnim->ioprofile.io_latency); 2850 2851 u32 val = (u32)(end_time - ioim->start_time); ··· 2858 2859 io_lat->max[index] : val; 2859 2860 io_lat->avg[index] += val; 2860 2861 } 2861 - /** 2862 + /* 2862 2863 * Called by itnim to clean up IO while going offline. 2863 2864 */ 2864 2865 void ··· 2881 2882 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); 2882 2883 } 2883 2884 2884 - /** 2885 + /* 2885 2886 * IOC failure handling. 2886 2887 */ 2887 2888 void ··· 2892 2893 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL); 2893 2894 } 2894 2895 2895 - /** 2896 + /* 2896 2897 * IO offline TOV popped. Fail the pending IO. 2897 2898 */ 2898 2899 void ··· 2904 2905 2905 2906 2906 2907 2907 - /** 2908 + /* 2908 2909 * hal_ioim_api 2909 2910 */ 2910 2911 2911 - /** 2912 + /* 2912 2913 * Allocate IOIM resource for initiator mode I/O request. 2913 2914 */ 2914 2915 struct bfa_ioim_s * ··· 2918 2919 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 2919 2920 struct bfa_ioim_s *ioim; 2920 2921 2921 - /** 2922 + /* 2922 2923 * alocate IOIM resource 2923 2924 */ 2924 2925 bfa_q_deq(&fcpim->ioim_free_q, &ioim); ··· 2969 2970 2970 2971 bfa_ioim_cb_profile_start(ioim->fcpim, ioim); 2971 2972 2972 - /** 2973 + /* 2973 2974 * Obtain the queue over which this request has to be issued 2974 2975 */ 2975 2976 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? ··· 2979 2980 bfa_sm_send_event(ioim, BFA_IOIM_SM_START); 2980 2981 } 2981 2982 2982 - /** 2983 + /* 2983 2984 * Driver I/O abort request. 2984 2985 */ 2985 2986 bfa_status_t ··· 2998 2999 } 2999 3000 3000 3001 3001 - /** 3002 + /* 3002 3003 * BFA TSKIM state machine functions 3003 3004 */ 3004 3005 3005 - /** 3006 + /* 3006 3007 * Task management command beginning state. 3007 3008 */ 3008 3009 static void ··· 3015 3016 bfa_sm_set_state(tskim, bfa_tskim_sm_active); 3016 3017 bfa_tskim_gather_ios(tskim); 3017 3018 3018 - /** 3019 + /* 3019 3020 * If device is offline, do not send TM on wire. Just cleanup 3020 3021 * any pending IO requests and complete TM request. 3021 3022 */ ··· 3039 3040 } 3040 3041 } 3041 3042 3042 - /** 3043 + /* 3043 3044 * brief 3044 3045 * TM command is active, awaiting completion from firmware to 3045 3046 * cleanup IO requests in TM scope. 
··· 3076 3077 } 3077 3078 } 3078 3079 3079 - /** 3080 + /* 3080 3081 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup 3081 3082 * completion event from firmware. 3082 3083 */ ··· 3087 3088 3088 3089 switch (event) { 3089 3090 case BFA_TSKIM_SM_DONE: 3090 - /** 3091 + /* 3091 3092 * Ignore and wait for ABORT completion from firmware. 3092 3093 */ 3093 3094 break; ··· 3120 3121 break; 3121 3122 3122 3123 case BFA_TSKIM_SM_CLEANUP: 3123 - /** 3124 + /* 3124 3125 * Ignore, TM command completed on wire. 3125 3126 * Notify TM conmpletion on IO cleanup completion. 3126 3127 */ ··· 3137 3138 } 3138 3139 } 3139 3140 3140 - /** 3141 + /* 3141 3142 * Task management command is waiting for room in request CQ 3142 3143 */ 3143 3144 static void ··· 3152 3153 break; 3153 3154 3154 3155 case BFA_TSKIM_SM_CLEANUP: 3155 - /** 3156 + /* 3156 3157 * No need to send TM on wire since ITN is offline. 3157 3158 */ 3158 3159 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); ··· 3172 3173 } 3173 3174 } 3174 3175 3175 - /** 3176 + /* 3176 3177 * Task management command is active, awaiting for room in request CQ 3177 3178 * to send clean up request. 3178 3179 */ ··· 3185 3186 switch (event) { 3186 3187 case BFA_TSKIM_SM_DONE: 3187 3188 bfa_reqq_wcancel(&tskim->reqq_wait); 3188 - /** 3189 + /* 3189 3190 * 3190 3191 * Fall through !!! 3191 3192 */ ··· 3207 3208 } 3208 3209 } 3209 3210 3210 - /** 3211 + /* 3211 3212 * BFA callback is pending 3212 3213 */ 3213 3214 static void ··· 3235 3236 3236 3237 3237 3238 3238 - /** 3239 + /* 3239 3240 * hal_tskim_private 3240 3241 */ 3241 3242 ··· 3288 3289 return BFA_FALSE; 3289 3290 } 3290 3291 3291 - /** 3292 + /* 3292 3293 * Gather affected IO requests and task management commands. 3293 3294 */ 3294 3295 static void ··· 3300 3301 3301 3302 INIT_LIST_HEAD(&tskim->io_q); 3302 3303 3303 - /** 3304 + /* 3304 3305 * Gather any active IO requests first. 3305 3306 */ 3306 3307 list_for_each_safe(qe, qen, &itnim->io_q) { ··· 3312 3313 } 3313 3314 } 3314 3315 3315 - /** 3316 + /* 3316 3317 * Failback any pending IO requests immediately. 3317 3318 */ 3318 3319 list_for_each_safe(qe, qen, &itnim->pending_q) { ··· 3326 3327 } 3327 3328 } 3328 3329 3329 - /** 3330 + /* 3330 3331 * IO cleanup completion 3331 3332 */ 3332 3333 static void ··· 3338 3339 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE); 3339 3340 } 3340 3341 3341 - /** 3342 + /* 3342 3343 * Gather affected IO requests and task management commands. 3343 3344 */ 3344 3345 static void ··· 3358 3359 bfa_wc_wait(&tskim->wc); 3359 3360 } 3360 3361 3361 - /** 3362 + /* 3362 3363 * Send task management request to firmware. 
3363 3364 */ 3364 3365 static bfa_boolean_t ··· 3367 3368 struct bfa_itnim_s *itnim = tskim->itnim; 3368 3369 struct bfi_tskim_req_s *m; 3369 3370 3370 - /** 3371 + /* 3371 3372 * check for room in queue to send request now 3372 3373 */ 3373 3374 m = bfa_reqq_next(tskim->bfa, itnim->reqq); 3374 3375 if (!m) 3375 3376 return BFA_FALSE; 3376 3377 3377 - /** 3378 + /* 3378 3379 * build i/o request message next 3379 3380 */ 3380 3381 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, 3381 3382 bfa_lpuid(tskim->bfa)); 3382 3383 3383 - m->tsk_tag = bfa_os_htons(tskim->tsk_tag); 3384 + m->tsk_tag = cpu_to_be16(tskim->tsk_tag); 3384 3385 m->itn_fhdl = tskim->itnim->rport->fw_handle; 3385 3386 m->t_secs = tskim->tsecs; 3386 3387 m->lun = tskim->lun; 3387 3388 m->tm_flags = tskim->tm_cmnd; 3388 3389 3389 - /** 3390 + /* 3390 3391 * queue I/O message to firmware 3391 3392 */ 3392 3393 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3393 3394 return BFA_TRUE; 3394 3395 } 3395 3396 3396 - /** 3397 + /* 3397 3398 * Send abort request to cleanup an active TM to firmware. 3398 3399 */ 3399 3400 static bfa_boolean_t ··· 3402 3403 struct bfa_itnim_s *itnim = tskim->itnim; 3403 3404 struct bfi_tskim_abortreq_s *m; 3404 3405 3405 - /** 3406 + /* 3406 3407 * check for room in queue to send request now 3407 3408 */ 3408 3409 m = bfa_reqq_next(tskim->bfa, itnim->reqq); 3409 3410 if (!m) 3410 3411 return BFA_FALSE; 3411 3412 3412 - /** 3413 + /* 3413 3414 * build i/o request message next 3414 3415 */ 3415 3416 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, 3416 3417 bfa_lpuid(tskim->bfa)); 3417 3418 3418 - m->tsk_tag = bfa_os_htons(tskim->tsk_tag); 3419 + m->tsk_tag = cpu_to_be16(tskim->tsk_tag); 3419 3420 3420 - /** 3421 + /* 3421 3422 * queue I/O message to firmware 3422 3423 */ 3423 3424 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3424 3425 return BFA_TRUE; 3425 3426 } 3426 3427 3427 - /** 3428 + /* 3428 3429 * Call to resume task management cmnd waiting for room in request queue. 3429 3430 */ 3430 3431 static void ··· 3436 3437 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME); 3437 3438 } 3438 3439 3439 - /** 3440 + /* 3440 3441 * Cleanup IOs associated with a task mangement command on IOC failures. 3441 3442 */ 3442 3443 static void ··· 3453 3454 3454 3455 3455 3456 3456 - /** 3457 + /* 3457 3458 * hal_tskim_friend 3458 3459 */ 3459 3460 3460 - /** 3461 + /* 3461 3462 * Notification on completions from related ioim. 3462 3463 */ 3463 3464 void ··· 3466 3467 bfa_wc_down(&tskim->wc); 3467 3468 } 3468 3469 3469 - /** 3470 + /* 3470 3471 * Handle IOC h/w failure notification from itnim. 3471 3472 */ 3472 3473 void ··· 3477 3478 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL); 3478 3479 } 3479 3480 3480 - /** 3481 + /* 3481 3482 * Cleanup TM command and associated IOs as part of ITNIM offline. 3482 3483 */ 3483 3484 void ··· 3488 3489 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP); 3489 3490 } 3490 3491 3491 - /** 3492 + /* 3492 3493 * Memory allocation and initialization. 
3493 3494 */ 3494 3495 void ··· 3506 3507 /* 3507 3508 * initialize TSKIM 3508 3509 */ 3509 - bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s)); 3510 + memset(tskim, 0, sizeof(struct bfa_tskim_s)); 3510 3511 tskim->tsk_tag = i; 3511 3512 tskim->bfa = fcpim->bfa; 3512 3513 tskim->fcpim = fcpim; ··· 3524 3525 void 3525 3526 bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim) 3526 3527 { 3527 - /** 3528 + /* 3528 3529 * @todo 3529 3530 */ 3530 3531 } ··· 3535 3536 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 3536 3537 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m; 3537 3538 struct bfa_tskim_s *tskim; 3538 - u16 tsk_tag = bfa_os_ntohs(rsp->tsk_tag); 3539 + u16 tsk_tag = be16_to_cpu(rsp->tsk_tag); 3539 3540 3540 3541 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag); 3541 3542 bfa_assert(tskim->tsk_tag == tsk_tag); 3542 3543 3543 3544 tskim->tsk_status = rsp->tsk_status; 3544 3545 3545 - /** 3546 + /* 3546 3547 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort 3547 3548 * requests. All other statuses are for normal completions. 3548 3549 */ ··· 3557 3558 3558 3559 3559 3560 3560 - /** 3561 + /* 3561 3562 * hal_tskim_api 3562 3563 */ 3563 3564 ··· 3584 3585 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q); 3585 3586 } 3586 3587 3587 - /** 3588 + /* 3588 3589 * Start a task management command. 3589 3590 * 3590 3591 * @param[in] tskim BFA task management command instance
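
Most of bfa_fcpim.c above is the itnim/ioim/tskim state machines, in which an object's current state is a function pointer (bfa_ioim_sm_active() and friends) and bfa_sm_send_event() dispatches events to whatever handler is current. A simplified, self-contained userspace sketch of that pattern, with illustrative names rather than the driver's bfa_sm_* helpers:

	#include <stdio.h>

	/* Events a hypothetical I/O object can receive. */
	enum io_event { EV_START, EV_COMP, EV_ABORT };

	struct io;
	typedef void (*io_sm_t)(struct io *io, enum io_event event);

	/* The current state *is* the handler function. */
	struct io {
		io_sm_t sm;
	};

	static void io_sm_active(struct io *io, enum io_event event);

	static void io_sm_uninit(struct io *io, enum io_event event)
	{
		if (event == EV_START) {
			io->sm = io_sm_active;	/* transition: uninit -> active */
			printf("uninit -> active\n");
		}
	}

	static void io_sm_active(struct io *io, enum io_event event)
	{
		if (event == EV_COMP || event == EV_ABORT) {
			io->sm = io_sm_uninit;	/* transition: active -> uninit */
			printf("active -> uninit\n");
		}
	}

	int main(void)
	{
		struct io io = { .sm = io_sm_uninit };

		/* Event dispatch is one indirect call on the current state. */
		io.sm(&io, EV_START);
		io.sm(&io, EV_COMP);
		return 0;
	}

Each handler both reacts to the event and performs the transition by swapping the function pointer, so dispatch stays a single indirect call with no central transition table. The same sweep through this file also replaces bfa_os_assign(dst, src) with plain structure assignment and bfa_os_get_clock() with jiffies, as the hunks above show.
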
+9 -9
drivers/scsi/bfa/bfa_fcpim.h
··· 104 104 bfa_fcpim_profile_t profile_start; 105 105 }; 106 106 107 - /** 107 + /* 108 108 * BFA IO (initiator mode) 109 109 */ 110 110 struct bfa_ioim_s { ··· 137 137 struct bfa_tskim_s *tskim; /* Relevant TM cmd */ 138 138 }; 139 139 140 - /** 140 + /* 141 141 * BFA Task management command (initiator mode) 142 142 */ 143 143 struct bfa_tskim_s { ··· 160 160 }; 161 161 162 162 163 - /** 163 + /* 164 164 * BFA i-t-n (initiator mode) 165 165 */ 166 166 struct bfa_itnim_s { ··· 303 303 struct bfa_itnim_ioprofile_s *ioprofile); 304 304 #define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq) 305 305 306 - /** 306 + /* 307 307 * BFA completion callback for bfa_itnim_online(). 308 308 * 309 309 * @param[in] itnim FCS or driver itnim instance ··· 312 312 */ 313 313 void bfa_cb_itnim_online(void *itnim); 314 314 315 - /** 315 + /* 316 316 * BFA completion callback for bfa_itnim_offline(). 317 317 * 318 318 * @param[in] itnim FCS or driver itnim instance ··· 323 323 void bfa_cb_itnim_tov_begin(void *itnim); 324 324 void bfa_cb_itnim_tov(void *itnim); 325 325 326 - /** 326 + /* 327 327 * BFA notification to FCS/driver for second level error recovery. 328 328 * 329 329 * Atleast one I/O request has timedout and target is unresponsive to ··· 351 351 bfa_boolean_t iotov); 352 352 353 353 354 - /** 354 + /* 355 355 * I/O completion notification. 356 356 * 357 357 * @param[in] dio driver IO structure ··· 368 368 u8 scsi_status, int sns_len, 369 369 u8 *sns_info, s32 residue); 370 370 371 - /** 371 + /* 372 372 * I/O good completion notification. 373 373 * 374 374 * @param[in] dio driver IO structure ··· 377 377 */ 378 378 void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio); 379 379 380 - /** 380 + /* 381 381 * I/O abort completion notification 382 382 * 383 383 * @param[in] dio driver IO that was aborted
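
The bfa_fcpim.h hunks are purely comment-opener changes, but they enforce a real convention: in kernel sources, "/**" is reserved for kernel-doc blocks that scripts/kernel-doc parses, and ordinary comments open with a plain "/*". A sketch of the two forms (the function and types here are hypothetical):

	#include <linux/types.h>

	struct example_req;	/* hypothetical request type */

	/**
	 * example_queue_io() - hand one I/O request to firmware
	 * @req: request to queue
	 *
	 * The double-star opener plus this layout marks a kernel-doc
	 * block, so it is used only where API documentation is intended.
	 *
	 * Return: true if the request was accepted.
	 */
	bool example_queue_io(struct example_req *req);

	/*
	 * An ordinary explanatory comment opens with a single star; the
	 * hunks above convert the driver's non-kernel-doc comments to
	 * this form so the documentation tooling no longer trips on them.
	 */
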
+69 -266
drivers/scsi/bfa/bfa_fcs.c
··· 15 15 * General Public License for more details. 16 16 */ 17 17 18 - /** 18 + /* 19 19 * bfa_fcs.c BFA FCS main 20 20 */ 21 21 ··· 25 25 26 26 BFA_TRC_FILE(FCS, FCS); 27 27 28 - /** 28 + /* 29 29 * FCS sub-modules 30 30 */ 31 31 struct bfa_fcs_mod_s { ··· 43 43 bfa_fcs_fabric_modexit }, 44 44 }; 45 45 46 - /** 46 + /* 47 47 * fcs_api BFA FCS API 48 48 */ 49 49 ··· 58 58 59 59 60 60 61 - /** 61 + /* 62 62 * fcs_api BFA FCS API 63 63 */ 64 64 65 - /** 65 + /* 66 66 * fcs attach -- called once to initialize data structures at driver attach time 67 67 */ 68 68 void ··· 86 86 } 87 87 } 88 88 89 - /** 89 + /* 90 90 * fcs initialization, called once after bfa initialization is complete 91 91 */ 92 92 void ··· 110 110 } 111 111 } 112 112 113 - /** 113 + /* 114 114 * Start FCS operations. 115 115 */ 116 116 void ··· 119 119 bfa_fcs_fabric_modstart(fcs); 120 120 } 121 121 122 - /** 122 + /* 123 123 * brief 124 124 * FCS driver details initialization. 125 125 * ··· 138 138 bfa_fcs_fabric_psymb_init(&fcs->fabric); 139 139 } 140 140 141 - /** 141 + /* 142 142 * brief 143 143 * FCS FDMI Driver Parameter Initialization 144 144 * ··· 154 154 fcs->fdmi_enabled = fdmi_enable; 155 155 156 156 } 157 - /** 157 + /* 158 158 * brief 159 159 * FCS instance cleanup and exit. 160 160 * ··· 196 196 bfa_wc_down(&fcs->wc); 197 197 } 198 198 199 - /** 199 + /* 200 200 * Fabric module implementation. 201 201 */ 202 202 ··· 232 232 u32 rsp_len, 233 233 u32 resid_len, 234 234 struct fchs_s *rspfchs); 235 - /** 235 + /* 236 236 * fcs_fabric_sm fabric state machine functions 237 237 */ 238 238 239 - /** 239 + /* 240 240 * Fabric state machine events 241 241 */ 242 242 enum bfa_fcs_fabric_event { ··· 286 286 enum bfa_fcs_fabric_event event); 287 287 static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, 288 288 enum bfa_fcs_fabric_event event); 289 - /** 289 + /* 290 290 * Beginning state before fabric creation. 291 291 */ 292 292 static void ··· 312 312 } 313 313 } 314 314 315 - /** 315 + /* 316 316 * Beginning state before fabric creation. 317 317 */ 318 318 static void ··· 345 345 } 346 346 } 347 347 348 - /** 348 + /* 349 349 * Link is down, awaiting LINK UP event from port. This is also the 350 350 * first state at fabric creation. 351 351 */ ··· 375 375 } 376 376 } 377 377 378 - /** 378 + /* 379 379 * FLOGI is in progress, awaiting FLOGI reply. 380 380 */ 381 381 static void ··· 468 468 } 469 469 } 470 470 471 - /** 471 + /* 472 472 * Authentication is in progress, awaiting authentication results. 473 473 */ 474 474 static void ··· 508 508 } 509 509 } 510 510 511 - /** 511 + /* 512 512 * Authentication failed 513 513 */ 514 514 static void ··· 534 534 } 535 535 } 536 536 537 - /** 537 + /* 538 538 * Port is in loopback mode. 539 539 */ 540 540 static void ··· 560 560 } 561 561 } 562 562 563 - /** 563 + /* 564 564 * There is no attached fabric - private loop or NPort-to-NPort topology. 565 565 */ 566 566 static void ··· 593 593 } 594 594 } 595 595 596 - /** 596 + /* 597 597 * Fabric is online - normal operating state. 598 598 */ 599 599 static void ··· 628 628 } 629 629 } 630 630 631 - /** 631 + /* 632 632 * Exchanging virtual fabric parameters. 633 633 */ 634 634 static void ··· 652 652 } 653 653 } 654 654 655 - /** 655 + /* 656 656 * EVFP exchange complete and VFT tagging is enabled. 657 657 */ 658 658 static void ··· 663 663 bfa_trc(fabric->fcs, event); 664 664 } 665 665 666 - /** 666 + /* 667 667 * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F). 
668 668 */ 669 669 static void ··· 684 684 fabric->event_arg.swp_vfid); 685 685 } 686 686 687 - /** 687 + /* 688 688 * Fabric is being deleted, awaiting vport delete completions. 689 689 */ 690 690 static void ··· 714 714 715 715 716 716 717 - /** 717 + /* 718 718 * fcs_fabric_private fabric private functions 719 719 */ 720 720 ··· 728 728 port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc); 729 729 } 730 730 731 - /** 731 + /* 732 732 * Port Symbolic Name Creation for base port. 733 733 */ 734 734 void ··· 789 789 port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; 790 790 } 791 791 792 - /** 792 + /* 793 793 * bfa lps login completion callback 794 794 */ 795 795 void ··· 867 867 bfa_trc(fabric->fcs, fabric->is_npiv); 868 868 bfa_trc(fabric->fcs, fabric->is_auth); 869 869 } 870 - /** 870 + /* 871 871 * Allocate and send FLOGI. 872 872 */ 873 873 static void ··· 897 897 bfa_fcs_fabric_set_opertype(fabric); 898 898 fabric->stats.fabric_onlines++; 899 899 900 - /** 900 + /* 901 901 * notify online event to base and then virtual ports 902 902 */ 903 903 bfa_fcs_lport_online(&fabric->bport); ··· 917 917 bfa_trc(fabric->fcs, fabric->fabric_name); 918 918 fabric->stats.fabric_offlines++; 919 919 920 - /** 920 + /* 921 921 * notify offline event first to vports and then base port. 922 922 */ 923 923 list_for_each_safe(qe, qen, &fabric->vport_q) { ··· 939 939 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED); 940 940 } 941 941 942 - /** 942 + /* 943 943 * Delete all vports and wait for vport delete completions. 944 944 */ 945 945 static void ··· 965 965 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); 966 966 } 967 967 968 - /** 968 + /* 969 969 * fcs_fabric_public fabric public functions 970 970 */ 971 971 972 - /** 972 + /* 973 973 * Attach time initialization. 974 974 */ 975 975 void ··· 978 978 struct bfa_fcs_fabric_s *fabric; 979 979 980 980 fabric = &fcs->fabric; 981 - bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); 981 + memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); 982 982 983 - /** 983 + /* 984 984 * Initialize base fabric. 985 985 */ 986 986 fabric->fcs = fcs; ··· 989 989 fabric->lps = bfa_lps_alloc(fcs->bfa); 990 990 bfa_assert(fabric->lps); 991 991 992 - /** 992 + /* 993 993 * Initialize fabric delete completion handler. Fabric deletion is 994 994 * complete when the last vport delete is complete. 995 995 */ ··· 1007 1007 bfa_trc(fcs, 0); 1008 1008 } 1009 1009 1010 - /** 1010 + /* 1011 1011 * Module cleanup 1012 1012 */ 1013 1013 void ··· 1017 1017 1018 1018 bfa_trc(fcs, 0); 1019 1019 1020 - /** 1020 + /* 1021 1021 * Cleanup base fabric. 1022 1022 */ 1023 1023 fabric = &fcs->fabric; ··· 1025 1025 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE); 1026 1026 } 1027 1027 1028 - /** 1028 + /* 1029 1029 * Fabric module start -- kick starts FCS actions 1030 1030 */ 1031 1031 void ··· 1038 1038 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); 1039 1039 } 1040 1040 1041 - /** 1041 + /* 1042 1042 * Suspend fabric activity as part of driver suspend. 1043 1043 */ 1044 1044 void ··· 1064 1064 return fabric->oper_type; 1065 1065 } 1066 1066 1067 - /** 1067 + /* 1068 1068 * Link up notification from BFA physical port module. 1069 1069 */ 1070 1070 void ··· 1074 1074 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP); 1075 1075 } 1076 1076 1077 - /** 1077 + /* 1078 1078 * Link down notification from BFA physical port module. 
1079 1079 */ 1080 1080 void ··· 1084 1084 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); 1085 1085 } 1086 1086 1087 - /** 1087 + /* 1088 1088 * A child vport is being created in the fabric. 1089 1089 * 1090 1090 * Call from vport module at vport creation. A list of base port and vports ··· 1099 1099 bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, 1100 1100 struct bfa_fcs_vport_s *vport) 1101 1101 { 1102 - /** 1102 + /* 1103 1103 * - add vport to fabric's vport_q 1104 1104 */ 1105 1105 bfa_trc(fabric->fcs, fabric->vf_id); ··· 1109 1109 bfa_wc_up(&fabric->wc); 1110 1110 } 1111 1111 1112 - /** 1112 + /* 1113 1113 * A child vport is being deleted from fabric. 1114 1114 * 1115 1115 * Vport is being deleted. ··· 1123 1123 bfa_wc_down(&fabric->wc); 1124 1124 } 1125 1125 1126 - /** 1126 + /* 1127 1127 * Base port is deleted. 1128 1128 */ 1129 1129 void ··· 1133 1133 } 1134 1134 1135 1135 1136 - /** 1136 + /* 1137 1137 * Check if fabric is online. 1138 1138 * 1139 1139 * param[in] fabric - Fabric instance. This can be a base fabric or vf. ··· 1146 1146 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online); 1147 1147 } 1148 1148 1149 - /** 1149 + /* 1150 1150 * brief 1151 1151 * 1152 1152 */ ··· 1158 1158 return BFA_STATUS_OK; 1159 1159 } 1160 1160 1161 - /** 1161 + /* 1162 1162 * Lookup for a vport withing a fabric given its pwwn 1163 1163 */ 1164 1164 struct bfa_fcs_vport_s * ··· 1176 1176 return NULL; 1177 1177 } 1178 1178 1179 - /** 1179 + /* 1180 1180 * In a given fabric, return the number of lports. 1181 1181 * 1182 1182 * param[in] fabric - Fabric instance. This can be a base fabric or vf. ··· 1214 1214 1215 1215 return oui; 1216 1216 } 1217 - /** 1217 + /* 1218 1218 * Unsolicited frame receive handling. 1219 1219 */ 1220 1220 void ··· 1230 1230 bfa_trc(fabric->fcs, len); 1231 1231 bfa_trc(fabric->fcs, pid); 1232 1232 1233 - /** 1233 + /* 1234 1234 * Look for our own FLOGI frames being looped back. This means an 1235 1235 * external loopback cable is in place. Our own FLOGI frames are 1236 1236 * sometimes looped back when switch port gets temporarily bypassed. ··· 1242 1242 return; 1243 1243 } 1244 1244 1245 - /** 1245 + /* 1246 1246 * FLOGI/EVFP exchanges should be consumed by base fabric. 1247 1247 */ 1248 1248 if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) { ··· 1252 1252 } 1253 1253 1254 1254 if (fabric->bport.pid == pid) { 1255 - /** 1255 + /* 1256 1256 * All authentication frames should be routed to auth 1257 1257 */ 1258 1258 bfa_trc(fabric->fcs, els_cmd->els_code); ··· 1266 1266 return; 1267 1267 } 1268 1268 1269 - /** 1269 + /* 1270 1270 * look for a matching local port ID 1271 1271 */ 1272 1272 list_for_each(qe, &fabric->vport_q) { ··· 1280 1280 bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); 1281 1281 } 1282 1282 1283 - /** 1283 + /* 1284 1284 * Unsolicited frames to be processed by fabric. 
1285 1285 */ 1286 1286 static void ··· 1304 1304 } 1305 1305 } 1306 1306 1307 - /** 1307 + /* 1308 1308 * Process incoming FLOGI 1309 1309 */ 1310 1310 static void ··· 1329 1329 return; 1330 1330 } 1331 1331 1332 - fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred); 1332 + fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred); 1333 1333 bport->port_topo.pn2n.rem_port_wwn = flogi->port_name; 1334 1334 bport->port_topo.pn2n.reply_oxid = fchs->ox_id; 1335 1335 ··· 1351 1351 struct fchs_s fchs; 1352 1352 1353 1353 fcxp = bfa_fcs_fcxp_alloc(fabric->fcs); 1354 - /** 1354 + /* 1355 1355 * Do not expect this failure -- expect remote node to retry 1356 1356 */ 1357 1357 if (!fcxp) ··· 1370 1370 FC_MAX_PDUSZ, 0); 1371 1371 } 1372 1372 1373 - /** 1373 + /* 1374 1374 * Flogi Acc completion callback. 1375 1375 */ 1376 1376 static void ··· 1417 1417 } 1418 1418 } 1419 1419 1420 - /** 1421 - * fcs_vf_api virtual fabrics API 1422 - */ 1423 - 1424 - /** 1425 - * Enable VF mode. 1426 - * 1427 - * @param[in] fcs fcs module instance 1428 - * @param[in] vf_id default vf_id of port, FC_VF_ID_NULL 1429 - * to use standard default vf_id of 1. 1430 - * 1431 - * @retval BFA_STATUS_OK vf mode is enabled 1432 - * @retval BFA_STATUS_BUSY Port is active. Port must be disabled 1433 - * before VF mode can be enabled. 1434 - */ 1435 - bfa_status_t 1436 - bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id) 1437 - { 1438 - return BFA_STATUS_OK; 1439 - } 1440 - 1441 - /** 1442 - * Disable VF mode. 1443 - * 1444 - * @param[in] fcs fcs module instance 1445 - * 1446 - * @retval BFA_STATUS_OK vf mode is disabled 1447 - * @retval BFA_STATUS_BUSY VFs are present and being used. All 1448 - * VFs must be deleted before disabling 1449 - * VF mode. 1450 - */ 1451 - bfa_status_t 1452 - bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs) 1453 - { 1454 - return BFA_STATUS_OK; 1455 - } 1456 - 1457 - /** 1458 - * Create a new VF instance. 1459 - * 1460 - * A new VF is created using the given VF configuration. A VF is identified 1461 - * by VF id. No duplicate VF creation is allowed with the same VF id. Once 1462 - * a VF is created, VF is automatically started after link initialization 1463 - * and EVFP exchange is completed. 1464 - * 1465 - * param[in] vf - FCS vf data structure. Memory is 1466 - * allocated by caller (driver) 1467 - * param[in] fcs - FCS module 1468 - * param[in] vf_cfg - VF configuration 1469 - * param[in] vf_drv - Opaque handle back to the driver's 1470 - * virtual vf structure 1471 - * 1472 - * retval BFA_STATUS_OK VF creation is successful 1473 - * retval BFA_STATUS_FAILED VF creation failed 1474 - * retval BFA_STATUS_EEXIST A VF exists with the given vf_id 1475 - */ 1476 - bfa_status_t 1477 - bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id, 1478 - struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv) 1479 - { 1480 - bfa_trc(fcs, vf_id); 1481 - return BFA_STATUS_OK; 1482 - } 1483 - 1484 - /** 1485 - * Use this function to delete a BFA VF object. VF object should 1486 - * be stopped before this function call. 1487 - * 1488 - * param[in] vf - pointer to bfa_vf_t. 1489 - * 1490 - * retval BFA_STATUS_OK On vf deletion success 1491 - * retval BFA_STATUS_BUSY VF is not in a stopped state 1492 - * retval BFA_STATUS_INPROGRESS VF deletion in in progress 1493 - */ 1494 - bfa_status_t 1495 - bfa_fcs_vf_delete(bfa_fcs_vf_t *vf) 1496 - { 1497 - bfa_trc(vf->fcs, vf->vf_id); 1498 - return BFA_STATUS_OK; 1499 - } 1500 - 1501 - 1502 - /** 1503 - * Returns attributes of the given VF. 
1504 - * 1505 - * param[in] vf pointer to bfa_vf_t. 1506 - * param[out] vf_attr vf attributes returned 1507 - * 1508 - * return None 1509 - */ 1510 - void 1511 - bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr) 1512 - { 1513 - bfa_trc(vf->fcs, vf->vf_id); 1514 - } 1515 - 1516 - /** 1517 - * Return statistics associated with the given vf. 1518 - * 1519 - * param[in] vf pointer to bfa_vf_t. 1520 - * param[out] vf_stats vf statistics returned 1521 - * 1522 - * @return None 1523 - */ 1524 - void 1525 - bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats) 1526 - { 1527 - bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s)); 1528 - } 1529 - 1530 - /** 1531 - * clear statistics associated with the given vf. 1532 - * 1533 - * param[in] vf pointer to bfa_vf_t. 1534 - * 1535 - * @return None 1536 - */ 1537 - void 1538 - bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf) 1539 - { 1540 - bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s)); 1541 - } 1542 - 1543 - /** 1420 + /* 1544 1421 * Returns FCS vf structure for a given vf_id. 1545 1422 * 1546 1423 * param[in] vf_id - VF_ID ··· 1435 1558 return NULL; 1436 1559 } 1437 1560 1438 - /** 1439 - * Return the list of VFs configured. 1440 - * 1441 - * param[in] fcs fcs module instance 1442 - * param[out] vf_ids returned list of vf_ids 1443 - * param[in,out] nvfs in:size of vf_ids array, 1444 - * out:total elements present, 1445 - * actual elements returned is limited by the size 1446 - * 1447 - * return Driver VF structure 1448 - */ 1449 - void 1450 - bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs) 1451 - { 1452 - bfa_trc(fcs, *nvfs); 1453 - } 1454 - 1455 - /** 1456 - * Return the list of all VFs visible from fabric. 1457 - * 1458 - * param[in] fcs fcs module instance 1459 - * param[out] vf_ids returned list of vf_ids 1460 - * param[in,out] nvfs in:size of vf_ids array, 1461 - * out:total elements present, 1462 - * actual elements returned is limited by the size 1463 - * 1464 - * return Driver VF structure 1465 - */ 1466 - void 1467 - bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs) 1468 - { 1469 - bfa_trc(fcs, *nvfs); 1470 - } 1471 - 1472 - /** 1473 - * Return the list of local logical ports present in the given VF. 
1474 - * 1475 - * param[in] vf vf for which logical ports are returned 1476 - * param[out] lpwwn returned logical port wwn list 1477 - * param[in,out] nlports in:size of lpwwn list; 1478 - * out:total elements present, 1479 - * actual elements returned is limited by the size 1480 - */ 1481 - void 1482 - bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports) 1483 - { 1484 - struct list_head *qe; 1485 - struct bfa_fcs_vport_s *vport; 1486 - int i; 1487 - struct bfa_fcs_s *fcs; 1488 - 1489 - if (vf == NULL || lpwwn == NULL || *nlports == 0) 1490 - return; 1491 - 1492 - fcs = vf->fcs; 1493 - 1494 - bfa_trc(fcs, vf->vf_id); 1495 - bfa_trc(fcs, (u32) *nlports); 1496 - 1497 - i = 0; 1498 - lpwwn[i++] = vf->bport.port_cfg.pwwn; 1499 - 1500 - list_for_each(qe, &vf->vport_q) { 1501 - if (i >= *nlports) 1502 - break; 1503 - 1504 - vport = (struct bfa_fcs_vport_s *) qe; 1505 - lpwwn[i++] = vport->lport.port_cfg.pwwn; 1506 - } 1507 - 1508 - bfa_trc(fcs, i); 1509 - *nlports = i; 1510 - } 1511 - 1512 - /** 1561 + /* 1513 1562 * BFA FCS PPORT ( physical port) 1514 1563 */ 1515 1564 static void ··· 1465 1662 bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); 1466 1663 } 1467 1664 1468 - /** 1665 + /* 1469 1666 * BFA FCS UF ( Unsolicited Frames) 1470 1667 */ 1471 1668 1472 - /** 1669 + /* 1473 1670 * BFA callback for unsolicited frame receive handler. 1474 1671 * 1475 1672 * @param[in] cbarg callback arg for receive handler ··· 1486 1683 struct fc_vft_s *vft; 1487 1684 struct bfa_fcs_fabric_s *fabric; 1488 1685 1489 - /** 1686 + /* 1490 1687 * check for VFT header 1491 1688 */ 1492 1689 if (fchs->routing == FC_RTG_EXT_HDR && ··· 1498 1695 else 1499 1696 fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id); 1500 1697 1501 - /** 1698 + /* 1502 1699 * drop frame if vfid is unknown 1503 1700 */ 1504 1701 if (!fabric) { ··· 1508 1705 return; 1509 1706 } 1510 1707 1511 - /** 1708 + /* 1512 1709 * skip vft header 1513 1710 */ 1514 1711 fchs = (struct fchs_s *) (vft + 1);
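The bfa_fcs.c hunks above reduce to three mechanical patterns: comment blocks that are not kernel-doc are demoted from /** to /*, the driver's private OS-abstraction wrappers give way to the kernel's standard helpers, and the unused VF API stubs are deleted outright. A minimal before/after sketch of the wrapper replacement, built from calls that appear in the hunks above:

    /* Before: driver-private wrappers around standard kernel services. */
    bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
    fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred);

    /* After: the standard helpers. be16_to_cpu() makes the endian
     * direction explicit -- the wire field is big-endian, the result
     * is host order -- and behavior is unchanged.
     */
    memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
    fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);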
+15 -28
drivers/scsi/bfa/bfa_fcs.h
··· 196 196 #define bfa_fcs_fabric_is_switched(__f) \ 197 197 ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED) 198 198 199 - /** 199 + /* 200 200 * The design calls for a single implementation of base fabric and vf. 201 201 */ 202 202 #define bfa_fcs_vf_t struct bfa_fcs_fabric_s ··· 216 216 217 217 #define bfa_fcs_lport_t struct bfa_fcs_lport_s 218 218 219 - /** 219 + /* 220 220 * Symbolic Name related defines 221 221 * Total bytes 255. 222 222 * Physical Port's symbolic name 128 bytes. ··· 239 239 #define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48 240 240 #define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16 241 241 242 - /** 242 + /* 243 243 * Get FC port ID for a logical port. 244 244 */ 245 245 #define bfa_fcs_lport_get_fcid(_lport) ((_lport)->pid) ··· 262 262 #define bfa_fcs_lport_get_fabric_ipaddr(_lport) \ 263 263 ((_lport)->fabric->fabric_ip_addr) 264 264 265 - /** 265 + /* 266 266 * bfa fcs port public functions 267 267 */ 268 268 ··· 342 342 #define bfa_fcs_vport_get_port(vport) \ 343 343 ((struct bfa_fcs_lport_s *)(&vport->port)) 344 344 345 - /** 345 + /* 346 346 * bfa fcs vport public functions 347 347 */ 348 348 bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, ··· 393 393 enum bfa_port_speed rpsc_speed; 394 394 /* Current Speed from RPSC. O if RPSC fails */ 395 395 enum bfa_port_speed assigned_speed; 396 - /** 396 + /* 397 397 * Speed assigned by the user. will be used if RPSC is 398 398 * not supported by the rport. 399 399 */ ··· 434 434 return rport->bfa_rport; 435 435 } 436 436 437 - /** 437 + /* 438 438 * bfa fcs rport API functions 439 439 */ 440 440 bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn, ··· 573 573 return itnim->bfa_itnim; 574 574 } 575 575 576 - /** 576 + /* 577 577 * bfa fcs FCP Initiator mode API functions 578 578 */ 579 579 void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim, ··· 677 677 void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod); 678 678 void bfa_fcs_start(struct bfa_fcs_s *fcs); 679 679 680 - /** 680 + /* 681 681 * bfa fcs vf public functions 682 682 */ 683 - bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id); 684 - bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs); 685 - bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, 686 - u16 vf_id, struct bfa_lport_cfg_s *port_cfg, 687 - struct bfad_vf_s *vf_drv); 688 - bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf); 689 - void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs); 690 - void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs); 691 - void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr); 692 - void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, 693 - struct bfa_vf_stats_s *vf_stats); 694 - void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf); 695 - void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports); 696 683 bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id); 697 684 u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric); 698 685 ··· 716 729 void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs); 717 730 void bfa_fcs_port_attach(struct bfa_fcs_s *fcs); 718 731 719 - /** 732 + /* 720 733 * BFA FCS callback interfaces 721 734 */ 722 735 723 - /** 736 + /* 724 737 * fcb Main fcs callbacks 725 738 */ 726 739 ··· 729 742 struct bfad_vport_s; 730 743 struct bfad_rport_s; 731 744 732 - /** 745 + /* 733 746 * lport callbacks 734 747 */ 735 748 struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad, ··· 741 754 struct bfad_vf_s 
*vf_drv, 742 755 struct bfad_vport_s *vp_drv); 743 756 744 - /** 757 + /* 745 758 * vport callbacks 746 759 */ 747 760 void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s); 748 761 749 - /** 762 + /* 750 763 * rport callbacks 751 764 */ 752 765 bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad, 753 766 struct bfa_fcs_rport_s **rport, 754 767 struct bfad_rport_s **rport_drv); 755 768 756 - /** 769 + /* 757 770 * itnim callbacks 758 771 */ 759 772 void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
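The header changes mirror the .c file: prototypes for the deleted VF stubs (bfa_fcs_vf_mode_enable() through bfa_fcs_vf_get_ports()) are dropped, and comments that are not kernel-doc lose their /** opener. The distinction matters because scripts/kernel-doc parses only comments that start with /** and expects its grammar there. A short sketch of the convention; the kernel-doc body below is illustrative, not taken from the driver:

    /**
     * bfa_fcs_vf_lookup - look up the FCS fabric instance for a VF id
     * @fcs:   fcs module instance
     * @vf_id: VF id to look up
     *
     * Opens with a double asterisk, so scripts/kernel-doc parses it.
     */
    bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);

    /*
     * bfa fcs vf public functions -- a plain comment opens with a
     * single asterisk and is ignored by kernel-doc.
     */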
+17 -17
drivers/scsi/bfa/bfa_fcs_fcpim.c
··· 15 15 * General Public License for more details. 16 16 */ 17 17 18 - /** 18 + /* 19 19 * fcpim.c - FCP initiator mode i-t nexus state machine 20 20 */ 21 21 ··· 38 38 bfa_status_t req_status, u32 rsp_len, 39 39 u32 resid_len, struct fchs_s *rsp_fchs); 40 40 41 - /** 41 + /* 42 42 * fcs_itnim_sm FCS itnim state machine events 43 43 */ 44 44 ··· 84 84 {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR}, 85 85 }; 86 86 87 - /** 87 + /* 88 88 * fcs_itnim_sm FCS itnim state machine 89 89 */ 90 90 ··· 494 494 495 495 496 496 497 - /** 497 + /* 498 498 * itnim_public FCS ITNIM public interfaces 499 499 */ 500 500 501 - /** 501 + /* 502 502 * Called by rport when a new rport is created. 503 503 * 504 504 * @param[in] rport - remote port. ··· 554 554 return itnim; 555 555 } 556 556 557 - /** 557 + /* 558 558 * Called by rport to delete the instance of FCPIM. 559 559 * 560 560 * @param[in] rport - remote port. ··· 566 566 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE); 567 567 } 568 568 569 - /** 569 + /* 570 570 * Notification from rport that PLOGI is complete to initiate FC-4 session. 571 571 */ 572 572 void ··· 586 586 } 587 587 } 588 588 589 - /** 589 + /* 590 590 * Called by rport to handle a remote device offline. 591 591 */ 592 592 void ··· 596 596 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE); 597 597 } 598 598 599 - /** 599 + /* 600 600 * Called by rport when remote port is known to be an initiator from 601 601 * PRLI received. 602 602 */ ··· 608 608 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); 609 609 } 610 610 611 - /** 611 + /* 612 612 * Called by rport to check if the itnim is online. 613 613 */ 614 614 bfa_status_t ··· 625 625 } 626 626 } 627 627 628 - /** 628 + /* 629 629 * BFA completion callback for bfa_itnim_online(). 630 630 */ 631 631 void ··· 637 637 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); 638 638 } 639 639 640 - /** 640 + /* 641 641 * BFA completion callback for bfa_itnim_offline(). 642 642 */ 643 643 void ··· 649 649 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); 650 650 } 651 651 652 - /** 652 + /* 653 653 * Mark the beginning of PATH TOV handling. IO completion callbacks 654 654 * are still pending. 655 655 */ ··· 661 661 bfa_trc(itnim->fcs, itnim->rport->pwwn); 662 662 } 663 663 664 - /** 664 + /* 665 665 * Mark the end of PATH TOV handling. All pending IOs are already cleaned up. 666 666 */ 667 667 void ··· 674 674 itnim_drv->state = ITNIM_STATE_TIMEOUT; 675 675 } 676 676 677 - /** 677 + /* 678 678 * BFA notification to FCS/driver for second level error recovery. 679 679 * 680 680 * Atleast one I/O request has timedout and target is unresponsive to ··· 736 736 if (itnim == NULL) 737 737 return BFA_STATUS_NO_FCPIM_NEXUS; 738 738 739 - bfa_os_memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s)); 739 + memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s)); 740 740 741 741 return BFA_STATUS_OK; 742 742 } ··· 753 753 if (itnim == NULL) 754 754 return BFA_STATUS_NO_FCPIM_NEXUS; 755 755 756 - bfa_os_memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s)); 756 + memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s)); 757 757 return BFA_STATUS_OK; 758 758 } 759 759
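bfa_fcs_fcpim.c repeats the same two patterns: non-kernel-doc comments demoted to /*, and the private wrappers dropped from the itnim statistics accessors, whose bodies reduce to the standard helpers (lines taken from the hunks above):

    /* Copy out the i-t nexus statistics for the caller... */
    memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s));

    /* ...or reset them in place. */
    memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s));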
+204 -264
drivers/scsi/bfa/bfa_fcs_lport.c
··· 15 15 * General Public License for more details. 16 16 */ 17 17 18 - /** 19 - * bfa_fcs_lport.c BFA FCS port 20 - */ 21 - 22 18 #include "bfa_fcs.h" 23 19 #include "bfa_fcbuild.h" 24 20 #include "bfa_fc.h" 25 21 #include "bfad_drv.h" 26 22 27 23 BFA_TRC_FILE(FCS, PORT); 28 - 29 - /** 30 - * Forward declarations 31 - */ 32 24 33 25 static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, 34 26 struct fchs_s *rx_fchs, u8 reason_code, ··· 64 72 bfa_fcs_lport_n2n_offline}, 65 73 }; 66 74 67 - /** 75 + /* 68 76 * fcs_port_sm FCS logical port state machine 69 77 */ 70 78 ··· 232 240 } 233 241 } 234 242 235 - /** 243 + /* 236 244 * fcs_port_pvt 237 245 */ 238 246 ··· 264 272 FC_MAX_PDUSZ, 0); 265 273 } 266 274 267 - /** 275 + /* 268 276 * Process incoming plogi from a remote port. 269 277 */ 270 278 static void ··· 295 303 return; 296 304 } 297 305 298 - /** 306 + /* 299 307 * Direct Attach P2P mode : verify address assigned by the r-port. 300 308 */ 301 309 if ((!bfa_fcs_fabric_is_switched(port->fabric)) && ··· 311 319 port->pid = rx_fchs->d_id; 312 320 } 313 321 314 - /** 322 + /* 315 323 * First, check if we know the device by pwwn. 316 324 */ 317 325 rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name); 318 326 if (rport) { 319 - /** 327 + /* 320 328 * Direct Attach P2P mode : handle address assigned by r-port. 321 329 */ 322 330 if ((!bfa_fcs_fabric_is_switched(port->fabric)) && ··· 329 337 return; 330 338 } 331 339 332 - /** 340 + /* 333 341 * Next, lookup rport by PID. 334 342 */ 335 343 rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id); 336 344 if (!rport) { 337 - /** 345 + /* 338 346 * Inbound PLOGI from a new device. 339 347 */ 340 348 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); 341 349 return; 342 350 } 343 351 344 - /** 352 + /* 345 353 * Rport is known only by PID. 346 354 */ 347 355 if (rport->pwwn) { 348 - /** 356 + /* 349 357 * This is a different device with the same pid. Old device 350 358 * disappeared. Send implicit LOGO to old device. 351 359 */ 352 360 bfa_assert(rport->pwwn != plogi->port_name); 353 361 bfa_fcs_rport_logo_imp(rport); 354 362 355 - /** 363 + /* 356 364 * Inbound PLOGI from a new device (with old PID). 357 365 */ 358 366 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); 359 367 return; 360 368 } 361 369 362 - /** 370 + /* 363 371 * PLOGI crossing each other. 364 372 */ 365 373 bfa_assert(rport->pwwn == WWN_NULL); ··· 471 479 bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port, 472 480 struct fc_rnid_general_topology_data_s *gen_topo_data) 473 481 { 474 - bfa_os_memset(gen_topo_data, 0, 482 + memset(gen_topo_data, 0, 475 483 sizeof(struct fc_rnid_general_topology_data_s)); 476 484 477 - gen_topo_data->asso_type = bfa_os_htonl(RNID_ASSOCIATED_TYPE_HOST); 485 + gen_topo_data->asso_type = cpu_to_be32(RNID_ASSOCIATED_TYPE_HOST); 478 486 gen_topo_data->phy_port_num = 0; /* @todo */ 479 - gen_topo_data->num_attached_nodes = bfa_os_htonl(1); 487 + gen_topo_data->num_attached_nodes = cpu_to_be32(1); 480 488 } 481 489 482 490 static void ··· 590 598 591 599 592 600 593 - /** 601 + /* 594 602 * fcs_lport_api BFA FCS port API 595 603 */ 596 - /** 604 + /* 597 605 * Module initialization 598 606 */ 599 607 void ··· 602 610 603 611 } 604 612 605 - /** 613 + /* 606 614 * Module cleanup 607 615 */ 608 616 void ··· 611 619 bfa_fcs_modexit_comp(fcs); 612 620 } 613 621 614 - /** 622 + /* 615 623 * Unsolicited frame receive handling. 
616 624 */ 617 625 void ··· 629 637 return; 630 638 } 631 639 632 - /** 640 + /* 633 641 * First, handle ELSs that donot require a login. 634 642 */ 635 643 /* ··· 665 673 bfa_fcs_lport_abts_acc(lport, fchs); 666 674 return; 667 675 } 668 - /** 676 + /* 669 677 * look for a matching remote port ID 670 678 */ 671 679 rport = bfa_fcs_lport_get_rport_by_pid(lport, pid); ··· 678 686 return; 679 687 } 680 688 681 - /** 689 + /* 682 690 * Only handles ELS frames for now. 683 691 */ 684 692 if (fchs->type != FC_TYPE_ELS) { ··· 694 702 } 695 703 696 704 if (els_cmd->els_code == FC_ELS_LOGO) { 697 - /** 705 + /* 698 706 * @todo Handle LOGO frames received. 699 707 */ 700 708 return; 701 709 } 702 710 703 711 if (els_cmd->els_code == FC_ELS_PRLI) { 704 - /** 712 + /* 705 713 * @todo Handle PRLI frames received. 706 714 */ 707 715 return; 708 716 } 709 717 710 - /** 718 + /* 711 719 * Unhandled ELS frames. Send a LS_RJT. 712 720 */ 713 721 bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP, ··· 715 723 716 724 } 717 725 718 - /** 726 + /* 719 727 * PID based Lookup for a R-Port in the Port R-Port Queue 720 728 */ 721 729 struct bfa_fcs_rport_s * ··· 734 742 return NULL; 735 743 } 736 744 737 - /** 745 + /* 738 746 * PWWN based Lookup for a R-Port in the Port R-Port Queue 739 747 */ 740 748 struct bfa_fcs_rport_s * ··· 753 761 return NULL; 754 762 } 755 763 756 - /** 764 + /* 757 765 * NWWN based Lookup for a R-Port in the Port R-Port Queue 758 766 */ 759 767 struct bfa_fcs_rport_s * ··· 772 780 return NULL; 773 781 } 774 782 775 - /** 783 + /* 776 784 * Called by rport module when new rports are discovered. 777 785 */ 778 786 void ··· 784 792 port->num_rports++; 785 793 } 786 794 787 - /** 795 + /* 788 796 * Called by rport module to when rports are deleted. 789 797 */ 790 798 void ··· 799 807 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT); 800 808 } 801 809 802 - /** 810 + /* 803 811 * Called by fabric for base port when fabric login is complete. 804 812 * Called by vport for virtual ports when FDISC is complete. 805 813 */ ··· 809 817 bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE); 810 818 } 811 819 812 - /** 820 + /* 813 821 * Called by fabric for base port when fabric goes offline. 814 822 * Called by vport for virtual ports when virtual port becomes offline. 815 823 */ ··· 819 827 bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE); 820 828 } 821 829 822 - /** 830 + /* 823 831 * Called by fabric to delete base lport and associated resources. 824 832 * 825 833 * Called by vport to delete lport and associated resources. Should call ··· 831 839 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE); 832 840 } 833 841 834 - /** 842 + /* 835 843 * Return TRUE if port is online, else return FALSE 836 844 */ 837 845 bfa_boolean_t ··· 840 848 return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online); 841 849 } 842 850 843 - /** 851 + /* 844 852 * Attach time initialization of logical ports. 845 853 */ 846 854 void ··· 857 865 lport->num_rports = 0; 858 866 } 859 867 860 - /** 868 + /* 861 869 * Logical port initialization of base or virtual port. 862 870 * Called by fabric for base port or by vport for virtual ports. 
863 871 */ ··· 870 878 struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad; 871 879 char lpwwn_buf[BFA_STRING_32]; 872 880 873 - bfa_os_assign(lport->port_cfg, *port_cfg); 881 + lport->port_cfg = *port_cfg; 874 882 875 883 lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport, 876 884 lport->port_cfg.roles, ··· 886 894 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 887 895 } 888 896 889 - /** 897 + /* 890 898 * fcs_lport_api 891 899 */ 892 900 ··· 926 934 } 927 935 } 928 936 929 - /** 937 + /* 930 938 * bfa_fcs_lport_fab port fab functions 931 939 */ 932 940 933 - /** 941 + /* 934 942 * Called by port to initialize fabric services of the base port. 935 943 */ 936 944 static void ··· 941 949 bfa_fcs_lport_ms_init(port); 942 950 } 943 951 944 - /** 952 + /* 945 953 * Called by port to notify transition to online state. 946 954 */ 947 955 static void ··· 951 959 bfa_fcs_lport_scn_online(port); 952 960 } 953 961 954 - /** 962 + /* 955 963 * Called by port to notify transition to offline state. 956 964 */ 957 965 static void ··· 962 970 bfa_fcs_lport_ms_offline(port); 963 971 } 964 972 965 - /** 973 + /* 966 974 * bfa_fcs_lport_n2n functions 967 975 */ 968 976 969 - /** 977 + /* 970 978 * Called by fcs/port to initialize N2N topology. 971 979 */ 972 980 static void ··· 974 982 { 975 983 } 976 984 977 - /** 985 + /* 978 986 * Called by fcs/port to notify transition to online state. 979 987 */ 980 988 static void ··· 998 1006 ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn, 999 1007 sizeof(wwn_t)) > 0) { 1000 1008 port->pid = N2N_LOCAL_PID; 1001 - /** 1009 + /* 1002 1010 * First, check if we know the device by pwwn. 1003 1011 */ 1004 1012 rport = bfa_fcs_lport_get_rport_by_pwwn(port, ··· 1027 1035 } 1028 1036 } 1029 1037 1030 - /** 1038 + /* 1031 1039 * Called by fcs/port to notify transition to offline state. 1032 1040 */ 1033 1041 static void ··· 1086 1094 struct bfa_fcs_fdmi_hba_attr_s *hba_attr); 1087 1095 static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, 1088 1096 struct bfa_fcs_fdmi_port_attr_s *port_attr); 1089 - /** 1097 + /* 1090 1098 * fcs_fdmi_sm FCS FDMI state machine 1091 1099 */ 1092 1100 1093 - /** 1101 + /* 1094 1102 * FDMI State Machine events 1095 1103 */ 1096 1104 enum port_fdmi_event { ··· 1135 1143 static void bfa_fcs_lport_fdmi_sm_disabled( 1136 1144 struct bfa_fcs_lport_fdmi_s *fdmi, 1137 1145 enum port_fdmi_event event); 1138 - /** 1146 + /* 1139 1147 * Start in offline state - awaiting MS to send start. 1140 1148 */ 1141 1149 static void ··· 1502 1510 bfa_sm_fault(port->fcs, event); 1503 1511 } 1504 1512 } 1505 - /** 1513 + /* 1506 1514 * FDMI is disabled state. 1507 1515 */ 1508 1516 static void ··· 1517 1525 /* No op State. It can only be enabled at Driver Init. */ 1518 1526 } 1519 1527 1520 - /** 1528 + /* 1521 1529 * RHBA : Register HBA Attributes. 
1522 1530 */ 1523 1531 static void ··· 1541 1549 fdmi->fcxp = fcxp; 1542 1550 1543 1551 pyld = bfa_fcxp_get_reqbuf(fcxp); 1544 - bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); 1552 + memset(pyld, 0, FC_MAX_PDUSZ); 1545 1553 1546 1554 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), 1547 1555 FDMI_RHBA); ··· 1576 1584 bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr); 1577 1585 1578 1586 rhba->hba_id = bfa_fcs_lport_get_pwwn(port); 1579 - rhba->port_list.num_ports = bfa_os_htonl(1); 1587 + rhba->port_list.num_ports = cpu_to_be32(1); 1580 1588 rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port); 1581 1589 1582 1590 len = sizeof(rhba->hba_id) + sizeof(rhba->port_list); ··· 1593 1601 * Node Name 1594 1602 */ 1595 1603 attr = (struct fdmi_attr_s *) curr_ptr; 1596 - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME); 1604 + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME); 1597 1605 attr->len = sizeof(wwn_t); 1598 1606 memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len); 1599 1607 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1600 1608 len += attr->len; 1601 1609 count++; 1602 - attr->len = 1603 - bfa_os_htons(attr->len + sizeof(attr->type) + 1610 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1604 1611 sizeof(attr->len)); 1605 1612 1606 1613 /* 1607 1614 * Manufacturer 1608 1615 */ 1609 1616 attr = (struct fdmi_attr_s *) curr_ptr; 1610 - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER); 1617 + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER); 1611 1618 attr->len = (u16) strlen(fcs_hba_attr->manufacturer); 1612 1619 memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len); 1613 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1614 - *fields need 1615 - *to be 4 byte 1616 - *aligned */ 1620 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1617 1621 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1618 1622 len += attr->len; 1619 1623 count++; 1620 - attr->len = 1621 - bfa_os_htons(attr->len + sizeof(attr->type) + 1624 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1622 1625 sizeof(attr->len)); 1623 1626 1624 1627 /* 1625 1628 * Serial Number 1626 1629 */ 1627 1630 attr = (struct fdmi_attr_s *) curr_ptr; 1628 - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM); 1631 + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM); 1629 1632 attr->len = (u16) strlen(fcs_hba_attr->serial_num); 1630 1633 memcpy(attr->value, fcs_hba_attr->serial_num, attr->len); 1631 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1632 - *fields need 1633 - *to be 4 byte 1634 - *aligned */ 1634 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1635 1635 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1636 1636 len += attr->len; 1637 1637 count++; 1638 - attr->len = 1639 - bfa_os_htons(attr->len + sizeof(attr->type) + 1638 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1640 1639 sizeof(attr->len)); 1641 1640 1642 1641 /* 1643 1642 * Model 1644 1643 */ 1645 1644 attr = (struct fdmi_attr_s *) curr_ptr; 1646 - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL); 1645 + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL); 1647 1646 attr->len = (u16) strlen(fcs_hba_attr->model); 1648 1647 memcpy(attr->value, fcs_hba_attr->model, attr->len); 1649 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1650 - *fields need 1651 - *to be 4 byte 1652 - *aligned */ 1648 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1653 1649 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + 
attr->len; 1654 1650 len += attr->len; 1655 1651 count++; 1656 - attr->len = 1657 - bfa_os_htons(attr->len + sizeof(attr->type) + 1652 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1658 1653 sizeof(attr->len)); 1659 1654 1660 1655 /* 1661 1656 * Model Desc 1662 1657 */ 1663 1658 attr = (struct fdmi_attr_s *) curr_ptr; 1664 - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC); 1659 + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC); 1665 1660 attr->len = (u16) strlen(fcs_hba_attr->model_desc); 1666 1661 memcpy(attr->value, fcs_hba_attr->model_desc, attr->len); 1667 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1668 - *fields need 1669 - *to be 4 byte 1670 - *aligned */ 1662 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1671 1663 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1672 1664 len += attr->len; 1673 1665 count++; 1674 - attr->len = 1675 - bfa_os_htons(attr->len + sizeof(attr->type) + 1666 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1676 1667 sizeof(attr->len)); 1677 1668 1678 1669 /* ··· 1663 1688 */ 1664 1689 if (fcs_hba_attr->hw_version[0] != '\0') { 1665 1690 attr = (struct fdmi_attr_s *) curr_ptr; 1666 - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION); 1691 + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION); 1667 1692 attr->len = (u16) strlen(fcs_hba_attr->hw_version); 1668 1693 memcpy(attr->value, fcs_hba_attr->hw_version, attr->len); 1669 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1670 - *fields need 1671 - *to be 4 byte 1672 - *aligned */ 1694 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1673 1695 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1674 1696 len += attr->len; 1675 1697 count++; 1676 - attr->len = 1677 - bfa_os_htons(attr->len + sizeof(attr->type) + 1698 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1678 1699 sizeof(attr->len)); 1679 1700 } 1680 1701 ··· 1678 1707 * Driver Version 1679 1708 */ 1680 1709 attr = (struct fdmi_attr_s *) curr_ptr; 1681 - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION); 1710 + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION); 1682 1711 attr->len = (u16) strlen(fcs_hba_attr->driver_version); 1683 1712 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); 1684 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1685 - *fields need 1686 - *to be 4 byte 1687 - *aligned */ 1713 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1688 1714 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1689 1715 len += attr->len;; 1690 1716 count++; 1691 - attr->len = 1692 - bfa_os_htons(attr->len + sizeof(attr->type) + 1717 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1693 1718 sizeof(attr->len)); 1694 1719 1695 1720 /* ··· 1693 1726 */ 1694 1727 if (fcs_hba_attr->option_rom_ver[0] != '\0') { 1695 1728 attr = (struct fdmi_attr_s *) curr_ptr; 1696 - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION); 1729 + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION); 1697 1730 attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver); 1698 1731 memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len); 1699 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1700 - *fields need 1701 - *to be 4 byte 1702 - *aligned */ 1732 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1703 1733 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1704 1734 len += attr->len; 1705 1735 count++; 1706 - attr->len = 1707 - bfa_os_htons(attr->len + 
sizeof(attr->type) + 1736 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1708 1737 sizeof(attr->len)); 1709 1738 } 1710 1739 ··· 1708 1745 * f/w Version = driver version 1709 1746 */ 1710 1747 attr = (struct fdmi_attr_s *) curr_ptr; 1711 - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION); 1748 + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); 1712 1749 attr->len = (u16) strlen(fcs_hba_attr->driver_version); 1713 1750 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); 1714 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1715 - *fields need 1716 - *to be 4 byte 1717 - *aligned */ 1751 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1718 1752 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1719 1753 len += attr->len; 1720 1754 count++; 1721 - attr->len = 1722 - bfa_os_htons(attr->len + sizeof(attr->type) + 1755 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1723 1756 sizeof(attr->len)); 1724 1757 1725 1758 /* ··· 1723 1764 */ 1724 1765 if (fcs_hba_attr->os_name[0] != '\0') { 1725 1766 attr = (struct fdmi_attr_s *) curr_ptr; 1726 - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME); 1767 + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME); 1727 1768 attr->len = (u16) strlen(fcs_hba_attr->os_name); 1728 1769 memcpy(attr->value, fcs_hba_attr->os_name, attr->len); 1729 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1730 - *fields need 1731 - *to be 4 byte 1732 - *aligned */ 1770 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1733 1771 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1734 1772 len += attr->len; 1735 1773 count++; 1736 - attr->len = 1737 - bfa_os_htons(attr->len + sizeof(attr->type) + 1774 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1738 1775 sizeof(attr->len)); 1739 1776 } 1740 1777 ··· 1738 1783 * MAX_CT_PAYLOAD 1739 1784 */ 1740 1785 attr = (struct fdmi_attr_s *) curr_ptr; 1741 - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT); 1786 + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT); 1742 1787 attr->len = sizeof(fcs_hba_attr->max_ct_pyld); 1743 1788 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len); 1744 1789 len += attr->len; 1745 1790 count++; 1746 - attr->len = 1747 - bfa_os_htons(attr->len + sizeof(attr->type) + 1791 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1748 1792 sizeof(attr->len)); 1749 1793 1750 1794 /* 1751 1795 * Update size of payload 1752 1796 */ 1753 - len += ((sizeof(attr->type) + 1754 - sizeof(attr->len)) * count); 1797 + len += ((sizeof(attr->type) + sizeof(attr->len)) * count); 1755 1798 1756 - rhba->hba_attr_blk.attr_count = bfa_os_htonl(count); 1799 + rhba->hba_attr_blk.attr_count = cpu_to_be32(count); 1757 1800 return len; 1758 1801 } 1759 1802 ··· 1778 1825 } 1779 1826 1780 1827 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 1781 - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 1828 + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); 1782 1829 1783 1830 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 1784 1831 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); ··· 1790 1837 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); 1791 1838 } 1792 1839 1793 - /** 1840 + /* 1794 1841 * RPRT : Register Port 1795 1842 */ 1796 1843 static void ··· 1814 1861 fdmi->fcxp = fcxp; 1815 1862 1816 1863 pyld = bfa_fcxp_get_reqbuf(fcxp); 1817 - bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); 1864 + memset(pyld, 0, FC_MAX_PDUSZ); 1818 1865 1819 1866 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), 1820 
1867 FDMI_RPRT); ··· 1832 1879 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT); 1833 1880 } 1834 1881 1835 - /** 1882 + /* 1836 1883 * This routine builds Port Attribute Block that used in RPA, RPRT commands. 1837 1884 */ 1838 1885 static u16 ··· 1862 1909 * FC4 Types 1863 1910 */ 1864 1911 attr = (struct fdmi_attr_s *) curr_ptr; 1865 - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES); 1912 + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES); 1866 1913 attr->len = sizeof(fcs_port_attr.supp_fc4_types); 1867 1914 memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len); 1868 1915 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1869 1916 len += attr->len; 1870 1917 ++count; 1871 1918 attr->len = 1872 - bfa_os_htons(attr->len + sizeof(attr->type) + 1919 + cpu_to_be16(attr->len + sizeof(attr->type) + 1873 1920 sizeof(attr->len)); 1874 1921 1875 1922 /* 1876 1923 * Supported Speed 1877 1924 */ 1878 1925 attr = (struct fdmi_attr_s *) curr_ptr; 1879 - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED); 1926 + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED); 1880 1927 attr->len = sizeof(fcs_port_attr.supp_speed); 1881 1928 memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len); 1882 1929 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1883 1930 len += attr->len; 1884 1931 ++count; 1885 1932 attr->len = 1886 - bfa_os_htons(attr->len + sizeof(attr->type) + 1933 + cpu_to_be16(attr->len + sizeof(attr->type) + 1887 1934 sizeof(attr->len)); 1888 1935 1889 1936 /* 1890 1937 * current Port Speed 1891 1938 */ 1892 1939 attr = (struct fdmi_attr_s *) curr_ptr; 1893 - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED); 1940 + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED); 1894 1941 attr->len = sizeof(fcs_port_attr.curr_speed); 1895 1942 memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len); 1896 1943 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1897 1944 len += attr->len; 1898 1945 ++count; 1899 - attr->len = 1900 - bfa_os_htons(attr->len + sizeof(attr->type) + 1946 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1901 1947 sizeof(attr->len)); 1902 1948 1903 1949 /* 1904 1950 * max frame size 1905 1951 */ 1906 1952 attr = (struct fdmi_attr_s *) curr_ptr; 1907 - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE); 1953 + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE); 1908 1954 attr->len = sizeof(fcs_port_attr.max_frm_size); 1909 1955 memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len); 1910 1956 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1911 1957 len += attr->len; 1912 1958 ++count; 1913 - attr->len = 1914 - bfa_os_htons(attr->len + sizeof(attr->type) + 1959 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1915 1960 sizeof(attr->len)); 1916 1961 1917 1962 /* ··· 1917 1966 */ 1918 1967 if (fcs_port_attr.os_device_name[0] != '\0') { 1919 1968 attr = (struct fdmi_attr_s *) curr_ptr; 1920 - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME); 1969 + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME); 1921 1970 attr->len = (u16) strlen(fcs_port_attr.os_device_name); 1922 1971 memcpy(attr->value, fcs_port_attr.os_device_name, attr->len); 1923 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1924 - *fields need 1925 - *to be 4 byte 1926 - *aligned */ 1972 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1927 1973 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1928 1974 len += attr->len; 1929 1975 ++count; 1930 - attr->len = 1931 - 
bfa_os_htons(attr->len + sizeof(attr->type) + 1976 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1932 1977 sizeof(attr->len)); 1933 1978 } 1934 1979 /* ··· 1932 1985 */ 1933 1986 if (fcs_port_attr.host_name[0] != '\0') { 1934 1987 attr = (struct fdmi_attr_s *) curr_ptr; 1935 - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME); 1988 + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME); 1936 1989 attr->len = (u16) strlen(fcs_port_attr.host_name); 1937 1990 memcpy(attr->value, fcs_port_attr.host_name, attr->len); 1938 - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1939 - *fields need 1940 - *to be 4 byte 1941 - *aligned */ 1991 + attr->len = fc_roundup(attr->len, sizeof(u32)); 1942 1992 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1943 1993 len += attr->len; 1944 1994 ++count; 1945 - attr->len = 1946 - bfa_os_htons(attr->len + sizeof(attr->type) + 1995 + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + 1947 1996 sizeof(attr->len)); 1948 1997 } 1949 1998 1950 1999 /* 1951 2000 * Update size of payload 1952 2001 */ 1953 - port_attrib->attr_count = bfa_os_htonl(count); 1954 - len += ((sizeof(attr->type) + 1955 - sizeof(attr->len)) * count); 2002 + port_attrib->attr_count = cpu_to_be32(count); 2003 + len += ((sizeof(attr->type) + sizeof(attr->len)) * count); 1956 2004 return len; 1957 2005 } 1958 2006 ··· 1992 2050 } 1993 2051 1994 2052 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 1995 - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 2053 + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); 1996 2054 1997 2055 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 1998 2056 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); ··· 2004 2062 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); 2005 2063 } 2006 2064 2007 - /** 2065 + /* 2008 2066 * RPA : Register Port Attributes. 
2009 2067 */ 2010 2068 static void ··· 2028 2086 fdmi->fcxp = fcxp; 2029 2087 2030 2088 pyld = bfa_fcxp_get_reqbuf(fcxp); 2031 - bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); 2089 + memset(pyld, 0, FC_MAX_PDUSZ); 2032 2090 2033 2091 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), 2034 2092 FDMI_RPA); 2035 2093 2036 - attr_len = 2037 - bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi, 2038 - (u8 *) ((struct ct_hdr_s *) pyld 2039 - + 1)); 2094 + attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi, 2095 + (u8 *) ((struct ct_hdr_s *) pyld + 1)); 2040 2096 2041 2097 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 2042 2098 FC_CLASS_3, len + attr_len, &fchs, ··· 2083 2143 } 2084 2144 2085 2145 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 2086 - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 2146 + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); 2087 2147 2088 2148 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 2089 2149 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); ··· 2110 2170 struct bfa_fcs_lport_s *port = fdmi->ms->port; 2111 2171 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; 2112 2172 2113 - bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); 2173 + memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); 2114 2174 2115 2175 bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc, 2116 2176 hba_attr->manufacturer); ··· 2144 2204 sizeof(driver_info->host_os_patch)); 2145 2205 } 2146 2206 2147 - hba_attr->max_ct_pyld = bfa_os_htonl(FC_MAX_PDUSZ); 2207 + hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ); 2148 2208 } 2149 2209 2150 2210 void ··· 2155 2215 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; 2156 2216 struct bfa_port_attr_s pport_attr; 2157 2217 2158 - bfa_os_memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s)); 2218 + memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s)); 2159 2219 2160 2220 /* 2161 2221 * get pport attributes from hal ··· 2170 2230 /* 2171 2231 * Supported Speeds 2172 2232 */ 2173 - port_attr->supp_speed = bfa_os_htonl(BFA_FCS_FDMI_SUPORTED_SPEEDS); 2233 + port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPORTED_SPEEDS); 2174 2234 2175 2235 /* 2176 2236 * Current Speed 2177 2237 */ 2178 - port_attr->curr_speed = bfa_os_htonl(pport_attr.speed); 2238 + port_attr->curr_speed = cpu_to_be32(pport_attr.speed); 2179 2239 2180 2240 /* 2181 2241 * Max PDU Size. 2182 2242 */ 2183 - port_attr->max_frm_size = bfa_os_htonl(FC_MAX_PDUSZ); 2243 + port_attr->max_frm_size = cpu_to_be32(FC_MAX_PDUSZ); 2184 2244 2185 2245 /* 2186 2246 * OS device Name ··· 2261 2321 u32 rsp_len, 2262 2322 u32 resid_len, 2263 2323 struct fchs_s *rsp_fchs); 2264 - /** 2324 + /* 2265 2325 * fcs_ms_sm FCS MS state machine 2266 2326 */ 2267 2327 2268 - /** 2328 + /* 2269 2329 * MS State Machine events 2270 2330 */ 2271 2331 enum port_ms_event { ··· 2300 2360 enum port_ms_event event); 2301 2361 static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms, 2302 2362 enum port_ms_event event); 2303 - /** 2363 + /* 2304 2364 * Start in offline state - awaiting NS to send start. 2305 2365 */ 2306 2366 static void ··· 2372 2432 */ 2373 2433 bfa_fcs_lport_fdmi_online(ms); 2374 2434 2375 - /** 2435 + /* 2376 2436 * if this is a Vport, go to online state. 
2377 2437 */ 2378 2438 if (ms->port->vport) { ··· 2535 2595 bfa_sm_fault(ms->port->fcs, event); 2536 2596 } 2537 2597 } 2538 - /** 2598 + /* 2539 2599 * ms_pvt MS local functions 2540 2600 */ 2541 2601 ··· 2597 2657 } 2598 2658 2599 2659 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 2600 - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 2660 + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); 2601 2661 2602 2662 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 2603 2663 gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1); 2604 2664 2605 - num_entries = bfa_os_ntohl(gmal_resp->ms_len); 2665 + num_entries = be32_to_cpu(gmal_resp->ms_len); 2606 2666 if (num_entries == 0) { 2607 2667 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); 2608 2668 return; ··· 2735 2795 bfa_sm_fault(ms->port->fcs, event); 2736 2796 } 2737 2797 } 2738 - /** 2798 + /* 2739 2799 * ms_pvt MS local functions 2740 2800 */ 2741 2801 ··· 2793 2853 } 2794 2854 2795 2855 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 2796 - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 2856 + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); 2797 2857 2798 2858 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 2799 2859 gfn_resp = (wwn_t *)(cthdr + 1); ··· 2811 2871 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); 2812 2872 } 2813 2873 2814 - /** 2874 + /* 2815 2875 * ms_pvt MS local functions 2816 2876 */ 2817 2877 ··· 2957 3017 bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN); 2958 3018 } 2959 3019 2960 - /** 3020 + /* 2961 3021 * @page ns_sm_info VPORT NS State Machine 2962 3022 * 2963 3023 * @section ns_sm_interactions VPORT NS State Machine Interactions ··· 3020 3080 u32 *pid_buf, u32 n_pids); 3021 3081 3022 3082 static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port); 3023 - /** 3083 + /* 3024 3084 * fcs_ns_sm FCS nameserver interface state machine 3025 3085 */ 3026 3086 3027 - /** 3087 + /* 3028 3088 * VPort NS State Machine events 3029 3089 */ 3030 3090 enum vport_ns_event { ··· 3079 3139 enum vport_ns_event event); 3080 3140 static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, 3081 3141 enum vport_ns_event event); 3082 - /** 3142 + /* 3083 3143 * Start in offline state - awaiting linkup 3084 3144 */ 3085 3145 static void ··· 3568 3628 3569 3629 3570 3630 3571 - /** 3631 + /* 3572 3632 * ns_pvt Nameserver local functions 3573 3633 */ 3574 3634 ··· 3664 3724 } 3665 3725 } 3666 3726 3667 - /** 3727 + /* 3668 3728 * Register the symbolic port name. 3669 3729 */ 3670 3730 static void ··· 3678 3738 u8 symbl[256]; 3679 3739 u8 *psymbl = &symbl[0]; 3680 3740 3681 - bfa_os_memset(symbl, 0, sizeof(symbl)); 3741 + memset(symbl, 0, sizeof(symbl)); 3682 3742 3683 3743 bfa_trc(port->fcs, port->port_cfg.pwwn); 3684 3744 ··· 3695 3755 * for V-Port, form a Port Symbolic Name 3696 3756 */ 3697 3757 if (port->vport) { 3698 - /** 3758 + /* 3699 3759 * For Vports, we append the vport's port symbolic name 3700 3760 * to that of the base port. 
3701 3761 */ ··· 3755 3815 } 3756 3816 3757 3817 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 3758 - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 3818 + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); 3759 3819 3760 3820 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 3761 3821 port->stats.ns_rspnid_accepts++; ··· 3769 3829 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3770 3830 } 3771 3831 3772 - /** 3832 + /* 3773 3833 * Register FC4-Types 3774 3834 */ 3775 3835 static void ··· 3827 3887 } 3828 3888 3829 3889 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 3830 - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 3890 + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); 3831 3891 3832 3892 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 3833 3893 port->stats.ns_rftid_accepts++; ··· 3841 3901 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3842 3902 } 3843 3903 3844 - /** 3904 + /* 3845 3905 * Register FC4-Features : Should be done after RFT_ID 3846 3906 */ 3847 3907 static void ··· 3904 3964 } 3905 3965 3906 3966 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 3907 - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 3967 + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); 3908 3968 3909 3969 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 3910 3970 port->stats.ns_rffid_accepts++; ··· 3922 3982 } else 3923 3983 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3924 3984 } 3925 - /** 3985 + /* 3926 3986 * Query Fabric for FC4-Types Devices. 3927 3987 * 3928 3988 * TBD : Need to use a local (FCS private) response buffer, since the response ··· 3998 4058 } 3999 4059 4000 4060 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 4001 - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 4061 + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); 4002 4062 4003 4063 switch (cthdr->cmd_rsp_code) { 4004 4064 ··· 4042 4102 } 4043 4103 } 4044 4104 4045 - /** 4105 + /* 4046 4106 * This routine will be called by bfa_timer on timer timeouts. 4047 4107 * 4048 4108 * param[in] port - pointer to bfa_fcs_lport_t. ··· 4106 4166 } 4107 4167 } 4108 4168 4109 - /** 4169 + /* 4110 4170 * fcs_ns_public FCS nameserver public interfaces 4111 4171 */ 4112 4172 ··· 4167 4227 } 4168 4228 } 4169 4229 4170 - /** 4230 + /* 4171 4231 * FCS SCN 4172 4232 */ 4173 4233 ··· 4190 4250 struct fchs_s *rx_fchs); 4191 4251 static void bfa_fcs_lport_scn_timeout(void *arg); 4192 4252 4193 - /** 4253 + /* 4194 4254 * fcs_scm_sm FCS SCN state machine 4195 4255 */ 4196 4256 4197 - /** 4257 + /* 4198 4258 * VPort SCN State Machine events 4199 4259 */ 4200 4260 enum port_scn_event { ··· 4218 4278 static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, 4219 4279 enum port_scn_event event); 4220 4280 4221 - /** 4281 + /* 4222 4282 * Starting state - awaiting link up. 4223 4283 */ 4224 4284 static void ··· 4322 4382 4323 4383 4324 4384 4325 - /** 4385 + /* 4326 4386 * fcs_scn_private FCS SCN private functions 4327 4387 */ 4328 4388 4329 - /** 4389 + /* 4330 4390 * This routine will be called to send a SCR command. 4331 4391 */ 4332 4392 static void ··· 4439 4499 FC_MAX_PDUSZ, 0); 4440 4500 } 4441 4501 4442 - /** 4502 + /* 4443 4503 * This routine will be called by bfa_timer on timer timeouts. 4444 4504 * 4445 4505 * param[in] vport - pointer to bfa_fcs_lport_t. 
··· 4462 4522 4463 4523 4464 4524 4465 - /** 4525 + /* 4466 4526 * fcs_scn_public FCS state change notification public interfaces 4467 4527 */ 4468 4528 ··· 4503 4563 4504 4564 bfa_trc(port->fcs, rpid); 4505 4565 4506 - /** 4566 + /* 4507 4567 * If this is an unknown device, then it just came online. 4508 4568 * Otherwise let rport handle the RSCN event. 4509 4569 */ ··· 4519 4579 bfa_fcs_rport_scn(rport); 4520 4580 } 4521 4581 4522 - /** 4582 + /* 4523 4583 * rscn format based PID comparison 4524 4584 */ 4525 4585 #define __fc_pid_match(__c0, __c1, __fmt) \ ··· 4564 4624 int i = 0, j; 4565 4625 4566 4626 num_entries = 4567 - (bfa_os_ntohs(rscn->payldlen) - 4627 + (be16_to_cpu(rscn->payldlen) - 4568 4628 sizeof(u32)) / sizeof(rscn->event[0]); 4569 4629 4570 4630 bfa_trc(port->fcs, num_entries); ··· 4631 4691 } 4632 4692 } 4633 4693 4634 - /** 4635 - * If any of area, domain or fabric RSCN is received, do a fresh discovery 4636 - * to find new devices. 4694 + /* 4695 + * If any of area, domain or fabric RSCN is received, do a fresh 4696 + * discovery to find new devices. 4637 4697 */ 4638 4698 if (nsquery) 4639 4699 bfa_fcs_lport_ns_query(port); 4640 4700 } 4641 4701 4642 - /** 4702 + /* 4643 4703 * BFA FCS port 4644 4704 */ 4645 - /** 4705 + /* 4646 4706 * fcs_port_api BFA FCS port API 4647 4707 */ 4648 4708 struct bfa_fcs_lport_s * ··· 4883 4943 void 4884 4944 bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port) 4885 4945 { 4886 - bfa_os_memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s)); 4946 + memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s)); 4887 4947 } 4888 4948 4889 - /** 4949 + /* 4890 4950 * FCS virtual port state machine 4891 4951 */ 4892 4952 ··· 4907 4967 static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport); 4908 4968 static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport); 4909 4969 4910 - /** 4970 + /* 4911 4971 * fcs_vport_sm FCS virtual port state machine 4912 4972 */ 4913 4973 4914 - /** 4974 + /* 4915 4975 * VPort State Machine events 4916 4976 */ 4917 4977 enum bfa_fcs_vport_event { ··· 4964 5024 {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR} 4965 5025 }; 4966 5026 4967 - /** 5027 + /* 4968 5028 * Beginning state. 4969 5029 */ 4970 5030 static void ··· 4985 5045 } 4986 5046 } 4987 5047 4988 - /** 5048 + /* 4989 5049 * Created state - a start event is required to start up the state machine. 4990 5050 */ 4991 5051 static void ··· 5002 5062 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); 5003 5063 bfa_fcs_vport_do_fdisc(vport); 5004 5064 } else { 5005 - /** 5065 + /* 5006 5066 * Fabric is offline or not NPIV capable, stay in 5007 5067 * offline state. 5008 5068 */ ··· 5018 5078 5019 5079 case BFA_FCS_VPORT_SM_ONLINE: 5020 5080 case BFA_FCS_VPORT_SM_OFFLINE: 5021 - /** 5081 + /* 5022 5082 * Ignore ONLINE/OFFLINE events from fabric 5023 5083 * till vport is started. 5024 5084 */ ··· 5029 5089 } 5030 5090 } 5031 5091 5032 - /** 5092 + /* 5033 5093 * Offline state - awaiting ONLINE event from fabric SM. 5034 5094 */ 5035 5095 static void ··· 5067 5127 } 5068 5128 5069 5129 5070 - /** 5130 + /* 5071 5131 * FDISC is sent and awaiting reply from fabric. 5072 5132 */ 5073 5133 static void ··· 5114 5174 } 5115 5175 } 5116 5176 5117 - /** 5177 + /* 5118 5178 * FDISC attempt failed - a timer is active to retry FDISC. 5119 5179 */ 5120 5180 static void ··· 5148 5208 } 5149 5209 } 5150 5210 5151 - /** 5211 + /* 5152 5212 * Vport is online (FDISC is complete). 
5153 5213 */ 5154 5214 static void ··· 5175 5235 } 5176 5236 } 5177 5237 5178 - /** 5238 + /* 5179 5239 * Vport is being deleted - awaiting lport delete completion to send 5180 5240 * LOGO to fabric. 5181 5241 */ ··· 5204 5264 } 5205 5265 } 5206 5266 5207 - /** 5267 + /* 5208 5268 * Error State. 5209 5269 * This state will be set when the Vport Creation fails due 5210 5270 * to errors like Dup WWN. In this state only operation allowed ··· 5228 5288 } 5229 5289 } 5230 5290 5231 - /** 5291 + /* 5232 5292 * Lport cleanup is in progress since vport is being deleted. Fabric is 5233 5293 * offline, so no LOGO is needed to complete vport deletion. 5234 5294 */ ··· 5253 5313 } 5254 5314 } 5255 5315 5256 - /** 5316 + /* 5257 5317 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup 5258 5318 * is done. 5259 5319 */ ··· 5287 5347 5288 5348 5289 5349 5290 - /** 5350 + /* 5291 5351 * fcs_vport_private FCS virtual port private functions 5292 5352 */ 5293 - /** 5353 + /* 5294 5354 * This routine will be called to send a FDISC command. 5295 5355 */ 5296 5356 static void ··· 5337 5397 } 5338 5398 } 5339 5399 5340 - /** 5400 + /* 5341 5401 * Called to send a logout to the fabric. Used when a V-Port is 5342 5402 * deleted/stopped. 5343 5403 */ ··· 5351 5411 } 5352 5412 5353 5413 5354 - /** 5414 + /* 5355 5415 * This routine will be called by bfa_timer on timer timeouts. 5356 5416 * 5357 5417 * param[in] vport - pointer to bfa_fcs_vport_t. ··· 5389 5449 5390 5450 5391 5451 5392 - /** 5452 + /* 5393 5453 * fcs_vport_public FCS virtual port public interfaces 5394 5454 */ 5395 5455 5396 - /** 5456 + /* 5397 5457 * Online notification from fabric SM. 5398 5458 */ 5399 5459 void ··· 5403 5463 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); 5404 5464 } 5405 5465 5406 - /** 5466 + /* 5407 5467 * Offline notification from fabric SM. 5408 5468 */ 5409 5469 void ··· 5413 5473 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); 5414 5474 } 5415 5475 5416 - /** 5476 + /* 5417 5477 * Cleanup notification from fabric SM on link timer expiry. 5418 5478 */ 5419 5479 void ··· 5421 5481 { 5422 5482 vport->vport_stats.fab_cleanup++; 5423 5483 } 5424 - /** 5484 + /* 5425 5485 * delete notification from fabric SM. To be invoked from within FCS. 5426 5486 */ 5427 5487 void ··· 5430 5490 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); 5431 5491 } 5432 5492 5433 - /** 5493 + /* 5434 5494 * Delete completion callback from associated lport 5435 5495 */ 5436 5496 void ··· 5441 5501 5442 5502 5443 5503 5444 - /** 5504 + /* 5445 5505 * fcs_vport_api Virtual port API 5446 5506 */ 5447 5507 5448 - /** 5508 + /* 5449 5509 * Use this function to instantiate a new FCS vport object. This 5450 5510 * function will not trigger any HW initialization process (which will be 5451 5511 * done in vport_start() call) ··· 5495 5555 return BFA_STATUS_OK; 5496 5556 } 5497 5557 5498 - /** 5558 + /* 5499 5559 * Use this function to instantiate a new FCS PBC vport object. This 5500 5560 * function will not trigger any HW initialization process (which will be 5501 5561 * done in vport_start() call) ··· 5525 5585 return rc; 5526 5586 } 5527 5587 5528 - /** 5588 + /* 5529 5589 * Use this function to findout if this is a pbc vport or not. 5530 5590 * 5531 5591 * @param[in] vport - pointer to bfa_fcs_vport_t. ··· 5543 5603 5544 5604 } 5545 5605 5546 - /** 5606 + /* 5547 5607 * Use this function initialize the vport. 5548 5608 * 5549 5609 * @param[in] vport - pointer to bfa_fcs_vport_t. 
··· 5558 5618 return BFA_STATUS_OK; 5559 5619 } 5560 5620 5561 - /** 5621 + /* 5562 5622 * Use this function quiese the vport object. This function will return 5563 5623 * immediately, when the vport is actually stopped, the 5564 5624 * bfa_drv_vport_stop_cb() will be called. ··· 5575 5635 return BFA_STATUS_OK; 5576 5636 } 5577 5637 5578 - /** 5638 + /* 5579 5639 * Use this function to delete a vport object. Fabric object should 5580 5640 * be stopped before this function call. 5581 5641 * ··· 5597 5657 return BFA_STATUS_OK; 5598 5658 } 5599 5659 5600 - /** 5660 + /* 5601 5661 * Use this function to get vport's current status info. 5602 5662 * 5603 5663 * param[in] vport pointer to bfa_fcs_vport_t. ··· 5612 5672 if (vport == NULL || attr == NULL) 5613 5673 return; 5614 5674 5615 - bfa_os_memset(attr, 0, sizeof(struct bfa_vport_attr_s)); 5675 + memset(attr, 0, sizeof(struct bfa_vport_attr_s)); 5616 5676 5617 5677 bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr); 5618 5678 attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); 5619 5679 } 5620 5680 5621 - /** 5681 + /* 5622 5682 * Use this function to get vport's statistics. 5623 5683 * 5624 5684 * param[in] vport pointer to bfa_fcs_vport_t. ··· 5633 5693 *stats = vport->vport_stats; 5634 5694 } 5635 5695 5636 - /** 5696 + /* 5637 5697 * Use this function to clear vport's statistics. 5638 5698 * 5639 5699 * param[in] vport pointer to bfa_fcs_vport_t. ··· 5643 5703 void 5644 5704 bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport) 5645 5705 { 5646 - bfa_os_memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); 5706 + memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); 5647 5707 } 5648 5708 5649 - /** 5709 + /* 5650 5710 * Lookup a virtual port. Excludes base port from lookup. 5651 5711 */ 5652 5712 struct bfa_fcs_vport_s * ··· 5668 5728 return vport; 5669 5729 } 5670 5730 5671 - /** 5731 + /* 5672 5732 * FDISC Response 5673 5733 */ 5674 5734 void ··· 5724 5784 } 5725 5785 } 5726 5786 5727 - /** 5787 + /* 5728 5788 * LOGO response 5729 5789 */ 5730 5790 void ··· 5734 5794 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); 5735 5795 } 5736 5796 5737 - /** 5797 + /* 5738 5798 * Received clear virtual link 5739 5799 */ 5740 5800 void
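Editor's note: every vport hunk above funnels through the driver's table-driven state machine, where each state is a handler function, bfa_sm_set_state() stores the handler, and bfa_sm_send_event() dispatches through it. A minimal sketch of that pattern, using simplified stand-in names (vport_demo_*), not the driver's real types:

/* One function per state; the current state IS the stored handler. */
struct vport_demo;
typedef void (*vport_demo_sm_t)(struct vport_demo *vport, int event);

struct vport_demo {
	vport_demo_sm_t sm;	/* current state */
};

static void vport_demo_set_state(struct vport_demo *vport,
				 vport_demo_sm_t state)
{
	vport->sm = state;	/* a transition is a pointer assignment */
}

static void vport_demo_send_event(struct vport_demo *vport, int event)
{
	vport->sm(vport, event);	/* dispatch through current state */
}

The bfa_sm_to_state() calls seen in the diff map the stored handler back to an enum by scanning a {handler, state} table, which is what the BFA_SM() entries in vport_sm_table build.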
+99 -99
drivers/scsi/bfa/bfa_fcs_rport.c
··· 15 15 * General Public License for more details. 16 16 */ 17 17 18 - /** 18 + /* 19 19 * rport.c Remote port implementation. 20 20 */ 21 21 ··· 75 75 static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, 76 76 struct fchs_s *rx_fchs, u16 len); 77 77 static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); 78 - /** 78 + /* 79 79 * fcs_rport_sm FCS rport state machine events 80 80 */ 81 81 ··· 172 172 {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC}, 173 173 }; 174 174 175 - /** 175 + /* 176 176 * Beginning state. 177 177 */ 178 178 static void ··· 210 210 } 211 211 } 212 212 213 - /** 213 + /* 214 214 * PLOGI is being sent. 215 215 */ 216 216 static void ··· 262 262 } 263 263 } 264 264 265 - /** 265 + /* 266 266 * PLOGI is being sent. 267 267 */ 268 268 static void ··· 287 287 288 288 case RPSM_EVENT_PLOGI_RCVD: 289 289 case RPSM_EVENT_SCN: 290 - /** 290 + /* 291 291 * Ignore, SCN is possibly online notification. 292 292 */ 293 293 break; ··· 309 309 break; 310 310 311 311 case RPSM_EVENT_HCB_OFFLINE: 312 - /** 312 + /* 313 313 * Ignore BFA callback, on a PLOGI receive we call bfa offline. 314 314 */ 315 315 break; ··· 319 319 } 320 320 } 321 321 322 - /** 322 + /* 323 323 * PLOGI is sent. 324 324 */ 325 325 static void ··· 380 380 } 381 381 } 382 382 383 - /** 383 + /* 384 384 * PLOGI is sent. 385 385 */ 386 386 static void ··· 475 475 } 476 476 } 477 477 478 - /** 478 + /* 479 479 * PLOGI is complete. Awaiting BFA rport online callback. FC-4s 480 480 * are offline. 481 481 */ ··· 519 519 break; 520 520 521 521 case RPSM_EVENT_SCN: 522 - /** 522 + /* 523 523 * @todo 524 524 * Ignore SCN - PLOGI just completed, FC-4 login should detect 525 525 * device failures. ··· 531 531 } 532 532 } 533 533 534 - /** 534 + /* 535 535 * Rport is ONLINE. FC-4s active. 536 536 */ 537 537 static void ··· 580 580 } 581 581 } 582 582 583 - /** 583 + /* 584 584 * An SCN event is received in ONLINE state. NS query is being sent 585 585 * prior to ADISC authentication with rport. FC-4s are paused. 586 586 */ ··· 604 604 break; 605 605 606 606 case RPSM_EVENT_SCN: 607 - /** 607 + /* 608 608 * ignore SCN, wait for response to query itself 609 609 */ 610 610 break; ··· 638 638 } 639 639 } 640 640 641 - /** 641 + /* 642 642 * An SCN event is received in ONLINE state. NS query is sent to rport. 643 643 * FC-4s are paused. 644 644 */ ··· 697 697 } 698 698 } 699 699 700 - /** 700 + /* 701 701 * An SCN event is received in ONLINE state. ADISC is being sent for 702 702 * authenticating with rport. FC-4s are paused. 703 703 */ ··· 748 748 } 749 749 } 750 750 751 - /** 751 + /* 752 752 * An SCN event is received in ONLINE state. ADISC is to rport. 753 753 * FC-4s are paused. 754 754 */ ··· 765 765 break; 766 766 767 767 case RPSM_EVENT_PLOGI_RCVD: 768 - /** 768 + /* 769 769 * Too complex to cleanup FC-4 & rport and then acc to PLOGI. 770 770 * At least go offline when a PLOGI is received. 771 771 */ ··· 787 787 break; 788 788 789 789 case RPSM_EVENT_SCN: 790 - /** 790 + /* 791 791 * already processing RSCN 792 792 */ 793 793 break; ··· 810 810 } 811 811 } 812 812 813 - /** 813 + /* 814 814 * Rport has sent LOGO. Awaiting FC-4 offline completion callback. 815 815 */ 816 816 static void ··· 841 841 } 842 842 } 843 843 844 - /** 844 + /* 845 845 * LOGO needs to be sent to rport. Awaiting FC-4 offline completion 846 846 * callback. 847 847 */ ··· 864 864 } 865 865 } 866 866 867 - /** 867 + /* 868 868 * Rport is going offline. Awaiting FC-4 offline completion callback. 
869 869 */ 870 870 static void ··· 886 886 case RPSM_EVENT_LOGO_RCVD: 887 887 case RPSM_EVENT_PRLO_RCVD: 888 888 case RPSM_EVENT_ADDRESS_CHANGE: 889 - /** 889 + /* 890 890 * rport is already going offline. 891 891 * SCN - ignore and wait till transitioning to offline state 892 892 */ ··· 901 901 } 902 902 } 903 903 904 - /** 904 + /* 905 905 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 906 906 * callback. 907 907 */ ··· 945 945 case RPSM_EVENT_SCN: 946 946 case RPSM_EVENT_LOGO_RCVD: 947 947 case RPSM_EVENT_PRLO_RCVD: 948 - /** 948 + /* 949 949 * Ignore, already offline. 950 950 */ 951 951 break; ··· 955 955 } 956 956 } 957 957 958 - /** 958 + /* 959 959 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 960 960 * callback to send LOGO accept. 961 961 */ ··· 1009 1009 1010 1010 case RPSM_EVENT_LOGO_RCVD: 1011 1011 case RPSM_EVENT_PRLO_RCVD: 1012 - /** 1012 + /* 1013 1013 * Ignore - already processing a LOGO. 1014 1014 */ 1015 1015 break; ··· 1019 1019 } 1020 1020 } 1021 1021 1022 - /** 1022 + /* 1023 1023 * Rport is being deleted. FC-4s are offline. 1024 1024 * Awaiting BFA rport offline 1025 1025 * callback to send LOGO. ··· 1048 1048 } 1049 1049 } 1050 1050 1051 - /** 1051 + /* 1052 1052 * Rport is being deleted. FC-4s are offline. LOGO is being sent. 1053 1053 */ 1054 1054 static void ··· 1082 1082 } 1083 1083 } 1084 1084 1085 - /** 1085 + /* 1086 1086 * Rport is offline. FC-4s are offline. BFA rport is offline. 1087 1087 * Timer active to delete stale rport. 1088 1088 */ ··· 1142 1142 } 1143 1143 } 1144 1144 1145 - /** 1145 + /* 1146 1146 * Rport address has changed. Nameserver discovery request is being sent. 1147 1147 */ 1148 1148 static void ··· 1199 1199 } 1200 1200 } 1201 1201 1202 - /** 1202 + /* 1203 1203 * Nameserver discovery failed. Waiting for timeout to retry. 1204 1204 */ 1205 1205 static void ··· 1263 1263 } 1264 1264 } 1265 1265 1266 - /** 1266 + /* 1267 1267 * Rport address has changed. Nameserver discovery request is sent. 1268 1268 */ 1269 1269 static void ··· 1329 1329 bfa_fcs_rport_send_prlo_acc(rport); 1330 1330 break; 1331 1331 case RPSM_EVENT_SCN: 1332 - /** 1332 + /* 1333 1333 * ignore, wait for NS query response 1334 1334 */ 1335 1335 break; 1336 1336 1337 1337 case RPSM_EVENT_LOGO_RCVD: 1338 - /** 1338 + /* 1339 1339 * Not logged-in yet. Accept LOGO. 1340 1340 */ 1341 1341 bfa_fcs_rport_send_logo_acc(rport); ··· 1354 1354 1355 1355 1356 1356 1357 - /** 1357 + /* 1358 1358 * fcs_rport_private FCS RPORT provate functions 1359 1359 */ 1360 1360 ··· 1415 1415 1416 1416 plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp); 1417 1417 1418 - /** 1418 + /* 1419 1419 * Check for failure first. 1420 1420 */ 1421 1421 if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) { ··· 1436 1436 return; 1437 1437 } 1438 1438 1439 - /** 1439 + /* 1440 1440 * PLOGI is complete. Make sure this device is not one of the known 1441 1441 * device with a new FC port address. 1442 1442 */ ··· 1468 1468 } 1469 1469 } 1470 1470 1471 - /** 1471 + /* 1472 1472 * Normal login path -- no evil twins. 1473 1473 */ 1474 1474 rport->stats.plogi_accs++; ··· 1621 1621 bfa_trc(rport->fcs, rport->pwwn); 1622 1622 1623 1623 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 1624 - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 1624 + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); 1625 1625 1626 1626 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 1627 1627 /* Check if the pid is the same as before. 
*/ ··· 1691 1691 bfa_trc(rport->fcs, rport->pwwn); 1692 1692 1693 1693 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 1694 - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 1694 + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); 1695 1695 1696 1696 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 1697 1697 bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); ··· 1722 1722 } 1723 1723 } 1724 1724 1725 - /** 1725 + /* 1726 1726 * Called to send a logout to the rport. 1727 1727 */ 1728 1728 static void ··· 1759 1759 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1760 1760 } 1761 1761 1762 - /** 1762 + /* 1763 1763 * Send ACC for a LOGO received. 1764 1764 */ 1765 1765 static void ··· 1788 1788 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1789 1789 } 1790 1790 1791 - /** 1791 + /* 1792 1792 * brief 1793 1793 * This routine will be called by bfa_timer on timer timeouts. 1794 1794 * ··· 1961 1961 struct bfa_fcs_rport_s *rport; 1962 1962 struct bfad_rport_s *rport_drv; 1963 1963 1964 - /** 1964 + /* 1965 1965 * allocate rport 1966 1966 */ 1967 1967 if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv) ··· 1979 1979 rport->pid = rpid; 1980 1980 rport->pwwn = pwwn; 1981 1981 1982 - /** 1982 + /* 1983 1983 * allocate BFA rport 1984 1984 */ 1985 1985 rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport); ··· 1989 1989 return NULL; 1990 1990 } 1991 1991 1992 - /** 1992 + /* 1993 1993 * allocate FC-4s 1994 1994 */ 1995 1995 bfa_assert(bfa_fcs_lport_is_initiator(port)); ··· 2021 2021 { 2022 2022 struct bfa_fcs_lport_s *port = rport->port; 2023 2023 2024 - /** 2024 + /* 2025 2025 * - delete FC-4s 2026 2026 * - delete BFA rport 2027 2027 * - remove from queue of rports ··· 2093 2093 } 2094 2094 } 2095 2095 2096 - /** 2096 + /* 2097 2097 * Update rport parameters from PLOGI or PLOGI accept. 2098 2098 */ 2099 2099 static void ··· 2101 2101 { 2102 2102 bfa_fcs_lport_t *port = rport->port; 2103 2103 2104 - /** 2104 + /* 2105 2105 * - port name 2106 2106 * - node name 2107 2107 */ 2108 2108 rport->pwwn = plogi->port_name; 2109 2109 rport->nwwn = plogi->node_name; 2110 2110 2111 - /** 2111 + /* 2112 2112 * - class of service 2113 2113 */ 2114 2114 rport->fc_cos = 0; ··· 2118 2118 if (plogi->class2.class_valid) 2119 2119 rport->fc_cos |= FC_CLASS_2; 2120 2120 2121 - /** 2121 + /* 2122 2122 * - CISC 2123 2123 * - MAX receive frame size 2124 2124 */ 2125 2125 rport->cisc = plogi->csp.cisc; 2126 - rport->maxfrsize = bfa_os_ntohs(plogi->class3.rxsz); 2126 + rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz); 2127 2127 2128 - bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred)); 2128 + bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); 2129 2129 bfa_trc(port->fcs, port->fabric->bb_credit); 2130 - /** 2130 + /* 2131 2131 * Direct Attach P2P mode : 2132 2132 * This is to handle a bug (233476) in IBM targets in Direct Attach 2133 2133 * Mode. Basically, in FLOGI Accept the target would have ··· 2136 2136 * in PLOGI. 
2137 2137 */ 2138 2138 if ((!bfa_fcs_fabric_is_switched(port->fabric)) && 2139 - (bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) { 2139 + (be16_to_cpu(plogi->csp.bbcred) < port->fabric->bb_credit)) { 2140 2140 2141 - bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred)); 2141 + bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); 2142 2142 bfa_trc(port->fcs, port->fabric->bb_credit); 2143 2143 2144 - port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred); 2144 + port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred); 2145 2145 bfa_fcport_set_tx_bbcredit(port->fcs->bfa, 2146 2146 port->fabric->bb_credit); 2147 2147 } 2148 2148 2149 2149 } 2150 2150 2151 - /** 2151 + /* 2152 2152 * Called to handle LOGO received from an existing remote port. 2153 2153 */ 2154 2154 static void ··· 2164 2164 2165 2165 2166 2166 2167 - /** 2167 + /* 2168 2168 * fcs_rport_public FCS rport public interfaces 2169 2169 */ 2170 2170 2171 - /** 2171 + /* 2172 2172 * Called by bport/vport to create a remote port instance for a discovered 2173 2173 * remote device. 2174 2174 * ··· 2191 2191 return rport; 2192 2192 } 2193 2193 2194 - /** 2194 + /* 2195 2195 * Called to create a rport for which only the wwn is known. 2196 2196 * 2197 2197 * @param[in] port - base port ··· 2211 2211 bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); 2212 2212 return rport; 2213 2213 } 2214 - /** 2214 + /* 2215 2215 * Called by bport in private loop topology to indicate that a 2216 2216 * rport has been discovered and plogi has been completed. 2217 2217 * ··· 2233 2233 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); 2234 2234 } 2235 2235 2236 - /** 2236 + /* 2237 2237 * Called by bport/vport to handle PLOGI received from a new remote port. 2238 2238 * If an existing rport does a plogi, it will be handled separately. 2239 2239 */ ··· 2272 2272 return 0; 2273 2273 } 2274 2274 2275 - /** 2275 + /* 2276 2276 * Called by bport/vport to handle PLOGI received from an existing 2277 2277 * remote port. 2278 2278 */ ··· 2280 2280 bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, 2281 2281 struct fc_logi_s *plogi) 2282 2282 { 2283 - /** 2283 + /* 2284 2284 * @todo Handle P2P and initiator-initiator. 2285 2285 */ 2286 2286 ··· 2289 2289 rport->reply_oxid = rx_fchs->ox_id; 2290 2290 bfa_trc(rport->fcs, rport->reply_oxid); 2291 2291 2292 - /** 2292 + /* 2293 2293 * In Switched fabric topology, 2294 2294 * PLOGI to each other. If our pwwn is smaller, ignore it, 2295 2295 * if it is not a well known address. ··· 2307 2307 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); 2308 2308 } 2309 2309 2310 - /** 2310 + /* 2311 2311 * Called by bport/vport to delete a remote port instance. 2312 2312 * 2313 2313 * Rport delete is called under the following conditions: ··· 2321 2321 bfa_sm_send_event(rport, RPSM_EVENT_DELETE); 2322 2322 } 2323 2323 2324 - /** 2324 + /* 2325 2325 * Called by bport/vport to when a target goes offline. 2326 2326 * 2327 2327 */ ··· 2331 2331 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); 2332 2332 } 2333 2333 2334 - /** 2334 + /* 2335 2335 * Called by bport in n2n when a target (attached port) becomes online. 2336 2336 * 2337 2337 */ ··· 2340 2340 { 2341 2341 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); 2342 2342 } 2343 - /** 2343 + /* 2344 2344 * Called by bport/vport to notify SCN for the remote port 2345 2345 */ 2346 2346 void ··· 2350 2350 bfa_sm_send_event(rport, RPSM_EVENT_SCN); 2351 2351 } 2352 2352 2353 - /** 2353 + /* 2354 2354 * Called by fcpim to notify that the ITN cleanup is done. 
2355 2355 */ 2356 2356 void ··· 2359 2359 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); 2360 2360 } 2361 2361 2362 - /** 2362 + /* 2363 2363 * Called by fcptm to notify that the ITN cleanup is done. 2364 2364 */ 2365 2365 void ··· 2368 2368 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); 2369 2369 } 2370 2370 2371 - /** 2371 + /* 2372 2372 * brief 2373 2373 * This routine BFA callback for bfa_rport_online() call. 2374 2374 * ··· 2391 2391 bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE); 2392 2392 } 2393 2393 2394 - /** 2394 + /* 2395 2395 * brief 2396 2396 * This routine BFA callback for bfa_rport_offline() call. 2397 2397 * ··· 2413 2413 bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE); 2414 2414 } 2415 2415 2416 - /** 2416 + /* 2417 2417 * brief 2418 2418 * This routine is a static BFA callback when there is a QoS flow_id 2419 2419 * change notification ··· 2437 2437 bfa_trc(rport->fcs, rport->pwwn); 2438 2438 } 2439 2439 2440 - /** 2440 + /* 2441 2441 * brief 2442 2442 * This routine is a static BFA callback when there is a QoS priority 2443 2443 * change notification ··· 2461 2461 bfa_trc(rport->fcs, rport->pwwn); 2462 2462 } 2463 2463 2464 - /** 2464 + /* 2465 2465 * Called to process any unsolicted frames from this remote port 2466 2466 */ 2467 2467 void ··· 2470 2470 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); 2471 2471 } 2472 2472 2473 - /** 2473 + /* 2474 2474 * Called to process any unsolicted frames from this remote port 2475 2475 */ 2476 2476 void ··· 2577 2577 FC_MAX_PDUSZ, 0); 2578 2578 } 2579 2579 2580 - /** 2580 + /* 2581 2581 * Return state of rport. 2582 2582 */ 2583 2583 int ··· 2586 2586 return bfa_sm_to_state(rport_sm_table, rport->sm); 2587 2587 } 2588 2588 2589 - /** 2589 + /* 2590 2590 * brief 2591 2591 * Called by the Driver to set rport delete/ageout timeout 2592 2592 * ··· 2613 2613 2614 2614 2615 2615 2616 - /** 2616 + /* 2617 2617 * Remote port implementation. 2618 2618 */ 2619 2619 2620 - /** 2620 + /* 2621 2621 * fcs_rport_api FCS rport API. 2622 2622 */ 2623 2623 2624 - /** 2624 + /* 2625 2625 * Direct API to add a target by port wwn. This interface is used, for 2626 2626 * example, by bios when target pwwn is known from boot lun configuration. 2627 2627 */ ··· 2634 2634 return BFA_STATUS_OK; 2635 2635 } 2636 2636 2637 - /** 2637 + /* 2638 2638 * Direct API to remove a target and its associated resources. This 2639 2639 * interface is used, for example, by driver to remove target 2640 2640 * ports from the target list for a VM. ··· 2663 2663 2664 2664 } 2665 2665 2666 - /** 2666 + /* 2667 2667 * Remote device status for display/debug. 2668 2668 */ 2669 2669 void ··· 2674 2674 bfa_fcs_lport_t *port = rport->port; 2675 2675 bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; 2676 2676 2677 - bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); 2677 + memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); 2678 2678 2679 2679 rport_attr->pid = rport->pid; 2680 2680 rport_attr->pwwn = rport->pwwn; ··· 2704 2704 } 2705 2705 } 2706 2706 2707 - /** 2707 + /* 2708 2708 * Per remote device statistics. 2709 2709 */ 2710 2710 void ··· 2717 2717 void 2718 2718 bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport) 2719 2719 { 2720 - bfa_os_memset((char *)&rport->stats, 0, 2720 + memset((char *)&rport->stats, 0, 2721 2721 sizeof(struct bfa_rport_stats_s)); 2722 2722 } 2723 2723 ··· 2767 2767 2768 2768 2769 2769 2770 - /** 2770 + /* 2771 2771 * Remote port features (RPF) implementation. 
2772 2772 */ 2773 2773 ··· 2786 2786 2787 2787 static void bfa_fcs_rpf_timeout(void *arg); 2788 2788 2789 - /** 2789 + /* 2790 2790 * fcs_rport_ftrs_sm FCS rport state machine events 2791 2791 */ 2792 2792 ··· 2981 2981 bfa_sm_fault(rport->fcs, event); 2982 2982 } 2983 2983 } 2984 - /** 2984 + /* 2985 2985 * Called when Rport is created. 2986 2986 */ 2987 2987 void ··· 2995 2995 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit); 2996 2996 } 2997 2997 2998 - /** 2998 + /* 2999 2999 * Called when Rport becomes online 3000 3000 */ 3001 3001 void ··· 3010 3010 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE); 3011 3011 } 3012 3012 3013 - /** 3013 + /* 3014 3014 * Called when Rport becomes offline 3015 3015 */ 3016 3016 void ··· 3090 3090 rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp); 3091 3091 if (rpsc2_acc->els_cmd == FC_ELS_ACC) { 3092 3092 rport->stats.rpsc_accs++; 3093 - num_ents = bfa_os_ntohs(rpsc2_acc->num_pids); 3093 + num_ents = be16_to_cpu(rpsc2_acc->num_pids); 3094 3094 bfa_trc(rport->fcs, num_ents); 3095 3095 if (num_ents > 0) { 3096 3096 bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid); 3097 3097 bfa_trc(rport->fcs, 3098 - bfa_os_ntohs(rpsc2_acc->port_info[0].pid)); 3098 + be16_to_cpu(rpsc2_acc->port_info[0].pid)); 3099 3099 bfa_trc(rport->fcs, 3100 - bfa_os_ntohs(rpsc2_acc->port_info[0].speed)); 3100 + be16_to_cpu(rpsc2_acc->port_info[0].speed)); 3101 3101 bfa_trc(rport->fcs, 3102 - bfa_os_ntohs(rpsc2_acc->port_info[0].index)); 3102 + be16_to_cpu(rpsc2_acc->port_info[0].index)); 3103 3103 bfa_trc(rport->fcs, 3104 3104 rpsc2_acc->port_info[0].type); 3105 3105 ··· 3109 3109 } 3110 3110 3111 3111 rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed( 3112 - bfa_os_ntohs(rpsc2_acc->port_info[0].speed)); 3112 + be16_to_cpu(rpsc2_acc->port_info[0].speed)); 3113 3113 3114 3114 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP); 3115 3115 }
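Editor's note: the recurring change in this file is byte-order handling. Fibre Channel payload fields arrive big-endian, and the driver-private bfa_os_ntohs() wrapper is replaced by the kernel's be16_to_cpu(). A hedged sketch of the convention, using a made-up struct rather than the real fc_logi_s:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_plogi {
	__be16 rxsz;	/* 16-bit field kept in wire (big-endian) order */
};

static u16 demo_max_frame_size(const struct demo_plogi *plogi)
{
	/* Convert exactly once, at the point of use. */
	return be16_to_cpu(plogi->rxsz);
}

Typing the field __be16 also lets sparse (with endian checking enabled) flag any access that forgets the conversion, which a private wrapper could not do.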
+8 -8
drivers/scsi/bfa/bfa_hw_cb.c
···
22 22	bfa_hwcb_reginit(struct bfa_s *bfa)
23 23	{
24 24		struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
25 -		bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
25 +		void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
26 26		int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
27 27
28 28		if (fn == 0) {
···
60 60	static void
61 61	bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
62 62	{
63 -		bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
64 -			__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq));
63 +		writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq),
64 +			bfa->iocfc.bfa_regs.intr_status);
65 65	}
66 66
67 67	void
···
72 72	static void
73 73	bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
74 74	{
75 -		bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
76 -			__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq));
75 +		writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
76 +			bfa->iocfc.bfa_regs.intr_status);
77 77	}
78 78
79 79	void
···
102 102		*num_vecs = __HFN_NUMINTS;
103 103	}
104 104
105 -	/**
105 +	/*
106 106	 * No special setup required for crossbow -- vector assignments are implicit.
107 107	 */
108 108	void
···
129 129			bfa->msix.handler[i] = bfa_msix_lpu_err;
130 130	}
131 131
132 -	/**
132 +	/*
133 133	 * Crossbow -- dummy, interrupts are masked
134 134	 */
135 135	void
···
142 142	{
143 143	}
144 144
145 -	/**
145 +	/*
146 146	 * No special enable/disable -- vector assignments are implicit.
147 147	 */
148 148	void
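Editor's note: two mechanical conversions dominate this file: bfa_os_addr_t becomes void __iomem *, and bfa_reg_write(addr, val) becomes the standard writel(val, addr) -- note the swapped argument order. A small sketch of the resulting idiom (DEMO_INTR_STATUS is a hypothetical offset, not a real bfa register):

#include <linux/types.h>
#include <linux/io.h>

#define DEMO_INTR_STATUS 0x20	/* hypothetical register offset */

static void demo_ack_intr(void __iomem *bar0, u32 bits)
{
	writel(bits, bar0 + DEMO_INTR_STATUS);	/* value first, then address */
	readl(bar0 + DEMO_INTR_STATUS);		/* read back to flush the posted write */
}

The __iomem annotation costs nothing at runtime; it only lets sparse catch code that dereferences MMIO pointers with plain loads and stores.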
+11 -11
drivers/scsi/bfa/bfa_hw_ct.c
···
31 31	bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
32 32	{
33 33		int fn = bfa_ioc_pcifn(&bfa->ioc);
34 -		bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
34 +		void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
35 35
36 36		if (msix)
37 -			bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], vec);
37 +			writel(vec, kva + __ct_msix_err_vec_reg[fn]);
38 38		else
39 -			bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], 0);
39 +			writel(0, kva + __ct_msix_err_vec_reg[fn]);
40 40	}
41 41
42 -	/**
42 +	/*
43 43	 * Dummy interrupt handler for handling spurious interrupt during chip-reinit.
44 44	 */
45 45	static void
···
51 51	bfa_hwct_reginit(struct bfa_s *bfa)
52 52	{
53 53		struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
54 -		bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
54 +		void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
55 55		int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
56 56
57 57		if (fn == 0) {
···
88 88	{
89 89		u32 r32;
90 90
91 -		r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
92 -		bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32);
91 +		r32 = readl(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
92 +		writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
93 93	}
94 94
95 95	void
···
97 97	{
98 98		u32 r32;
99 99
100 -		r32 = bfa_reg_read(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
101 -		bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq], r32);
100 +		r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
101 +		writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
102 102	}
103 103
104 104	void
···
110 110		*num_vecs = BFA_MSIX_CT_MAX;
111 111	}
112 112
113 -	/**
113 +	/*
114 114	 * Setup MSI-X vector for catapult
115 115	 */
116 116	void
···
156 156			bfa->msix.handler[i] = bfa_hwct_msix_dummy;
157 157	}
158 158
159 -	/**
159 +	/*
160 160	 * Enable MSI-X vectors
161 161	 */
162 162	void
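Editor's note: the reqq/rspq acks above keep their original shape through the conversion: read the queue-control register, then write the same value back to acknowledge. A condensed sketch of that read-back ack, with a placeholder register pointer:

#include <linux/types.h>
#include <linux/io.h>

static void demo_queue_ack(void __iomem *q_ctrl)
{
	u32 r32;

	r32 = readl(q_ctrl);	/* fetch current queue-control bits */
	writel(r32, q_ctrl);	/* write them back to ack the event */
}

Whether the hardware treats this as write-1-to-clear or a plain write-back is a property of the Catapult ASIC; the sketch only mirrors the access sequence the diff preserves.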
+196 -201
drivers/scsi/bfa/bfa_ioc.c
··· 23 23 24 24 BFA_TRC_FILE(CNA, IOC); 25 25 26 - /** 26 + /* 27 27 * IOC local definitions 28 28 */ 29 29 #define BFA_IOC_TOV 3000 /* msecs */ ··· 49 49 BFA_TRC_MAX * sizeof(struct bfa_trc_s))) 50 50 #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) 51 51 52 - /** 52 + /* 53 53 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 54 54 */ 55 55 ··· 73 73 74 74 #define bfa_ioc_mbox_cmd_pending(__ioc) \ 75 75 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ 76 - bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd)) 76 + readl((__ioc)->ioc_regs.hfn_mbox_cmd)) 77 77 78 78 bfa_boolean_t bfa_auto_recover = BFA_TRUE; 79 79 ··· 101 101 static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc); 102 102 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); 103 103 104 - /** 104 + /* 105 105 * hal_ioc_sm 106 106 */ 107 107 108 - /** 108 + /* 109 109 * IOC state machine definitions/declarations 110 110 */ 111 111 enum ioc_event { ··· 144 144 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 145 145 }; 146 146 147 - /** 147 + /* 148 148 * IOCPF state machine definitions/declarations 149 149 */ 150 150 ··· 174 174 static void bfa_iocpf_timeout(void *ioc_arg); 175 175 static void bfa_iocpf_sem_timeout(void *ioc_arg); 176 176 177 - /** 177 + /* 178 178 * IOCPF state machine events 179 179 */ 180 180 enum iocpf_event { ··· 191 191 IOCPF_E_TIMEOUT = 11, /* f/w response timeout */ 192 192 }; 193 193 194 - /** 194 + /* 195 195 * IOCPF states 196 196 */ 197 197 enum bfa_iocpf_state { ··· 232 232 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, 233 233 }; 234 234 235 - /** 235 + /* 236 236 * IOC State Machine 237 237 */ 238 238 239 - /** 239 + /* 240 240 * Beginning state. IOC uninit state. 241 241 */ 242 242 ··· 245 245 { 246 246 } 247 247 248 - /** 248 + /* 249 249 * IOC is in uninit state. 250 250 */ 251 251 static void ··· 262 262 bfa_sm_fault(ioc, event); 263 263 } 264 264 } 265 - /** 265 + /* 266 266 * Reset entry actions -- initialize state machine 267 267 */ 268 268 static void ··· 271 271 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); 272 272 } 273 273 274 - /** 274 + /* 275 275 * IOC is in reset state. 276 276 */ 277 277 static void ··· 304 304 bfa_iocpf_enable(ioc); 305 305 } 306 306 307 - /** 307 + /* 308 308 * Host IOC function is being enabled, awaiting response from firmware. 309 309 * Semaphore is acquired. 310 310 */ ··· 352 352 bfa_ioc_send_getattr(ioc); 353 353 } 354 354 355 - /** 355 + /* 356 356 * IOC configuration in progress. Timer is active. 357 357 */ 358 358 static void ··· 447 447 BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n"); 448 448 } 449 449 450 - /** 450 + /* 451 451 * IOC is being disabled 452 452 */ 453 453 static void ··· 474 474 } 475 475 } 476 476 477 - /** 477 + /* 478 478 * IOC disable completion entry. 479 479 */ 480 480 static void ··· 514 514 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 515 515 } 516 516 517 - /** 517 + /* 518 518 * Hardware initialization failed. 519 519 */ 520 520 static void ··· 528 528 break; 529 529 530 530 case IOC_E_FAILED: 531 - /** 531 + /* 532 532 * Initialization failure during iocpf init retry. 533 533 */ 534 534 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); ··· 556 556 struct bfa_ioc_hbfail_notify_s *notify; 557 557 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 558 558 559 - /** 559 + /* 560 560 * Notify driver and common modules registered for notification. 
561 561 */ 562 562 ioc->cbfn->hbfail_cbfn(ioc->bfa); ··· 569 569 "Heart Beat of IOC has failed\n"); 570 570 } 571 571 572 - /** 572 + /* 573 573 * IOC failure. 574 574 */ 575 575 static void ··· 580 580 switch (event) { 581 581 582 582 case IOC_E_FAILED: 583 - /** 583 + /* 584 584 * Initialization failure during iocpf recovery. 585 585 * !!! Fall through !!! 586 586 */ ··· 608 608 609 609 610 610 611 - /** 611 + /* 612 612 * IOCPF State Machine 613 613 */ 614 614 615 615 616 - /** 616 + /* 617 617 * Reset entry actions -- initialize state machine 618 618 */ 619 619 static void ··· 623 623 iocpf->auto_recover = bfa_auto_recover; 624 624 } 625 625 626 - /** 626 + /* 627 627 * Beginning state. IOC is in reset state. 628 628 */ 629 629 static void ··· 646 646 } 647 647 } 648 648 649 - /** 649 + /* 650 650 * Semaphore should be acquired for version check. 651 651 */ 652 652 static void ··· 655 655 bfa_ioc_hw_sem_get(iocpf->ioc); 656 656 } 657 657 658 - /** 658 + /* 659 659 * Awaiting h/w semaphore to continue with version check. 660 660 */ 661 661 static void ··· 692 692 } 693 693 } 694 694 695 - /** 695 + /* 696 696 * Notify enable completion callback. 697 697 */ 698 698 static void ··· 708 708 bfa_iocpf_timer_start(iocpf->ioc); 709 709 } 710 710 711 - /** 711 + /* 712 712 * Awaiting firmware version match. 713 713 */ 714 714 static void ··· 739 739 } 740 740 } 741 741 742 - /** 742 + /* 743 743 * Request for semaphore. 744 744 */ 745 745 static void ··· 748 748 bfa_ioc_hw_sem_get(iocpf->ioc); 749 749 } 750 750 751 - /** 751 + /* 752 752 * Awaiting semaphore for h/w initialzation. 753 753 */ 754 754 static void ··· 782 782 bfa_ioc_reset(iocpf->ioc, BFA_FALSE); 783 783 } 784 784 785 - /** 785 + /* 786 786 * Hardware is being initialized. Interrupts are enabled. 787 787 * Holding hardware semaphore lock. 788 788 */ ··· 839 839 bfa_ioc_send_enable(iocpf->ioc); 840 840 } 841 841 842 - /** 842 + /* 843 843 * Host IOC function is being enabled, awaiting response from firmware. 844 844 * Semaphore is acquired. 845 845 */ ··· 866 866 case IOCPF_E_TIMEOUT: 867 867 iocpf->retry_count++; 868 868 if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) { 869 - bfa_reg_write(ioc->ioc_regs.ioc_fwstate, 870 - BFI_IOC_UNINIT); 869 + writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); 871 870 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 872 871 break; 873 872 } ··· 943 944 bfa_ioc_send_disable(iocpf->ioc); 944 945 } 945 946 946 - /** 947 + /* 947 948 * IOC is being disabled 948 949 */ 949 950 static void ··· 967 968 */ 968 969 969 970 case IOCPF_E_TIMEOUT: 970 - bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); 971 + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 971 972 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 972 973 break; 973 974 ··· 979 980 } 980 981 } 981 982 982 - /** 983 + /* 983 984 * IOC disable completion entry. 984 985 */ 985 986 static void ··· 1017 1018 bfa_iocpf_timer_start(iocpf->ioc); 1018 1019 } 1019 1020 1020 - /** 1021 + /* 1021 1022 * Hardware initialization failed. 1022 1023 */ 1023 1024 static void ··· 1052 1053 static void 1053 1054 bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) 1054 1055 { 1055 - /** 1056 + /* 1056 1057 * Mark IOC as failed in hardware and stop firmware. 1057 1058 */ 1058 1059 bfa_ioc_lpu_stop(iocpf->ioc); 1059 - bfa_reg_write(iocpf->ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); 1060 + writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate); 1060 1061 1061 - /** 1062 + /* 1062 1063 * Notify other functions on HB failure. 
1063 1064 */ 1064 1065 bfa_ioc_notify_hbfail(iocpf->ioc); 1065 1066 1066 - /** 1067 + /* 1067 1068 * Flush any queued up mailbox requests. 1068 1069 */ 1069 1070 bfa_ioc_mbox_hbfail(iocpf->ioc); ··· 1072 1073 bfa_iocpf_recovery_timer_start(iocpf->ioc); 1073 1074 } 1074 1075 1075 - /** 1076 + /* 1076 1077 * IOC is in failed state. 1077 1078 */ 1078 1079 static void ··· 1100 1101 1101 1102 1102 1103 1103 - /** 1104 + /* 1104 1105 * hal_ioc_pvt BFA IOC private functions 1105 1106 */ 1106 1107 ··· 1112 1113 1113 1114 ioc->cbfn->disable_cbfn(ioc->bfa); 1114 1115 1115 - /** 1116 + /* 1116 1117 * Notify common modules registered for notification. 1117 1118 */ 1118 1119 list_for_each(qe, &ioc->hb_notify_q) { ··· 1122 1123 } 1123 1124 1124 1125 bfa_boolean_t 1125 - bfa_ioc_sem_get(bfa_os_addr_t sem_reg) 1126 + bfa_ioc_sem_get(void __iomem *sem_reg) 1126 1127 { 1127 1128 u32 r32; 1128 1129 int cnt = 0; 1129 1130 #define BFA_SEM_SPINCNT 3000 1130 1131 1131 - r32 = bfa_reg_read(sem_reg); 1132 + r32 = readl(sem_reg); 1132 1133 1133 1134 while (r32 && (cnt < BFA_SEM_SPINCNT)) { 1134 1135 cnt++; 1135 - bfa_os_udelay(2); 1136 - r32 = bfa_reg_read(sem_reg); 1136 + udelay(2); 1137 + r32 = readl(sem_reg); 1137 1138 } 1138 1139 1139 1140 if (r32 == 0) ··· 1144 1145 } 1145 1146 1146 1147 void 1147 - bfa_ioc_sem_release(bfa_os_addr_t sem_reg) 1148 + bfa_ioc_sem_release(void __iomem *sem_reg) 1148 1149 { 1149 - bfa_reg_write(sem_reg, 1); 1150 + writel(1, sem_reg); 1150 1151 } 1151 1152 1152 1153 static void ··· 1154 1155 { 1155 1156 u32 r32; 1156 1157 1157 - /** 1158 + /* 1158 1159 * First read to the semaphore register will return 0, subsequent reads 1159 1160 * will return 1. Semaphore is released by writing 1 to the register 1160 1161 */ 1161 - r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 1162 + r32 = readl(ioc->ioc_regs.ioc_sem_reg); 1162 1163 if (r32 == 0) { 1163 1164 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); 1164 1165 return; ··· 1170 1171 void 1171 1172 bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) 1172 1173 { 1173 - bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1); 1174 + writel(1, ioc->ioc_regs.ioc_sem_reg); 1174 1175 } 1175 1176 1176 1177 static void ··· 1179 1180 bfa_sem_timer_stop(ioc); 1180 1181 } 1181 1182 1182 - /** 1183 + /* 1183 1184 * Initialize LPU local memory (aka secondary memory / SRAM) 1184 1185 */ 1185 1186 static void ··· 1189 1190 int i; 1190 1191 #define PSS_LMEM_INIT_TIME 10000 1191 1192 1192 - pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 1193 + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); 1193 1194 pss_ctl &= ~__PSS_LMEM_RESET; 1194 1195 pss_ctl |= __PSS_LMEM_INIT_EN; 1195 1196 ··· 1197 1198 * i2c workaround 12.5khz clock 1198 1199 */ 1199 1200 pss_ctl |= __PSS_I2C_CLK_DIV(3UL); 1200 - bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 1201 + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1201 1202 1202 - /** 1203 + /* 1203 1204 * wait for memory initialization to be complete 1204 1205 */ 1205 1206 i = 0; 1206 1207 do { 1207 - pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 1208 + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); 1208 1209 i++; 1209 1210 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); 1210 1211 1211 - /** 1212 + /* 1212 1213 * If memory initialization is not successful, IOC timeout will catch 1213 1214 * such failures. 
1214 1215 */ ··· 1216 1217 bfa_trc(ioc, pss_ctl); 1217 1218 1218 1219 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); 1219 - bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 1220 + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1220 1221 } 1221 1222 1222 1223 static void ··· 1224 1225 { 1225 1226 u32 pss_ctl; 1226 1227 1227 - /** 1228 + /* 1228 1229 * Take processor out of reset. 1229 1230 */ 1230 - pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 1231 + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); 1231 1232 pss_ctl &= ~__PSS_LPU0_RESET; 1232 1233 1233 - bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 1234 + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1234 1235 } 1235 1236 1236 1237 static void ··· 1238 1239 { 1239 1240 u32 pss_ctl; 1240 1241 1241 - /** 1242 + /* 1242 1243 * Put processors in reset. 1243 1244 */ 1244 - pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 1245 + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); 1245 1246 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); 1246 1247 1247 - bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 1248 + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1248 1249 } 1249 1250 1250 - /** 1251 + /* 1251 1252 * Get driver and firmware versions. 1252 1253 */ 1253 1254 void ··· 1260 1261 1261 1262 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1262 1263 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1263 - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1264 + writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1264 1265 1265 1266 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); 1266 1267 i++) { ··· 1270 1271 } 1271 1272 } 1272 1273 1273 - /** 1274 + /* 1274 1275 * Returns TRUE if same. 1275 1276 */ 1276 1277 bfa_boolean_t ··· 1295 1296 return BFA_TRUE; 1296 1297 } 1297 1298 1298 - /** 1299 + /* 1299 1300 * Return true if current running version is valid. Firmware signature and 1300 1301 * execution context (driver/bios) must match. 1301 1302 */ ··· 1304 1305 { 1305 1306 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; 1306 1307 1307 - /** 1308 + /* 1308 1309 * If bios/efi boot (flash based) -- return true 1309 1310 */ 1310 1311 if (bfa_ioc_is_bios_optrom(ioc)) ··· 1320 1321 return BFA_FALSE; 1321 1322 } 1322 1323 1323 - if (bfa_os_swap32(fwhdr.param) != boot_env) { 1324 + if (swab32(fwhdr.param) != boot_env) { 1324 1325 bfa_trc(ioc, fwhdr.param); 1325 1326 bfa_trc(ioc, boot_env); 1326 1327 return BFA_FALSE; ··· 1329 1330 return bfa_ioc_fwver_cmp(ioc, &fwhdr); 1330 1331 } 1331 1332 1332 - /** 1333 + /* 1333 1334 * Conditionally flush any pending message from firmware at start. 1334 1335 */ 1335 1336 static void ··· 1337 1338 { 1338 1339 u32 r32; 1339 1340 1340 - r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); 1341 + r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); 1341 1342 if (r32) 1342 - bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1); 1343 + writel(1, ioc->ioc_regs.lpu_mbox_cmd); 1343 1344 } 1344 1345 1345 1346 ··· 1351 1352 u32 boot_type; 1352 1353 u32 boot_env; 1353 1354 1354 - ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); 1355 + ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); 1355 1356 1356 1357 if (force) 1357 1358 ioc_fwstate = BFI_IOC_UNINIT; ··· 1361 1362 boot_type = BFI_BOOT_TYPE_NORMAL; 1362 1363 boot_env = BFI_BOOT_LOADER_OS; 1363 1364 1364 - /** 1365 + /* 1365 1366 * Flash based firmware boot BIOS env. 1366 1367 */ 1367 1368 if (bfa_ioc_is_bios_optrom(ioc)) { ··· 1369 1370 boot_env = BFI_BOOT_LOADER_BIOS; 1370 1371 } 1371 1372 1372 - /** 1373 + /* 1373 1374 * Flash based firmware boot UEFI env. 
1374 1375 */ 1375 1376 if (bfa_ioc_is_uefi(ioc)) { ··· 1377 1378 boot_env = BFI_BOOT_LOADER_UEFI; 1378 1379 } 1379 1380 1380 - /** 1381 + /* 1381 1382 * check if firmware is valid 1382 1383 */ 1383 1384 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? ··· 1388 1389 return; 1389 1390 } 1390 1391 1391 - /** 1392 + /* 1392 1393 * If hardware initialization is in progress (initialized by other IOC), 1393 1394 * just wait for an initialization completion interrupt. 1394 1395 */ ··· 1397 1398 return; 1398 1399 } 1399 1400 1400 - /** 1401 + /* 1401 1402 * If IOC function is disabled and firmware version is same, 1402 1403 * just re-enable IOC. 1403 1404 * ··· 1408 1409 if (ioc_fwstate == BFI_IOC_DISABLED || 1409 1410 (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { 1410 1411 1411 - /** 1412 + /* 1412 1413 * When using MSI-X any pending firmware ready event should 1413 1414 * be flushed. Otherwise MSI-X interrupts are not delivered. 1414 1415 */ ··· 1418 1419 return; 1419 1420 } 1420 1421 1421 - /** 1422 + /* 1422 1423 * Initialize the h/w for any other states. 1423 1424 */ 1424 1425 bfa_ioc_boot(ioc, boot_type, boot_env); ··· 1448 1449 * first write msg to mailbox registers 1449 1450 */ 1450 1451 for (i = 0; i < len / sizeof(u32); i++) 1451 - bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 1452 - bfa_os_wtole(msgp[i])); 1452 + writel(cpu_to_le32(msgp[i]), 1453 + ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); 1453 1454 1454 1455 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++) 1455 - bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0); 1456 + writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); 1456 1457 1457 1458 /* 1458 1459 * write 1 to mailbox CMD to trigger LPU event 1459 1460 */ 1460 - bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1); 1461 - (void) bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); 1461 + writel(1, ioc->ioc_regs.hfn_mbox_cmd); 1462 + (void) readl(ioc->ioc_regs.hfn_mbox_cmd); 1462 1463 } 1463 1464 1464 1465 static void ··· 1471 1472 bfa_ioc_portid(ioc)); 1472 1473 enable_req.ioc_class = ioc->ioc_mc; 1473 1474 bfa_os_gettimeofday(&tv); 1474 - enable_req.tv_sec = bfa_os_ntohl(tv.tv_sec); 1475 + enable_req.tv_sec = be32_to_cpu(tv.tv_sec); 1475 1476 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); 1476 1477 } 1477 1478 ··· 1502 1503 struct bfa_ioc_s *ioc = cbarg; 1503 1504 u32 hb_count; 1504 1505 1505 - hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1506 + hb_count = readl(ioc->ioc_regs.heartbeat); 1506 1507 if (ioc->hb_count == hb_count) { 1507 1508 printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count); 1508 1509 bfa_ioc_recover(ioc); ··· 1518 1519 static void 1519 1520 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) 1520 1521 { 1521 - ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1522 + ioc->hb_count = readl(ioc->ioc_regs.heartbeat); 1522 1523 bfa_hb_timer_start(ioc); 1523 1524 } 1524 1525 ··· 1529 1530 } 1530 1531 1531 1532 1532 - /** 1533 + /* 1533 1534 * Initiate a full firmware download. 
1534 1535 */ 1535 1536 static void ··· 1542 1543 u32 chunkno = 0; 1543 1544 u32 i; 1544 1545 1545 - /** 1546 + /* 1546 1547 * Initialize LMEM first before code download 1547 1548 */ 1548 1549 bfa_ioc_lmem_init(ioc); ··· 1553 1554 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1554 1555 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1555 1556 1556 - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1557 + writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1557 1558 1558 1559 for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) { 1559 1560 ··· 1563 1564 BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); 1564 1565 } 1565 1566 1566 - /** 1567 + /* 1567 1568 * write smem 1568 1569 */ 1569 1570 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, ··· 1571 1572 1572 1573 loff += sizeof(u32); 1573 1574 1574 - /** 1575 + /* 1575 1576 * handle page offset wrap around 1576 1577 */ 1577 1578 loff = PSS_SMEM_PGOFF(loff); 1578 1579 if (loff == 0) { 1579 1580 pgnum++; 1580 - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 1581 - pgnum); 1581 + writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1582 1582 } 1583 1583 } 1584 1584 1585 - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 1586 - bfa_ioc_smem_pgnum(ioc, 0)); 1585 + writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn); 1587 1586 1588 1587 /* 1589 1588 * Set boot type and boot param at the end. 1590 1589 */ 1591 1590 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, 1592 - bfa_os_swap32(boot_type)); 1591 + swab32(boot_type)); 1593 1592 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF, 1594 - bfa_os_swap32(boot_env)); 1593 + swab32(boot_env)); 1595 1594 } 1596 1595 1597 1596 static void ··· 1598 1601 bfa_ioc_hwinit(ioc, force); 1599 1602 } 1600 1603 1601 - /** 1604 + /* 1602 1605 * Update BFA configuration from firmware configuration. 1603 1606 */ 1604 1607 static void ··· 1606 1609 { 1607 1610 struct bfi_ioc_attr_s *attr = ioc->attr; 1608 1611 1609 - attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop); 1610 - attr->card_type = bfa_os_ntohl(attr->card_type); 1611 - attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize); 1612 + attr->adapter_prop = be32_to_cpu(attr->adapter_prop); 1613 + attr->card_type = be32_to_cpu(attr->card_type); 1614 + attr->maxfrsize = be16_to_cpu(attr->maxfrsize); 1612 1615 1613 1616 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1614 1617 } 1615 1618 1616 - /** 1619 + /* 1617 1620 * Attach time initialization of mbox logic. 1618 1621 */ 1619 1622 static void ··· 1629 1632 } 1630 1633 } 1631 1634 1632 - /** 1635 + /* 1633 1636 * Mbox poll timer -- restarts any pending mailbox requests. 1634 1637 */ 1635 1638 static void ··· 1639 1642 struct bfa_mbox_cmd_s *cmd; 1640 1643 u32 stat; 1641 1644 1642 - /** 1645 + /* 1643 1646 * If no command pending, do nothing 1644 1647 */ 1645 1648 if (list_empty(&mod->cmd_q)) 1646 1649 return; 1647 1650 1648 - /** 1651 + /* 1649 1652 * If previous command is not yet fetched by firmware, do nothing 1650 1653 */ 1651 - stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); 1654 + stat = readl(ioc->ioc_regs.hfn_mbox_cmd); 1652 1655 if (stat) 1653 1656 return; 1654 1657 1655 - /** 1658 + /* 1656 1659 * Enqueue command to firmware. 1657 1660 */ 1658 1661 bfa_q_deq(&mod->cmd_q, &cmd); 1659 1662 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 1660 1663 } 1661 1664 1662 - /** 1665 + /* 1663 1666 * Cleanup any pending requests. 
1664 1667 */ 1665 1668 static void ··· 1672 1675 bfa_q_deq(&mod->cmd_q, &cmd); 1673 1676 } 1674 1677 1675 - /** 1678 + /* 1676 1679 * Read data from SMEM to host through PCI memmap 1677 1680 * 1678 1681 * @param[in] ioc memory for IOC ··· 1701 1704 return BFA_STATUS_FAILED; 1702 1705 } 1703 1706 1704 - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1707 + writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1705 1708 1706 1709 len = sz/sizeof(u32); 1707 1710 bfa_trc(ioc, len); 1708 1711 for (i = 0; i < len; i++) { 1709 1712 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); 1710 - buf[i] = bfa_os_ntohl(r32); 1713 + buf[i] = be32_to_cpu(r32); 1711 1714 loff += sizeof(u32); 1712 1715 1713 - /** 1716 + /* 1714 1717 * handle page offset wrap around 1715 1718 */ 1716 1719 loff = PSS_SMEM_PGOFF(loff); 1717 1720 if (loff == 0) { 1718 1721 pgnum++; 1719 - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1722 + writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1720 1723 } 1721 1724 } 1722 - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 1723 - bfa_ioc_smem_pgnum(ioc, 0)); 1725 + writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn); 1724 1726 /* 1725 1727 * release semaphore. 1726 1728 */ ··· 1729 1733 return BFA_STATUS_OK; 1730 1734 } 1731 1735 1732 - /** 1736 + /* 1733 1737 * Clear SMEM data from host through PCI memmap 1734 1738 * 1735 1739 * @param[in] ioc memory for IOC ··· 1756 1760 return BFA_STATUS_FAILED; 1757 1761 } 1758 1762 1759 - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1763 + writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1760 1764 1761 1765 len = sz/sizeof(u32); /* len in words */ 1762 1766 bfa_trc(ioc, len); ··· 1764 1768 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0); 1765 1769 loff += sizeof(u32); 1766 1770 1767 - /** 1771 + /* 1768 1772 * handle page offset wrap around 1769 1773 */ 1770 1774 loff = PSS_SMEM_PGOFF(loff); 1771 1775 if (loff == 0) { 1772 1776 pgnum++; 1773 - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1777 + writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1774 1778 } 1775 1779 } 1776 - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 1777 - bfa_ioc_smem_pgnum(ioc, 0)); 1780 + writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn); 1778 1781 1779 1782 /* 1780 1783 * release semaphore. ··· 1783 1788 return BFA_STATUS_OK; 1784 1789 } 1785 1790 1786 - /** 1791 + /* 1787 1792 * hal iocpf to ioc interface 1788 1793 */ 1789 1794 static void ··· 1808 1813 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc) 1809 1814 { 1810 1815 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 1811 - /** 1816 + /* 1812 1817 * Provide enable completion callback. 1813 1818 */ 1814 1819 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); ··· 1819 1824 1820 1825 1821 1826 1822 - /** 1827 + /* 1823 1828 * hal_ioc_public 1824 1829 */ 1825 1830 ··· 1843 1848 return BFA_STATUS_OK; 1844 1849 } 1845 1850 1846 - /** 1851 + /* 1847 1852 * Interface used by diag module to do firmware boot with memory test 1848 1853 * as the entry vector. 1849 1854 */ 1850 1855 void 1851 1856 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) 1852 1857 { 1853 - bfa_os_addr_t rb; 1858 + void __iomem *rb; 1854 1859 1855 1860 bfa_ioc_stats(ioc, ioc_boots); 1856 1861 1857 1862 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) 1858 1863 return; 1859 1864 1860 - /** 1865 + /* 1861 1866 * Initialize IOC state of all functions on a chip reset. 
1862 1867 */ 1863 1868 rb = ioc->pcidev.pci_bar_kva; 1864 1869 if (boot_type == BFI_BOOT_TYPE_MEMTEST) { 1865 - bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST); 1866 - bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST); 1870 + writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG)); 1871 + writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG)); 1867 1872 } else { 1868 - bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING); 1869 - bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING); 1873 + writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG)); 1874 + writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG)); 1870 1875 } 1871 1876 1872 1877 bfa_ioc_msgflush(ioc); 1873 1878 bfa_ioc_download_fw(ioc, boot_type, boot_env); 1874 1879 1875 - /** 1880 + /* 1876 1881 * Enable interrupts just before starting LPU 1877 1882 */ 1878 1883 ioc->cbfn->reset_cbfn(ioc->bfa); 1879 1884 bfa_ioc_lpu_start(ioc); 1880 1885 } 1881 1886 1882 - /** 1887 + /* 1883 1888 * Enable/disable IOC failure auto recovery. 1884 1889 */ 1885 1890 void ··· 1899 1904 bfa_boolean_t 1900 1905 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) 1901 1906 { 1902 - u32 r32 = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); 1907 + u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); 1903 1908 1904 1909 return ((r32 != BFI_IOC_UNINIT) && 1905 1910 (r32 != BFI_IOC_INITING) && ··· 1913 1918 u32 r32; 1914 1919 int i; 1915 1920 1916 - /** 1921 + /* 1917 1922 * read the MBOX msg 1918 1923 */ 1919 1924 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); 1920 1925 i++) { 1921 - r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox + 1926 + r32 = readl(ioc->ioc_regs.lpu_mbox + 1922 1927 i * sizeof(u32)); 1923 - msgp[i] = bfa_os_htonl(r32); 1928 + msgp[i] = cpu_to_be32(r32); 1924 1929 } 1925 1930 1926 - /** 1931 + /* 1927 1932 * turn off mailbox interrupt by clearing mailbox status 1928 1933 */ 1929 - bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1); 1930 - bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); 1934 + writel(1, ioc->ioc_regs.lpu_mbox_cmd); 1935 + readl(ioc->ioc_regs.lpu_mbox_cmd); 1931 1936 } 1932 1937 1933 1938 void ··· 1966 1971 } 1967 1972 } 1968 1973 1969 - /** 1974 + /* 1970 1975 * IOC attach time initialization and setup. 1971 1976 * 1972 1977 * @param[in] ioc memory for IOC ··· 1991 1996 bfa_fsm_send_event(ioc, IOC_E_RESET); 1992 1997 } 1993 1998 1994 - /** 1999 + /* 1995 2000 * Driver detach time IOC cleanup. 1996 2001 */ 1997 2002 void ··· 2000 2005 bfa_fsm_send_event(ioc, IOC_E_DETACH); 2001 2006 } 2002 2007 2003 - /** 2008 + /* 2004 2009 * Setup IOC PCI properties. 2005 2010 * 2006 2011 * @param[in] pcidev PCI device information for this IOC ··· 2014 2019 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); 2015 2020 ioc->cna = ioc->ctdev && !ioc->fcmode; 2016 2021 2017 - /** 2022 + /* 2018 2023 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c 2019 2024 */ 2020 2025 if (ioc->ctdev) ··· 2026 2031 bfa_ioc_reg_init(ioc); 2027 2032 } 2028 2033 2029 - /** 2034 + /* 2030 2035 * Initialize IOC dma memory 2031 2036 * 2032 2037 * @param[in] dm_kva kernel virtual address of IOC dma memory ··· 2035 2040 void 2036 2041 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) 2037 2042 { 2038 - /** 2043 + /* 2039 2044 * dma memory for firmware attribute 2040 2045 */ 2041 2046 ioc->attr_dma.kva = dm_kva; ··· 2043 2048 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; 2044 2049 } 2045 2050 2046 - /** 2051 + /* 2047 2052 * Return size of dma memory required. 
2048 2053 */ 2049 2054 u32 ··· 2068 2073 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 2069 2074 } 2070 2075 2071 - /** 2076 + /* 2072 2077 * Returns memory required for saving firmware trace in case of crash. 2073 2078 * Driver must call this interface to allocate memory required for 2074 2079 * automatic saving of firmware trace. Driver should call ··· 2081 2086 return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; 2082 2087 } 2083 2088 2084 - /** 2089 + /* 2085 2090 * Initialize memory for saving firmware trace. Driver must initialize 2086 2091 * trace memory before call bfa_ioc_enable(). 2087 2092 */ ··· 2104 2109 return PSS_SMEM_PGOFF(fmaddr); 2105 2110 } 2106 2111 2107 - /** 2112 + /* 2108 2113 * Register mailbox message handler functions 2109 2114 * 2110 2115 * @param[in] ioc IOC instance ··· 2120 2125 mod->mbhdlr[mc].cbfn = mcfuncs[mc]; 2121 2126 } 2122 2127 2123 - /** 2128 + /* 2124 2129 * Register mailbox message handler function, to be called by common modules 2125 2130 */ 2126 2131 void ··· 2133 2138 mod->mbhdlr[mc].cbarg = cbarg; 2134 2139 } 2135 2140 2136 - /** 2141 + /* 2137 2142 * Queue a mailbox command request to firmware. Waits if mailbox is busy. 2138 2143 * Responsibility of caller to serialize 2139 2144 * ··· 2146 2151 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 2147 2152 u32 stat; 2148 2153 2149 - /** 2154 + /* 2150 2155 * If a previous command is pending, queue new command 2151 2156 */ 2152 2157 if (!list_empty(&mod->cmd_q)) { ··· 2154 2159 return; 2155 2160 } 2156 2161 2157 - /** 2162 + /* 2158 2163 * If mailbox is busy, queue command for poll timer 2159 2164 */ 2160 - stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); 2165 + stat = readl(ioc->ioc_regs.hfn_mbox_cmd); 2161 2166 if (stat) { 2162 2167 list_add_tail(&cmd->qe, &mod->cmd_q); 2163 2168 return; 2164 2169 } 2165 2170 2166 - /** 2171 + /* 2167 2172 * mailbox is free -- queue command to firmware 2168 2173 */ 2169 2174 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 2170 2175 } 2171 2176 2172 - /** 2177 + /* 2173 2178 * Handle mailbox interrupts 2174 2179 */ 2175 2180 void ··· 2181 2186 2182 2187 bfa_ioc_msgget(ioc, &m); 2183 2188 2184 - /** 2189 + /* 2185 2190 * Treat IOC message class as special. 2186 2191 */ 2187 2192 mc = m.mh.msg_class; ··· 2209 2214 ioc->port_id = bfa_ioc_pcifn(ioc); 2210 2215 } 2211 2216 2212 - /** 2217 + /* 2213 2218 * return true if IOC is disabled 2214 2219 */ 2215 2220 bfa_boolean_t ··· 2219 2224 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 2220 2225 } 2221 2226 2222 - /** 2227 + /* 2223 2228 * return true if IOC firmware is different. 2224 2229 */ 2225 2230 bfa_boolean_t ··· 2238 2243 ((__sm) == BFI_IOC_FAIL) || \ 2239 2244 ((__sm) == BFI_IOC_CFG_DISABLED)) 2240 2245 2241 - /** 2246 + /* 2242 2247 * Check if adapter is disabled -- both IOCs should be in a disabled 2243 2248 * state. 
2244 2249 */ ··· 2246 2251 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) 2247 2252 { 2248 2253 u32 ioc_state; 2249 - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 2254 + void __iomem *rb = ioc->pcidev.pci_bar_kva; 2250 2255 2251 2256 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) 2252 2257 return BFA_FALSE; 2253 2258 2254 - ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG); 2259 + ioc_state = readl(rb + BFA_IOC0_STATE_REG); 2255 2260 if (!bfa_ioc_state_disabled(ioc_state)) 2256 2261 return BFA_FALSE; 2257 2262 2258 2263 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { 2259 - ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG); 2264 + ioc_state = readl(rb + BFA_IOC1_STATE_REG); 2260 2265 if (!bfa_ioc_state_disabled(ioc_state)) 2261 2266 return BFA_FALSE; 2262 2267 } ··· 2264 2269 return BFA_TRUE; 2265 2270 } 2266 2271 2267 - /** 2272 + /* 2268 2273 * Add to IOC heartbeat failure notification queue. To be used by common 2269 2274 * modules such as cee, port, diag. 2270 2275 */ ··· 2288 2293 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); 2289 2294 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); 2290 2295 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); 2291 - bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd, 2296 + memcpy(&ad_attr->vpd, &ioc_attr->vpd, 2292 2297 sizeof(struct bfa_mfg_vpd_s)); 2293 2298 2294 2299 ad_attr->nports = bfa_ioc_get_nports(ioc); ··· 2338 2343 void 2339 2344 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num) 2340 2345 { 2341 - bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); 2342 - bfa_os_memcpy((void *)serial_num, 2346 + memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); 2347 + memcpy((void *)serial_num, 2343 2348 (void *)ioc->attr->brcd_serialnum, 2344 2349 BFA_ADAPTER_SERIAL_NUM_LEN); 2345 2350 } ··· 2347 2352 void 2348 2353 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver) 2349 2354 { 2350 - bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN); 2351 - bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); 2355 + memset((void *)fw_ver, 0, BFA_VERSION_LEN); 2356 + memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); 2352 2357 } 2353 2358 2354 2359 void ··· 2356 2361 { 2357 2362 bfa_assert(chip_rev); 2358 2363 2359 - bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); 2364 + memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); 2360 2365 2361 2366 chip_rev[0] = 'R'; 2362 2367 chip_rev[1] = 'e'; ··· 2369 2374 void 2370 2375 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver) 2371 2376 { 2372 - bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN); 2373 - bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version, 2377 + memset((void *)optrom_ver, 0, BFA_VERSION_LEN); 2378 + memcpy(optrom_ver, ioc->attr->optrom_version, 2374 2379 BFA_VERSION_LEN); 2375 2380 } 2376 2381 2377 2382 void 2378 2383 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) 2379 2384 { 2380 - bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); 2381 - bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); 2385 + memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); 2386 + memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); 2382 2387 } 2383 2388 2384 2389 void ··· 2387 2392 struct bfi_ioc_attr_s *ioc_attr; 2388 2393 2389 2394 bfa_assert(model); 2390 - bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); 2395 + memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); 2391 2396 2392 2397 ioc_attr = ioc->attr; 2393 
2398 2394 - /** 2399 + /* 2395 2400 * model name 2396 2401 */ 2397 - bfa_os_snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", 2402 + snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", 2398 2403 BFA_MFG_NAME, ioc_attr->card_type); 2399 2404 } 2400 2405 ··· 2441 2446 void 2442 2447 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) 2443 2448 { 2444 - bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s)); 2449 + memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s)); 2445 2450 2446 2451 ioc_attr->state = bfa_ioc_get_state(ioc); 2447 2452 ioc_attr->port_id = ioc->port_id; ··· 2455 2460 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); 2456 2461 } 2457 2462 2458 - /** 2463 + /* 2459 2464 * hal_wwn_public 2460 2465 */ 2461 2466 wwn_t ··· 2521 2526 return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id); 2522 2527 } 2523 2528 2524 - /** 2529 + /* 2525 2530 * Retrieve saved firmware trace from a prior IOC failure. 2526 2531 */ 2527 2532 bfa_status_t ··· 2536 2541 if (tlen > ioc->dbg_fwsave_len) 2537 2542 tlen = ioc->dbg_fwsave_len; 2538 2543 2539 - bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen); 2544 + memcpy(trcdata, ioc->dbg_fwsave, tlen); 2540 2545 *trclen = tlen; 2541 2546 return BFA_STATUS_OK; 2542 2547 } 2543 2548 2544 - /** 2549 + /* 2545 2550 * Clear saved firmware trace 2546 2551 */ 2547 2552 void ··· 2550 2555 ioc->dbg_fwsave_once = BFA_TRUE; 2551 2556 } 2552 2557 2553 - /** 2558 + /* 2554 2559 * Retrieve saved firmware trace from a prior IOC failure. 2555 2560 */ 2556 2561 bfa_status_t ··· 2590 2595 2591 2596 bfa_ioc_send_fwsync(ioc); 2592 2597 2593 - /** 2598 + /* 2594 2599 * After sending a fw sync mbox command wait for it to 2595 2600 * take effect. We will not wait for a response because 2596 2601 * 1. fw_sync mbox cmd doesn't have a response. ··· 2605 2610 fwsync_iter--; 2606 2611 } 2607 2612 2608 - /** 2613 + /* 2609 2614 * Dump firmware smem 2610 2615 */ 2611 2616 bfa_status_t ··· 2625 2630 loff = *offset; 2626 2631 dlen = *buflen; 2627 2632 2628 - /** 2633 + /* 2629 2634 * First smem read, sync smem before proceeding 2630 2635 * No need to sync before reading every chunk. 2631 2636 */ ··· 2652 2657 return status; 2653 2658 } 2654 2659 2655 - /** 2660 + /* 2656 2661 * Firmware statistics 2657 2662 */ 2658 2663 bfa_status_t ··· 2697 2702 return status; 2698 2703 } 2699 2704 2700 - /** 2705 + /* 2701 2706 * Save firmware trace if configured. 2702 2707 */ 2703 2708 static void ··· 2711 2716 } 2712 2717 } 2713 2718 2714 - /** 2719 + /* 2715 2720 * Firmware failure detected. Start recovery actions. 2716 2721 */ 2717 2722 static void ··· 2733 2738 return; 2734 2739 } 2735 2740 2736 - /** 2741 + /* 2737 2742 * hal_iocpf_pvt BFA IOC PF private functions 2738 2743 */ 2739 2744 ··· 2790 2795 bfa_ioc_hw_sem_get(ioc); 2791 2796 } 2792 2797 2793 - /** 2798 + /* 2794 2799 * bfa timer function 2795 2800 */ 2796 2801 void ··· 2835 2840 } 2836 2841 } 2837 2842 2838 - /** 2843 + /* 2839 2844 * Should be called with lock protection 2840 2845 */ 2841 2846 void ··· 2853 2858 list_add_tail(&timer->qe, &mod->timer_q); 2854 2859 } 2855 2860 2856 - /** 2861 + /* 2857 2862 * Should be called with lock protection 2858 2863 */ 2859 2864 void
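The bfa_ioc.c hunks above reduce to two mechanical patterns: block comments that are not kernel-doc drop from "/**" to "/*", and the driver-private wrappers (bfa_reg_read, bfa_os_memcpy, bfa_os_memset, bfa_os_snprintf) give way to the standard kernel interfaces (readl, memcpy, memset, snprintf). Below is a minimal sketch of the register-read half of the pattern; struct my_dev and MY_STATUS_OFF are illustrative names, not part of the bfa driver:

#include <linux/types.h>
#include <linux/io.h>		/* readl() and the __iomem annotation */

#define MY_STATUS_OFF	0x10	/* hypothetical register offset */

struct my_dev {
	void __iomem *bar0;	/* ioremap()ed BAR, like pci_bar_kva above */
};

static u32 my_dev_status(struct my_dev *dev)
{
	/* readl() replaces the bfa_reg_read() wrapper one for one */
	return readl(dev->bar0 + MY_STATUS_OFF);
}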
+53 -55
drivers/scsi/bfa/bfa_ioc.h
··· 22 22 #include "bfa_cs.h" 23 23 #include "bfi.h" 24 24 25 - /** 25 + /* 26 26 * BFA timer declarations 27 27 */ 28 28 typedef void (*bfa_timer_cbfn_t)(void *); 29 29 30 - /** 30 + /* 31 31 * BFA timer data structure 32 32 */ 33 33 struct bfa_timer_s { 34 34 struct list_head qe; 35 35 bfa_timer_cbfn_t timercb; 36 36 void *arg; 37 - int timeout; /**< in millisecs. */ 37 + int timeout; /* in millisecs */ 38 38 }; 39 39 40 - /** 40 + /* 41 41 * Timer module structure 42 42 */ 43 43 struct bfa_timer_mod_s { 44 44 struct list_head timer_q; 45 45 }; 46 46 47 - #define BFA_TIMER_FREQ 200 /**< specified in millisecs */ 47 + #define BFA_TIMER_FREQ 200 /* specified in millisecs */ 48 48 49 49 void bfa_timer_beat(struct bfa_timer_mod_s *mod); 50 50 void bfa_timer_init(struct bfa_timer_mod_s *mod); ··· 53 53 unsigned int timeout); 54 54 void bfa_timer_stop(struct bfa_timer_s *timer); 55 55 56 - /** 56 + /* 57 57 * Generic Scatter Gather Element used by driver 58 58 */ 59 59 struct bfa_sge_s { ··· 62 62 }; 63 63 64 64 #define bfa_sge_word_swap(__sge) do { \ 65 - ((u32 *)(__sge))[0] = bfa_os_swap32(((u32 *)(__sge))[0]); \ 66 - ((u32 *)(__sge))[1] = bfa_os_swap32(((u32 *)(__sge))[1]); \ 67 - ((u32 *)(__sge))[2] = bfa_os_swap32(((u32 *)(__sge))[2]); \ 65 + ((u32 *)(__sge))[0] = swab32(((u32 *)(__sge))[0]); \ 66 + ((u32 *)(__sge))[1] = swab32(((u32 *)(__sge))[1]); \ 67 + ((u32 *)(__sge))[2] = swab32(((u32 *)(__sge))[2]); \ 68 68 } while (0) 69 69 70 70 #define bfa_swap_words(_x) ( \ ··· 80 80 #define bfa_sgaddr_le(_x) (_x) 81 81 #endif 82 82 83 - /** 83 + /* 84 84 * PCI device information required by IOC 85 85 */ 86 86 struct bfa_pcidev_s { 87 87 int pci_slot; 88 88 u8 pci_func; 89 - u16 device_id; 90 - bfa_os_addr_t pci_bar_kva; 89 + u16 device_id; 90 + void __iomem *pci_bar_kva; 91 91 }; 92 92 93 - /** 93 + /* 94 94 * Structure used to remember the DMA-able memory block's KVA and Physical 95 95 * Address 96 96 */ ··· 102 102 #define BFA_DMA_ALIGN_SZ 256 103 103 #define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1)) 104 104 105 - /** 105 + /* 106 106 * smem size for Crossbow and Catapult 107 107 */ 108 108 #define BFI_SMEM_CB_SIZE 0x200000U /* ! 
2MB for crossbow */ ··· 125 125 static inline void 126 126 __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa) 127 127 { 128 - dma_addr->a32.addr_lo = (u32) bfa_os_htonl(pa); 129 - dma_addr->a32.addr_hi = (u32) bfa_os_htonl(bfa_os_u32(pa)); 128 + dma_addr->a32.addr_lo = (u32) cpu_to_be32(pa); 129 + dma_addr->a32.addr_hi = (u32) cpu_to_be32(bfa_os_u32(pa)); 130 130 } 131 131 132 132 struct bfa_ioc_regs_s { 133 - bfa_os_addr_t hfn_mbox_cmd; 134 - bfa_os_addr_t hfn_mbox; 135 - bfa_os_addr_t lpu_mbox_cmd; 136 - bfa_os_addr_t lpu_mbox; 137 - bfa_os_addr_t pss_ctl_reg; 138 - bfa_os_addr_t pss_err_status_reg; 139 - bfa_os_addr_t app_pll_fast_ctl_reg; 140 - bfa_os_addr_t app_pll_slow_ctl_reg; 141 - bfa_os_addr_t ioc_sem_reg; 142 - bfa_os_addr_t ioc_usage_sem_reg; 143 - bfa_os_addr_t ioc_init_sem_reg; 144 - bfa_os_addr_t ioc_usage_reg; 145 - bfa_os_addr_t host_page_num_fn; 146 - bfa_os_addr_t heartbeat; 147 - bfa_os_addr_t ioc_fwstate; 148 - bfa_os_addr_t ll_halt; 149 - bfa_os_addr_t err_set; 150 - bfa_os_addr_t shirq_isr_next; 151 - bfa_os_addr_t shirq_msk_next; 152 - bfa_os_addr_t smem_page_start; 133 + void __iomem *hfn_mbox_cmd; 134 + void __iomem *hfn_mbox; 135 + void __iomem *lpu_mbox_cmd; 136 + void __iomem *lpu_mbox; 137 + void __iomem *pss_ctl_reg; 138 + void __iomem *pss_err_status_reg; 139 + void __iomem *app_pll_fast_ctl_reg; 140 + void __iomem *app_pll_slow_ctl_reg; 141 + void __iomem *ioc_sem_reg; 142 + void __iomem *ioc_usage_sem_reg; 143 + void __iomem *ioc_init_sem_reg; 144 + void __iomem *ioc_usage_reg; 145 + void __iomem *host_page_num_fn; 146 + void __iomem *heartbeat; 147 + void __iomem *ioc_fwstate; 148 + void __iomem *ll_halt; 149 + void __iomem *err_set; 150 + void __iomem *shirq_isr_next; 151 + void __iomem *shirq_msk_next; 152 + void __iomem *smem_page_start; 153 153 u32 smem_pg0; 154 154 }; 155 155 156 - #define bfa_reg_read(_raddr) bfa_os_reg_read(_raddr) 157 - #define bfa_reg_write(_raddr, _val) bfa_os_reg_write(_raddr, _val) 158 - #define bfa_mem_read(_raddr, _off) bfa_os_mem_read(_raddr, _off) 156 + #define bfa_mem_read(_raddr, _off) swab32(readl(((_raddr) + (_off)))) 159 157 #define bfa_mem_write(_raddr, _off, _val) \ 160 - bfa_os_mem_write(_raddr, _off, _val) 161 - /** 158 + writel(swab32((_val)), ((_raddr) + (_off))) 159 + /* 162 160 * IOC Mailbox structures 163 161 */ 164 162 struct bfa_mbox_cmd_s { ··· 164 166 u32 msg[BFI_IOC_MSGSZ]; 165 167 }; 166 168 167 - /** 169 + /* 168 170 * IOC mailbox module 169 171 */ 170 172 typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m); ··· 177 179 } mbhdlr[BFI_MC_MAX]; 178 180 }; 179 181 180 - /** 182 + /* 181 183 * IOC callback function interfaces 182 184 */ 183 185 typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status); ··· 191 193 bfa_ioc_reset_cbfn_t reset_cbfn; 192 194 }; 193 195 194 - /** 196 + /* 195 197 * Heartbeat failure notification queue element. 
196 198 */ 197 199 struct bfa_ioc_hbfail_notify_s { ··· 200 202 void *cbarg; 201 203 }; 202 204 203 - /** 205 + /* 204 206 * Initialize a heartbeat failure notification structure 205 207 */ 206 208 #define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \ ··· 247 249 }; 248 250 249 251 struct bfa_ioc_hwif_s { 250 - bfa_status_t (*ioc_pll_init) (bfa_os_addr_t rb, bfa_boolean_t fcmode); 252 + bfa_status_t (*ioc_pll_init) (void __iomem *rb, bfa_boolean_t fcmode); 251 253 bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc); 252 254 void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc); 253 255 void (*ioc_reg_init) (struct bfa_ioc_s *ioc); ··· 265 267 #define bfa_ioc_fetch_stats(__ioc, __stats) \ 266 268 (((__stats)->drv_stats) = (__ioc)->stats) 267 269 #define bfa_ioc_clr_stats(__ioc) \ 268 - bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats)) 270 + memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats)) 269 271 #define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize) 270 272 #define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) 271 273 #define bfa_ioc_speed_sup(__ioc) \ ··· 285 287 #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) 286 288 #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) 287 289 288 - /** 290 + /* 289 291 * IOC mailbox interface 290 292 */ 291 293 void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd); ··· 297 299 void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, 298 300 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); 299 301 300 - /** 302 + /* 301 303 * IOC interfaces 302 304 */ 303 305 ··· 306 308 (__ioc)->fcmode)) 307 309 308 310 bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc); 309 - bfa_status_t bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode); 310 - bfa_boolean_t bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb); 311 - bfa_status_t bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode); 311 + bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode); 312 + bfa_boolean_t bfa_ioc_ct_pll_init_complete(void __iomem *rb); 313 + bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode); 312 314 313 315 #define bfa_ioc_isr_mode_set(__ioc, __msix) \ 314 316 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)) ··· 368 370 bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc); 369 371 void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, 370 372 struct bfa_ioc_hbfail_notify_s *notify); 371 - bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg); 372 - void bfa_ioc_sem_release(bfa_os_addr_t sem_reg); 373 + bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg); 374 + void bfa_ioc_sem_release(void __iomem *sem_reg); 373 375 void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc); 374 376 void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, 375 377 struct bfi_ioc_image_hdr_s *fwhdr); ··· 439 441 } 440 442 } 441 443 442 - /** 444 + /* 443 445 * CNA TRCMOD declaration 444 446 */ 445 447 /*
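The header side of the same cleanup: the bfa_os_addr_t typedef becomes the standard "void __iomem *" annotation (so sparse can check MMIO accesses), bfa_os_swap32() becomes swab32(), and bfa_os_htonl() becomes cpu_to_be32(). The two swap helpers are not interchangeable, which the hunks above rely on; a short sketch of the distinction, assuming a little-endian host and illustrative values:

#include <linux/types.h>
#include <linux/swab.h>		/* swab32(): unconditional byte swap */
#include <asm/byteorder.h>	/* cpu_to_be32(): swap only on LE hosts */

static void byteorder_demo(void)
{
	u32 raw = 0x12345678;
	u32 swapped = swab32(raw);	/* always 0x78563412 */
	__be32 wire = cpu_to_be32(raw);	/* 0x78563412 on LE, unchanged on BE */

	(void)swapped;			/* silence unused-variable warnings */
	(void)wire;
}

This is why bfa_sge_word_swap() uses swab32() (a fixed layout conversion) while __bfa_dma_be_addr_set() uses cpu_to_be32() (the firmware expects big-endian).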
+42 -48
drivers/scsi/bfa/bfa_ioc_cb.c
··· 34 34 35 35 struct bfa_ioc_hwif_s hwif_cb; 36 36 37 - /** 37 + /* 38 38 * Called from bfa_ioc_attach() to map asic specific calls. 39 39 */ 40 40 void ··· 52 52 ioc->ioc_hwif = &hwif_cb; 53 53 } 54 54 55 - /** 55 + /* 56 56 * Return true if firmware of current driver matches the running firmware. 57 57 */ 58 58 static bfa_boolean_t ··· 66 66 { 67 67 } 68 68 69 - /** 69 + /* 70 70 * Notify other functions on HB failure. 71 71 */ 72 72 static void 73 73 bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc) 74 74 { 75 - bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET); 76 - bfa_reg_read(ioc->ioc_regs.err_set); 75 + writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); 76 + readl(ioc->ioc_regs.err_set); 77 77 } 78 78 79 - /** 79 + /* 80 80 * Host to LPU mailbox message addresses 81 81 */ 82 82 static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { ··· 84 84 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 } 85 85 }; 86 86 87 - /** 87 + /* 88 88 * Host <-> LPU mailbox command/status registers 89 89 */ 90 90 static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { ··· 96 96 static void 97 97 bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) 98 98 { 99 - bfa_os_addr_t rb; 99 + void __iomem *rb; 100 100 int pcifn = bfa_ioc_pcifn(ioc); 101 101 102 102 rb = bfa_ioc_bar0(ioc); ··· 113 113 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 114 114 } 115 115 116 - /** 116 + /* 117 117 * Host <-> LPU mailbox command/status registers 118 118 */ 119 119 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn; ··· 133 133 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); 134 134 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 135 135 136 - /** 136 + /* 137 137 * sram memory access 138 138 */ 139 139 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); ··· 145 145 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 146 146 } 147 147 148 - /** 148 + /* 149 149 * Initialize IOC to port mapping. 150 150 */ 151 151 152 152 static void 153 153 bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) 154 154 { 155 - /** 155 + /* 156 156 * For crossbow, port id is same as pci function. 157 157 */ 158 158 ioc->port_id = bfa_ioc_pcifn(ioc); ··· 160 160 bfa_trc(ioc, ioc->port_id); 161 161 } 162 162 163 - /** 163 + /* 164 164 * Set interrupt mode for a function: INTX or MSIX 165 165 */ 166 166 static void ··· 168 168 { 169 169 } 170 170 171 - /** 171 + /* 172 172 * Cleanup hw semaphore and usecnt registers 173 173 */ 174 174 static void ··· 180 180 * before we clear it. If it is not locked, writing 1 181 181 * will lock it instead of clearing it. 
182 182 */ 183 - bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 183 + readl(ioc->ioc_regs.ioc_sem_reg); 184 184 bfa_ioc_hw_sem_release(ioc); 185 185 } 186 186 187 187 188 188 189 189 bfa_status_t 190 - bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode) 190 + bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode) 191 191 { 192 192 u32 pll_sclk, pll_fclk; 193 193 ··· 199 199 __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) | 200 200 __APP_PLL_400_JITLMT0_1(3U) | 201 201 __APP_PLL_400_CNTLMT0_1(3U); 202 - bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); 203 - bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); 204 - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 205 - bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 206 - bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 207 - bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 208 - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 209 - bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 210 - bfa_reg_write(rb + APP_PLL_212_CTL_REG, 211 - __APP_PLL_212_LOGIC_SOFT_RESET); 212 - bfa_reg_write(rb + APP_PLL_212_CTL_REG, 213 - __APP_PLL_212_BYPASS | 214 - __APP_PLL_212_LOGIC_SOFT_RESET); 215 - bfa_reg_write(rb + APP_PLL_400_CTL_REG, 216 - __APP_PLL_400_LOGIC_SOFT_RESET); 217 - bfa_reg_write(rb + APP_PLL_400_CTL_REG, 218 - __APP_PLL_400_BYPASS | 219 - __APP_PLL_400_LOGIC_SOFT_RESET); 220 - bfa_os_udelay(2); 221 - bfa_reg_write(rb + APP_PLL_212_CTL_REG, 222 - __APP_PLL_212_LOGIC_SOFT_RESET); 223 - bfa_reg_write(rb + APP_PLL_400_CTL_REG, 224 - __APP_PLL_400_LOGIC_SOFT_RESET); 225 - bfa_reg_write(rb + APP_PLL_212_CTL_REG, 226 - pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET); 227 - bfa_reg_write(rb + APP_PLL_400_CTL_REG, 228 - pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET); 229 - bfa_os_udelay(2000); 230 - bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 231 - bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 232 - bfa_reg_write((rb + APP_PLL_212_CTL_REG), pll_sclk); 233 - bfa_reg_write((rb + APP_PLL_400_CTL_REG), pll_fclk); 202 + writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); 203 + writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); 204 + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); 205 + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); 206 + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); 207 + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); 208 + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); 209 + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); 210 + writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG); 211 + writel(__APP_PLL_212_BYPASS | __APP_PLL_212_LOGIC_SOFT_RESET, 212 + rb + APP_PLL_212_CTL_REG); 213 + writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG); 214 + writel(__APP_PLL_400_BYPASS | __APP_PLL_400_LOGIC_SOFT_RESET, 215 + rb + APP_PLL_400_CTL_REG); 216 + udelay(2); 217 + writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG); 218 + writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG); 219 + writel(pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET, 220 + rb + APP_PLL_212_CTL_REG); 221 + writel(pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET, 222 + rb + APP_PLL_400_CTL_REG); 223 + udelay(2000); 224 + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); 225 + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); 226 + writel(pll_sclk, (rb + APP_PLL_212_CTL_REG)); 227 + writel(pll_fclk, (rb + APP_PLL_400_CTL_REG)); 234 228 235 229 return BFA_STATUS_OK; 236 230 }
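The one hazard in this file's conversion is argument order: bfa_reg_write() took (address, value) while writel() takes (value, address), so every call site is swapped as well as renamed -- visible throughout bfa_ioc_cb_pll_init() above. A reduced sketch of the heartbeat-failure idiom, with a hypothetical function name and the register bit taken from the driver's own headers:

#include <linux/io.h>

static void notify_hbfail(void __iomem *err_set_reg)
{
	/* writel(value, address) -- reversed from bfa_reg_write(addr, val) */
	writel(__PSS_ERR_STATUS_SET, err_set_reg);

	/* read the same register back to flush the posted PCI write */
	readl(err_set_reg);
}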
+81 -86
drivers/scsi/bfa/bfa_ioc_ct.c
··· 34 34 35 35 struct bfa_ioc_hwif_s hwif_ct; 36 36 37 - /** 37 + /* 38 38 * Called from bfa_ioc_attach() to map asic specific calls. 39 39 */ 40 40 void ··· 52 52 ioc->ioc_hwif = &hwif_ct; 53 53 } 54 54 55 - /** 55 + /* 56 56 * Return true if firmware of current driver matches the running firmware. 57 57 */ 58 58 static bfa_boolean_t ··· 62 62 u32 usecnt; 63 63 struct bfi_ioc_image_hdr_s fwhdr; 64 64 65 - /** 65 + /* 66 66 * Firmware match check is relevant only for CNA. 67 67 */ 68 68 if (!ioc->cna) 69 69 return BFA_TRUE; 70 70 71 - /** 71 + /* 72 72 * If bios boot (flash based) -- do not increment usage count 73 73 */ 74 74 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < ··· 76 76 return BFA_TRUE; 77 77 78 78 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 79 - usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); 79 + usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 80 80 81 - /** 81 + /* 82 82 * If usage count is 0, always return TRUE. 83 83 */ 84 84 if (usecnt == 0) { 85 - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1); 85 + writel(1, ioc->ioc_regs.ioc_usage_reg); 86 86 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 87 87 bfa_trc(ioc, usecnt); 88 88 return BFA_TRUE; 89 89 } 90 90 91 - ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); 91 + ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); 92 92 bfa_trc(ioc, ioc_fwstate); 93 93 94 - /** 94 + /* 95 95 * Use count cannot be non-zero and chip in uninitialized state. 96 96 */ 97 97 bfa_assert(ioc_fwstate != BFI_IOC_UNINIT); 98 98 99 - /** 99 + /* 100 100 * Check if another driver with a different firmware is active 101 101 */ 102 102 bfa_ioc_fwver_get(ioc, &fwhdr); ··· 106 106 return BFA_FALSE; 107 107 } 108 108 109 - /** 109 + /* 110 110 * Same firmware version. Increment the reference count. 111 111 */ 112 112 usecnt++; 113 - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); 113 + writel(usecnt, ioc->ioc_regs.ioc_usage_reg); 114 114 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 115 115 bfa_trc(ioc, usecnt); 116 116 return BFA_TRUE; ··· 121 121 { 122 122 u32 usecnt; 123 123 124 - /** 124 + /* 125 125 * Firmware lock is relevant only for CNA. 126 126 */ 127 127 if (!ioc->cna) 128 128 return; 129 129 130 - /** 130 + /* 131 131 * If bios boot (flash based) -- do not decrement usage count 132 132 */ 133 133 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < 134 134 BFA_IOC_FWIMG_MINSZ) 135 135 return; 136 136 137 - /** 137 + /* 138 138 * decrement usage count 139 139 */ 140 140 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 141 - usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); 141 + usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 142 142 bfa_assert(usecnt > 0); 143 143 144 144 usecnt--; 145 - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); 145 + writel(usecnt, ioc->ioc_regs.ioc_usage_reg); 146 146 bfa_trc(ioc, usecnt); 147 147 148 148 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 149 149 } 150 150 151 - /** 151 + /* 152 152 * Notify other functions on HB failure. 
153 153 */ 154 154 static void 155 155 bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc) 156 156 { 157 157 if (ioc->cna) { 158 - bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P); 158 + writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); 159 159 /* Wait for halt to take effect */ 160 - bfa_reg_read(ioc->ioc_regs.ll_halt); 160 + readl(ioc->ioc_regs.ll_halt); 161 161 } else { 162 - bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET); 163 - bfa_reg_read(ioc->ioc_regs.err_set); 162 + writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); 163 + readl(ioc->ioc_regs.err_set); 164 164 } 165 165 } 166 166 167 - /** 167 + /* 168 168 * Host to LPU mailbox message addresses 169 169 */ 170 170 static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { ··· 174 174 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } 175 175 }; 176 176 177 - /** 177 + /* 178 178 * Host <-> LPU mailbox command/status registers - port 0 179 179 */ 180 180 static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = { ··· 184 184 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT } 185 185 }; 186 186 187 - /** 187 + /* 188 188 * Host <-> LPU mailbox command/status registers - port 1 189 189 */ 190 190 static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = { ··· 197 197 static void 198 198 bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) 199 199 { 200 - bfa_os_addr_t rb; 200 + void __iomem *rb; 201 201 int pcifn = bfa_ioc_pcifn(ioc); 202 202 203 203 rb = bfa_ioc_bar0(ioc); ··· 236 236 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 237 237 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); 238 238 239 - /** 239 + /* 240 240 * sram memory access 241 241 */ 242 242 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); ··· 248 248 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 249 249 } 250 250 251 - /** 251 + /* 252 252 * Initialize IOC to port mapping. 
253 253 */ 254 254 ··· 256 256 static void 257 257 bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc) 258 258 { 259 - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 259 + void __iomem *rb = ioc->pcidev.pci_bar_kva; 260 260 u32 r32; 261 261 262 - /** 262 + /* 263 263 * For catapult, base port id on personality register and IOC type 264 264 */ 265 - r32 = bfa_reg_read(rb + FNC_PERS_REG); 265 + r32 = readl(rb + FNC_PERS_REG); 266 266 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); 267 267 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; 268 268 ··· 270 270 bfa_trc(ioc, ioc->port_id); 271 271 } 272 272 273 - /** 273 + /* 274 274 * Set interrupt mode for a function: INTX or MSIX 275 275 */ 276 276 static void 277 277 bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) 278 278 { 279 - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 279 + void __iomem *rb = ioc->pcidev.pci_bar_kva; 280 280 u32 r32, mode; 281 281 282 - r32 = bfa_reg_read(rb + FNC_PERS_REG); 282 + r32 = readl(rb + FNC_PERS_REG); 283 283 bfa_trc(ioc, r32); 284 284 285 285 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & 286 286 __F0_INTX_STATUS; 287 287 288 - /** 288 + /* 289 289 * If already in desired mode, do not change anything 290 290 */ 291 291 if (!msix && mode) ··· 300 300 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); 301 301 bfa_trc(ioc, r32); 302 302 303 - bfa_reg_write(rb + FNC_PERS_REG, r32); 303 + writel(r32, rb + FNC_PERS_REG); 304 304 } 305 305 306 - /** 306 + /* 307 307 * Cleanup hw semaphore and usecnt registers 308 308 */ 309 309 static void ··· 312 312 313 313 if (ioc->cna) { 314 314 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 315 - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0); 315 + writel(0, ioc->ioc_regs.ioc_usage_reg); 316 316 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 317 317 } 318 318 ··· 321 321 * before we clear it. If it is not locked, writing 1 322 322 * will lock it instead of clearing it. 
323 323 */ 324 - bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 324 + readl(ioc->ioc_regs.ioc_sem_reg); 325 325 bfa_ioc_hw_sem_release(ioc); 326 326 } 327 327 ··· 331 331 * Check the firmware state to know if pll_init has been completed already 332 332 */ 333 333 bfa_boolean_t 334 - bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb) 334 + bfa_ioc_ct_pll_init_complete(void __iomem *rb) 335 335 { 336 - if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) || 337 - (bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP)) 336 + if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) || 337 + (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP)) 338 338 return BFA_TRUE; 339 339 340 340 return BFA_FALSE; 341 341 } 342 342 343 343 bfa_status_t 344 - bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode) 344 + bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode) 345 345 { 346 346 u32 pll_sclk, pll_fclk, r32; 347 347 ··· 354 354 __APP_PLL_425_JITLMT0_1(3U) | 355 355 __APP_PLL_425_CNTLMT0_1(1U); 356 356 if (fcmode) { 357 - bfa_reg_write((rb + OP_MODE), 0); 358 - bfa_reg_write((rb + ETH_MAC_SER_REG), 359 - __APP_EMS_CMLCKSEL | 360 - __APP_EMS_REFCKBUFEN2 | 361 - __APP_EMS_CHANNEL_SEL); 357 + writel(0, (rb + OP_MODE)); 358 + writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 | 359 + __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG)); 362 360 } else { 363 - bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE); 364 - bfa_reg_write((rb + ETH_MAC_SER_REG), 365 - __APP_EMS_REFCKBUFEN1); 361 + writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE)); 362 + writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG)); 366 363 } 367 - bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); 368 - bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); 369 - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 370 - bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 371 - bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 372 - bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 373 - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 374 - bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 375 - bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk | 376 - __APP_PLL_312_LOGIC_SOFT_RESET); 377 - bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk | 378 - __APP_PLL_425_LOGIC_SOFT_RESET); 379 - bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk | 380 - __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE); 381 - bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk | 382 - __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE); 383 - bfa_reg_read(rb + HOSTFN0_INT_MSK); 384 - bfa_os_udelay(2000); 385 - bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 386 - bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 387 - bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk | 388 - __APP_PLL_312_ENABLE); 389 - bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk | 390 - __APP_PLL_425_ENABLE); 364 + writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); 365 + writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); 366 + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); 367 + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); 368 + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); 369 + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); 370 + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); 371 + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); 372 + writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET, 373 + rb + APP_PLL_312_CTL_REG); 374 + writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET, 375 + rb + APP_PLL_425_CTL_REG); 376 + writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | 
__APP_PLL_312_ENABLE, 377 + rb + APP_PLL_312_CTL_REG); 378 + writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE, 379 + rb + APP_PLL_425_CTL_REG); 380 + readl(rb + HOSTFN0_INT_MSK); 381 + udelay(2000); 382 + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); 383 + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); 384 + writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG); 385 + writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG); 391 386 if (!fcmode) { 392 - bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P); 393 - bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P); 387 + writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0)); 388 + writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1)); 394 389 } 395 - r32 = bfa_reg_read((rb + PSS_CTL_REG)); 390 + r32 = readl((rb + PSS_CTL_REG)); 396 391 r32 &= ~__PSS_LMEM_RESET; 397 - bfa_reg_write((rb + PSS_CTL_REG), r32); 398 - bfa_os_udelay(1000); 392 + writel(r32, (rb + PSS_CTL_REG)); 393 + udelay(1000); 399 394 if (!fcmode) { 400 - bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0); 401 - bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0); 395 + writel(0, (rb + PMM_1T_RESET_REG_P0)); 396 + writel(0, (rb + PMM_1T_RESET_REG_P1)); 402 397 } 403 398 404 - bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START); 405 - bfa_os_udelay(1000); 406 - r32 = bfa_reg_read((rb + MBIST_STAT_REG)); 407 - bfa_reg_write((rb + MBIST_CTL_REG), 0); 399 + writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG)); 400 + udelay(1000); 401 + r32 = readl((rb + MBIST_STAT_REG)); 402 + writel(0, (rb + MBIST_CTL_REG)); 408 403 return BFA_STATUS_OK; 409 404 }
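bfa_os_udelay() likewise maps one for one onto the kernel's udelay(). One hedged observation: udelay() is a busy-wait, so while the udelay(2) calls are idiomatic, udelay(2000) spins the CPU for 2 ms and is conventionally spelled mdelay(2); the diff keeps the literal swap. A sketch of the settle pattern, with hypothetical names:

#include <linux/io.h>
#include <linux/delay.h>	/* udelay(), mdelay() */

static void pll_settle(void __iomem *ctl_reg, u32 enable_bits)
{
	writel(enable_bits, ctl_reg);	/* kick off the PLL */
	udelay(2);			/* microsecond-scale busy-wait */
	mdelay(2);			/* clearer spelling of udelay(2000) */
}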
+3 -3
drivers/scsi/bfa/bfa_modules.h
··· 15 15 * General Public License for more details. 16 16 */ 17 17 18 - /** 18 + /* 19 19 * bfa_modules.h BFA modules 20 20 */ 21 21 ··· 52 52 }; 53 53 54 54 55 - /** 55 + /* 56 56 * Macro to define a new BFA module 57 57 */ 58 58 #define BFA_MODULE(__mod) \ ··· 80 80 81 81 #define BFA_CACHELINE_SZ (256) 82 82 83 - /** 83 + /* 84 84 * Structure used to interact between different BFA sub modules 85 85 * 86 86 * Each sub module needs to implement only the entry points relevant to it (and
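This header's only change is the comment demotion. The "/**" opener is reserved for kernel-doc, which scripts/kernel-doc parses into generated documentation and warns about when the body does not follow the kernel-doc grammar -- hence the wholesale switch to plain "/*" for ordinary comments across these files. For contrast, a comment that would legitimately keep the "/**" opener looks like this (my_func is illustrative):

#include <linux/errno.h>

/**
 * my_func() - one-line summary picked up by scripts/kernel-doc
 * @arg: input value; zero is rejected
 *
 * Return: 0 on success, -EINVAL if @arg is zero.
 */
static int my_func(int arg)
{
	return arg ? 0 : -EINVAL;
}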
+9 -73
drivers/scsi/bfa/bfa_os_inc.h
··· 15 15 * General Public License for more details. 16 16 */ 17 17 18 - /** 19 - * Contains declarations all OS Specific files needed for BFA layer 20 - */ 21 - 22 18 #ifndef __BFA_OS_INC_H__ 23 19 #define __BFA_OS_INC_H__ 24 20 ··· 40 44 #define __BIGENDIAN 41 45 #endif 42 46 43 - static inline u64 bfa_os_get_clock(void) 44 - { 45 - return jiffies; 46 - } 47 - 48 47 static inline u64 bfa_os_get_log_time(void) 49 48 { 50 49 u64 system_time = 0; ··· 54 63 #define bfa_io_lat_clock_res_div HZ 55 64 #define bfa_io_lat_clock_res_mul 1000 56 65 57 - #define BFA_ASSERT(p) do { \ 58 - if (!(p)) { \ 59 - printk(KERN_ERR "assert(%s) failed at %s:%d\n", \ 60 - #p, __FILE__, __LINE__); \ 61 - } \ 62 - } while (0) 63 - 64 66 #define BFA_LOG(level, bfad, mask, fmt, arg...) \ 65 67 do { \ 66 68 if (((mask) == 4) || (level[1] <= '4')) \ ··· 64 80 ((((_x) & 0xff) << 16) | \ 65 81 ((_x) & 0x00ff00) | \ 66 82 (((_x) & 0xff0000) >> 16)) 67 - 68 - #define bfa_swap_8b(_x) \ 69 - ((((_x) & 0xff00000000000000ull) >> 56) \ 70 - | (((_x) & 0x00ff000000000000ull) >> 40) \ 71 - | (((_x) & 0x0000ff0000000000ull) >> 24) \ 72 - | (((_x) & 0x000000ff00000000ull) >> 8) \ 73 - | (((_x) & 0x00000000ff000000ull) << 8) \ 74 - | (((_x) & 0x0000000000ff0000ull) << 24) \ 75 - | (((_x) & 0x000000000000ff00ull) << 40) \ 76 - | (((_x) & 0x00000000000000ffull) << 56)) 77 - 78 - #define bfa_os_swap32(_x) \ 79 - ((((_x) & 0xff) << 24) | \ 80 - (((_x) & 0x0000ff00) << 8) | \ 81 - (((_x) & 0x00ff0000) >> 8) | \ 82 - (((_x) & 0xff000000) >> 24)) 83 83 84 84 #define bfa_os_swap_sgaddr(_x) ((u64)( \ 85 85 (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \ ··· 76 108 (((u64)(_x) & (u64)0xff00000000000000ull) >> 32))) 77 109 78 110 #ifndef __BIGENDIAN 79 - #define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \ 80 - (((_x) & 0x00ff) << 8))) 81 - #define bfa_os_htonl(_x) bfa_os_swap32(_x) 82 - #define bfa_os_htonll(_x) bfa_swap_8b(_x) 83 - #define bfa_os_hton3b(_x) bfa_swap_3b(_x) 84 - #define bfa_os_wtole(_x) (_x) 111 + #define bfa_os_hton3b(_x) bfa_swap_3b(_x) 85 112 #define bfa_os_sgaddr(_x) (_x) 86 - 87 113 #else 88 - 89 - #define bfa_os_htons(_x) (_x) 90 - #define bfa_os_htonl(_x) (_x) 91 114 #define bfa_os_hton3b(_x) (_x) 92 - #define bfa_os_htonll(_x) (_x) 93 - #define bfa_os_wtole(_x) bfa_os_swap32(_x) 94 115 #define bfa_os_sgaddr(_x) bfa_os_swap_sgaddr(_x) 95 - 96 116 #endif 97 117 98 - #define bfa_os_ntohs(_x) bfa_os_htons(_x) 99 - #define bfa_os_ntohl(_x) bfa_os_htonl(_x) 100 - #define bfa_os_ntohll(_x) bfa_os_htonll(_x) 101 118 #define bfa_os_ntoh3b(_x) bfa_os_hton3b(_x) 102 - 103 119 #define bfa_os_u32(__pa64) ((__pa64) >> 32) 104 120 105 - #define bfa_os_memset memset 106 - #define bfa_os_memcpy memcpy 107 - #define bfa_os_udelay udelay 108 - #define bfa_os_vsprintf vsprintf 109 - #define bfa_os_snprintf snprintf 110 - 111 - #define bfa_os_assign(__t, __s) __t = __s 112 - #define bfa_os_addr_t void __iomem * 113 - 114 - #define bfa_os_reg_read(_raddr) readl(_raddr) 115 - #define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr)) 116 - #define bfa_os_mem_read(_raddr, _off) \ 117 - bfa_os_swap32(readl(((_raddr) + (_off)))) 118 - #define bfa_os_mem_write(_raddr, _off, _val) \ 119 - writel(bfa_os_swap32((_val)), ((_raddr) + (_off))) 120 - 121 - #define BFA_TRC_TS(_trcm) \ 122 - ({ \ 123 - struct timeval tv; \ 124 - \ 125 - do_gettimeofday(&tv); \ 126 - (tv.tv_sec*1000000+tv.tv_usec); \ 127 - }) 121 + #define BFA_TRC_TS(_trcm) \ 122 + ({ \ 123 + struct timeval tv; \ 124 + \ 125 + do_gettimeofday(&tv); \ 126 + 
(tv.tv_sec*1000000+tv.tv_usec); \ 127 + }) 128 128 129 129 #define boolean_t int 130 130 131 - /** 131 + /* 132 132 * For current time stamp, OS API will fill-in 133 133 */ 134 134 struct bfa_timeval_s {
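The deletions above retire most of the OS-abstraction layer outright: only the 3-byte swap and the scatter-gather address swap, which have no standard equivalents, survive. The removed macros map onto kernel interfaces as below -- a reference inferred from the replacements used elsewhere in this series (e.g. cpu_to_be16()/be32_to_cpu() in the bfa_port.c and bfa_svc.c hunks), not spelled out in the diff itself:

/*
 * bfa_os_htons(x)    ->  cpu_to_be16(x)
 * bfa_os_htonl(x)    ->  cpu_to_be32(x)
 * bfa_os_htonll(x)   ->  cpu_to_be64(x)
 * bfa_os_ntohs(x)    ->  be16_to_cpu(x)
 * bfa_os_wtole(x)    ->  cpu_to_le32(x)
 * bfa_os_swap32(x)   ->  swab32(x)
 * bfa_os_get_clock() ->  jiffies
 */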
+20 -20
drivers/scsi/bfa/bfa_port.c
··· 37 37 t0 = dip[i]; 38 38 t1 = dip[i + 1]; 39 39 #ifdef __BIGENDIAN 40 - dip[i] = bfa_os_ntohl(t0); 41 - dip[i + 1] = bfa_os_ntohl(t1); 40 + dip[i] = be32_to_cpu(t0); 41 + dip[i + 1] = be32_to_cpu(t1); 42 42 #else 43 - dip[i] = bfa_os_ntohl(t1); 44 - dip[i + 1] = bfa_os_ntohl(t0); 43 + dip[i] = be32_to_cpu(t1); 44 + dip[i + 1] = be32_to_cpu(t0); 45 45 #endif 46 46 } 47 47 } 48 48 49 - /** 49 + /* 50 50 * bfa_port_enable_isr() 51 51 * 52 52 * ··· 63 63 port->endis_cbfn(port->endis_cbarg, status); 64 64 } 65 65 66 - /** 66 + /* 67 67 * bfa_port_disable_isr() 68 68 * 69 69 * ··· 80 80 port->endis_cbfn(port->endis_cbarg, status); 81 81 } 82 82 83 - /** 83 + /* 84 84 * bfa_port_get_stats_isr() 85 85 * 86 86 * ··· 112 112 } 113 113 } 114 114 115 - /** 115 + /* 116 116 * bfa_port_clear_stats_isr() 117 117 * 118 118 * ··· 129 129 port->stats_status = status; 130 130 port->stats_busy = BFA_FALSE; 131 131 132 - /** 132 + /* 133 133 * re-initialize time stamp for stats reset 134 134 */ 135 135 bfa_os_gettimeofday(&tv); ··· 141 141 } 142 142 } 143 143 144 - /** 144 + /* 145 145 * bfa_port_isr() 146 146 * 147 147 * ··· 189 189 } 190 190 } 191 191 192 - /** 192 + /* 193 193 * bfa_port_meminfo() 194 194 * 195 195 * ··· 203 203 return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ); 204 204 } 205 205 206 - /** 206 + /* 207 207 * bfa_port_mem_claim() 208 208 * 209 209 * ··· 220 220 port->stats_dma.pa = dma_pa; 221 221 } 222 222 223 - /** 223 + /* 224 224 * bfa_port_enable() 225 225 * 226 226 * Send the Port enable request to the f/w ··· 264 264 return BFA_STATUS_OK; 265 265 } 266 266 267 - /** 267 + /* 268 268 * bfa_port_disable() 269 269 * 270 270 * Send the Port disable request to the f/w ··· 308 308 return BFA_STATUS_OK; 309 309 } 310 310 311 - /** 311 + /* 312 312 * bfa_port_get_stats() 313 313 * 314 314 * Send the request to the f/w to fetch Port statistics. ··· 348 348 return BFA_STATUS_OK; 349 349 } 350 350 351 - /** 351 + /* 352 352 * bfa_port_clear_stats() 353 353 * 354 354 * ··· 385 385 return BFA_STATUS_OK; 386 386 } 387 387 388 - /** 388 + /* 389 389 * bfa_port_hbfail() 390 390 * 391 391 * ··· 415 415 } 416 416 } 417 417 418 - /** 418 + /* 419 419 * bfa_port_attach() 420 420 * 421 421 * ··· 449 449 bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port); 450 450 bfa_ioc_hbfail_register(port->ioc, &port->hbfail); 451 451 452 - /** 452 + /* 453 453 * initialize time stamp for stats reset 454 454 */ 455 455 bfa_os_gettimeofday(&tv); ··· 458 458 bfa_trc(port, 0); 459 459 } 460 460 461 - /** 461 + /* 462 462 * bfa_port_detach() 463 463 * 464 464 *
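The byte-swap loop at the top of this file is the endianness pattern in miniature: the firmware lays out 64-bit counters as two big-endian 32-bit words, so a little-endian host must both byte-swap each half and exchange the pair, while on a big-endian host be32_to_cpu() is a no-op and the words stay put. An equivalent non-in-place sketch, assuming the high word comes first in firmware memory; be64_counter is a hypothetical helper:

#include <linux/types.h>
#include <asm/byteorder.h>

static inline u64 be64_counter(const __be32 *p)
{
	/* p[0] is the big-endian high word, p[1] the low word */
	return ((u64)be32_to_cpu(p[0]) << 32) | be32_to_cpu(p[1]);
}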
+251 -253
drivers/scsi/bfa/bfa_svc.c
··· 29 29 BFA_MODULE(rport); 30 30 BFA_MODULE(uf); 31 31 32 - /** 32 + /* 33 33 * LPS related definitions 34 34 */ 35 35 #define BFA_LPS_MIN_LPORTS (1) ··· 41 41 #define BFA_LPS_MAX_VPORTS_SUPP_CB 255 42 42 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190 43 43 44 - /** 44 + /* 45 45 * lps_pvt BFA LPS private functions 46 46 */ 47 47 ··· 55 55 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */ 56 56 }; 57 57 58 - /** 58 + /* 59 59 * FC PORT related definitions 60 60 */ 61 61 /* ··· 67 67 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) 68 68 69 69 70 - /** 70 + /* 71 71 * BFA port state machine events 72 72 */ 73 73 enum bfa_fcport_sm_event { ··· 82 82 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ 83 83 }; 84 84 85 - /** 85 + /* 86 86 * BFA port link notification state machine events 87 87 */ 88 88 ··· 92 92 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ 93 93 }; 94 94 95 - /** 95 + /* 96 96 * RPORT related definitions 97 97 */ 98 98 #define bfa_rport_offline_cb(__rp) do { \ ··· 126 126 BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */ 127 127 }; 128 128 129 - /** 129 + /* 130 130 * forward declarations FCXP related functions 131 131 */ 132 132 static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete); ··· 138 138 static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, 139 139 struct bfi_fcxp_send_req_s *send_req); 140 140 141 - /** 141 + /* 142 142 * forward declarations for LPS functions 143 143 */ 144 144 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, ··· 163 163 static void bfa_lps_logout_comp(struct bfa_lps_s *lps); 164 164 static void bfa_lps_cvl_event(struct bfa_lps_s *lps); 165 165 166 - /** 166 + /* 167 167 * forward declaration for LPS state machine 168 168 */ 169 169 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); ··· 175 175 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event 176 176 event); 177 177 178 - /** 178 + /* 179 179 * forward declaration for FC Port functions 180 180 */ 181 181 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); ··· 193 193 static void bfa_fcport_stats_clr_timeout(void *cbarg); 194 194 static void bfa_trunk_iocdisable(struct bfa_s *bfa); 195 195 196 - /** 196 + /* 197 197 * forward declaration for FC PORT state machine 198 198 */ 199 199 static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, ··· 252 252 }; 253 253 254 254 255 - /** 255 + /* 256 256 * forward declaration for RPORT related functions 257 257 */ 258 258 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod); ··· 265 265 static void __bfa_cb_rport_offline(void *cbarg, 266 266 bfa_boolean_t complete); 267 267 268 - /** 268 + /* 269 269 * forward declaration for RPORT state machine 270 270 */ 271 271 static void bfa_rport_sm_uninit(struct bfa_rport_s *rp, ··· 295 295 static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, 296 296 enum bfa_rport_event event); 297 297 298 - /** 298 + /* 299 299 * PLOG related definitions 300 300 */ 301 301 static int ··· 330 330 331 331 pl_recp = &(plog->plog_recs[tail]); 332 332 333 - bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); 333 + memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); 334 334 335 335 pl_recp->tv = bfa_os_get_log_time(); 336 336 BFA_PL_LOG_REC_INCR(plog->tail); ··· 342 342 void 343 343 bfa_plog_init(struct bfa_plog_s *plog) 344 344 { 345 - bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s)); 345 + memset((char *)plog, 0, sizeof(struct bfa_plog_s)); 346 346 347 - 
bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN); 347 + memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN); 348 348 plog->head = plog->tail = 0; 349 349 plog->plog_enabled = 1; 350 350 } ··· 357 357 struct bfa_plog_rec_s lp; 358 358 359 359 if (plog->plog_enabled) { 360 - bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 360 + memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 361 361 lp.mid = mid; 362 362 lp.eid = event; 363 363 lp.log_type = BFA_PL_LOG_TYPE_STRING; ··· 381 381 num_ints = BFA_PL_INT_LOG_SZ; 382 382 383 383 if (plog->plog_enabled) { 384 - bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 384 + memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 385 385 lp.mid = mid; 386 386 lp.eid = event; 387 387 lp.log_type = BFA_PL_LOG_TYPE_INT; 388 388 lp.misc = misc; 389 389 390 390 for (i = 0; i < num_ints; i++) 391 - bfa_os_assign(lp.log_entry.int_log[i], 392 - intarr[i]); 391 + lp.log_entry.int_log[i] = intarr[i]; 393 392 394 393 lp.log_num_ints = (u8) num_ints; 395 394 ··· 406 407 u32 ints[BFA_PL_INT_LOG_SZ]; 407 408 408 409 if (plog->plog_enabled) { 409 - bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 410 + memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 410 411 411 412 ints[0] = tmp_int[0]; 412 413 ints[1] = tmp_int[1]; ··· 426 427 u32 ints[BFA_PL_INT_LOG_SZ]; 427 428 428 429 if (plog->plog_enabled) { 429 - bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 430 + memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 430 431 431 432 ints[0] = tmp_int[0]; 432 433 ints[1] = tmp_int[1]; ··· 461 462 return (bfa_boolean_t)plog->plog_enabled; 462 463 } 463 464 464 - /** 465 + /* 465 466 * fcxp_pvt BFA FCXP private functions 466 467 */ 467 468 ··· 484 485 mod->req_pld_list_pa = dm_pa; 485 486 dm_kva += buf_pool_sz; 486 487 dm_pa += buf_pool_sz; 487 - bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz); 488 + memset(mod->req_pld_list_kva, 0, buf_pool_sz); 488 489 489 490 /* 490 491 * Initialize the fcxp rsp payload list ··· 494 495 mod->rsp_pld_list_pa = dm_pa; 495 496 dm_kva += buf_pool_sz; 496 497 dm_pa += buf_pool_sz; 497 - bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz); 498 + memset(mod->rsp_pld_list_kva, 0, buf_pool_sz); 498 499 499 500 bfa_meminfo_dma_virt(mi) = dm_kva; 500 501 bfa_meminfo_dma_phys(mi) = dm_pa; ··· 507 508 struct bfa_fcxp_s *fcxp; 508 509 509 510 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi); 510 - bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps); 511 + memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps); 511 512 512 513 INIT_LIST_HEAD(&mod->fcxp_free_q); 513 514 INIT_LIST_HEAD(&mod->fcxp_active_q); ··· 558 559 { 559 560 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); 560 561 561 - bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s)); 562 + memset(mod, 0, sizeof(struct bfa_fcxp_mod_s)); 562 563 mod->bfa = bfa; 563 564 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs; 564 565 565 - /** 566 + /* 566 567 * Initialize FCXP request and response payload sizes. 567 568 */ 568 569 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ; ··· 740 741 { 741 742 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); 742 743 struct bfa_fcxp_s *fcxp; 743 - u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag); 744 + u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag); 744 745 745 746 bfa_trc(bfa, fcxp_tag); 746 747 747 - fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len); 748 + fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len); 748 749 749 - /** 750 + /* 750 751 * @todo f/w should not set residue to non-0 when everything 751 752 * is received. 
752 753 */ 753 754 if (fcxp_rsp->req_status == BFA_STATUS_OK) 754 755 fcxp_rsp->residue_len = 0; 755 756 else 756 - fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len); 757 + fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len); 757 758 758 759 fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag); 759 760 ··· 855 856 } 856 857 } 857 858 858 - /** 859 + /* 859 860 * Handler to resume sending fcxp when space in available in cpe queue. 860 861 */ 861 862 static void ··· 870 871 bfa_fcxp_queue(fcxp, send_req); 871 872 } 872 873 873 - /** 874 + /* 874 875 * Queue fcxp send request to foimrware. 875 876 */ 876 877 static void ··· 884 885 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ, 885 886 bfa_lpuid(bfa)); 886 887 887 - send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag); 888 + send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag); 888 889 if (rport) { 889 890 send_req->rport_fw_hndl = rport->fw_handle; 890 - send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz); 891 + send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz); 891 892 if (send_req->max_frmsz == 0) 892 - send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ); 893 + send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ); 893 894 } else { 894 895 send_req->rport_fw_hndl = 0; 895 - send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ); 896 + send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ); 896 897 } 897 898 898 - send_req->vf_id = bfa_os_htons(reqi->vf_id); 899 + send_req->vf_id = cpu_to_be16(reqi->vf_id); 899 900 send_req->lp_tag = reqi->lp_tag; 900 901 send_req->class = reqi->class; 901 902 send_req->rsp_timeout = rspi->rsp_timeout; 902 903 send_req->cts = reqi->cts; 903 904 send_req->fchs = reqi->fchs; 904 905 905 - send_req->req_len = bfa_os_htonl(reqi->req_tot_len); 906 - send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen); 906 + send_req->req_len = cpu_to_be32(reqi->req_tot_len); 907 + send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen); 907 908 908 909 /* 909 910 * setup req sgles ··· 954 955 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); 955 956 } 956 957 957 - /** 958 + /* 958 959 * hal_fcxp_api BFA FCXP API 959 960 */ 960 961 961 - /** 962 + /* 962 963 * Allocate an FCXP instance to send a response or to send a request 963 964 * that has a response. Request/response buffers are allocated by caller. 964 965 * ··· 1004 1005 return fcxp; 1005 1006 } 1006 1007 1007 - /** 1008 + /* 1008 1009 * Get the internal request buffer pointer 1009 1010 * 1010 1011 * @param[in] fcxp BFA fcxp pointer ··· 1031 1032 return mod->req_pld_sz; 1032 1033 } 1033 1034 1034 - /** 1035 + /* 1035 1036 * Get the internal response buffer pointer 1036 1037 * 1037 1038 * @param[in] fcxp BFA fcxp pointer ··· 1051 1052 return rspbuf; 1052 1053 } 1053 1054 1054 - /** 1055 + /* 1055 1056 * Free the BFA FCXP 1056 1057 * 1057 1058 * @param[in] fcxp BFA fcxp pointer ··· 1068 1069 bfa_fcxp_put(fcxp); 1069 1070 } 1070 1071 1071 - /** 1072 + /* 1072 1073 * Send a FCXP request 1073 1074 * 1074 1075 * @param[in] fcxp BFA fcxp pointer ··· 1102 1103 1103 1104 bfa_trc(bfa, fcxp->fcxp_tag); 1104 1105 1105 - /** 1106 + /* 1106 1107 * setup request/response info 1107 1108 */ 1108 1109 reqi->bfa_rport = rport; ··· 1117 1118 fcxp->send_cbfn = cbfn ? 
cbfn : bfa_fcxp_null_comp; 1118 1119 fcxp->send_cbarg = cbarg; 1119 1120 1120 - /** 1121 + /* 1121 1122 * If no room in CPE queue, wait for space in request queue 1122 1123 */ 1123 1124 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); ··· 1131 1132 bfa_fcxp_queue(fcxp, send_req); 1132 1133 } 1133 1134 1134 - /** 1135 + /* 1135 1136 * Abort a BFA FCXP 1136 1137 * 1137 1138 * @param[in] fcxp BFA fcxp pointer ··· 1185 1186 void 1186 1187 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp) 1187 1188 { 1188 - /** 1189 + /* 1189 1190 * If waiting for room in request queue, cancel reqq wait 1190 1191 * and free fcxp. 1191 1192 */ ··· 1201 1202 1202 1203 1203 1204 1204 - /** 1205 + /* 1205 1206 * hal_fcxp_public BFA FCXP public functions 1206 1207 */ 1207 1208 ··· 1228 1229 } 1229 1230 1230 1231 1231 - /** 1232 + /* 1232 1233 * BFA LPS state machine functions 1233 1234 */ 1234 1235 1235 - /** 1236 + /* 1236 1237 * Init state -- no login 1237 1238 */ 1238 1239 static void ··· 1284 1285 } 1285 1286 } 1286 1287 1287 - /** 1288 + /* 1288 1289 * login is in progress -- awaiting response from firmware 1289 1290 */ 1290 1291 static void ··· 1326 1327 } 1327 1328 } 1328 1329 1329 - /** 1330 + /* 1330 1331 * login pending - awaiting space in request queue 1331 1332 */ 1332 1333 static void ··· 1358 1359 } 1359 1360 } 1360 1361 1361 - /** 1362 + /* 1362 1363 * login complete 1363 1364 */ 1364 1365 static void ··· 1399 1400 } 1400 1401 } 1401 1402 1402 - /** 1403 + /* 1403 1404 * logout in progress - awaiting firmware response 1404 1405 */ 1405 1406 static void ··· 1423 1424 } 1424 1425 } 1425 1426 1426 - /** 1427 + /* 1427 1428 * logout pending -- awaiting space in request queue 1428 1429 */ 1429 1430 static void ··· 1450 1451 1451 1452 1452 1453 1453 - /** 1454 + /* 1454 1455 * lps_pvt BFA LPS private functions 1455 1456 */ 1456 1457 1457 - /** 1458 + /* 1458 1459 * return memory requirement 1459 1460 */ 1460 1461 static void ··· 1467 1468 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS; 1468 1469 } 1469 1470 1470 - /** 1471 + /* 1471 1472 * bfa module attach at initialization time 1472 1473 */ 1473 1474 static void ··· 1478 1479 struct bfa_lps_s *lps; 1479 1480 int i; 1480 1481 1481 - bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s)); 1482 + memset(mod, 0, sizeof(struct bfa_lps_mod_s)); 1482 1483 mod->num_lps = BFA_LPS_MAX_LPORTS; 1483 1484 if (cfg->drvcfg.min_cfg) 1484 1485 mod->num_lps = BFA_LPS_MIN_LPORTS; ··· 1515 1516 { 1516 1517 } 1517 1518 1518 - /** 1519 + /* 1519 1520 * IOC in disabled state -- consider all lps offline 1520 1521 */ 1521 1522 static void ··· 1531 1532 } 1532 1533 } 1533 1534 1534 - /** 1535 + /* 1535 1536 * Firmware login response 1536 1537 */ 1537 1538 static void ··· 1549 1550 lps->fport = rsp->f_port; 1550 1551 lps->npiv_en = rsp->npiv_en; 1551 1552 lps->lp_pid = rsp->lp_pid; 1552 - lps->pr_bbcred = bfa_os_ntohs(rsp->bb_credit); 1553 + lps->pr_bbcred = be16_to_cpu(rsp->bb_credit); 1553 1554 lps->pr_pwwn = rsp->port_name; 1554 1555 lps->pr_nwwn = rsp->node_name; 1555 1556 lps->auth_req = rsp->auth_req; ··· 1578 1579 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1579 1580 } 1580 1581 1581 - /** 1582 + /* 1582 1583 * Firmware logout response 1583 1584 */ 1584 1585 static void ··· 1593 1594 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1594 1595 } 1595 1596 1596 - /** 1597 + /* 1597 1598 * Firmware received a Clear virtual link request (for FCoE) 1598 1599 */ 1599 1600 static void ··· 1607 1608 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); 1608 1609 } 1609 1610 1610 - /** 1611 + /* 1611 1612 * 
Space is available in request queue, resume queueing request to firmware. 1612 1613 */ 1613 1614 static void ··· 1618 1619 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME); 1619 1620 } 1620 1621 1621 - /** 1622 + /* 1622 1623 * lps is freed -- triggered by vport delete 1623 1624 */ 1624 1625 static void ··· 1631 1632 list_add_tail(&lps->qe, &mod->lps_free_q); 1632 1633 } 1633 1634 1634 - /** 1635 + /* 1635 1636 * send login request to firmware 1636 1637 */ 1637 1638 static void ··· 1647 1648 1648 1649 m->lp_tag = lps->lp_tag; 1649 1650 m->alpa = lps->alpa; 1650 - m->pdu_size = bfa_os_htons(lps->pdusz); 1651 + m->pdu_size = cpu_to_be16(lps->pdusz); 1651 1652 m->pwwn = lps->pwwn; 1652 1653 m->nwwn = lps->nwwn; 1653 1654 m->fdisc = lps->fdisc; ··· 1656 1657 bfa_reqq_produce(lps->bfa, lps->reqq); 1657 1658 } 1658 1659 1659 - /** 1660 + /* 1660 1661 * send logout request to firmware 1661 1662 */ 1662 1663 static void ··· 1675 1676 bfa_reqq_produce(lps->bfa, lps->reqq); 1676 1677 } 1677 1678 1678 - /** 1679 + /* 1679 1680 * Indirect login completion handler for non-fcs 1680 1681 */ 1681 1682 static void ··· 1692 1693 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); 1693 1694 } 1694 1695 1695 - /** 1696 + /* 1696 1697 * Login completion handler -- direct call for fcs, queue for others 1697 1698 */ 1698 1699 static void ··· 1710 1711 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); 1711 1712 } 1712 1713 1713 - /** 1714 + /* 1714 1715 * Indirect logout completion handler for non-fcs 1715 1716 */ 1716 1717 static void ··· 1725 1726 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); 1726 1727 } 1727 1728 1728 - /** 1729 + /* 1729 1730 * Logout completion handler -- direct call for fcs, queue for others 1730 1731 */ 1731 1732 static void ··· 1740 1741 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); 1741 1742 } 1742 1743 1743 - /** 1744 + /* 1744 1745 * Clear virtual link completion handler for non-fcs 1745 1746 */ 1746 1747 static void ··· 1756 1757 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); 1757 1758 } 1758 1759 1759 - /** 1760 + /* 1760 1761 * Received Clear virtual link event --direct call for fcs, 1761 1762 * queue for others 1762 1763 */ ··· 1776 1777 1777 1778 1778 1779 1779 - /** 1780 + /* 1780 1781 * lps_public BFA LPS public functions 1781 1782 */ 1782 1783 ··· 1789 1790 return BFA_LPS_MAX_VPORTS_SUPP_CB; 1790 1791 } 1791 1792 1792 - /** 1793 + /* 1793 1794 * Allocate a lport srvice tag. 1794 1795 */ 1795 1796 struct bfa_lps_s * ··· 1809 1810 return lps; 1810 1811 } 1811 1812 1812 - /** 1813 + /* 1813 1814 * Free lport service tag. This can be called anytime after an alloc. 1814 1815 * No need to wait for any pending login/logout completions. 1815 1816 */ ··· 1819 1820 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE); 1820 1821 } 1821 1822 1822 - /** 1823 + /* 1823 1824 * Initiate a lport login. 1824 1825 */ 1825 1826 void ··· 1836 1837 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1837 1838 } 1838 1839 1839 - /** 1840 + /* 1840 1841 * Initiate a lport fdisc login. 1841 1842 */ 1842 1843 void ··· 1853 1854 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1854 1855 } 1855 1856 1856 - /** 1857 + /* 1857 1858 * Initiate a lport logout (flogi). 1858 1859 */ 1859 1860 void ··· 1862 1863 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); 1863 1864 } 1864 1865 1865 - /** 1866 + /* 1866 1867 * Initiate a lport FDSIC logout. 
1867 1868 */ 1868 1869 void ··· 1871 1872 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); 1872 1873 } 1873 1874 1874 - /** 1875 + /* 1875 1876 * Discard a pending login request -- should be called only for 1876 1877 * link down handling. 1877 1878 */ ··· 1881 1882 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); 1882 1883 } 1883 1884 1884 - /** 1885 + /* 1885 1886 * Return lport services tag 1886 1887 */ 1887 1888 u8 ··· 1890 1891 return lps->lp_tag; 1891 1892 } 1892 1893 1893 - /** 1894 + /* 1894 1895 * Return lport services tag given the pid 1895 1896 */ 1896 1897 u8 ··· 1909 1910 return 0; 1910 1911 } 1911 1912 1912 - /** 1913 + /* 1913 1914 * return if fabric login indicates support for NPIV 1914 1915 */ 1915 1916 bfa_boolean_t ··· 1918 1919 return lps->npiv_en; 1919 1920 } 1920 1921 1921 - /** 1922 + /* 1922 1923 * Return TRUE if attached to F-Port, else return FALSE 1923 1924 */ 1924 1925 bfa_boolean_t ··· 1927 1928 return lps->fport; 1928 1929 } 1929 1930 1930 - /** 1931 + /* 1931 1932 * Return TRUE if attached to a Brocade Fabric 1932 1933 */ 1933 1934 bfa_boolean_t ··· 1935 1936 { 1936 1937 return lps->brcd_switch; 1937 1938 } 1938 - /** 1939 + /* 1939 1940 * return TRUE if authentication is required 1940 1941 */ 1941 1942 bfa_boolean_t ··· 1950 1951 return lps->ext_status; 1951 1952 } 1952 1953 1953 - /** 1954 + /* 1954 1955 * return port id assigned to the lport 1955 1956 */ 1956 1957 u32 ··· 1959 1960 return lps->lp_pid; 1960 1961 } 1961 1962 1962 - /** 1963 + /* 1963 1964 * return port id assigned to the base lport 1964 1965 */ 1965 1966 u32 ··· 1970 1971 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid; 1971 1972 } 1972 1973 1973 - /** 1974 + /* 1974 1975 * Return bb_credit assigned in FLOGI response 1975 1976 */ 1976 1977 u16 ··· 1979 1980 return lps->pr_bbcred; 1980 1981 } 1981 1982 1982 - /** 1983 + /* 1983 1984 * Return peer port name 1984 1985 */ 1985 1986 wwn_t ··· 1988 1989 return lps->pr_pwwn; 1989 1990 } 1990 1991 1991 - /** 1992 + /* 1992 1993 * Return peer node name 1993 1994 */ 1994 1995 wwn_t ··· 1997 1998 return lps->pr_nwwn; 1998 1999 } 1999 2000 2000 - /** 2001 + /* 2001 2002 * return reason code if login request is rejected 2002 2003 */ 2003 2004 u8 ··· 2006 2007 return lps->lsrjt_rsn; 2007 2008 } 2008 2009 2009 - /** 2010 + /* 2010 2011 * return explanation code if login request is rejected 2011 2012 */ 2012 2013 u8 ··· 2015 2016 return lps->lsrjt_expl; 2016 2017 } 2017 2018 2018 - /** 2019 + /* 2019 2020 * Return fpma/spma MAC for lport 2020 2021 */ 2021 2022 mac_t ··· 2024 2025 return lps->lp_mac; 2025 2026 } 2026 2027 2027 - /** 2028 + /* 2028 2029 * LPS firmware message class handler. 2029 2030 */ 2030 2031 void ··· 2054 2055 } 2055 2056 } 2056 2057 2057 - /** 2058 + /* 2058 2059 * FC PORT state machine functions 2059 2060 */ 2060 2061 static void ··· 2065 2066 2066 2067 switch (event) { 2067 2068 case BFA_FCPORT_SM_START: 2068 - /** 2069 + /* 2069 2070 * Start event after IOC is configured and BFA is started. 2070 2071 */ 2071 2072 if (bfa_fcport_send_enable(fcport)) { ··· 2079 2080 break; 2080 2081 2081 2082 case BFA_FCPORT_SM_ENABLE: 2082 - /** 2083 + /* 2083 2084 * Port is persistently configured to be in enabled state. Do 2084 2085 * not change state. Port enabling is done when START event is 2085 2086 * received. ··· 2087 2088 break; 2088 2089 2089 2090 case BFA_FCPORT_SM_DISABLE: 2090 - /** 2091 + /* 2091 2092 * If a port is persistently configured to be disabled, the 2092 2093 * first event will a port disable request. 
2093 2094 */ ··· 2123 2124 break; 2124 2125 2125 2126 case BFA_FCPORT_SM_ENABLE: 2126 - /** 2127 + /* 2127 2128 * Already enable is in progress. 2128 2129 */ 2129 2130 break; 2130 2131 2131 2132 case BFA_FCPORT_SM_DISABLE: 2132 - /** 2133 + /* 2133 2134 * Just send disable request to firmware when room becomes 2134 2135 * available in request queue. 2135 2136 */ ··· 2144 2145 2145 2146 case BFA_FCPORT_SM_LINKUP: 2146 2147 case BFA_FCPORT_SM_LINKDOWN: 2147 - /** 2148 + /* 2148 2149 * Possible to get link events when doing back-to-back 2149 2150 * enable/disables. 2150 2151 */ ··· 2183 2184 break; 2184 2185 2185 2186 case BFA_FCPORT_SM_ENABLE: 2186 - /** 2187 + /* 2187 2188 * Already being enabled. 2188 2189 */ 2189 2190 break; ··· 2256 2257 break; 2257 2258 2258 2259 case BFA_FCPORT_SM_LINKDOWN: 2259 - /** 2260 + /* 2260 2261 * Possible to get link down event. 2261 2262 */ 2262 2263 break; 2263 2264 2264 2265 case BFA_FCPORT_SM_ENABLE: 2265 - /** 2266 + /* 2266 2267 * Already enabled. 2267 2268 */ 2268 2269 break; ··· 2305 2306 2306 2307 switch (event) { 2307 2308 case BFA_FCPORT_SM_ENABLE: 2308 - /** 2309 + /* 2309 2310 * Already enabled. 2310 2311 */ 2311 2312 break; ··· 2398 2399 break; 2399 2400 2400 2401 case BFA_FCPORT_SM_DISABLE: 2401 - /** 2402 + /* 2402 2403 * Already being disabled. 2403 2404 */ 2404 2405 break; 2405 2406 2406 2407 case BFA_FCPORT_SM_LINKUP: 2407 2408 case BFA_FCPORT_SM_LINKDOWN: 2408 - /** 2409 + /* 2409 2410 * Possible to get link events when doing back-to-back 2410 2411 * enable/disables. 2411 2412 */ ··· 2452 2453 2453 2454 case BFA_FCPORT_SM_LINKUP: 2454 2455 case BFA_FCPORT_SM_LINKDOWN: 2455 - /** 2456 + /* 2456 2457 * Possible to get link events when doing back-to-back 2457 2458 * enable/disables. 2458 2459 */ ··· 2482 2483 break; 2483 2484 2484 2485 case BFA_FCPORT_SM_DISABLE: 2485 - /** 2486 + /* 2486 2487 * Already being disabled. 2487 2488 */ 2488 2489 break; ··· 2507 2508 2508 2509 case BFA_FCPORT_SM_LINKUP: 2509 2510 case BFA_FCPORT_SM_LINKDOWN: 2510 - /** 2511 + /* 2511 2512 * Possible to get link events when doing back-to-back 2512 2513 * enable/disables. 2513 2514 */ ··· 2532 2533 2533 2534 switch (event) { 2534 2535 case BFA_FCPORT_SM_START: 2535 - /** 2536 + /* 2536 2537 * Ignore start event for a port that is disabled. 2537 2538 */ 2538 2539 break; ··· 2556 2557 break; 2557 2558 2558 2559 case BFA_FCPORT_SM_DISABLE: 2559 - /** 2560 + /* 2560 2561 * Already disabled. 2561 2562 */ 2562 2563 break; ··· 2586 2587 break; 2587 2588 2588 2589 default: 2589 - /** 2590 + /* 2590 2591 * Ignore all other events. 2591 2592 */ 2592 2593 ; 2593 2594 } 2594 2595 } 2595 2596 2596 - /** 2597 + /* 2597 2598 * Port is enabled. IOC is down/failed. 2598 2599 */ 2599 2600 static void ··· 2612 2613 break; 2613 2614 2614 2615 default: 2615 - /** 2616 + /* 2616 2617 * Ignore all events. 2617 2618 */ 2618 2619 ; 2619 2620 } 2620 2621 } 2621 2622 2622 - /** 2623 + /* 2623 2624 * Port is disabled. IOC is down/failed. 2624 2625 */ 2625 2626 static void ··· 2638 2639 break; 2639 2640 2640 2641 default: 2641 - /** 2642 + /* 2642 2643 * Ignore all events. 
2643 2644 */ 2644 2645 ; 2645 2646 } 2646 2647 } 2647 2648 2648 - /** 2649 + /* 2649 2650 * Link state is down 2650 2651 */ 2651 2652 static void ··· 2665 2666 } 2666 2667 } 2667 2668 2668 - /** 2669 + /* 2669 2670 * Link state is waiting for down notification 2670 2671 */ 2671 2672 static void ··· 2688 2689 } 2689 2690 } 2690 2691 2691 - /** 2692 + /* 2692 2693 * Link state is waiting for down notification and there is a pending up 2693 2694 */ 2694 2695 static void ··· 2712 2713 } 2713 2714 } 2714 2715 2715 - /** 2716 + /* 2716 2717 * Link state is up 2717 2718 */ 2718 2719 static void ··· 2732 2733 } 2733 2734 } 2734 2735 2735 - /** 2736 + /* 2736 2737 * Link state is waiting for up notification 2737 2738 */ 2738 2739 static void ··· 2755 2756 } 2756 2757 } 2757 2758 2758 - /** 2759 + /* 2759 2760 * Link state is waiting for up notification and there is a pending down 2760 2761 */ 2761 2762 static void ··· 2779 2780 } 2780 2781 } 2781 2782 2782 - /** 2783 + /* 2783 2784 * Link state is waiting for up notification and there are pending down and up 2784 2785 */ 2785 2786 static void ··· 2805 2806 2806 2807 2807 2808 2808 - /** 2809 + /* 2809 2810 * hal_port_private 2810 2811 */ 2811 2812 ··· 2820 2821 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); 2821 2822 } 2822 2823 2823 - /** 2824 + /* 2824 2825 * Send SCN notification to upper layers. 2825 2826 * trunk - false if caller is fcport to ignore fcport event in trunked mode 2826 2827 */ ··· 2896 2897 bfa_meminfo_dma_phys(meminfo) = dm_pa; 2897 2898 } 2898 2899 2899 - /** 2900 + /* 2900 2901 * Memory initialization. 2901 2902 */ 2902 2903 static void ··· 2908 2909 struct bfa_fcport_ln_s *ln = &fcport->ln; 2909 2910 struct bfa_timeval_s tv; 2910 2911 2911 - bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s)); 2912 + memset(fcport, 0, sizeof(struct bfa_fcport_s)); 2912 2913 fcport->bfa = bfa; 2913 2914 ln->fcport = fcport; 2914 2915 ··· 2917 2918 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); 2918 2919 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); 2919 2920 2920 - /** 2921 + /* 2921 2922 * initialize time stamp for stats reset 2922 2923 */ 2923 2924 bfa_os_gettimeofday(&tv); 2924 2925 fcport->stats_reset_time = tv.tv_sec; 2925 2926 2926 - /** 2927 + /* 2927 2928 * initialize and set default configuration 2928 2929 */ 2929 2930 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P; ··· 2941 2942 { 2942 2943 } 2943 2944 2944 - /** 2945 + /* 2945 2946 * Called when IOC is ready. 2946 2947 */ 2947 2948 static void ··· 2950 2951 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); 2951 2952 } 2952 2953 2953 - /** 2954 + /* 2954 2955 * Called before IOC is stopped. 2955 2956 */ 2956 2957 static void ··· 2960 2961 bfa_trunk_iocdisable(bfa); 2961 2962 } 2962 2963 2963 - /** 2964 + /* 2964 2965 * Called when IOC failure is detected. 
2965 2966 */ 2966 2967 static void ··· 2985 2986 fcport->myalpa = 0; 2986 2987 2987 2988 /* QoS Details */ 2988 - bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr); 2989 - bfa_os_assign(fcport->qos_vc_attr, 2990 - pevent->link_state.vc_fcf.qos_vc_attr); 2989 + fcport->qos_attr = pevent->link_state.qos_attr; 2990 + fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr; 2991 2991 2992 - /** 2992 + /* 2993 2993 * update trunk state if applicable 2994 2994 */ 2995 2995 if (!fcport->cfg.trunked) 2996 2996 trunk->attr.state = BFA_TRUNK_DISABLED; 2997 2997 2998 2998 /* update FCoE specific */ 2999 - fcport->fcoe_vlan = bfa_os_ntohs(pevent->link_state.vc_fcf.fcf.vlan); 2999 + fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan); 3000 3000 3001 3001 bfa_trc(fcport->bfa, fcport->speed); 3002 3002 bfa_trc(fcport->bfa, fcport->topology); ··· 3008 3010 fcport->topology = BFA_PORT_TOPOLOGY_NONE; 3009 3011 } 3010 3012 3011 - /** 3013 + /* 3012 3014 * Send port enable message to firmware. 3013 3015 */ 3014 3016 static bfa_boolean_t ··· 3016 3018 { 3017 3019 struct bfi_fcport_enable_req_s *m; 3018 3020 3019 - /** 3021 + /* 3020 3022 * Increment message tag before queue check, so that responses to old 3021 3023 * requests are discarded. 3022 3024 */ 3023 3025 fcport->msgtag++; 3024 3026 3025 - /** 3027 + /* 3026 3028 * check for room in queue to send request now 3027 3029 */ 3028 3030 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); ··· 3038 3040 m->pwwn = fcport->pwwn; 3039 3041 m->port_cfg = fcport->cfg; 3040 3042 m->msgtag = fcport->msgtag; 3041 - m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize); 3043 + m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize); 3042 3044 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); 3043 3045 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); 3044 3046 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); 3045 3047 3046 - /** 3048 + /* 3047 3049 * queue I/O message to firmware 3048 3050 */ 3049 3051 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3050 3052 return BFA_TRUE; 3051 3053 } 3052 3054 3053 - /** 3055 + /* 3054 3056 * Send port disable message to firmware. 3055 3057 */ 3056 3058 static bfa_boolean_t ··· 3058 3060 { 3059 3061 struct bfi_fcport_req_s *m; 3060 3062 3061 - /** 3063 + /* 3062 3064 * Increment message tag before queue check, so that responses to old 3063 3065 * requests are discarded. 
3064 3066 */ 3065 3067 fcport->msgtag++; 3066 3068 3067 - /** 3069 + /* 3068 3070 * check for room in queue to send request now 3069 3071 */ 3070 3072 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); ··· 3078 3080 bfa_lpuid(fcport->bfa)); 3079 3081 m->msgtag = fcport->msgtag; 3080 3082 3081 - /** 3083 + /* 3082 3084 * queue I/O message to firmware 3083 3085 */ 3084 3086 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); ··· 3103 3105 struct bfa_fcport_s *fcport = port_cbarg; 3104 3106 struct bfi_fcport_set_svc_params_req_s *m; 3105 3107 3106 - /** 3108 + /* 3107 3109 * check for room in queue to send request now 3108 3110 */ 3109 3111 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); ··· 3114 3116 3115 3117 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ, 3116 3118 bfa_lpuid(fcport->bfa)); 3117 - m->tx_bbcredit = bfa_os_htons((u16)fcport->cfg.tx_bbcredit); 3119 + m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit); 3118 3120 3119 - /** 3121 + /* 3120 3122 * queue I/O message to firmware 3121 3123 */ 3122 3124 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); ··· 3132 3134 3133 3135 /* Now swap the 32 bit fields */ 3134 3136 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i) 3135 - dip[i] = bfa_os_ntohl(sip[i]); 3137 + dip[i] = be32_to_cpu(sip[i]); 3136 3138 } 3137 3139 3138 3140 static void ··· 3146 3148 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32)); 3147 3149 i = i + 2) { 3148 3150 #ifdef __BIGENDIAN 3149 - dip[i] = bfa_os_ntohl(sip[i]); 3150 - dip[i + 1] = bfa_os_ntohl(sip[i + 1]); 3151 + dip[i] = be32_to_cpu(sip[i]); 3152 + dip[i + 1] = be32_to_cpu(sip[i + 1]); 3151 3153 #else 3152 - dip[i] = bfa_os_ntohl(sip[i + 1]); 3153 - dip[i + 1] = bfa_os_ntohl(sip[i]); 3154 + dip[i] = be32_to_cpu(sip[i + 1]); 3155 + dip[i + 1] = be32_to_cpu(sip[i]); 3154 3156 #endif 3155 3157 } 3156 3158 } ··· 3221 3223 } 3222 3224 fcport->stats_qfull = BFA_FALSE; 3223 3225 3224 - bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); 3226 + memset(msg, 0, sizeof(struct bfi_fcport_req_s)); 3225 3227 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ, 3226 3228 bfa_lpuid(fcport->bfa)); 3227 3229 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); ··· 3235 3237 if (complete) { 3236 3238 struct bfa_timeval_s tv; 3237 3239 3238 - /** 3240 + /* 3239 3241 * re-initialize time stamp for stats reset 3240 3242 */ 3241 3243 bfa_os_gettimeofday(&tv); ··· 3283 3285 } 3284 3286 fcport->stats_qfull = BFA_FALSE; 3285 3287 3286 - bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); 3288 + memset(msg, 0, sizeof(struct bfi_fcport_req_s)); 3287 3289 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ, 3288 3290 bfa_lpuid(fcport->bfa)); 3289 3291 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3290 3292 } 3291 3293 3292 - /** 3294 + /* 3293 3295 * Handle trunk SCN event from firmware. 
3294 3296 */ 3295 3297 static void ··· 3310 3312 bfa_trc(fcport->bfa, scn->trunk_state); 3311 3313 bfa_trc(fcport->bfa, scn->trunk_speed); 3312 3314 3313 - /** 3315 + /* 3314 3316 * Save off new state for trunk attribute query 3315 3317 */ 3316 3318 state_prev = trunk->attr.state; ··· 3325 3327 lattr->trunk_wwn = tlink->trunk_wwn; 3326 3328 lattr->fctl = tlink->fctl; 3327 3329 lattr->speed = tlink->speed; 3328 - lattr->deskew = bfa_os_ntohl(tlink->deskew); 3330 + lattr->deskew = be32_to_cpu(tlink->deskew); 3329 3331 3330 3332 if (tlink->state == BFA_TRUNK_LINK_STATE_UP) { 3331 3333 fcport->speed = tlink->speed; ··· 3358 3360 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down"); 3359 3361 } 3360 3362 3361 - /** 3363 + /* 3362 3364 * Notify upper layers if trunk state changed. 3363 3365 */ 3364 3366 if ((state_prev != trunk->attr.state) || ··· 3374 3376 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3375 3377 int i = 0; 3376 3378 3377 - /** 3379 + /* 3378 3380 * In trunked mode, notify upper layers that link is down 3379 3381 */ 3380 3382 if (fcport->cfg.trunked) { ··· 3398 3400 3399 3401 3400 3402 3401 - /** 3403 + /* 3402 3404 * hal_port_public 3403 3405 */ 3404 3406 3405 - /** 3407 + /* 3406 3408 * Called to initialize port attributes 3407 3409 */ 3408 3410 void ··· 3410 3412 { 3411 3413 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3412 3414 3413 - /** 3415 + /* 3414 3416 * Initialize port attributes from IOC hardware data. 3415 3417 */ 3416 3418 bfa_fcport_set_wwns(fcport); ··· 3424 3426 bfa_assert(fcport->speed_sup); 3425 3427 } 3426 3428 3427 - /** 3429 + /* 3428 3430 * Firmware message handler. 3429 3431 */ 3430 3432 void ··· 3505 3507 3506 3508 3507 3509 3508 - /** 3510 + /* 3509 3511 * hal_port_api 3510 3512 */ 3511 3513 3512 - /** 3514 + /* 3513 3515 * Registered callback for port events. 3514 3516 */ 3515 3517 void ··· 3550 3552 return BFA_STATUS_OK; 3551 3553 } 3552 3554 3553 - /** 3555 + /* 3554 3556 * Configure port speed. 3555 3557 */ 3556 3558 bfa_status_t ··· 3572 3574 return BFA_STATUS_OK; 3573 3575 } 3574 3576 3575 - /** 3577 + /* 3576 3578 * Get current speed. 3577 3579 */ 3578 3580 enum bfa_port_speed ··· 3583 3585 return fcport->speed; 3584 3586 } 3585 3587 3586 - /** 3588 + /* 3587 3589 * Configure port topology. 3588 3590 */ 3589 3591 bfa_status_t ··· 3608 3610 return BFA_STATUS_OK; 3609 3611 } 3610 3612 3611 - /** 3613 + /* 3612 3614 * Get current topology. 3613 3615 */ 3614 3616 enum bfa_port_topology ··· 3708 3710 bfa_fcport_send_txcredit(fcport); 3709 3711 } 3710 3712 3711 - /** 3713 + /* 3712 3714 * Get port attributes. 3713 3715 */ 3714 3716 ··· 3727 3729 { 3728 3730 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3729 3731 3730 - bfa_os_memset(attr, 0, sizeof(struct bfa_port_attr_s)); 3732 + memset(attr, 0, sizeof(struct bfa_port_attr_s)); 3731 3733 3732 3734 attr->nwwn = fcport->nwwn; 3733 3735 attr->pwwn = fcport->pwwn; ··· 3735 3737 attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc); 3736 3738 attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc); 3737 3739 3738 - bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg, 3740 + memcpy(&attr->pport_cfg, &fcport->cfg, 3739 3741 sizeof(struct bfa_port_cfg_s)); 3740 3742 /* speed attributes */ 3741 3743 attr->pport_cfg.speed = fcport->cfg.speed; ··· 3768 3770 3769 3771 #define BFA_FCPORT_STATS_TOV 1000 3770 3772 3771 - /** 3773 + /* 3772 3774 * Fetch port statistics (FCQoS or FCoE). 
3773 3775 */ 3774 3776 bfa_status_t ··· 3794 3796 return BFA_STATUS_OK; 3795 3797 } 3796 3798 3797 - /** 3799 + /* 3798 3800 * Reset port statistics (FCQoS or FCoE). 3799 3801 */ 3800 3802 bfa_status_t ··· 3818 3820 return BFA_STATUS_OK; 3819 3821 } 3820 3822 3821 - /** 3823 + /* 3822 3824 * Fetch FCQoS port statistics 3823 3825 */ 3824 3826 bfa_status_t ··· 3831 3833 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); 3832 3834 } 3833 3835 3834 - /** 3836 + /* 3835 3837 * Reset FCoE port statistics 3836 3838 */ 3837 3839 bfa_status_t ··· 3843 3845 return bfa_fcport_clear_stats(bfa, cbfn, cbarg); 3844 3846 } 3845 3847 3846 - /** 3848 + /* 3847 3849 * Fetch FCQoS port statistics 3848 3850 */ 3849 3851 bfa_status_t ··· 3856 3858 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); 3857 3859 } 3858 3860 3859 - /** 3861 + /* 3860 3862 * Reset FCoE port statistics 3861 3863 */ 3862 3864 bfa_status_t ··· 3874 3876 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3875 3877 3876 3878 qos_attr->state = fcport->qos_attr.state; 3877 - qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr); 3879 + qos_attr->total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr); 3878 3880 } 3879 3881 3880 3882 void ··· 3885 3887 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr; 3886 3888 u32 i = 0; 3887 3889 3888 - qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count); 3889 - qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit); 3890 + qos_vc_attr->total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count); 3891 + qos_vc_attr->shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit); 3890 3892 qos_vc_attr->elp_opmode_flags = 3891 - bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags); 3893 + be32_to_cpu(bfa_vc_attr->elp_opmode_flags); 3892 3894 3893 3895 /* Individual VC info */ 3894 3896 while (i < qos_vc_attr->total_vc_count) { ··· 3902 3904 } 3903 3905 } 3904 3906 3905 - /** 3907 + /* 3906 3908 * Fetch port attributes. 3907 3909 */ 3908 3910 bfa_boolean_t ··· 3937 3939 3938 3940 if (ioc_type == BFA_IOC_TYPE_FC) { 3939 3941 fcport->cfg.qos_enabled = on_off; 3940 - /** 3942 + /* 3941 3943 * Notify fcpim of the change in QoS state 3942 3944 */ 3943 3945 bfa_fcpim_update_ioredirect(bfa); ··· 3957 3959 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; 3958 3960 } 3959 3961 3960 - /** 3962 + /* 3961 3963 * Configure default minimum ratelim speed 3962 3964 */ 3963 3965 bfa_status_t ··· 3978 3980 return BFA_STATUS_OK; 3979 3981 } 3980 3982 3981 - /** 3983 + /* 3982 3984 * Get default minimum ratelim speed 3983 3985 */ 3984 3986 enum bfa_port_speed ··· 4093 4095 } 4094 4096 4095 4097 4096 - /** 4098 + /* 4097 4099 * Rport State machine functions 4098 4100 */ 4099 - /** 4101 + /* 4100 4102 * Beginning state, only online event expected. 4101 4103 */ 4102 4104 static void ··· 4149 4151 } 4150 4152 } 4151 4153 4152 - /** 4154 + /* 4153 4155 * Waiting for rport create response from firmware. 4154 4156 */ 4155 4157 static void ··· 4186 4188 } 4187 4189 } 4188 4190 4189 - /** 4191 + /* 4190 4192 * Request queue is full, awaiting queue resume to send create request. 4191 4193 */ 4192 4194 static void ··· 4227 4229 } 4228 4230 } 4229 4231 4230 - /** 4232 + /* 4231 4233 * Online state - normal parking state. 
4232 4234 */ 4233 4235 static void ··· 4273 4275 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority); 4274 4276 4275 4277 qos_scn->old_qos_attr.qos_flow_id = 4276 - bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id); 4278 + be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id); 4277 4279 qos_scn->new_qos_attr.qos_flow_id = 4278 - bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id); 4280 + be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id); 4279 4281 4280 4282 if (qos_scn->old_qos_attr.qos_flow_id != 4281 4283 qos_scn->new_qos_attr.qos_flow_id) ··· 4295 4297 } 4296 4298 } 4297 4299 4298 - /** 4300 + /* 4299 4301 * Firmware rport is being deleted - awaiting f/w response. 4300 4302 */ 4301 4303 static void ··· 4358 4360 } 4359 4361 } 4360 4362 4361 - /** 4363 + /* 4362 4364 * Offline state. 4363 4365 */ 4364 4366 static void ··· 4393 4395 } 4394 4396 } 4395 4397 4396 - /** 4398 + /* 4397 4399 * Rport is deleted, waiting for firmware response to delete. 4398 4400 */ 4399 4401 static void ··· 4445 4447 } 4446 4448 } 4447 4449 4448 - /** 4450 + /* 4449 4451 * Waiting for rport create response from firmware. A delete is pending. 4450 4452 */ 4451 4453 static void ··· 4476 4478 } 4477 4479 } 4478 4480 4479 - /** 4481 + /* 4480 4482 * Waiting for rport create response from firmware. Rport offline is pending. 4481 4483 */ 4482 4484 static void ··· 4511 4513 } 4512 4514 } 4513 4515 4514 - /** 4516 + /* 4515 4517 * IOC h/w failed. 4516 4518 */ 4517 4519 static void ··· 4551 4553 4552 4554 4553 4555 4554 - /** 4556 + /* 4555 4557 * bfa_rport_private BFA rport private functions 4556 4558 */ 4557 4559 ··· 4610 4612 !(mod->num_rports & (mod->num_rports - 1))); 4611 4613 4612 4614 for (i = 0; i < mod->num_rports; i++, rp++) { 4613 - bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s)); 4615 + memset(rp, 0, sizeof(struct bfa_rport_s)); 4614 4616 rp->bfa = bfa; 4615 4617 rp->rport_tag = i; 4616 4618 bfa_sm_set_state(rp, bfa_rport_sm_uninit); 4617 4619 4618 - /** 4620 + /* 4619 4621 * - is unused 4620 4622 */ 4621 4623 if (i) ··· 4624 4626 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp); 4625 4627 } 4626 4628 4627 - /** 4629 + /* 4628 4630 * consume memory 4629 4631 */ 4630 4632 bfa_meminfo_kva(meminfo) = (u8 *) rp; ··· 4685 4687 { 4686 4688 struct bfi_rport_create_req_s *m; 4687 4689 4688 - /** 4690 + /* 4689 4691 * check for room in queue to send request now 4690 4692 */ 4691 4693 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); ··· 4697 4699 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ, 4698 4700 bfa_lpuid(rp->bfa)); 4699 4701 m->bfa_handle = rp->rport_tag; 4700 - m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz); 4702 + m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz); 4701 4703 m->pid = rp->rport_info.pid; 4702 4704 m->lp_tag = rp->rport_info.lp_tag; 4703 4705 m->local_pid = rp->rport_info.local_pid; ··· 4706 4708 m->vf_id = rp->rport_info.vf_id; 4707 4709 m->cisc = rp->rport_info.cisc; 4708 4710 4709 - /** 4711 + /* 4710 4712 * queue I/O message to firmware 4711 4713 */ 4712 4714 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); ··· 4718 4720 { 4719 4721 struct bfi_rport_delete_req_s *m; 4720 4722 4721 - /** 4723 + /* 4722 4724 * check for room in queue to send request now 4723 4725 */ 4724 4726 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); ··· 4731 4733 bfa_lpuid(rp->bfa)); 4732 4734 m->fw_handle = rp->fw_handle; 4733 4735 4734 - /** 4736 + /* 4735 4737 * queue I/O message to firmware 4736 4738 */ 4737 4739 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); ··· 4743 4745 { 4744 4746 struct bfa_rport_speed_req_s *m; 
4745 4747 4746 - /** 4748 + /* 4747 4749 * check for room in queue to send request now 4748 4750 */ 4749 4751 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); ··· 4757 4759 m->fw_handle = rp->fw_handle; 4758 4760 m->speed = (u8)rp->rport_info.speed; 4759 4761 4760 - /** 4762 + /* 4761 4763 * queue I/O message to firmware 4762 4764 */ 4763 4765 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); ··· 4766 4768 4767 4769 4768 4770 4769 - /** 4771 + /* 4770 4772 * bfa_rport_public 4771 4773 */ 4772 4774 4773 - /** 4775 + /* 4774 4776 * Rport interrupt processing. 4775 4777 */ 4776 4778 void ··· 4812 4814 4813 4815 4814 4816 4815 - /** 4817 + /* 4816 4818 * bfa_rport_api 4817 4819 */ 4818 4820 ··· 4847 4849 { 4848 4850 bfa_assert(rport_info->max_frmsz != 0); 4849 4851 4850 - /** 4852 + /* 4851 4853 * Some JBODs are seen to be not setting PDU size correctly in PLOGI 4852 4854 * responses. Default to minimum size. 4853 4855 */ ··· 4856 4858 rport_info->max_frmsz = FC_MIN_PDUSZ; 4857 4859 } 4858 4860 4859 - bfa_os_assign(rport->rport_info, *rport_info); 4861 + rport->rport_info = *rport_info; 4860 4862 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE); 4861 4863 } 4862 4864 ··· 4888 4890 struct bfa_rport_qos_attr_s *qos_attr) 4889 4891 { 4890 4892 qos_attr->qos_priority = rport->qos_attr.qos_priority; 4891 - qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id); 4893 + qos_attr->qos_flow_id = be32_to_cpu(rport->qos_attr.qos_flow_id); 4892 4894 4893 4895 } 4894 4896 4895 4897 void 4896 4898 bfa_rport_clear_stats(struct bfa_rport_s *rport) 4897 4899 { 4898 - bfa_os_memset(&rport->stats, 0, sizeof(rport->stats)); 4900 + memset(&rport->stats, 0, sizeof(rport->stats)); 4899 4901 } 4900 4902 4901 4903 4902 - /** 4904 + /* 4903 4905 * SGPG related functions 4904 4906 */ 4905 4907 4906 - /** 4908 + /* 4907 4909 * Compute and return memory needed by FCP(im) module. 
4908 4910 */ 4909 4911 static void ··· 4955 4957 bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1))); 4956 4958 4957 4959 for (i = 0; i < mod->num_sgpgs; i++) { 4958 - bfa_os_memset(hsgpg, 0, sizeof(*hsgpg)); 4959 - bfa_os_memset(sgpg, 0, sizeof(*sgpg)); 4960 + memset(hsgpg, 0, sizeof(*hsgpg)); 4961 + memset(sgpg, 0, sizeof(*sgpg)); 4960 4962 4961 4963 hsgpg->sgpg = sgpg; 4962 4964 sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa); ··· 4995 4997 4996 4998 4997 4999 4998 - /** 5000 + /* 4999 5001 * hal_sgpg_public BFA SGPG public functions 5000 5002 */ 5001 5003 ··· 5037 5039 if (list_empty(&mod->sgpg_wait_q)) 5038 5040 return; 5039 5041 5040 - /** 5042 + /* 5041 5043 * satisfy as many waiting requests as possible 5042 5044 */ 5043 5045 do { ··· 5065 5067 5066 5068 wqe->nsgpg_total = wqe->nsgpg = nsgpg; 5067 5069 5068 - /** 5070 + /* 5069 5071 * allocate any left to this one first 5070 5072 */ 5071 5073 if (mod->free_sgpgs) { 5072 - /** 5074 + /* 5073 5075 * no one else is waiting for SGPG 5074 5076 */ 5075 5077 bfa_assert(list_empty(&mod->sgpg_wait_q)); ··· 5103 5105 wqe->cbarg = cbarg; 5104 5106 } 5105 5107 5106 - /** 5108 + /* 5107 5109 * UF related functions 5108 5110 */ 5109 5111 /* ··· 5134 5136 bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz; 5135 5137 bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz; 5136 5138 5137 - bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz); 5139 + memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz); 5138 5140 } 5139 5141 5140 5142 static void ··· 5151 5153 5152 5154 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs; 5153 5155 i++, uf_bp_msg++) { 5154 - bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s)); 5156 + memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s)); 5155 5157 5156 5158 uf_bp_msg->buf_tag = i; 5157 5159 buf_len = sizeof(struct bfa_uf_buf_s); 5158 - uf_bp_msg->buf_len = bfa_os_htons(buf_len); 5160 + uf_bp_msg->buf_len = cpu_to_be16(buf_len); 5159 5161 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST, 5160 5162 bfa_lpuid(ufm->bfa)); 5161 5163 ··· 5171 5173 bfa_sge_to_be(&sge[1]); 5172 5174 } 5173 5175 5174 - /** 5176 + /* 5175 5177 * advance pointer beyond consumed memory 5176 5178 */ 5177 5179 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg; ··· 5192 5194 * Initialize UFs and queue it in UF free queue 5193 5195 */ 5194 5196 for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) { 5195 - bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s)); 5197 + memset(uf, 0, sizeof(struct bfa_uf_s)); 5196 5198 uf->bfa = ufm->bfa; 5197 5199 uf->uf_tag = i; 5198 5200 uf->pb_len = sizeof(struct bfa_uf_buf_s); ··· 5201 5203 list_add_tail(&uf->qe, &ufm->uf_free_q); 5202 5204 } 5203 5205 5204 - /** 5206 + /* 5205 5207 * advance memory pointer 5206 5208 */ 5207 5209 bfa_meminfo_kva(mi) = (u8 *) uf; ··· 5239 5241 { 5240 5242 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); 5241 5243 5242 - bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s)); 5244 + memset(ufm, 0, sizeof(struct bfa_uf_mod_s)); 5243 5245 ufm->bfa = bfa; 5244 5246 ufm->num_ufs = cfg->fwcfg.num_uf_bufs; 5245 5247 INIT_LIST_HEAD(&ufm->uf_free_q); ··· 5277 5279 if (!uf_post_msg) 5278 5280 return BFA_STATUS_FAILED; 5279 5281 5280 - bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag], 5282 + memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag], 5281 5283 sizeof(struct bfi_uf_buf_post_s)); 5282 5284 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP); 5283 5285 ··· 5308 5310 u8 *buf = &uf_buf->d[0]; 5309 5311 struct fchs_s *fchs; 5310 5312 5311 - m->frm_len = bfa_os_ntohs(m->frm_len); 5312 - m->xfr_len = 
bfa_os_ntohs(m->xfr_len); 5313 + m->frm_len = be16_to_cpu(m->frm_len); 5314 + m->xfr_len = be16_to_cpu(m->xfr_len); 5313 5315 5314 5316 fchs = (struct fchs_s *)uf_buf; 5315 5317 ··· 5363 5365 5364 5366 5365 5367 5366 - /** 5368 + /* 5367 5369 * hal_uf_api 5368 5370 */ 5369 5371 5370 - /** 5372 + /* 5371 5373 * Register handler for all unsolicted recieve frames. 5372 5374 * 5373 5375 * @param[in] bfa BFA instance ··· 5383 5385 ufm->cbarg = cbarg; 5384 5386 } 5385 5387 5386 - /** 5388 + /* 5387 5389 * Free an unsolicited frame back to BFA. 5388 5390 * 5389 5391 * @param[in] uf unsolicited frame to be freed ··· 5399 5401 5400 5402 5401 5403 5402 - /** 5404 + /* 5403 5405 * uf_pub BFA uf module public functions 5404 5406 */ 5405 5407 void
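The bulk of the mechanical churn in bfa_svc.c above is the removal of Brocade's private OS-abstraction layer: bfa_os_memset/bfa_os_memcpy become memset/memcpy, bfa_os_assign becomes plain struct assignment, and the bfa_os_hton*/bfa_os_ntoh* wrappers become the kernel's cpu_to_be*/be*_to_cpu helpers. A minimal sketch of the before/after pattern -- the message struct and field names here are illustrative, not the driver's:

    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/byteorder.h>

    struct demo_fw_msg {                    /* hypothetical wire-format message */
            __be16 pdu_size;                /* big-endian on the wire */
            __be32 flow_id;
    };

    static void demo_fill_msg(struct demo_fw_msg *m, u16 pdusz, u32 flow)
    {
            memset(m, 0, sizeof(*m));               /* was bfa_os_memset() */
            m->pdu_size = cpu_to_be16(pdusz);       /* was bfa_os_htons()  */
            m->flow_id  = cpu_to_be32(flow);        /* was bfa_os_htonl()  */
    }

    static u32 demo_read_flow(const struct demo_fw_msg *m)
    {
            return be32_to_cpu(m->flow_id);         /* was bfa_os_ntohl()  */
    }

Beyond shedding a redundant wrapper, the cpu_to_be*/be*_to_cpu names state the intended wire format explicitly (htons() is an identity on big-endian hosts anyway), and the __be16/__be32 annotations let sparse flag any missing conversion.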
+20 -21
drivers/scsi/bfa/bfa_svc.h
··· 22 22 #include "bfi_ms.h" 23 23 24 24 25 - /** 25 + /* 26 26 * Scatter-gather DMA related defines 27 27 */ 28 28 #define BFA_SGPG_MIN (16) 29 29 30 - /** 30 + /* 31 31 * Alignment macro for SG page allocation 32 32 */ 33 33 #define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \ ··· 48 48 union bfi_addr_u sgpg_pa; /* pa of SG page */ 49 49 }; 50 50 51 - /** 51 + /* 52 52 * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of 53 53 * SG pages required. 54 54 */ ··· 75 75 void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe); 76 76 77 77 78 - /** 78 + /* 79 79 * FCXP related defines 80 80 */ 81 81 #define BFA_FCXP_MIN (1) ··· 115 115 116 116 117 117 118 - /** 118 + /* 119 119 * Information needed for a FCXP request 120 120 */ 121 121 struct bfa_fcxp_req_info_s { 122 122 struct bfa_rport_s *bfa_rport; 123 - /** Pointer to the bfa rport that was 123 + /* Pointer to the bfa rport that was 124 124 * returned from bfa_rport_create(). 125 125 * This could be left NULL for WKA or 126 126 * for FCXP interactions before the ··· 137 137 138 138 struct bfa_fcxp_rsp_info_s { 139 139 struct fchs_s rsp_fchs; 140 - /** !< Response frame's FC header will 140 + /* Response frame's FC header will 141 141 * be sent back in this field */ 142 142 u8 rsp_timeout; 143 - /** !< timeout in seconds, 0-no response 144 - */ 143 + /* timeout in seconds, 0-no response */ 145 144 u8 rsvd2[3]; 146 145 u32 rsp_maxlen; /* max response length expected */ 147 146 }; ··· 217 218 void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 218 219 219 220 220 - /** 221 + /* 221 222 * RPORT related defines 222 223 */ 223 224 #define BFA_RPORT_MIN 4 ··· 231 232 232 233 #define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod) 233 234 234 - /** 235 + /* 235 236 * Convert rport tag to RPORT 236 237 */ 237 238 #define BFA_RPORT_FROM_TAG(__bfa, _tag) \ ··· 243 244 */ 244 245 void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 245 246 246 - /** 247 + /* 247 248 * BFA rport information. 248 249 */ 249 250 struct bfa_rport_info_s { ··· 258 259 enum bfa_port_speed speed; /* Rport's current speed */ 259 260 }; 260 261 261 - /** 262 + /* 262 263 * BFA rport data structure 263 264 */ 264 265 struct bfa_rport_s { ··· 281 282 #define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class) 282 283 283 284 284 - /** 285 + /* 285 286 * UF - unsolicited receive related defines 286 287 */ 287 288 ··· 304 305 struct bfa_sge_s sges[BFI_SGE_INLINE_MAX]; 305 306 }; 306 307 307 - /** 308 + /* 308 309 * Callback prototype for unsolicited frame receive handler. 
309 310 * 310 311 * @param[in] cbarg callback arg for receive handler ··· 337 338 338 339 #define BFA_UF_BUFSZ (2 * 1024 + 256) 339 340 340 - /** 341 + /* 341 342 * @todo private 342 343 */ 343 344 struct bfa_uf_buf_s { ··· 345 346 }; 346 347 347 348 348 - /** 349 + /* 349 350 * LPS - bfa lport login/logout service interface 350 351 */ 351 352 struct bfa_lps_s { ··· 396 397 void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 397 398 398 399 399 - /** 400 + /* 400 401 * FCPORT related defines 401 402 */ 402 403 403 404 #define BFA_FCPORT(_bfa) (&((_bfa)->modules.port)) 404 405 typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status); 405 406 406 - /** 407 + /* 407 408 * Link notification data structure 408 409 */ 409 410 struct bfa_fcport_ln_s { ··· 417 418 struct bfa_trunk_attr_s attr; 418 419 }; 419 420 420 - /** 421 + /* 421 422 * BFA FC port data structure 422 423 */ 423 424 struct bfa_fcport_s { ··· 612 613 void *cbarg); 613 614 void bfa_uf_free(struct bfa_uf_s *uf); 614 615 615 - /** 616 + /* 616 617 * bfa lport service api 617 618 */ 618 619
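The /**-to-/* conversions that dominate this header (and the .c file before it) are not cosmetic: in kernel sources a comment opened with /** is claimed by the kernel-doc tooling and must follow its grammar, and these BFA comments never did, so demoting them to plain /* comments removes documentation-build warnings without losing any text. The distinction, sketched with an invented function:

    /**
     * demo_rport_online() - bring a remote port online (kernel-doc form)
     * @rport:      remote port to activate
     *
     * Return: 0 on success, negative errno on failure.
     */

    /*
     * Plain comment: free-form text that the kernel-doc parser ignores.
     * This is the form the comments above were converted to.
     */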
+16 -57
drivers/scsi/bfa/bfad.c
··· 15 15 * General Public License for more details. 16 16 */ 17 17 18 - /** 18 + /* 19 19 * bfad.c Linux driver PCI interface module. 20 20 */ 21 21 #include <linux/module.h> ··· 151 151 static void 152 152 bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event); 153 153 154 - /** 154 + /* 155 155 * Beginning state for the driver instance, awaiting the pci_probe event 156 156 */ 157 157 static void ··· 181 181 } 182 182 } 183 183 184 - /** 184 + /* 185 185 * Driver Instance is created, awaiting event INIT to initialize the bfad 186 186 */ 187 187 static void ··· 364 364 } 365 365 } 366 366 367 - /** 367 + /* 368 368 * BFA callbacks 369 369 */ 370 370 void ··· 376 376 complete(&fcomp->comp); 377 377 } 378 378 379 - /** 379 + /* 380 380 * bfa_init callback 381 381 */ 382 382 void ··· 401 401 complete(&bfad->comp); 402 402 } 403 403 404 - /** 404 + /* 405 405 * BFA_FCS callbacks 406 406 */ 407 407 struct bfad_port_s * ··· 457 457 } 458 458 } 459 459 460 - /** 460 + /* 461 461 * FCS RPORT alloc callback, after successful PLOGI by FCS 462 462 */ 463 463 bfa_status_t ··· 478 478 return rc; 479 479 } 480 480 481 - /** 481 + /* 482 482 * FCS PBC VPORT Create 483 483 */ 484 484 void ··· 663 663 return rc; 664 664 } 665 665 666 - /** 666 + /* 667 667 * Create a vport under a vf. 668 668 */ 669 669 bfa_status_t ··· 712 712 wait_for_completion(vport->comp_del); 713 713 ext_free_vport: 714 714 kfree(vport); 715 - ext: 716 - return rc; 717 - } 718 - 719 - /** 720 - * Create a vf and its base vport implicitely. 721 - */ 722 - bfa_status_t 723 - bfad_vf_create(struct bfad_s *bfad, u16 vf_id, 724 - struct bfa_lport_cfg_s *port_cfg) 725 - { 726 - struct bfad_vf_s *vf; 727 - int rc = BFA_STATUS_OK; 728 - 729 - vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL); 730 - if (!vf) { 731 - rc = BFA_STATUS_FAILED; 732 - goto ext; 733 - } 734 - 735 - rc = bfa_fcs_vf_create(&vf->fcs_vf, &bfad->bfa_fcs, vf_id, port_cfg, 736 - vf); 737 - if (rc != BFA_STATUS_OK) 738 - kfree(vf); 739 715 ext: 740 716 return rc; 741 717 } ··· 859 883 pci_release_regions(pdev); 860 884 pci_disable_device(pdev); 861 885 pci_set_drvdata(pdev, NULL); 862 - } 863 - 864 - void 865 - bfad_fcs_port_cfg(struct bfad_s *bfad) 866 - { 867 - struct bfa_lport_cfg_s port_cfg; 868 - struct bfa_port_attr_s attr; 869 - char symname[BFA_SYMNAME_MAXLEN]; 870 - 871 - sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no); 872 - memcpy(port_cfg.sym_name.symname, symname, strlen(symname)); 873 - bfa_fcport_get_attr(&bfad->bfa, &attr); 874 - port_cfg.nwwn = attr.nwwn; 875 - port_cfg.pwwn = attr.pwwn; 876 886 } 877 887 878 888 bfa_status_t ··· 1051 1089 bfa_fcs_init(&bfad->bfa_fcs); 1052 1090 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1053 1091 1054 - /* PPORT FCS config */ 1055 - bfad_fcs_port_cfg(bfad); 1056 - 1057 1092 retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); 1058 1093 if (retval != BFA_STATUS_OK) { 1059 1094 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) ··· 1140 1181 return 0; 1141 1182 } 1142 1183 1143 - /** 1184 + /* 1144 1185 * BFA driver interrupt functions 1145 1186 */ 1146 1187 irqreturn_t ··· 1199 1240 return IRQ_HANDLED; 1200 1241 } 1201 1242 1202 - /** 1243 + /* 1203 1244 * Initialize the MSIX entry table. 1204 1245 */ 1205 1246 static void ··· 1252 1293 return 0; 1253 1294 } 1254 1295 1255 - /** 1296 + /* 1256 1297 * Setup MSIX based interrupt. 1257 1298 */ 1258 1299 int ··· 1333 1374 } 1334 1375 } 1335 1376 1336 - /** 1377 + /* 1337 1378 * PCI probe entry. 
1338 1379 */ 1339 1380 int ··· 1419 1460 return error; 1420 1461 } 1421 1462 1422 - /** 1463 + /* 1423 1464 * PCI remove entry. 1424 1465 */ 1425 1466 void ··· 1500 1541 .remove = __devexit_p(bfad_pci_remove), 1501 1542 }; 1502 1543 1503 - /** 1544 + /* 1504 1545 * Driver module init. 1505 1546 */ 1506 1547 static int __init ··· 1540 1581 return error; 1541 1582 } 1542 1583 1543 - /** 1584 + /* 1544 1585 * Driver module exit. 1545 1586 */ 1546 1587 static void __exit
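Besides the comment cleanup, bfad.c loses bfad_vf_create() and bfad_fcs_port_cfg() outright -- both had no remaining callers -- while everything that stays keeps driving the driver instance through the state-machine pattern visible at the top of the file: the current state is a function pointer, and sending an event means calling it. A reduced sketch of that pattern, using hypothetical demo_ names:

    struct demo_sm;
    typedef void (*demo_sm_fn)(struct demo_sm *sm, int event);

    struct demo_sm {
            demo_sm_fn state;               /* current state == handler */
    };

    #define demo_sm_set_state(sm, fn)       ((sm)->state = (fn))
    #define demo_sm_send_event(sm, ev)      ((sm)->state((sm), (ev)))

    enum { DEMO_EV_START, DEMO_EV_STOP };

    static void demo_sm_running(struct demo_sm *sm, int event);

    static void demo_sm_uninit(struct demo_sm *sm, int event)
    {
            if (event == DEMO_EV_START)
                    demo_sm_set_state(sm, demo_sm_running);
            /* other events are ignored here, like the "Ignore all other
             * events" default branches in the fcport handlers above */
    }

    static void demo_sm_running(struct demo_sm *sm, int event)
    {
            if (event == DEMO_EV_STOP)
                    demo_sm_set_state(sm, demo_sm_uninit);
    }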
+19 -19
drivers/scsi/bfa/bfad_attr.c
··· 15 15 * General Public License for more details. 16 16 */ 17 17 18 - /** 18 + /* 19 19 * bfa_attr.c Linux driver configuration interface module. 20 20 */ 21 21 22 22 #include "bfad_drv.h" 23 23 #include "bfad_im.h" 24 24 25 - /** 25 + /* 26 26 * FC transport template entry, get SCSI target port ID. 27 27 */ 28 28 void ··· 48 48 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 49 49 } 50 50 51 - /** 51 + /* 52 52 * FC transport template entry, get SCSI target nwwn. 53 53 */ 54 54 void ··· 70 70 if (itnim) 71 71 node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim); 72 72 73 - fc_starget_node_name(starget) = bfa_os_htonll(node_name); 73 + fc_starget_node_name(starget) = cpu_to_be64(node_name); 74 74 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 75 75 } 76 76 77 - /** 77 + /* 78 78 * FC transport template entry, get SCSI target pwwn. 79 79 */ 80 80 void ··· 96 96 if (itnim) 97 97 port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); 98 98 99 - fc_starget_port_name(starget) = bfa_os_htonll(port_name); 99 + fc_starget_port_name(starget) = cpu_to_be64(port_name); 100 100 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 101 101 } 102 102 103 - /** 103 + /* 104 104 * FC transport template entry, get SCSI host port ID. 105 105 */ 106 106 void ··· 114 114 bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); 115 115 } 116 116 117 - /** 117 + /* 118 118 * FC transport template entry, get SCSI host port type. 119 119 */ 120 120 static void ··· 146 146 } 147 147 } 148 148 149 - /** 149 + /* 150 150 * FC transport template entry, get SCSI host port state. 151 151 */ 152 152 static void ··· 183 183 } 184 184 } 185 185 186 - /** 186 + /* 187 187 * FC transport template entry, get SCSI host active fc4s. 188 188 */ 189 189 static void ··· 202 202 fc_host_active_fc4s(shost)[7] = 1; 203 203 } 204 204 205 - /** 205 + /* 206 206 * FC transport template entry, get SCSI host link speed. 207 207 */ 208 208 static void ··· 236 236 } 237 237 } 238 238 239 - /** 239 + /* 240 240 * FC transport template entry, get SCSI host port type. 241 241 */ 242 242 static void ··· 249 249 250 250 fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port); 251 251 252 - fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn); 252 + fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn); 253 253 254 254 } 255 255 256 - /** 256 + /* 257 257 * FC transport template entry, get BFAD statistics. 258 258 */ 259 259 static struct fc_host_statistics * ··· 304 304 return hstats; 305 305 } 306 306 307 - /** 307 + /* 308 308 * FC transport template entry, reset BFAD statistics. 309 309 */ 310 310 static void ··· 331 331 return; 332 332 } 333 333 334 - /** 334 + /* 335 335 * FC transport template entry, get rport loss timeout. 336 336 */ 337 337 static void ··· 347 347 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 348 348 } 349 349 350 - /** 350 + /* 351 351 * FC transport template entry, set rport loss timeout. 352 352 */ 353 353 static void ··· 633 633 .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, 634 634 }; 635 635 636 - /** 636 + /* 637 637 * Scsi_Host_attrs SCSI host attributes 638 638 */ 639 639 static ssize_t ··· 733 733 u64 nwwn; 734 734 735 735 nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port); 736 - return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn)); 736 + return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn)); 737 737 } 738 738 739 739 static ssize_t
+2 -2
drivers/scsi/bfa/bfad_debugfs.c
··· 318 318 regbuf = (u32 *)bfad->regdata; 319 319 spin_lock_irqsave(&bfad->bfad_lock, flags); 320 320 for (i = 0; i < len; i++) { 321 - *regbuf = bfa_reg_read(reg_addr); 321 + *regbuf = readl(reg_addr); 322 322 regbuf++; 323 323 reg_addr += sizeof(u32); 324 324 } ··· 361 361 362 362 reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr); 363 363 spin_lock_irqsave(&bfad->bfad_lock, flags); 364 - bfa_reg_write(reg_addr, val); 364 + writel(val, reg_addr); 365 365 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 366 366 367 367 return nbytes;
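bfa_reg_read() and bfa_reg_write() were thin wrappers over MMIO; the debugfs paths now call the kernel's readl()/writel() directly. One trap worth noting: writel() takes the value first and the address second, which the write hunk above gets right. A sketch of the read side, holding the driver lock the same way the debugfs code does -- demo_ names are placeholders:

    #include <linux/io.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Sketch: copy nwords 32-bit registers into buf under the lock. */
    static void demo_dump_regs(u32 __iomem *reg, u32 *buf, int nwords,
                               spinlock_t *lock)
    {
            unsigned long flags;
            int i;

            spin_lock_irqsave(lock, flags);
            for (i = 0; i < nwords; i++)
                    buf[i] = readl(reg + i);        /* was bfa_reg_read() */
            spin_unlock_irqrestore(lock, flags);
    }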
+2 -3
drivers/scsi/bfa/bfad_drv.h
··· 15 15 * General Public License for more details. 16 16 */ 17 17 18 - /** 18 + /* 19 19 * Contains base driver definitions. 20 20 */ 21 21 22 - /** 22 + /* 23 23 * bfa_drv.h Linux driver data structures. 24 24 */ 25 25 ··· 309 309 void bfad_init_timer(struct bfad_s *bfad); 310 310 int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad); 311 311 void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad); 312 - void bfad_fcs_port_cfg(struct bfad_s *bfad); 313 312 void bfad_drv_uninit(struct bfad_s *bfad); 314 313 int bfad_worker(void *ptr); 315 314 void bfad_debugfs_init(struct bfad_port_s *port);
+36 -35
drivers/scsi/bfa/bfad_im.c
··· 15 15 * General Public License for more details. 16 16 */ 17 17 18 - /** 18 + /* 19 19 * bfad_im.c Linux driver IM module. 20 20 */ 21 21 ··· 164 164 wake_up(wq); 165 165 } 166 166 167 - /** 167 + /* 168 168 * Scsi_Host_template SCSI host template 169 169 */ 170 - /** 170 + /* 171 171 * Scsi_Host template entry, returns BFAD PCI info. 172 172 */ 173 173 static const char * ··· 196 196 return bfa_buf; 197 197 } 198 198 199 - /** 199 + /* 200 200 * Scsi_Host template entry, aborts the specified SCSI command. 201 201 * 202 202 * Returns: SUCCESS or FAILED. ··· 280 280 return rc; 281 281 } 282 282 283 - /** 283 + /* 284 284 * Scsi_Host template entry, resets a LUN and abort its all commands. 285 285 * 286 286 * Returns: SUCCESS or FAILED. ··· 319 319 goto out; 320 320 } 321 321 322 - /** 322 + /* 323 323 * Set host_scribble to NULL to avoid aborting a task command 324 324 * if happens. 325 325 */ ··· 346 346 return rc; 347 347 } 348 348 349 - /** 349 + /* 350 350 * Scsi_Host template entry, resets the bus and abort all commands. 351 351 */ 352 352 static int ··· 396 396 return SUCCESS; 397 397 } 398 398 399 - /** 399 + /* 400 400 * Scsi_Host template entry slave_destroy. 401 401 */ 402 402 static void ··· 406 406 return; 407 407 } 408 408 409 - /** 409 + /* 410 410 * BFA FCS itnim callbacks 411 411 */ 412 412 413 - /** 413 + /* 414 414 * BFA FCS itnim alloc callback, after successful PRLI 415 415 * Context: Interrupt 416 416 */ ··· 433 433 bfad->bfad_flags |= BFAD_RPORT_ONLINE; 434 434 } 435 435 436 - /** 436 + /* 437 437 * BFA FCS itnim free callback. 438 438 * Context: Interrupt. bfad_lock is held 439 439 */ ··· 471 471 queue_work(im->drv_workq, &itnim_drv->itnim_work); 472 472 } 473 473 474 - /** 474 + /* 475 475 * BFA FCS itnim online callback. 476 476 * Context: Interrupt. bfad_lock is held 477 477 */ ··· 492 492 queue_work(im->drv_workq, &itnim_drv->itnim_work); 493 493 } 494 494 495 - /** 495 + /* 496 496 * BFA FCS itnim offline callback. 497 497 * Context: Interrupt. bfad_lock is held 498 498 */ ··· 519 519 queue_work(im->drv_workq, &itnim_drv->itnim_work); 520 520 } 521 521 522 - /** 522 + /* 523 523 * Allocate a Scsi_Host for a port. 524 524 */ 525 525 int ··· 751 751 return BFA_STATUS_OK; 752 752 } 753 753 754 - /** 754 + /* 755 755 * Scsi_Host template entry. 
756 756 * 757 757 * Description: ··· 896 896 return NULL; 897 897 } 898 898 899 - /** 899 + /* 900 900 * Scsi_Host template entry slave_alloc 901 901 */ 902 902 static int ··· 915 915 static u32 916 916 bfad_im_supported_speeds(struct bfa_s *bfa) 917 917 { 918 - struct bfa_ioc_attr_s ioc_attr; 918 + struct bfa_ioc_attr_s *ioc_attr; 919 919 u32 supported_speed = 0; 920 920 921 - bfa_get_attr(bfa, &ioc_attr); 922 - if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { 923 - if (ioc_attr.adapter_attr.is_mezz) { 921 + ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL); 922 + if (!ioc_attr) 923 + return 0; 924 + 925 + bfa_get_attr(bfa, ioc_attr); 926 + if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { 927 + if (ioc_attr->adapter_attr.is_mezz) { 924 928 supported_speed |= FC_PORTSPEED_8GBIT | 925 929 FC_PORTSPEED_4GBIT | 926 930 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; ··· 933 929 FC_PORTSPEED_4GBIT | 934 930 FC_PORTSPEED_2GBIT; 935 931 } 936 - } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) { 932 + } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) { 937 933 supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | 938 934 FC_PORTSPEED_1GBIT; 939 - } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) { 935 + } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) { 940 936 supported_speed |= FC_PORTSPEED_10GBIT; 941 937 } 938 + kfree(ioc_attr); 942 939 return supported_speed; 943 940 } 944 941 ··· 949 944 struct Scsi_Host *host = im_port->shost; 950 945 struct bfad_s *bfad = im_port->bfad; 951 946 struct bfad_port_s *port = im_port->port; 952 - struct bfa_port_attr_s pattr; 953 - struct bfa_lport_attr_s port_attr; 954 947 char symname[BFA_SYMNAME_MAXLEN]; 948 + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 955 949 956 950 fc_host_node_name(host) = 957 - bfa_os_htonll((bfa_fcs_lport_get_nwwn(port->fcs_port))); 951 + cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port))); 958 952 fc_host_port_name(host) = 959 - bfa_os_htonll((bfa_fcs_lport_get_pwwn(port->fcs_port))); 953 + cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port))); 960 954 fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa); 961 955 962 956 fc_host_supported_classes(host) = FC_COS_CLASS3; ··· 968 964 /* For fibre channel services type 0x20 */ 969 965 fc_host_supported_fc4s(host)[7] = 1; 970 966 971 - bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); 972 - strncpy(symname, port_attr.port_cfg.sym_name.symname, 967 + strncpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname, 973 968 BFA_SYMNAME_MAXLEN); 974 969 sprintf(fc_host_symbolic_name(host), "%s", symname); 975 970 976 971 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa); 977 - 978 - bfa_fcport_get_attr(&bfad->bfa, &pattr); 979 - fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize; 972 + fc_host_maxframe_size(host) = fcport->cfg.maxfrsize; 980 973 } 981 974 982 975 static void ··· 984 983 struct bfad_itnim_data_s *itnim_data; 985 984 986 985 rport_ids.node_name = 987 - bfa_os_htonll(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim)); 986 + cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim)); 988 987 rport_ids.port_name = 989 - bfa_os_htonll(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); 988 + cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); 990 989 rport_ids.port_id = 991 990 bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim)); 992 991 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; ··· 1016 1015 return; 
1017 1016 } 1018 1017 1019 - /** 1018 + /* 1020 1019 * Work queue handler using FC transport service 1021 1020 * Context: kernel 1022 1021 */ ··· 1116 1115 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1117 1116 } 1118 1117 1119 - /** 1118 + /* 1120 1119 * Scsi_Host template entry, queue a SCSI command to the BFAD. 1121 1120 */ 1122 1121 static int
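The bfad_im_supported_speeds() rewrite is a stack-usage fix: struct bfa_ioc_attr_s is large, and kernel stacks are only a few kilobytes, so the attribute block moves from an automatic variable to kzalloc()/kfree(); the host-init path below it similarly stops copying bfa_lport_attr_s and bfa_port_attr_s onto the stack and reads the symbolic name and max frame size in place. The shape of the fix, with invented demo_ types standing in for the driver's:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_hw;                                 /* opaque device handle */
    struct demo_attr { u8 blob[1024]; };            /* too big for the stack */

    void demo_get_attr(struct demo_hw *hw, struct demo_attr *attr);

    static u32 demo_supported_speeds(struct demo_hw *hw)
    {
            struct demo_attr *attr;
            u32 speeds = 0;

            attr = kzalloc(sizeof(*attr), GFP_KERNEL);      /* heap, not stack */
            if (!attr)
                    return 0;       /* mirrors the driver: report no speeds on OOM */

            demo_get_attr(hw, attr);
            /* ... derive 'speeds' from *attr ... */

            kfree(attr);
            return speeds;
    }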
+29 -29
drivers/scsi/bfa/bfi.h
··· 23 23 24 24 #pragma pack(1) 25 25 26 - /** 26 + /* 27 27 * BFI FW image type 28 28 */ 29 29 #define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */ ··· 35 35 BFI_IMAGE_MAX, 36 36 }; 37 37 38 - /** 38 + /* 39 39 * Msg header common to all msgs 40 40 */ 41 41 struct bfi_mhdr_s { ··· 68 68 #define BFI_I2H_OPCODE_BASE 128 69 69 #define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE) 70 70 71 - /** 71 + /* 72 72 **************************************************************************** 73 73 * 74 74 * Scatter Gather Element and Page definition ··· 79 79 #define BFI_SGE_INLINE 1 80 80 #define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1) 81 81 82 - /** 82 + /* 83 83 * SG Flags 84 84 */ 85 85 enum { ··· 90 90 BFI_SGE_PGDLEN = 2, /* cumulative data length for page */ 91 91 }; 92 92 93 - /** 93 + /* 94 94 * DMA addresses 95 95 */ 96 96 union bfi_addr_u { ··· 100 100 } a32; 101 101 }; 102 102 103 - /** 103 + /* 104 104 * Scatter Gather Element 105 105 */ 106 106 struct bfi_sge_s { ··· 116 116 union bfi_addr_u sga; 117 117 }; 118 118 119 - /** 119 + /* 120 120 * Scatter Gather Page 121 121 */ 122 122 #define BFI_SGPG_DATA_SGES 7 ··· 139 139 u32 pl[BFI_LMSG_PL_WSZ]; 140 140 }; 141 141 142 - /** 142 + /* 143 143 * Mailbox message structure 144 144 */ 145 145 #define BFI_MBMSG_SZ 7 ··· 148 148 u32 pl[BFI_MBMSG_SZ]; 149 149 }; 150 150 151 - /** 151 + /* 152 152 * Message Classes 153 153 */ 154 154 enum bfi_mclass { ··· 186 186 #define BFI_BOOT_LOADER_BIOS 1 187 187 #define BFI_BOOT_LOADER_UEFI 2 188 188 189 - /** 189 + /* 190 190 *---------------------------------------------------------------------- 191 191 * IOC 192 192 *---------------------------------------------------------------------- ··· 208 208 BFI_IOC_I2H_HBEAT = BFA_I2HM(5), 209 209 }; 210 210 211 - /** 211 + /* 212 212 * BFI_IOC_H2I_GETATTR_REQ message 213 213 */ 214 214 struct bfi_ioc_getattr_req_s { ··· 242 242 u32 card_type; /* card type */ 243 243 }; 244 244 245 - /** 245 + /* 246 246 * BFI_IOC_I2H_GETATTR_REPLY message 247 247 */ 248 248 struct bfi_ioc_getattr_reply_s { ··· 251 251 u8 rsvd[3]; 252 252 }; 253 253 254 - /** 254 + /* 255 255 * Firmware memory page offsets 256 256 */ 257 257 #define BFI_IOC_SMEM_PG0_CB (0x40) 258 258 #define BFI_IOC_SMEM_PG0_CT (0x180) 259 259 260 - /** 260 + /* 261 261 * Firmware statistic offset 262 262 */ 263 263 #define BFI_IOC_FWSTATS_OFF (0x6B40) 264 264 #define BFI_IOC_FWSTATS_SZ (4096) 265 265 266 - /** 266 + /* 267 267 * Firmware trace offset 268 268 */ 269 269 #define BFI_IOC_TRC_OFF (0x4b00) ··· 280 280 u32 md5sum[BFI_IOC_MD5SUM_SZ]; 281 281 }; 282 282 283 - /** 283 + /* 284 284 * BFI_IOC_I2H_READY_EVENT message 285 285 */ 286 286 struct bfi_ioc_rdy_event_s { ··· 294 294 u32 hb_count; /* current heart beat count */ 295 295 }; 296 296 297 - /** 297 + /* 298 298 * IOC hardware/firmware state 299 299 */ 300 300 enum bfi_ioc_state { ··· 340 340 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \ 341 341 BFI_ADAPTER_UNSUPP)) 342 342 343 - /** 343 + /* 344 344 * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages 345 345 */ 346 346 struct bfi_ioc_ctrl_req_s { ··· 352 352 #define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s; 353 353 #define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s; 354 354 355 - /** 355 + /* 356 356 * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages 357 357 */ 358 358 struct bfi_ioc_ctrl_reply_s { ··· 364 364 #define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s; 365 365 366 366 #define BFI_IOC_MSGSZ 8 367 - /** 367 + /* 368 368 * H2I Messages 369 369 */ 
370 370 union bfi_ioc_h2i_msg_u { ··· 375 375 u32 mboxmsg[BFI_IOC_MSGSZ]; 376 376 }; 377 377 378 - /** 378 + /* 379 379 * I2H Messages 380 380 */ 381 381 union bfi_ioc_i2h_msg_u { ··· 385 385 }; 386 386 387 387 388 - /** 388 + /* 389 389 *---------------------------------------------------------------------- 390 390 * PBC 391 391 *---------------------------------------------------------------------- ··· 394 394 #define BFI_PBC_MAX_BLUNS 8 395 395 #define BFI_PBC_MAX_VPORTS 16 396 396 397 - /** 397 + /* 398 398 * PBC boot lun configuration 399 399 */ 400 400 struct bfi_pbc_blun_s { ··· 402 402 lun_t tgt_lun; 403 403 }; 404 404 405 - /** 405 + /* 406 406 * PBC virtual port configuration 407 407 */ 408 408 struct bfi_pbc_vport_s { ··· 410 410 wwn_t vp_nwwn; 411 411 }; 412 412 413 - /** 413 + /* 414 414 * BFI pre-boot configuration information 415 415 */ 416 416 struct bfi_pbc_s { ··· 427 427 struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS]; 428 428 }; 429 429 430 - /** 430 + /* 431 431 *---------------------------------------------------------------------- 432 432 * MSGQ 433 433 *---------------------------------------------------------------------- ··· 531 531 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), 532 532 }; 533 533 534 - /** 534 + /* 535 535 * Generic REQ type 536 536 */ 537 537 struct bfi_port_generic_req_s { ··· 540 540 u32 rsvd; 541 541 }; 542 542 543 - /** 543 + /* 544 544 * Generic RSP type 545 545 */ 546 546 struct bfi_port_generic_rsp_s { ··· 550 550 u32 msgtag; /* msgtag for reply */ 551 551 }; 552 552 553 - /** 553 + /* 554 554 * BFI_PORT_H2I_GET_STATS_REQ 555 555 */ 556 556 struct bfi_port_get_stats_req_s {
+25 -25
drivers/scsi/bfa/bfi_ms.h
··· 41 41 u16 rsvd_1; 42 42 u32 endian_sig; /* endian signature of host */ 43 43 44 - /** 44 + /* 45 45 * Request and response circular queue base addresses, size and 46 46 * shadow index pointers. 47 47 */ ··· 58 58 struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */ 59 59 }; 60 60 61 - /** 61 + /* 62 62 * Boot target wwn information for this port. This contains either the stored 63 63 * or discovered boot target port wwns for the port. 64 64 */ ··· 75 75 struct bfi_pbc_s pbc_cfg; 76 76 }; 77 77 78 - /** 78 + /* 79 79 * BFI_IOCFC_H2I_CFG_REQ message 80 80 */ 81 81 struct bfi_iocfc_cfg_req_s { ··· 84 84 }; 85 85 86 86 87 - /** 87 + /* 88 88 * BFI_IOCFC_I2H_CFG_REPLY message 89 89 */ 90 90 struct bfi_iocfc_cfg_reply_s { ··· 95 95 }; 96 96 97 97 98 - /** 98 + /* 99 99 * BFI_IOCFC_H2I_SET_INTR_REQ message 100 100 */ 101 101 struct bfi_iocfc_set_intr_req_s { ··· 107 107 }; 108 108 109 109 110 - /** 110 + /* 111 111 * BFI_IOCFC_H2I_UPDATEQ_REQ message 112 112 */ 113 113 struct bfi_iocfc_updateq_req_s { ··· 119 119 }; 120 120 121 121 122 - /** 122 + /* 123 123 * BFI_IOCFC_I2H_UPDATEQ_RSP message 124 124 */ 125 125 struct bfi_iocfc_updateq_rsp_s { ··· 129 129 }; 130 130 131 131 132 - /** 132 + /* 133 133 * H2I Messages 134 134 */ 135 135 union bfi_iocfc_h2i_msg_u { ··· 140 140 }; 141 141 142 142 143 - /** 143 + /* 144 144 * I2H Messages 145 145 */ 146 146 union bfi_iocfc_i2h_msg_u { ··· 173 173 }; 174 174 175 175 176 - /** 176 + /* 177 177 * Generic REQ type 178 178 */ 179 179 struct bfi_fcport_req_s { ··· 181 181 u32 msgtag; /* msgtag for reply */ 182 182 }; 183 183 184 - /** 184 + /* 185 185 * Generic RSP type 186 186 */ 187 187 struct bfi_fcport_rsp_s { ··· 191 191 u32 msgtag; /* msgtag for reply */ 192 192 }; 193 193 194 - /** 194 + /* 195 195 * BFI_FCPORT_H2I_ENABLE_REQ 196 196 */ 197 197 struct bfi_fcport_enable_req_s { ··· 205 205 u32 rsvd2; 206 206 }; 207 207 208 - /** 208 + /* 209 209 * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ 210 210 */ 211 211 struct bfi_fcport_set_svc_params_req_s { ··· 214 214 u16 rsvd; 215 215 }; 216 216 217 - /** 217 + /* 218 218 * BFI_FCPORT_I2H_EVENT 219 219 */ 220 220 struct bfi_fcport_event_s { ··· 222 222 struct bfa_port_link_s link_state; 223 223 }; 224 224 225 - /** 225 + /* 226 226 * BFI_FCPORT_I2H_TRUNK_SCN 227 227 */ 228 228 struct bfi_fcport_trunk_link_s { ··· 243 243 struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS]; 244 244 }; 245 245 246 - /** 246 + /* 247 247 * fcport H2I message 248 248 */ 249 249 union bfi_fcport_h2i_msg_u { ··· 255 255 struct bfi_fcport_req_s *pstatsclear; 256 256 }; 257 257 258 - /** 258 + /* 259 259 * fcport I2H message 260 260 */ 261 261 union bfi_fcport_i2h_msg_u { ··· 279 279 280 280 #define BFA_FCXP_MAX_SGES 2 281 281 282 - /** 282 + /* 283 283 * FCXP send request structure 284 284 */ 285 285 struct bfi_fcxp_send_req_s { ··· 299 299 struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */ 300 300 }; 301 301 302 - /** 302 + /* 303 303 * FCXP send response structure 304 304 */ 305 305 struct bfi_fcxp_send_rsp_s { ··· 565 565 BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2), /* ABORT rsp */ 566 566 }; 567 567 568 - /** 568 + /* 569 569 * IO command DIF info 570 570 */ 571 571 struct bfi_ioim_dif_s { 572 572 u32 dif_info[4]; 573 573 }; 574 574 575 - /** 575 + /* 576 576 * FCP IO messages overview 577 577 * 578 578 * @note ··· 587 587 u16 rport_hdl; /* itnim/rport firmware handle */ 588 588 struct fcp_cmnd_s cmnd; /* IO request info */ 589 589 590 - /** 590 + /* 591 591 * SG elements array within the IO request 
must be double word 592 592 * aligned. This aligment is required to optimize SGM setup for the IO. 593 593 */ ··· 598 598 struct bfi_ioim_dif_s dif; 599 599 }; 600 600 601 - /** 601 + /* 602 602 * This table shows various IO status codes from firmware and their 603 603 * meaning. Host driver can use these status codes to further process 604 604 * IO completions. ··· 684 684 }; 685 685 686 686 #define BFI_IOIM_SNSLEN (256) 687 - /** 687 + /* 688 688 * I/O response message 689 689 */ 690 690 struct bfi_ioim_rsp_s { ··· 746 746 BFI_TSKIM_STS_NOT_SUPP = 4, 747 747 BFI_TSKIM_STS_FAILED = 5, 748 748 749 - /** 749 + /* 750 750 * Defined by BFA 751 751 */ 752 752 BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */
+3
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
··· 692 692 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port), 693 693 atid, tid, status, csk, csk->state, csk->flags); 694 694 695 + if (status == CPL_ERR_RTX_NEG_ADVICE) 696 + goto rel_skb; 697 + 695 698 if (status && status != CPL_ERR_TCAM_FULL && 696 699 status != CPL_ERR_CONN_EXIST && 697 700 status != CPL_ERR_ARP_MISS)
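The new early exit treats CPL_ERR_RTX_NEG_ADVICE as advisory: the adapter is reporting retransmit trouble on the active open, not a terminal failure, so the reply is simply dropped. Read together with the pre-existing condition, the handler now sorts statuses three ways; a sketch of that triage, where the demo_ names are invented and only the CPL_ERR_* constants are cxgb4's:

    enum demo_rpl_action { DEMO_DROP, DEMO_HANDLE_QUIETLY, DEMO_HANDLE_LOUDLY };

    static enum demo_rpl_action demo_classify_act_open_rpl(int status)
    {
            if (status == CPL_ERR_RTX_NEG_ADVICE)
                    return DEMO_DROP;               /* transient hint: ignore */

            if (status == 0 ||
                status == CPL_ERR_TCAM_FULL ||      /* expected conditions,   */
                status == CPL_ERR_CONN_EXIST ||     /* processed without a    */
                status == CPL_ERR_ARP_MISS)         /* log message            */
                    return DEMO_HANDLE_QUIETLY;

            return DEMO_HANDLE_LOUDLY;              /* unexpected: log, then handle */
    }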
+2
drivers/scsi/device_handler/scsi_dh_rdac.c
··· 773 773 {"ENGENIO", "INF-01-00"}, 774 774 {"STK", "FLEXLINE 380"}, 775 775 {"SUN", "CSM100_R_FC"}, 776 + {"SUN", "STK6580_6780"}, 777 + {"SUN", "SUN_6180"}, 776 778 {NULL, NULL}, 777 779 }; 778 780
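Supporting the STK6580_6780 and SUN_6180 arrays takes nothing beyond these two table rows: the rdac handler decides whether to attach by walking this {NULL, NULL}-terminated list and prefix-matching the device's INQUIRY vendor and model strings. The match loop, sketched with placeholder names:

    #include <linux/string.h>
    #include <linux/types.h>

    struct demo_dev_id { const char *vendor, *model; };

    static bool demo_dev_in_list(const struct demo_dev_id *tbl,
                                 const char *vendor, const char *model)
    {
            int i;

            for (i = 0; tbl[i].vendor; i++) {       /* {NULL, NULL} ends the table */
                    if (!strncmp(vendor, tbl[i].vendor, strlen(tbl[i].vendor)) &&
                        !strncmp(model, tbl[i].model, strlen(tbl[i].model)))
                            return true;
            }
            return false;
    }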
+7 -11
drivers/scsi/fcoe/fcoe.c
··· 117 117 118 118 static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *); 119 119 120 - module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_AUTO, S_IWUSR); 120 + module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR); 121 121 __MODULE_PARM_TYPE(create, "string"); 122 122 MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface"); 123 123 module_param_call(create_vn2vn, fcoe_create, NULL, ··· 1243 1243 struct fcoe_interface *fcoe; 1244 1244 struct fc_frame_header *fh; 1245 1245 struct fcoe_percpu_s *fps; 1246 - struct fcoe_port *port; 1247 1246 struct ethhdr *eh; 1248 1247 unsigned int cpu; 1249 1248 ··· 1261 1262 skb_tail_pointer(skb), skb_end_pointer(skb), 1262 1263 skb->csum, skb->dev ? skb->dev->name : "<NULL>"); 1263 1264 1264 - /* check for mac addresses */ 1265 1265 eh = eth_hdr(skb); 1266 - port = lport_priv(lport); 1267 - if (compare_ether_addr(eh->h_dest, port->data_src_addr) && 1268 - compare_ether_addr(eh->h_dest, fcoe->ctlr.ctl_src_addr) && 1269 - compare_ether_addr(eh->h_dest, (u8[6])FC_FCOE_FLOGI_MAC)) { 1270 - FCOE_NETDEV_DBG(netdev, "wrong destination mac address:%pM\n", 1271 - eh->h_dest); 1272 - goto err; 1273 - } 1274 1266 1275 1267 if (is_fip_mode(&fcoe->ctlr) && 1276 1268 compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) { ··· 1280 1290 1281 1291 skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); 1282 1292 fh = (struct fc_frame_header *) skb_transport_header(skb); 1293 + 1294 + if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) { 1295 + FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n", 1296 + eh->h_dest); 1297 + goto err; 1298 + } 1283 1299 1284 1300 fr = fcoe_dev_from_skb(skb); 1285 1301 fr->fr_dev = lport;
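The interesting fcoe.c hunk replaces destination-MAC filtering in the receive path with a check that the low three bytes of the destination MAC match the FC frame's d_id. Under FPMA addressing an FCoE port's MAC is the 24-bit FC-MAP prefix followed by its 24-bit FC ID, so comparing against the embedded ID accepts frames for any address the port legitimately owns -- including one still being transitioned -- where the old fixed three-way MAC comparison could not. A sketch of the mapping the check relies on; demo_make_fpma() is illustrative:

    #include <linux/types.h>

    /* Sketch: FPMA layout -- <24-bit FC-MAP><24-bit FC port ID>. */
    static void demo_make_fpma(u8 mac[6], u32 fc_map, u32 port_id)
    {
            mac[0] = fc_map >> 16;
            mac[1] = fc_map >> 8;
            mac[2] = fc_map;
            mac[3] = port_id >> 16;         /* hence ntoh24(&mac[3]) == port_id, */
            mac[4] = port_id >> 8;          /* which is what the new check tests */
            mac[5] = port_id;               /* against the frame's fh_d_id       */
    }

The same file also flips the default mode of the 'create' module parameter from FIP_MODE_AUTO to FIP_MODE_FABRIC.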
+1 -1
drivers/scsi/fcoe/libfcoe.c
··· 2296 2296 { 2297 2297 struct fip_header *fiph; 2298 2298 enum fip_vn2vn_subcode sub; 2299 - union { 2299 + struct { 2300 2300 struct fc_rport_priv rdata; 2301 2301 struct fcoe_rport frport; 2302 2302 } buf;
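
The libfcoe one-liner is a bug fix, not style: the VN2VN probe path fills both rdata and frport, and in a union the two members share offset 0, so writing one corrupts the other; a struct lays them out back to back. A tiny demonstration with simplified stand-in types:

    #include <stdio.h>

    /* Simplified stand-ins for fc_rport_priv and fcoe_rport. */
    struct rdata_stub  { long r[6]; };
    struct frport_stub { long f[3]; };

    union  overlapped { struct rdata_stub rdata; struct frport_stub frport; };
    struct sequential { struct rdata_stub rdata; struct frport_stub frport; };

    int main(void)
    {
            /* In the union both members start at offset 0, so filling
             * rdata clobbers frport; the struct gives each its own slot. */
            printf("union=%zu struct=%zu\n",
                   sizeof(union overlapped), sizeof(struct sequential));
            return 0;
    }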
+8
drivers/scsi/gdth.c
··· 4177 4177 ha = gdth_find_ha(gen.ionode); 4178 4178 if (!ha) 4179 4179 return -EFAULT; 4180 + 4181 + if (gen.data_len > INT_MAX) 4182 + return -EINVAL; 4183 + if (gen.sense_len > INT_MAX) 4184 + return -EINVAL; 4185 + if (gen.data_len + gen.sense_len > INT_MAX) 4186 + return -EINVAL; 4187 + 4180 4188 if (gen.data_len + gen.sense_len != 0) { 4181 4189 if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len, 4182 4190 FALSE, &paddr)))
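
The gdth checks reject oversized and overflowing length pairs before gdth_ioctl_alloc() runs; both fields arrive from userspace, so an unchecked sum could wrap and yield a short allocation. A userspace sketch of the same validation, assuming 32-bit unsigned length fields as in the ioctl structure:

    #include <limits.h>
    #include <stdint.h>

    /* Returns 0 when the user-supplied pair is safe to add and pass
     * to the allocator; each test mirrors one check in the hunk. */
    int validate_lengths(uint32_t data_len, uint32_t sense_len)
    {
            if (data_len > INT_MAX)
                    return -1;
            if (sense_len > INT_MAX)
                    return -1;
            if ((uint64_t)data_len + sense_len > INT_MAX) /* no wrap */
                    return -1;
            return 0;
    }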
+2
drivers/scsi/ipr.c
··· 9025 9025 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 }, 9026 9026 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 9027 9027 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 }, 9028 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 9029 + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 }, 9028 9030 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, 9029 9031 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 }, 9030 9032 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+1
drivers/scsi/ipr.h
··· 82 82 83 83 #define IPR_SUBS_DEV_ID_57B4 0x033B 84 84 #define IPR_SUBS_DEV_ID_57B2 0x035F 85 + #define IPR_SUBS_DEV_ID_57C4 0x0354 85 86 #define IPR_SUBS_DEV_ID_57C6 0x0357 86 87 #define IPR_SUBS_DEV_ID_57CC 0x035C 87 88
+2 -3
drivers/scsi/libfc/fc_disc.c
··· 684 684 { 685 685 struct fc_disc *disc = &lport->disc; 686 686 687 - if (disc) { 687 + if (disc->pending) 688 688 cancel_delayed_work_sync(&disc->disc_work); 689 - fc_disc_stop_rports(disc); 690 - } 689 + fc_disc_stop_rports(disc); 691 690 } 692 691 693 692 /**
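
In the fc_disc hunk, `if (disc)` could never be false: disc is embedded in struct fc_lport, so &lport->disc is always non-NULL. Gating on disc->pending tests the state that actually varies, and fc_disc_stop_rports() now runs unconditionally. A two-line demonstration of why the old test was dead code:

    #include <stdio.h>

    struct disc  { int pending; };
    struct lport { int state; struct disc disc; };

    int main(void)
    {
            struct lport lp = { 0 };
            struct disc *d = &lp.disc;  /* embedded member: never NULL */

            printf("%d\n", d != NULL);  /* always 1, so the old check
                                         * could not fail */
            return 0;
    }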
+8 -16
drivers/scsi/libfc/fc_fcp.c
··· 58 58 #define FC_SRB_WRITE (1 << 0) 59 59 60 60 /* 61 - * The SCp.ptr should be tested and set under the host lock. NULL indicates 62 - * that the command has been retruned to the scsi layer. 61 + * The SCp.ptr should be tested and set under the scsi_pkt_queue lock 63 62 */ 64 63 #define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr) 65 64 #define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) ··· 1879 1880 1880 1881 lport = fsp->lp; 1881 1882 si = fc_get_scsi_internal(lport); 1882 - if (!fsp->cmd) 1883 - return; 1884 1883 1885 1884 /* 1886 1885 * if can_queue ramp down is done then try can_queue ramp up ··· 1888 1891 fc_fcp_can_queue_ramp_up(lport); 1889 1892 1890 1893 sc_cmd = fsp->cmd; 1891 - fsp->cmd = NULL; 1892 - 1893 - if (!sc_cmd->SCp.ptr) 1894 - return; 1895 - 1896 1894 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; 1897 1895 switch (fsp->status_code) { 1898 1896 case FC_COMPLETE: ··· 1963 1971 break; 1964 1972 } 1965 1973 1966 - if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE) { 1967 - sc_cmd->result = (DID_REQUEUE << 16); 1968 - FC_FCP_DBG(fsp, "Returning DID_REQUEUE to scsi-ml\n"); 1969 - } 1974 + if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE) 1975 + sc_cmd->result = (DID_TRANSPORT_DISRUPTED << 16); 1970 1976 1971 1977 spin_lock_irqsave(&si->scsi_queue_lock, flags); 1972 1978 list_del(&fsp->list); 1973 - spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 1974 1979 sc_cmd->SCp.ptr = NULL; 1980 + spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 1975 1981 sc_cmd->scsi_done(sc_cmd); 1976 1982 1977 1983 /* release ref from initial allocation in queue command */ ··· 1987 1997 { 1988 1998 struct fc_fcp_pkt *fsp; 1989 1999 struct fc_lport *lport; 2000 + struct fc_fcp_internal *si; 1990 2001 int rc = FAILED; 1991 2002 unsigned long flags; 1992 2003 ··· 1997 2006 else if (!lport->link_up) 1998 2007 return rc; 1999 2008 2000 - spin_lock_irqsave(lport->host->host_lock, flags); 2009 + si = fc_get_scsi_internal(lport); 2010 + spin_lock_irqsave(&si->scsi_queue_lock, flags); 2001 2011 fsp = CMD_SP(sc_cmd); 2002 2012 if (!fsp) { 2003 2013 /* command completed while scsi eh was setting up */ ··· 2007 2015 } 2008 2016 /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ 2009 2017 fc_fcp_pkt_hold(fsp); 2010 - spin_unlock_irqrestore(lport->host->host_lock, flags); 2018 + spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 2011 2019 2012 2020 if (fc_fcp_lock_pkt(fsp)) { 2013 2021 /* completed while we were waiting for timer to be deleted */
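
The fc_fcp changes move SCp.ptr handling from host_lock to scsi_queue_lock and clear the pointer before that lock is dropped, so the eh abort path, which now takes the same lock, can no longer observe a half-completed command. A sketch of the pattern with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stddef.h>

    struct cmd { void *pkt; };          /* pkt stands in for SCp.ptr */
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Completion: unlink and clear the back-pointer while still
     * holding the lock, as the reordered hunk above now does. */
    void complete_cmd(struct cmd *c)
    {
            pthread_mutex_lock(&queue_lock);
            c->pkt = NULL;              /* invisible to the abort path */
            pthread_mutex_unlock(&queue_lock);
    }

    /* Abort: the same lock makes the NULL test atomic with respect
     * to completion; NULL means the command already finished. */
    void *grab_pkt_for_abort(struct cmd *c)
    {
            void *pkt;

            pthread_mutex_lock(&queue_lock);
            pkt = c->pkt;
            pthread_mutex_unlock(&queue_lock);
            return pkt;
    }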
+4 -8
drivers/scsi/libfc/fc_lport.c
··· 1447 1447 } 1448 1448 1449 1449 did = fc_frame_did(fp); 1450 - 1451 - if (!did) { 1452 - FC_LPORT_DBG(lport, "Bad FLOGI response\n"); 1453 - goto out; 1454 - } 1455 - 1456 - if (fc_frame_payload_op(fp) == ELS_LS_ACC) { 1450 + if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) { 1457 1451 flp = fc_frame_payload_get(fp, sizeof(*flp)); 1458 1452 if (flp) { 1459 1453 mfs = ntohs(flp->fl_csp.sp_bb_data) & ··· 1486 1492 fc_lport_enter_dns(lport); 1487 1493 } 1488 1494 } 1489 - } else 1495 + } else { 1496 + FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n"); 1490 1497 fc_lport_error(lport, fp); 1498 + } 1491 1499 1492 1500 out: 1493 1501 fc_frame_free(fp);
+2 -2
drivers/scsi/libfc/fc_rport.c
··· 196 196 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) 197 197 { 198 198 if (timeout) 199 - rport->dev_loss_tmo = timeout + 5; 199 + rport->dev_loss_tmo = timeout; 200 200 else 201 - rport->dev_loss_tmo = 30; 201 + rport->dev_loss_tmo = 1; 202 202 } 203 203 EXPORT_SYMBOL(fc_set_rport_loss_tmo); 204 204
+9 -3
drivers/scsi/lpfc/lpfc.h
··· 202 202 uint32_t elsRcvPRLO; 203 203 uint32_t elsRcvPRLI; 204 204 uint32_t elsRcvLIRR; 205 + uint32_t elsRcvRLS; 205 206 uint32_t elsRcvRPS; 206 207 uint32_t elsRcvRPL; 207 208 uint32_t elsRcvRRQ; 209 + uint32_t elsRcvRTV; 210 + uint32_t elsRcvECHO; 208 211 uint32_t elsXmitFLOGI; 209 212 uint32_t elsXmitFDISC; 210 213 uint32_t elsXmitPLOGI; ··· 552 549 #define ELS_XRI_ABORT_EVENT 0x40 553 550 #define ASYNC_EVENT 0x80 554 551 #define LINK_DISABLED 0x100 /* Link disabled by user */ 555 - #define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */ 556 - #define HBA_FIP_SUPPORT 0x400 /* FIP support in HBA */ 557 - #define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */ 552 + #define FCF_TS_INPROG 0x200 /* FCF table scan in progress */ 553 + #define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */ 554 + #define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */ 555 + #define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */ 556 + #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ 558 557 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ 559 558 struct lpfc_dmabuf slim2p; 560 559 ··· 578 573 /* These fields used to be binfo */ 579 574 uint32_t fc_pref_DID; /* preferred D_ID */ 580 575 uint8_t fc_pref_ALPA; /* preferred AL_PA */ 576 + uint32_t fc_edtovResol; /* E_D_TOV timer resolution */ 581 577 uint32_t fc_edtov; /* E_D_TOV timer value */ 582 578 uint32_t fc_arbtov; /* ARB_TOV timer value */ 583 579 uint32_t fc_ratov; /* R_A_TOV timer value */
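
The lpfc.h hunk splits FCF_DISC_INPROGRESS into separate table-scan and roundrobin bits and renumbers the flags above them; HBA_DEVLOSS_TMO is new. All of these share the single hba_flag word tested under phba->hbalock. A sketch of the kind of predicate the split enables (locking elided):

    #include <stdint.h>
    #include <stdbool.h>

    #define FCF_TS_INPROG   0x200   /* FCF table scan in progress */
    #define FCF_RR_INPROG   0x400   /* roundrobin FLOGI in progress */
    #define HBA_DEVLOSS_TMO 0x2000  /* devloss tmo hit last FCF user */

    /* The FIP engine is idle only when neither discovery bit is set;
     * in the driver this test runs under phba->hbalock. */
    bool fip_engine_idle(uint32_t hba_flag)
    {
            return !(hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG));
    }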
+6 -1
drivers/scsi/lpfc/lpfc_attr.c
··· 3789 3789 break; 3790 3790 case MBX_SECURITY_MGMT: 3791 3791 case MBX_AUTH_PORT: 3792 - if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) 3792 + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { 3793 + printk(KERN_WARNING "mbox_read:Command 0x%x " 3794 + "is not permitted\n", pmb->mbxCommand); 3795 + sysfs_mbox_idle(phba); 3796 + spin_unlock_irq(&phba->hbalock); 3793 3797 return -EPERM; 3798 + } 3794 3799 break; 3795 3800 case MBX_READ_SPARM64: 3796 3801 case MBX_READ_LA:
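
In lpfc_attr.c, the bare -EPERM return for MBX_SECURITY_MGMT / MBX_AUTH_PORT on SLI-4 apparently bailed out without releasing the sysfs mbox state or the lock held at that point in the function, which is what the added sysfs_mbox_idle() and spin_unlock_irq() calls undo. A generic sketch of that undo-in-reverse-order error path, with a pthread mutex and a flag standing in for the driver state:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int mbox_busy;

    /* Every early return must undo the state taken so far, in
     * reverse order: busy flag first, then the lock. */
    int issue_checked(int permitted)
    {
            pthread_mutex_lock(&lock);
            mbox_busy = 1;
            if (!permitted) {
                    mbox_busy = 0;
                    pthread_mutex_unlock(&lock);
                    return -1;      /* -EPERM in the driver */
            }
            /* ... the mailbox command would be issued here ... */
            mbox_busy = 0;
            pthread_mutex_unlock(&lock);
            return 0;
    }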
+2 -2
drivers/scsi/lpfc/lpfc_bsg.c
··· 3142 3142 job = menlo->set_job; 3143 3143 job->dd_data = NULL; /* so timeout handler does not reply */ 3144 3144 3145 - spin_lock_irqsave(&phba->hbalock, flags); 3145 + spin_lock(&phba->hbalock); 3146 3146 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 3147 3147 if (cmdiocbq->context2 && rspiocbq) 3148 3148 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 3149 3149 &rspiocbq->iocb, sizeof(IOCB_t)); 3150 - spin_unlock_irqrestore(&phba->hbalock, flags); 3150 + spin_unlock(&phba->hbalock); 3151 3151 3152 3152 bmp = menlo->bmp; 3153 3153 rspiocbq = menlo->rspiocbq;
+4
drivers/scsi/lpfc/lpfc_crtn.h
··· 44 44 void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 45 45 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 46 46 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 47 + void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *); 48 + 47 49 void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); 48 50 void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *, 49 51 struct lpfc_nodelist *); ··· 231 229 uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); 232 230 int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); 233 231 void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); 232 + int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); 234 233 235 234 int lpfc_mem_alloc(struct lpfc_hba *, int align); 236 235 void lpfc_mem_free(struct lpfc_hba *); ··· 274 271 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 275 272 void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); 276 273 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 274 + void lpfc_sli_hba_iocb_abort(struct lpfc_hba *); 277 275 void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); 278 276 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, 279 277 struct lpfc_dmabuf *);
+382 -57
drivers/scsi/lpfc/lpfc_els.c
··· 177 177 (elscmd == ELS_CMD_LOGO))) 178 178 switch (elscmd) { 179 179 case ELS_CMD_FLOGI: 180 - elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) 180 + elsiocb->iocb_flag |= 181 + ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) 181 182 & LPFC_FIP_ELS_ID_MASK); 182 183 break; 183 184 case ELS_CMD_FDISC: 184 - elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) 185 + elsiocb->iocb_flag |= 186 + ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) 185 187 & LPFC_FIP_ELS_ID_MASK); 186 188 break; 187 189 case ELS_CMD_LOGO: 188 - elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) 190 + elsiocb->iocb_flag |= 191 + ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) 189 192 & LPFC_FIP_ELS_ID_MASK); 190 193 break; 191 194 } ··· 520 517 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 521 518 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; 522 519 520 + phba->fc_edtovResol = sp->cmn.edtovResolution; 523 521 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; 524 522 525 523 if (phba->fc_topology == TOPOLOGY_LOOP) { 526 524 spin_lock_irq(shost->host_lock); 527 525 vport->fc_flag |= FC_PUBLIC_LOOP; 528 526 spin_unlock_irq(shost->host_lock); 529 - } else { 530 - /* 531 - * If we are a N-port connected to a Fabric, fixup sparam's so 532 - * logins to devices on remote loops work. 533 - */ 534 - vport->fc_sparam.cmn.altBbCredit = 1; 535 527 } 536 528 537 529 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; ··· 583 585 lpfc_unreg_rpi(vport, np); 584 586 } 585 587 lpfc_cleanup_pending_mbox(vport); 588 + 589 + if (phba->sli_rev == LPFC_SLI_REV4) 590 + lpfc_sli4_unreg_all_rpis(vport); 591 + 586 592 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 587 593 lpfc_mbx_unreg_vpi(vport); 588 594 spin_lock_irq(shost->host_lock); ··· 802 800 803 801 if (irsp->ulpStatus) { 804 802 /* 805 - * In case of FIP mode, perform round robin FCF failover 803 + * In case of FIP mode, perform roundrobin FCF failover 806 804 * due to new FCF discovery 807 805 */ 808 806 if ((phba->hba_flag & HBA_FIP_SUPPORT) && ··· 810 808 (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) && 811 809 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) { 812 810 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 813 - "2611 FLOGI failed on registered " 814 - "FCF record fcf_index(%d), status: " 815 - "x%x/x%x, tmo:x%x, trying to perform " 816 - "round robin failover\n", 811 + "2611 FLOGI failed on FCF (x%x), " 812 + "status:x%x/x%x, tmo:x%x, perform " 813 + "roundrobin FCF failover\n", 817 814 phba->fcf.current_rec.fcf_indx, 818 815 irsp->ulpStatus, irsp->un.ulpWord[4], 819 816 irsp->ulpTimeout); 820 817 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 821 - if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { 822 - /* 823 - * Exhausted the eligible FCF record list, 824 - * fail through to retry FLOGI on current 825 - * FCF record. 
826 - */ 827 - lpfc_printf_log(phba, KERN_WARNING, 828 - LOG_FIP | LOG_ELS, 829 - "2760 Completed one round " 830 - "of FLOGI FCF round robin " 831 - "failover list, retry FLOGI " 832 - "on currently registered " 833 - "FCF index:%d\n", 834 - phba->fcf.current_rec.fcf_indx); 835 - } else { 836 - lpfc_printf_log(phba, KERN_INFO, 837 - LOG_FIP | LOG_ELS, 838 - "2794 FLOGI FCF round robin " 839 - "failover to FCF index x%x\n", 840 - fcf_index); 841 - rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, 842 - fcf_index); 843 - if (rc) 844 - lpfc_printf_log(phba, KERN_WARNING, 845 - LOG_FIP | LOG_ELS, 846 - "2761 FLOGI round " 847 - "robin FCF failover " 848 - "read FCF failed " 849 - "rc:x%x, fcf_index:" 850 - "%d\n", rc, 851 - phba->fcf.current_rec.fcf_indx); 852 - else 853 - goto out; 854 - } 818 + rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 819 + if (rc) 820 + goto out; 855 821 } 856 822 857 823 /* FLOGI failure */ ··· 909 939 lpfc_nlp_put(ndlp); 910 940 spin_lock_irq(&phba->hbalock); 911 941 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 942 + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 912 943 spin_unlock_irq(&phba->hbalock); 913 944 goto out; 914 945 } ··· 918 947 if (phba->hba_flag & HBA_FIP_SUPPORT) 919 948 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 920 949 LOG_ELS, 921 - "2769 FLOGI successful on FCF " 922 - "record: current_fcf_index:" 923 - "x%x, terminate FCF round " 924 - "robin failover process\n", 950 + "2769 FLOGI to FCF (x%x) " 951 + "completed successfully\n", 925 952 phba->fcf.current_rec.fcf_indx); 926 953 spin_lock_irq(&phba->hbalock); 927 954 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 955 + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 928 956 spin_unlock_irq(&phba->hbalock); 929 957 goto out; 930 958 } ··· 1145 1175 return 0; 1146 1176 } 1147 1177 1148 - if (lpfc_issue_els_flogi(vport, ndlp, 0)) 1178 + if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 1149 1179 /* This decrement of reference count to node shall kick off 1150 1180 * the release of the node. 1151 1181 */ 1152 1182 lpfc_nlp_put(ndlp); 1153 - 1183 + return 0; 1184 + } 1154 1185 return 1; 1155 1186 } 1156 1187 ··· 1615 1644 pcmd += sizeof(uint32_t); 1616 1645 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1617 1646 sp = (struct serv_parm *) pcmd; 1647 + 1648 + /* 1649 + * If we are a N-port connected to a Fabric, fix-up paramm's so logins 1650 + * to device on remote loops work. 1651 + */ 1652 + if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 1653 + sp->cmn.altBbCredit = 1; 1618 1654 1619 1655 if (sp->cmn.fcphLow < FC_PH_4_3) 1620 1656 sp->cmn.fcphLow = FC_PH_4_3; ··· 3904 3926 } 3905 3927 3906 3928 /** 3929 + * lpfc_els_rsp_echo_acc - Issue echo acc response 3930 + * @vport: pointer to a virtual N_Port data structure. 3931 + * @data: pointer to echo data to return in the accept. 3932 + * @oldiocb: pointer to the original lpfc command iocb data structure. 3933 + * @ndlp: pointer to a node-list data structure. 
3934 + * 3935 + * Return code 3936 + * 0 - Successfully issued acc echo response 3937 + * 1 - Failed to issue acc echo response 3938 + **/ 3939 + static int 3940 + lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 3941 + struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 3942 + { 3943 + struct lpfc_hba *phba = vport->phba; 3944 + struct lpfc_iocbq *elsiocb; 3945 + struct lpfc_sli *psli; 3946 + uint8_t *pcmd; 3947 + uint16_t cmdsize; 3948 + int rc; 3949 + 3950 + psli = &phba->sli; 3951 + cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 3952 + 3953 + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3954 + ndlp->nlp_DID, ELS_CMD_ACC); 3955 + if (!elsiocb) 3956 + return 1; 3957 + 3958 + elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */ 3959 + /* Xmit ECHO ACC response tag <ulpIoTag> */ 3960 + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3961 + "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 3962 + elsiocb->iotag, elsiocb->iocb.ulpContext); 3963 + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3964 + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3965 + pcmd += sizeof(uint32_t); 3966 + memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 3967 + 3968 + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3969 + "Issue ACC ECHO: did:x%x flg:x%x", 3970 + ndlp->nlp_DID, ndlp->nlp_flag, 0); 3971 + 3972 + phba->fc_stat.elsXmitACC++; 3973 + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3974 + lpfc_nlp_put(ndlp); 3975 + elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, 3976 + * it could be freed */ 3977 + 3978 + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3979 + if (rc == IOCB_ERROR) { 3980 + lpfc_els_free_iocb(phba, elsiocb); 3981 + return 1; 3982 + } 3983 + return 0; 3984 + } 3985 + 3986 + /** 3907 3987 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 3908 3988 * @vport: pointer to a host virtual N_Port data structure. 3909 3989 * ··· 4720 4684 } 4721 4685 4722 4686 /** 4687 + * lpfc_els_rcv_echo - Process an unsolicited echo iocb 4688 + * @vport: pointer to a host virtual N_Port data structure. 4689 + * @cmdiocb: pointer to lpfc command iocb data structure. 4690 + * @ndlp: pointer to a node-list data structure. 4691 + * 4692 + * Return code 4693 + * 0 - Successfully processed echo iocb (currently always return 0) 4694 + **/ 4695 + static int 4696 + lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4697 + struct lpfc_nodelist *ndlp) 4698 + { 4699 + uint8_t *pcmd; 4700 + 4701 + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 4702 + 4703 + /* skip over first word of echo command to find echo data */ 4704 + pcmd += sizeof(uint32_t); 4705 + 4706 + lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 4707 + return 0; 4708 + } 4709 + 4710 + /** 4723 4711 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 4724 4712 * @vport: pointer to a host virtual N_Port data structure. 4725 4713 * @cmdiocb: pointer to lpfc command iocb data structure. ··· 4792 4732 struct lpfc_nodelist *ndlp) 4793 4733 { 4794 4734 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4735 + } 4736 + 4737 + /** 4738 + * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 4739 + * @phba: pointer to lpfc hba data structure. 4740 + * @pmb: pointer to the driver internal queue element for mailbox command. 4741 + * 4742 + * This routine is the completion callback function for the MBX_READ_LNK_STAT 4743 + * mailbox command. 
This callback function is to actually send the Accept 4744 + * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It 4745 + * collects the link statistics from the completion of the MBX_READ_LNK_STAT 4746 + * mailbox command, constructs the RPS response with the link statistics 4747 + * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 4748 + * response to the RPS. 4749 + * 4750 + * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4751 + * will be incremented by 1 for holding the ndlp and the reference to ndlp 4752 + * will be stored into the context1 field of the IOCB for the completion 4753 + * callback function to the RPS Accept Response ELS IOCB command. 4754 + * 4755 + **/ 4756 + static void 4757 + lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4758 + { 4759 + MAILBOX_t *mb; 4760 + IOCB_t *icmd; 4761 + struct RLS_RSP *rls_rsp; 4762 + uint8_t *pcmd; 4763 + struct lpfc_iocbq *elsiocb; 4764 + struct lpfc_nodelist *ndlp; 4765 + uint16_t xri; 4766 + uint32_t cmdsize; 4767 + 4768 + mb = &pmb->u.mb; 4769 + 4770 + ndlp = (struct lpfc_nodelist *) pmb->context2; 4771 + xri = (uint16_t) ((unsigned long)(pmb->context1)); 4772 + pmb->context1 = NULL; 4773 + pmb->context2 = NULL; 4774 + 4775 + if (mb->mbxStatus) { 4776 + mempool_free(pmb, phba->mbox_mem_pool); 4777 + return; 4778 + } 4779 + 4780 + cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 4781 + mempool_free(pmb, phba->mbox_mem_pool); 4782 + elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 4783 + lpfc_max_els_tries, ndlp, 4784 + ndlp->nlp_DID, ELS_CMD_ACC); 4785 + 4786 + /* Decrement the ndlp reference count from previous mbox command */ 4787 + lpfc_nlp_put(ndlp); 4788 + 4789 + if (!elsiocb) 4790 + return; 4791 + 4792 + icmd = &elsiocb->iocb; 4793 + icmd->ulpContext = xri; 4794 + 4795 + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4796 + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4797 + pcmd += sizeof(uint32_t); /* Skip past command */ 4798 + rls_rsp = (struct RLS_RSP *)pcmd; 4799 + 4800 + rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 4801 + rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 4802 + rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 4803 + rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 4804 + rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 4805 + rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 4806 + 4807 + /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 4808 + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 4809 + "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 4810 + "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 4811 + elsiocb->iotag, elsiocb->iocb.ulpContext, 4812 + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4813 + ndlp->nlp_rpi); 4814 + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4815 + phba->fc_stat.elsXmitACC++; 4816 + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 4817 + lpfc_els_free_iocb(phba, elsiocb); 4795 4818 } 4796 4819 4797 4820 /** ··· 4970 4827 } 4971 4828 4972 4829 /** 4973 - * lpfc_els_rcv_rps - Process an unsolicited rps iocb 4830 + * lpfc_els_rcv_rls - Process an unsolicited rls iocb 4831 + * @vport: pointer to a host virtual N_Port data structure. 4832 + * @cmdiocb: pointer to lpfc command iocb data structure. 4833 + * @ndlp: pointer to a node-list data structure. 
4834 + * 4835 + * This routine processes Read Port Status (RPL) IOCB received as an 4836 + * ELS unsolicited event. It first checks the remote port state. If the 4837 + * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 4838 + * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 4839 + * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command 4840 + * for reading the HBA link statistics. It is for the callback function, 4841 + * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command 4842 + * to actually sending out RPL Accept (ACC) response. 4843 + * 4844 + * Return codes 4845 + * 0 - Successfully processed rls iocb (currently always return 0) 4846 + **/ 4847 + static int 4848 + lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4849 + struct lpfc_nodelist *ndlp) 4850 + { 4851 + struct lpfc_hba *phba = vport->phba; 4852 + LPFC_MBOXQ_t *mbox; 4853 + struct lpfc_dmabuf *pcmd; 4854 + struct ls_rjt stat; 4855 + 4856 + if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 4857 + (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 4858 + /* reject the unsolicited RPS request and done with it */ 4859 + goto reject_out; 4860 + 4861 + pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4862 + 4863 + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 4864 + if (mbox) { 4865 + lpfc_read_lnk_stat(phba, mbox); 4866 + mbox->context1 = 4867 + (void *)((unsigned long) cmdiocb->iocb.ulpContext); 4868 + mbox->context2 = lpfc_nlp_get(ndlp); 4869 + mbox->vport = vport; 4870 + mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 4871 + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 4872 + != MBX_NOT_FINISHED) 4873 + /* Mbox completion will send ELS Response */ 4874 + return 0; 4875 + /* Decrement reference count used for the failed mbox 4876 + * command. 4877 + */ 4878 + lpfc_nlp_put(ndlp); 4879 + mempool_free(mbox, phba->mbox_mem_pool); 4880 + } 4881 + reject_out: 4882 + /* issue rejection response */ 4883 + stat.un.b.lsRjtRsvd0 = 0; 4884 + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 4885 + stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 4886 + stat.un.b.vendorUnique = 0; 4887 + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 4888 + return 0; 4889 + } 4890 + 4891 + /** 4892 + * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb 4893 + * @vport: pointer to a host virtual N_Port data structure. 4894 + * @cmdiocb: pointer to lpfc command iocb data structure. 4895 + * @ndlp: pointer to a node-list data structure. 4896 + * 4897 + * This routine processes Read Timout Value (RTV) IOCB received as an 4898 + * ELS unsolicited event. It first checks the remote port state. If the 4899 + * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 4900 + * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 4901 + * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout 4902 + * Value (RTV) unsolicited IOCB event. 4903 + * 4904 + * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4905 + * will be incremented by 1 for holding the ndlp and the reference to ndlp 4906 + * will be stored into the context1 field of the IOCB for the completion 4907 + * callback function to the RPS Accept Response ELS IOCB command. 
4908 + * 4909 + * Return codes 4910 + * 0 - Successfully processed rtv iocb (currently always return 0) 4911 + **/ 4912 + static int 4913 + lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4914 + struct lpfc_nodelist *ndlp) 4915 + { 4916 + struct lpfc_hba *phba = vport->phba; 4917 + struct ls_rjt stat; 4918 + struct RTV_RSP *rtv_rsp; 4919 + uint8_t *pcmd; 4920 + struct lpfc_iocbq *elsiocb; 4921 + uint32_t cmdsize; 4922 + 4923 + 4924 + if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 4925 + (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 4926 + /* reject the unsolicited RPS request and done with it */ 4927 + goto reject_out; 4928 + 4929 + cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 4930 + elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 4931 + lpfc_max_els_tries, ndlp, 4932 + ndlp->nlp_DID, ELS_CMD_ACC); 4933 + 4934 + if (!elsiocb) 4935 + return 1; 4936 + 4937 + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4938 + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4939 + pcmd += sizeof(uint32_t); /* Skip past command */ 4940 + 4941 + /* use the command's xri in the response */ 4942 + elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; 4943 + 4944 + rtv_rsp = (struct RTV_RSP *)pcmd; 4945 + 4946 + /* populate RTV payload */ 4947 + rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 4948 + rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 4949 + bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 4950 + bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 4951 + rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 4952 + 4953 + /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 4954 + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 4955 + "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 4956 + "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 4957 + "Data: x%x x%x x%x\n", 4958 + elsiocb->iotag, elsiocb->iocb.ulpContext, 4959 + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4960 + ndlp->nlp_rpi, 4961 + rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 4962 + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4963 + phba->fc_stat.elsXmitACC++; 4964 + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 4965 + lpfc_els_free_iocb(phba, elsiocb); 4966 + return 0; 4967 + 4968 + reject_out: 4969 + /* issue rejection response */ 4970 + stat.un.b.lsRjtRsvd0 = 0; 4971 + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 4972 + stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 4973 + stat.un.b.vendorUnique = 0; 4974 + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 4975 + return 0; 4976 + } 4977 + 4978 + /* lpfc_els_rcv_rps - Process an unsolicited rps iocb 4974 4979 * @vport: pointer to a host virtual N_Port data structure. 4975 4980 * @cmdiocb: pointer to lpfc command iocb data structure. 4976 4981 * @ndlp: pointer to a node-list data structure. 
··· 5308 5017 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5309 5018 lp = (uint32_t *) pcmd->virt; 5310 5019 rpl = (RPL *) (lp + 1); 5311 - 5312 5020 maxsize = be32_to_cpu(rpl->maxsize); 5313 5021 5314 5022 /* We support only one port */ ··· 6126 5836 if (newnode) 6127 5837 lpfc_nlp_put(ndlp); 6128 5838 break; 5839 + case ELS_CMD_RLS: 5840 + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 5841 + "RCV RLS: did:x%x/ste:x%x flg:x%x", 5842 + did, vport->port_state, ndlp->nlp_flag); 5843 + 5844 + phba->fc_stat.elsRcvRLS++; 5845 + lpfc_els_rcv_rls(vport, elsiocb, ndlp); 5846 + if (newnode) 5847 + lpfc_nlp_put(ndlp); 5848 + break; 6129 5849 case ELS_CMD_RPS: 6130 5850 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6131 5851 "RCV RPS: did:x%x/ste:x%x flg:x%x", ··· 6166 5866 if (newnode) 6167 5867 lpfc_nlp_put(ndlp); 6168 5868 break; 5869 + case ELS_CMD_RTV: 5870 + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 5871 + "RCV RTV: did:x%x/ste:x%x flg:x%x", 5872 + did, vport->port_state, ndlp->nlp_flag); 5873 + phba->fc_stat.elsRcvRTV++; 5874 + lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 5875 + if (newnode) 5876 + lpfc_nlp_put(ndlp); 5877 + break; 6169 5878 case ELS_CMD_RRQ: 6170 5879 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6171 5880 "RCV RRQ: did:x%x/ste:x%x flg:x%x", ··· 6182 5873 6183 5874 phba->fc_stat.elsRcvRRQ++; 6184 5875 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 5876 + if (newnode) 5877 + lpfc_nlp_put(ndlp); 5878 + break; 5879 + case ELS_CMD_ECHO: 5880 + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 5881 + "RCV ECHO: did:x%x/ste:x%x flg:x%x", 5882 + did, vport->port_state, ndlp->nlp_flag); 5883 + 5884 + phba->fc_stat.elsRcvECHO++; 5885 + lpfc_els_rcv_echo(vport, elsiocb, ndlp); 6185 5886 if (newnode) 6186 5887 lpfc_nlp_put(ndlp); 6187 5888 break; ··· 6489 6170 6490 6171 default: 6491 6172 /* Try to recover from this error */ 6173 + if (phba->sli_rev == LPFC_SLI_REV4) 6174 + lpfc_sli4_unreg_all_rpis(vport); 6492 6175 lpfc_mbx_unreg_vpi(vport); 6493 6176 spin_lock_irq(shost->host_lock); 6494 6177 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; ··· 6758 6437 lpfc_unreg_rpi(vport, np); 6759 6438 } 6760 6439 lpfc_cleanup_pending_mbox(vport); 6440 + 6441 + if (phba->sli_rev == LPFC_SLI_REV4) 6442 + lpfc_sli4_unreg_all_rpis(vport); 6443 + 6761 6444 lpfc_mbx_unreg_vpi(vport); 6762 6445 spin_lock_irq(shost->host_lock); 6763 6446 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; ··· 6777 6452 * to update the MAC address. 6778 6453 */ 6779 6454 lpfc_register_new_vport(phba, vport, ndlp); 6780 - return ; 6455 + goto out; 6781 6456 } 6782 6457 6783 6458 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
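
Among the lpfc_els.c additions, lpfc_els_rcv_rtv() answers a Read Timeout Value request by packing R_A_TOV (reported in milliseconds), E_D_TOV, and a qualifier word whose bit 27 flags the E_D_TOV resolution, matching the RTV_RSP layout added to lpfc_hw.h below. A standalone sketch of that packing; to_be32() is a little-endian-host stand-in for cpu_to_be32():

    #include <stdint.h>

    struct rtv_rsp {            /* wire format is big endian */
            uint32_t ratov;     /* R_A_TOV in milliseconds */
            uint32_t edtov;     /* E_D_TOV */
            uint32_t qtov;      /* qualifier bits */
    };

    uint32_t to_be32(uint32_t v)    /* byte swap; assumes LE host */
    {
            return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
                   ((v & 0x00ff0000u) >> 8)  | (v >> 24);
    }

    /* edtov_res mirrors the qtov_edtovres bit (bit 27): set when
     * E_D_TOV is carried at nanosecond rather than ms resolution. */
    void fill_rtv(struct rtv_rsp *r, uint32_t ratov_sec,
                  uint32_t edtov, int edtov_res)
    {
            r->ratov = to_be32(ratov_sec * 1000);   /* seconds -> ms */
            r->edtov = to_be32(edtov);
            r->qtov  = to_be32(edtov_res ? (1u << 27) : 0);
    }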
+316 -123
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 20 20 *******************************************************************/ 21 21 22 22 #include <linux/blkdev.h> 23 + #include <linux/delay.h> 23 24 #include <linux/slab.h> 24 25 #include <linux/pci.h> 25 26 #include <linux/kthread.h> ··· 64 63 static void lpfc_disc_timeout_handler(struct lpfc_vport *); 65 64 static void lpfc_disc_flush_list(struct lpfc_vport *vport); 66 65 static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 66 + static int lpfc_fcf_inuse(struct lpfc_hba *); 67 67 68 68 void 69 69 lpfc_terminate_rport_io(struct fc_rport *rport) ··· 162 160 return; 163 161 } 164 162 165 - /* 166 - * This function is called from the worker thread when dev_loss_tmo 167 - * expire. 168 - */ 169 - static void 163 + /** 164 + * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler 165 + * @ndlp: Pointer to remote node object. 166 + * 167 + * This function is called from the worker thread when devloss timeout timer 168 + * expires. For SLI4 host, this routine shall return 1 when at lease one 169 + * remote node, including this @ndlp, is still in use of FCF; otherwise, this 170 + * routine shall return 0 when there is no remote node is still in use of FCF 171 + * when devloss timeout happened to this @ndlp. 172 + **/ 173 + static int 170 174 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) 171 175 { 172 176 struct lpfc_rport_data *rdata; ··· 183 175 int put_node; 184 176 int put_rport; 185 177 int warn_on = 0; 178 + int fcf_inuse = 0; 186 179 187 180 rport = ndlp->rport; 188 181 189 182 if (!rport) 190 - return; 183 + return fcf_inuse; 191 184 192 185 rdata = rport->dd_data; 193 186 name = (uint8_t *) &ndlp->nlp_portname; 194 187 vport = ndlp->vport; 195 188 phba = vport->phba; 189 + 190 + if (phba->sli_rev == LPFC_SLI_REV4) 191 + fcf_inuse = lpfc_fcf_inuse(phba); 196 192 197 193 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 198 194 "rport devlosstmo:did:x%x type:x%x id:x%x", ··· 221 209 lpfc_nlp_put(ndlp); 222 210 if (put_rport) 223 211 put_device(&rport->dev); 224 - return; 212 + return fcf_inuse; 225 213 } 226 214 227 215 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { ··· 232 220 *name, *(name+1), *(name+2), *(name+3), 233 221 *(name+4), *(name+5), *(name+6), *(name+7), 234 222 ndlp->nlp_DID); 235 - return; 223 + return fcf_inuse; 236 224 } 237 225 238 226 if (ndlp->nlp_type & NLP_FABRIC) { ··· 245 233 lpfc_nlp_put(ndlp); 246 234 if (put_rport) 247 235 put_device(&rport->dev); 248 - return; 236 + return fcf_inuse; 249 237 } 250 238 251 239 if (ndlp->nlp_sid != NLP_NO_SID) { ··· 292 280 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE)) 293 281 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 294 282 283 + return fcf_inuse; 284 + } 285 + 286 + /** 287 + * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler 288 + * @phba: Pointer to hba context object. 289 + * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler. 290 + * @nlp_did: remote node identifer with devloss timeout. 291 + * 292 + * This function is called from the worker thread after invoking devloss 293 + * timeout handler and releasing the reference count for the ndlp with 294 + * which the devloss timeout was handled for SLI4 host. For the devloss 295 + * timeout of the last remote node which had been in use of FCF, when this 296 + * routine is invoked, it shall be guaranteed that none of the remote are 297 + * in-use of FCF. 
When devloss timeout to the last remote using the FCF, 298 + * if the FIP engine is neither in FCF table scan process nor roundrobin 299 + * failover process, the in-use FCF shall be unregistered. If the FIP 300 + * engine is in FCF discovery process, the devloss timeout state shall 301 + * be set for either the FCF table scan process or roundrobin failover 302 + * process to unregister the in-use FCF. 303 + **/ 304 + static void 305 + lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, 306 + uint32_t nlp_did) 307 + { 308 + /* If devloss timeout happened to a remote node when FCF had no 309 + * longer been in-use, do nothing. 310 + */ 311 + if (!fcf_inuse) 312 + return; 313 + 314 + if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) { 315 + spin_lock_irq(&phba->hbalock); 316 + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 317 + if (phba->hba_flag & HBA_DEVLOSS_TMO) { 318 + spin_unlock_irq(&phba->hbalock); 319 + return; 320 + } 321 + phba->hba_flag |= HBA_DEVLOSS_TMO; 322 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 323 + "2847 Last remote node (x%x) using " 324 + "FCF devloss tmo\n", nlp_did); 325 + } 326 + if (phba->fcf.fcf_flag & FCF_REDISC_PROG) { 327 + spin_unlock_irq(&phba->hbalock); 328 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 329 + "2868 Devloss tmo to FCF rediscovery " 330 + "in progress\n"); 331 + return; 332 + } 333 + if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) { 334 + spin_unlock_irq(&phba->hbalock); 335 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 336 + "2869 Devloss tmo to idle FIP engine, " 337 + "unreg in-use FCF and rescan.\n"); 338 + /* Unregister in-use FCF and rescan */ 339 + lpfc_unregister_fcf_rescan(phba); 340 + return; 341 + } 342 + spin_unlock_irq(&phba->hbalock); 343 + if (phba->hba_flag & FCF_TS_INPROG) 344 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 345 + "2870 FCF table scan in progress\n"); 346 + if (phba->hba_flag & FCF_RR_INPROG) 347 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 348 + "2871 FLOGI roundrobin FCF failover " 349 + "in progress\n"); 350 + } 295 351 lpfc_unregister_unused_fcf(phba); 296 352 } 297 353 ··· 488 408 struct lpfc_work_evt *evtp = NULL; 489 409 struct lpfc_nodelist *ndlp; 490 410 int free_evt; 411 + int fcf_inuse; 412 + uint32_t nlp_did; 491 413 492 414 spin_lock_irq(&phba->hbalock); 493 415 while (!list_empty(&phba->work_list)) { ··· 509 427 break; 510 428 case LPFC_EVT_DEV_LOSS: 511 429 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 512 - lpfc_dev_loss_tmo_handler(ndlp); 430 + fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp); 513 431 free_evt = 0; 514 432 /* decrement the node reference count held for 515 433 * this queued work 516 434 */ 435 + nlp_did = ndlp->nlp_DID; 517 436 lpfc_nlp_put(ndlp); 437 + if (phba->sli_rev == LPFC_SLI_REV4) 438 + lpfc_sli4_post_dev_loss_tmo_handler(phba, 439 + fcf_inuse, 440 + nlp_did); 518 441 break; 519 442 case LPFC_EVT_ONLINE: 520 443 if (phba->link_state < LPFC_LINK_DOWN) ··· 794 707 : NLP_EVT_DEVICE_RECOVERY); 795 708 } 796 709 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { 710 + if (phba->sli_rev == LPFC_SLI_REV4) 711 + lpfc_sli4_unreg_all_rpis(vport); 797 712 lpfc_mbx_unreg_vpi(vport); 798 713 spin_lock_irq(shost->host_lock); 799 714 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; ··· 1110 1021 "2017 REG_FCFI mbxStatus error x%x " 1111 1022 "HBA state x%x\n", 1112 1023 mboxq->u.mb.mbxStatus, vport->port_state); 1113 - mempool_free(mboxq, phba->mbox_mem_pool); 1114 - return; 1024 + goto fail_out; 1115 1025 } 1116 1026 1117 1027 /* Start FCoE discovery by 
sending a FLOGI. */ ··· 1119 1031 spin_lock_irq(&phba->hbalock); 1120 1032 phba->fcf.fcf_flag |= FCF_REGISTERED; 1121 1033 spin_unlock_irq(&phba->hbalock); 1034 + 1122 1035 /* If there is a pending FCoE event, restart FCF table scan. */ 1123 - if (lpfc_check_pending_fcoe_event(phba, 1)) { 1124 - mempool_free(mboxq, phba->mbox_mem_pool); 1125 - return; 1126 - } 1036 + if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) 1037 + goto fail_out; 1038 + 1039 + /* Mark successful completion of FCF table scan */ 1127 1040 spin_lock_irq(&phba->hbalock); 1128 1041 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1129 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1130 - spin_unlock_irq(&phba->hbalock); 1131 - if (vport->port_state != LPFC_FLOGI) 1042 + phba->hba_flag &= ~FCF_TS_INPROG; 1043 + if (vport->port_state != LPFC_FLOGI) { 1044 + phba->hba_flag |= FCF_RR_INPROG; 1045 + spin_unlock_irq(&phba->hbalock); 1132 1046 lpfc_initial_flogi(vport); 1047 + goto out; 1048 + } 1049 + spin_unlock_irq(&phba->hbalock); 1050 + goto out; 1133 1051 1052 + fail_out: 1053 + spin_lock_irq(&phba->hbalock); 1054 + phba->hba_flag &= ~FCF_RR_INPROG; 1055 + spin_unlock_irq(&phba->hbalock); 1056 + out: 1134 1057 mempool_free(mboxq, phba->mbox_mem_pool); 1135 - return; 1136 1058 } 1137 1059 1138 1060 /** ··· 1339 1241 int rc; 1340 1242 1341 1243 spin_lock_irq(&phba->hbalock); 1342 - 1343 1244 /* If the FCF is not availabe do nothing. */ 1344 1245 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1345 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1246 + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1346 1247 spin_unlock_irq(&phba->hbalock); 1347 1248 return; 1348 1249 } ··· 1349 1252 /* The FCF is already registered, start discovery */ 1350 1253 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1351 1254 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1352 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1353 - spin_unlock_irq(&phba->hbalock); 1354 - if (phba->pport->port_state != LPFC_FLOGI) 1255 + phba->hba_flag &= ~FCF_TS_INPROG; 1256 + if (phba->pport->port_state != LPFC_FLOGI) { 1257 + phba->hba_flag |= FCF_RR_INPROG; 1258 + spin_unlock_irq(&phba->hbalock); 1355 1259 lpfc_initial_flogi(phba->pport); 1260 + return; 1261 + } 1262 + spin_unlock_irq(&phba->hbalock); 1356 1263 return; 1357 1264 } 1358 1265 spin_unlock_irq(&phba->hbalock); 1359 1266 1360 - fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, 1361 - GFP_KERNEL); 1267 + fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1362 1268 if (!fcf_mbxq) { 1363 1269 spin_lock_irq(&phba->hbalock); 1364 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1270 + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1365 1271 spin_unlock_irq(&phba->hbalock); 1366 1272 return; 1367 1273 } ··· 1375 1275 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1376 1276 if (rc == MBX_NOT_FINISHED) { 1377 1277 spin_lock_irq(&phba->hbalock); 1378 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1278 + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1379 1279 spin_unlock_irq(&phba->hbalock); 1380 1280 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1381 1281 } ··· 1593 1493 * FCF discovery, no need to restart FCF discovery. 
1594 1494 */ 1595 1495 if ((phba->link_state >= LPFC_LINK_UP) && 1596 - (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 1496 + (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 1597 1497 return 0; 1598 1498 1599 1499 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, ··· 1617 1517 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 1618 1518 } else { 1619 1519 /* 1620 - * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS 1520 + * Do not continue FCF discovery and clear FCF_TS_INPROG 1621 1521 * flag 1622 1522 */ 1623 1523 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 1624 1524 "2833 Stop FCF discovery process due to link " 1625 1525 "state change (x%x)\n", phba->link_state); 1626 1526 spin_lock_irq(&phba->hbalock); 1627 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1527 + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1628 1528 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); 1629 1529 spin_unlock_irq(&phba->hbalock); 1630 1530 } ··· 1829 1729 } 1830 1730 1831 1731 /** 1732 + * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf 1733 + * @vport: Pointer to vport object. 1734 + * @fcf_index: index to next fcf. 1735 + * 1736 + * This function processing the roundrobin fcf failover to next fcf index. 1737 + * When this function is invoked, there will be a current fcf registered 1738 + * for flogi. 1739 + * Return: 0 for continue retrying flogi on currently registered fcf; 1740 + * 1 for stop flogi on currently registered fcf; 1741 + */ 1742 + int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) 1743 + { 1744 + struct lpfc_hba *phba = vport->phba; 1745 + int rc; 1746 + 1747 + if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { 1748 + spin_lock_irq(&phba->hbalock); 1749 + if (phba->hba_flag & HBA_DEVLOSS_TMO) { 1750 + spin_unlock_irq(&phba->hbalock); 1751 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1752 + "2872 Devloss tmo with no eligible " 1753 + "FCF, unregister in-use FCF (x%x) " 1754 + "and rescan FCF table\n", 1755 + phba->fcf.current_rec.fcf_indx); 1756 + lpfc_unregister_fcf_rescan(phba); 1757 + goto stop_flogi_current_fcf; 1758 + } 1759 + /* Mark the end to FLOGI roundrobin failover */ 1760 + phba->hba_flag &= ~FCF_RR_INPROG; 1761 + /* Allow action to new fcf asynchronous event */ 1762 + phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 1763 + spin_unlock_irq(&phba->hbalock); 1764 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1765 + "2865 No FCF available, stop roundrobin FCF " 1766 + "failover and change port state:x%x/x%x\n", 1767 + phba->pport->port_state, LPFC_VPORT_UNKNOWN); 1768 + phba->pport->port_state = LPFC_VPORT_UNKNOWN; 1769 + goto stop_flogi_current_fcf; 1770 + } else { 1771 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, 1772 + "2794 Try FLOGI roundrobin FCF failover to " 1773 + "(x%x)\n", fcf_index); 1774 + rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); 1775 + if (rc) 1776 + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 1777 + "2761 FLOGI roundrobin FCF failover " 1778 + "failed (rc:x%x) to read FCF (x%x)\n", 1779 + rc, phba->fcf.current_rec.fcf_indx); 1780 + else 1781 + goto stop_flogi_current_fcf; 1782 + } 1783 + return 0; 1784 + 1785 + stop_flogi_current_fcf: 1786 + lpfc_can_disctmo(vport); 1787 + return 1; 1788 + } 1789 + 1790 + /** 1832 1791 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 1833 1792 * @phba: pointer to lpfc hba data structure. 1834 1793 * @mboxq: pointer to mailbox object. 
··· 1915 1756 int rc; 1916 1757 1917 1758 /* If there is pending FCoE event restart FCF table scan */ 1918 - if (lpfc_check_pending_fcoe_event(phba, 0)) { 1759 + if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { 1919 1760 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1920 1761 return; 1921 1762 } ··· 1924 1765 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 1925 1766 &next_fcf_index); 1926 1767 if (!new_fcf_record) { 1927 - lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1768 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1928 1769 "2765 Mailbox command READ_FCF_RECORD " 1929 1770 "failed to retrieve a FCF record.\n"); 1930 1771 /* Let next new FCF event trigger fast failover */ 1931 1772 spin_lock_irq(&phba->hbalock); 1932 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1773 + phba->hba_flag &= ~FCF_TS_INPROG; 1933 1774 spin_unlock_irq(&phba->hbalock); 1934 1775 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1935 1776 return; ··· 1946 1787 /* 1947 1788 * If the fcf record does not match with connect list entries 1948 1789 * read the next entry; otherwise, this is an eligible FCF 1949 - * record for round robin FCF failover. 1790 + * record for roundrobin FCF failover. 1950 1791 */ 1951 1792 if (!rc) { 1952 1793 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1953 - "2781 FCF record (x%x) failed FCF " 1954 - "connection list check, fcf_avail:x%x, " 1955 - "fcf_valid:x%x\n", 1794 + "2781 FCF (x%x) failed connection " 1795 + "list check: (x%x/x%x)\n", 1956 1796 bf_get(lpfc_fcf_record_fcf_index, 1957 1797 new_fcf_record), 1958 1798 bf_get(lpfc_fcf_record_fcf_avail, ··· 1961 1803 if ((phba->fcf.fcf_flag & FCF_IN_USE) && 1962 1804 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 1963 1805 new_fcf_record, LPFC_FCOE_IGNORE_VID)) { 1806 + if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != 1807 + phba->fcf.current_rec.fcf_indx) { 1808 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1809 + "2862 FCF (x%x) matches property " 1810 + "of in-use FCF (x%x)\n", 1811 + bf_get(lpfc_fcf_record_fcf_index, 1812 + new_fcf_record), 1813 + phba->fcf.current_rec.fcf_indx); 1814 + goto read_next_fcf; 1815 + } 1964 1816 /* 1965 1817 * In case the current in-use FCF record becomes 1966 1818 * invalid/unavailable during FCF discovery that ··· 1981 1813 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 1982 1814 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1983 1815 "2835 Invalid in-use FCF " 1984 - "record (x%x) reported, " 1985 - "entering fast FCF failover " 1986 - "mode scanning.\n", 1816 + "(x%x), enter FCF failover " 1817 + "table scan.\n", 1987 1818 phba->fcf.current_rec.fcf_indx); 1988 1819 spin_lock_irq(&phba->hbalock); 1989 1820 phba->fcf.fcf_flag |= FCF_REDISC_FOV; ··· 2011 1844 if (phba->fcf.fcf_flag & FCF_IN_USE) { 2012 1845 if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2013 1846 new_fcf_record, vlan_id)) { 2014 - phba->fcf.fcf_flag |= FCF_AVAILABLE; 2015 - if (phba->fcf.fcf_flag & FCF_REDISC_PEND) 2016 - /* Stop FCF redisc wait timer if pending */ 2017 - __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2018 - else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 2019 - /* If in fast failover, mark it's completed */ 2020 - phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2021 - spin_unlock_irq(&phba->hbalock); 2022 - lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2023 - "2836 The new FCF record (x%x) " 2024 - "matches the in-use FCF record " 2025 - "(x%x)\n", 2026 - phba->fcf.current_rec.fcf_indx, 1847 + if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == 1848 + phba->fcf.current_rec.fcf_indx) { 1849 + 
phba->fcf.fcf_flag |= FCF_AVAILABLE; 1850 + if (phba->fcf.fcf_flag & FCF_REDISC_PEND) 1851 + /* Stop FCF redisc wait timer */ 1852 + __lpfc_sli4_stop_fcf_redisc_wait_timer( 1853 + phba); 1854 + else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 1855 + /* Fast failover, mark completed */ 1856 + phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 1857 + spin_unlock_irq(&phba->hbalock); 1858 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1859 + "2836 New FCF matches in-use " 1860 + "FCF (x%x)\n", 1861 + phba->fcf.current_rec.fcf_indx); 1862 + goto out; 1863 + } else 1864 + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1865 + "2863 New FCF (x%x) matches " 1866 + "property of in-use FCF (x%x)\n", 2027 1867 bf_get(lpfc_fcf_record_fcf_index, 2028 - new_fcf_record)); 2029 - goto out; 1868 + new_fcf_record), 1869 + phba->fcf.current_rec.fcf_indx); 2030 1870 } 2031 1871 /* 2032 1872 * Read next FCF record from HBA searching for the matching ··· 2127 1953 */ 2128 1954 if (fcf_rec) { 2129 1955 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2130 - "2840 Update current FCF record " 2131 - "with initial FCF record (x%x)\n", 1956 + "2840 Update initial FCF candidate " 1957 + "with FCF (x%x)\n", 2132 1958 bf_get(lpfc_fcf_record_fcf_index, 2133 1959 new_fcf_record)); 2134 1960 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, ··· 2158 1984 */ 2159 1985 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { 2160 1986 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2161 - "2782 No suitable FCF record " 2162 - "found during this round of " 2163 - "post FCF rediscovery scan: " 2164 - "fcf_evt_tag:x%x, fcf_index: " 2165 - "x%x\n", 1987 + "2782 No suitable FCF found: " 1988 + "(x%x/x%x)\n", 2166 1989 phba->fcoe_eventtag_at_fcf_scan, 2167 1990 bf_get(lpfc_fcf_record_fcf_index, 2168 1991 new_fcf_record)); 2169 - /* 2170 - * Let next new FCF event trigger fast 2171 - * failover 2172 - */ 2173 1992 spin_lock_irq(&phba->hbalock); 2174 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1993 + if (phba->hba_flag & HBA_DEVLOSS_TMO) { 1994 + phba->hba_flag &= ~FCF_TS_INPROG; 1995 + spin_unlock_irq(&phba->hbalock); 1996 + /* Unregister in-use FCF and rescan */ 1997 + lpfc_printf_log(phba, KERN_INFO, 1998 + LOG_FIP, 1999 + "2864 On devloss tmo " 2000 + "unreg in-use FCF and " 2001 + "rescan FCF table\n"); 2002 + lpfc_unregister_fcf_rescan(phba); 2003 + return; 2004 + } 2005 + /* 2006 + * Let next new FCF event trigger fast failover 2007 + */ 2008 + phba->hba_flag &= ~FCF_TS_INPROG; 2175 2009 spin_unlock_irq(&phba->hbalock); 2176 2010 return; 2177 2011 } ··· 2197 2015 2198 2016 /* Replace in-use record with the new record */ 2199 2017 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2200 - "2842 Replace the current in-use " 2201 - "FCF record (x%x) with failover FCF " 2202 - "record (x%x)\n", 2018 + "2842 Replace in-use FCF (x%x) " 2019 + "with failover FCF (x%x)\n", 2203 2020 phba->fcf.current_rec.fcf_indx, 2204 2021 phba->fcf.failover_rec.fcf_indx); 2205 2022 memcpy(&phba->fcf.current_rec, ··· 2210 2029 * FCF failover. 2211 2030 */ 2212 2031 spin_lock_irq(&phba->hbalock); 2213 - phba->fcf.fcf_flag &= 2214 - ~(FCF_REDISC_FOV | FCF_REDISC_RRU); 2032 + phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2215 2033 spin_unlock_irq(&phba->hbalock); 2216 - /* 2217 - * Set up the initial registered FCF index for FLOGI 2218 - * round robin FCF failover. 
2219 - */ 2220 - phba->fcf.fcf_rr_init_indx = 2221 - phba->fcf.failover_rec.fcf_indx; 2222 2034 /* Register to the new FCF record */ 2223 2035 lpfc_register_fcf(phba); 2224 2036 } else { ··· 2243 2069 LPFC_FCOE_FCF_GET_FIRST); 2244 2070 return; 2245 2071 } 2246 - 2247 - /* 2248 - * Otherwise, initial scan or post linkdown rescan, 2249 - * register with the best FCF record found so far 2250 - * through the FCF scanning process. 2251 - */ 2252 - 2253 - /* 2254 - * Mark the initial FCF discovery completed and 2255 - * the start of the first round of the roundrobin 2256 - * FCF failover. 2257 - */ 2258 - spin_lock_irq(&phba->hbalock); 2259 - phba->fcf.fcf_flag &= 2260 - ~(FCF_INIT_DISC | FCF_REDISC_RRU); 2261 - spin_unlock_irq(&phba->hbalock); 2262 - /* 2263 - * Set up the initial registered FCF index for FLOGI 2264 - * round robin FCF failover 2265 - */ 2266 - phba->fcf.fcf_rr_init_indx = 2267 - phba->fcf.current_rec.fcf_indx; 2268 2072 /* Register to the new FCF record */ 2269 2073 lpfc_register_fcf(phba); 2270 2074 } ··· 2258 2106 } 2259 2107 2260 2108 /** 2261 - * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler 2109 + * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler 2262 2110 * @phba: pointer to lpfc hba data structure. 2263 2111 * @mboxq: pointer to mailbox object. 2264 2112 * 2265 - * This is the callback function for FLOGI failure round robin FCF failover 2113 + * This is the callback function for FLOGI failure roundrobin FCF failover 2266 2114 * read FCF record mailbox command from the eligible FCF record bmask for 2267 2115 * performing the failover. If the FCF read back is not valid/available, it 2268 2116 * fails through to retrying FLOGI to the currently registered FCF again. ··· 2277 2125 { 2278 2126 struct fcf_record *new_fcf_record; 2279 2127 uint32_t boot_flag, addr_mode; 2280 - uint16_t next_fcf_index; 2128 + uint16_t next_fcf_index, fcf_index; 2281 2129 uint16_t current_fcf_index; 2282 2130 uint16_t vlan_id; 2131 + int rc; 2283 2132 2284 - /* If link state is not up, stop the round robin failover process */ 2133 + /* If link state is not up, stop the roundrobin failover process */ 2285 2134 if (phba->link_state < LPFC_LINK_UP) { 2286 2135 spin_lock_irq(&phba->hbalock); 2287 2136 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 2137 + phba->hba_flag &= ~FCF_RR_INPROG; 2288 2138 spin_unlock_irq(&phba->hbalock); 2289 - lpfc_sli4_mbox_cmd_free(phba, mboxq); 2290 - return; 2139 + goto out; 2291 2140 } 2292 2141 2293 2142 /* Parse the FCF record from the non-embedded mailbox command */ ··· 2298 2145 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2299 2146 "2766 Mailbox command READ_FCF_RECORD " 2300 2147 "failed to retrieve a FCF record.\n"); 2301 - goto out; 2148 + goto error_out; 2302 2149 } 2303 2150 2304 2151 /* Get the needed parameters from FCF record */ 2305 - lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2306 - &addr_mode, &vlan_id); 2152 + rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2153 + &addr_mode, &vlan_id); 2307 2154 2308 2155 /* Log the FCF record information if turned on */ 2309 2156 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 2310 2157 next_fcf_index); 2311 2158 2159 + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2160 + if (!rc) { 2161 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2162 + "2848 Remove ineligible FCF (x%x) from " 2163 + "from roundrobin bmask\n", fcf_index); 2164 + /* Clear roundrobin bmask bit for ineligible FCF */ 2165 + 
lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); 2166 + /* Perform next round of roundrobin FCF failover */ 2167 + fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 2168 + rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); 2169 + if (rc) 2170 + goto out; 2171 + goto error_out; 2172 + } 2173 + 2174 + if (fcf_index == phba->fcf.current_rec.fcf_indx) { 2175 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2176 + "2760 Perform FLOGI roundrobin FCF failover: " 2177 + "FCF (x%x) back to FCF (x%x)\n", 2178 + phba->fcf.current_rec.fcf_indx, fcf_index); 2179 + /* Wait 500 ms before retrying FLOGI to current FCF */ 2180 + msleep(500); 2181 + lpfc_initial_flogi(phba->pport); 2182 + goto out; 2183 + } 2184 + 2312 2185 /* Upload new FCF record to the failover FCF record */ 2313 2186 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2314 - "2834 Update the current FCF record (x%x) " 2315 - "with the next FCF record (x%x)\n", 2316 - phba->fcf.failover_rec.fcf_indx, 2317 - bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); 2187 + "2834 Update current FCF (x%x) with new FCF (x%x)\n", 2188 + phba->fcf.failover_rec.fcf_indx, fcf_index); 2318 2189 spin_lock_irq(&phba->hbalock); 2319 2190 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, 2320 2191 new_fcf_record, addr_mode, vlan_id, ··· 2355 2178 sizeof(struct lpfc_fcf_rec)); 2356 2179 2357 2180 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2358 - "2783 FLOGI round robin FCF failover from FCF " 2359 - "(x%x) to FCF (x%x).\n", 2360 - current_fcf_index, 2361 - bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); 2181 + "2783 Perform FLOGI roundrobin FCF failover: FCF " 2182 + "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index); 2362 2183 2184 + error_out: 2185 + lpfc_register_fcf(phba); 2363 2186 out: 2364 2187 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2365 - lpfc_register_fcf(phba); 2366 2188 } 2367 2189 2368 2190 /** ··· 2370 2194 * @mboxq: pointer to mailbox object. 2371 2195 * 2372 2196 * This is the callback function of read FCF record mailbox command for 2373 - * updating the eligible FCF bmask for FLOGI failure round robin FCF 2197 + * updating the eligible FCF bmask for FLOGI failure roundrobin FCF 2374 2198 * failover when a new FCF event happened. If the FCF read back is 2375 2199 * valid/available and it passes the connection list check, it updates 2376 - * the bmask for the eligible FCF record for round robin failover. 2200 + * the bmask for the eligible FCF record for roundrobin failover. 2377 2201 */ 2378 2202 void 2379 2203 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) ··· 2815 2639 * and get the FCF Table. 
2816 2640 */ 2817 2641 spin_lock_irq(&phba->hbalock); 2818 - if (phba->hba_flag & FCF_DISC_INPROGRESS) { 2642 + if (phba->hba_flag & FCF_TS_INPROG) { 2819 2643 spin_unlock_irq(&phba->hbalock); 2820 2644 return; 2821 2645 } ··· 4082 3906 LPFC_MBOXQ_t *mbox; 4083 3907 int rc; 4084 3908 3909 + if (phba->sli_rev == LPFC_SLI_REV4) { 3910 + lpfc_sli4_unreg_all_rpis(vport); 3911 + return; 3912 + } 3913 + 4085 3914 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4086 3915 if (mbox) { 4087 3916 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); ··· 4173 3992 } 4174 3993 4175 3994 spin_lock_irq(&phba->hbalock); 3995 + /* Cleanup REG_LOGIN completions which are not yet processed */ 3996 + list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 3997 + if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) || 3998 + (ndlp != (struct lpfc_nodelist *) mb->context2)) 3999 + continue; 4000 + 4001 + mb->context2 = NULL; 4002 + mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4003 + } 4004 + 4176 4005 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 4177 4006 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 4178 4007 (ndlp == (struct lpfc_nodelist *) mb->context2)) { ··· 5361 5170 if (ndlp) 5362 5171 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 5363 5172 lpfc_cleanup_pending_mbox(vports[i]); 5173 + if (phba->sli_rev == LPFC_SLI_REV4) 5174 + lpfc_sli4_unreg_all_rpis(vports[i]); 5364 5175 lpfc_mbx_unreg_vpi(vports[i]); 5365 5176 shost = lpfc_shost_from_vport(vports[i]); 5366 5177 spin_lock_irq(shost->host_lock);
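The hbadisc.c rework above turns FLOGI failure handling into a plain roundrobin walk over the eligible-FCF bitmask: an ineligible record gets its bmask bit cleared, the next set bit (wrapping past the end of the table) becomes the failover candidate, and when the walk comes back around to the currently registered record the driver waits 500 ms and retries FLOGI to it. A minimal user-space sketch of that wrap-around search follows; the driver walks fcf_rr_bmask with the kernel's bitmap helpers, and every name in the sketch is illustrative, not the driver's own.

#include <stdint.h>
#include <stdio.h>

#define FCF_TBL_MAX   64      /* stand-in for LPFC_SLI4_FCF_TBL_INDX_MAX */
#define FCF_NEXT_NONE 0xFFFF  /* stand-in for LPFC_FCOE_FCF_NEXT_NONE */

static uint64_t fcf_rr_bmask; /* one bit per eligible FCF record */

/* Next eligible index after 'cur', wrapping to bit 0. */
static unsigned int fcf_rr_next_index(unsigned int cur)
{
    unsigned int off, idx;

    for (off = 1; off <= FCF_TBL_MAX; off++) {
        idx = (cur + off) % FCF_TBL_MAX;
        if (fcf_rr_bmask & (1ULL << idx))
            return idx;
    }
    return FCF_NEXT_NONE;     /* bmask empty: no candidate left */
}

int main(void)
{
    fcf_rr_bmask |= 1ULL << 3;   /* records 3 and 9 eligible */
    fcf_rr_bmask |= 1ULL << 9;
    printf("next after 3: x%x\n", fcf_rr_next_index(3)); /* 9 */
    fcf_rr_bmask &= ~(1ULL << 9);                        /* 9 ineligible */
    printf("next after 3: x%x\n", fcf_rr_next_index(3)); /* back to 3 */
    return 0;
}

When the search lands back on the current record, as in the second call, the driver takes the "FCF (x%x) back to FCF (x%x)" path shown above: sleep 500 ms, then retry FLOGI to the same FCF.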
+41
drivers/scsi/lpfc/lpfc_hw.h
··· 861 861 uint32_t crcCnt; 862 862 } RPS_RSP; 863 863 864 + struct RLS { /* Structure is in Big Endian format */ 865 + uint32_t rls; 866 + #define rls_rsvd_SHIFT 24 867 + #define rls_rsvd_MASK 0x000000ff 868 + #define rls_rsvd_WORD rls 869 + #define rls_did_SHIFT 0 870 + #define rls_did_MASK 0x00ffffff 871 + #define rls_did_WORD rls 872 + }; 873 + 874 + struct RLS_RSP { /* Structure is in Big Endian format */ 875 + uint32_t linkFailureCnt; 876 + uint32_t lossSyncCnt; 877 + uint32_t lossSignalCnt; 878 + uint32_t primSeqErrCnt; 879 + uint32_t invalidXmitWord; 880 + uint32_t crcCnt; 881 + }; 882 + 883 + struct RTV_RSP { /* Structure is in Big Endian format */ 884 + uint32_t ratov; 885 + uint32_t edtov; 886 + uint32_t qtov; 887 + #define qtov_rsvd0_SHIFT 28 888 + #define qtov_rsvd0_MASK 0x0000000f 889 + #define qtov_rsvd0_WORD qtov /* reserved */ 890 + #define qtov_edtovres_SHIFT 27 891 + #define qtov_edtovres_MASK 0x00000001 892 + #define qtov_edtovres_WORD qtov /* E_D_TOV Resolution */ 893 + #define qtov__rsvd1_SHIFT 19 894 + #define qtov_rsvd1_MASK 0x0000003f 895 + #define qtov_rsvd1_WORD qtov /* reserved */ 896 + #define qtov_rttov_SHIFT 18 897 + #define qtov_rttov_MASK 0x00000001 898 + #define qtov_rttov_WORD qtov /* R_T_TOV value */ 899 + #define qtov_rsvd2_SHIFT 0 900 + #define qtov_rsvd2_MASK 0x0003ffff 901 + #define qtov_rsvd2_WORD qtov /* reserved */ 902 + }; 903 + 904 + 864 905 typedef struct _RPL { /* Structure is in Big Endian format */ 865 906 uint32_t maxsize; 866 907 uint32_t index;
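The new RLS/RLS_RSP/RTV_RSP structures are wire images: as their comments say, every word arrives big-endian, and the qtov bitfields are pulled out with the usual SHIFT/MASK pairs. A standalone sketch of reading one RLS_RSP counter off the wire; the payload bytes are invented for the example and the field names only mirror the header.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rls_rsp {              /* all six counters big-endian on the wire */
    uint32_t link_failure_cnt;
    uint32_t loss_sync_cnt;
    uint32_t loss_signal_cnt;
    uint32_t prim_seq_err_cnt;
    uint32_t invalid_xmit_word;
    uint32_t crc_cnt;
};

int main(void)
{
    unsigned char wire[sizeof(struct rls_rsp)] = { 0, 0, 0, 5 }; /* 5 failures */
    struct rls_rsp rsp;

    memcpy(&rsp, wire, sizeof(rsp));
    /* convert from big-endian before any arithmetic or mask work */
    printf("link failures: %u\n", be32toh(rsp.link_failure_cnt));
    return 0;
}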
+70 -97
drivers/scsi/lpfc/lpfc_hw4.h
··· 424 424 #define FCOE_SOFn3 0x36 425 425 }; 426 426 427 - struct lpfc_wqe_generic{ 428 - struct ulp_bde64 bde; 429 - uint32_t word3; 430 - uint32_t word4; 431 - uint32_t word5; 432 - uint32_t word6; 433 - #define lpfc_wqe_gen_context_SHIFT 16 434 - #define lpfc_wqe_gen_context_MASK 0x0000FFFF 435 - #define lpfc_wqe_gen_context_WORD word6 436 - #define lpfc_wqe_gen_xri_SHIFT 0 437 - #define lpfc_wqe_gen_xri_MASK 0x0000FFFF 438 - #define lpfc_wqe_gen_xri_WORD word6 439 - uint32_t word7; 440 - #define lpfc_wqe_gen_lnk_SHIFT 23 441 - #define lpfc_wqe_gen_lnk_MASK 0x00000001 442 - #define lpfc_wqe_gen_lnk_WORD word7 443 - #define lpfc_wqe_gen_erp_SHIFT 22 444 - #define lpfc_wqe_gen_erp_MASK 0x00000001 445 - #define lpfc_wqe_gen_erp_WORD word7 446 - #define lpfc_wqe_gen_pu_SHIFT 20 447 - #define lpfc_wqe_gen_pu_MASK 0x00000003 448 - #define lpfc_wqe_gen_pu_WORD word7 449 - #define lpfc_wqe_gen_class_SHIFT 16 450 - #define lpfc_wqe_gen_class_MASK 0x00000007 451 - #define lpfc_wqe_gen_class_WORD word7 452 - #define lpfc_wqe_gen_command_SHIFT 8 453 - #define lpfc_wqe_gen_command_MASK 0x000000FF 454 - #define lpfc_wqe_gen_command_WORD word7 455 - #define lpfc_wqe_gen_status_SHIFT 4 456 - #define lpfc_wqe_gen_status_MASK 0x0000000F 457 - #define lpfc_wqe_gen_status_WORD word7 458 - #define lpfc_wqe_gen_ct_SHIFT 2 459 - #define lpfc_wqe_gen_ct_MASK 0x00000003 460 - #define lpfc_wqe_gen_ct_WORD word7 461 - uint32_t abort_tag; 462 - uint32_t word9; 463 - #define lpfc_wqe_gen_request_tag_SHIFT 0 464 - #define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF 465 - #define lpfc_wqe_gen_request_tag_WORD word9 466 - uint32_t word10; 467 - #define lpfc_wqe_gen_ccp_SHIFT 24 468 - #define lpfc_wqe_gen_ccp_MASK 0x000000FF 469 - #define lpfc_wqe_gen_ccp_WORD word10 470 - #define lpfc_wqe_gen_ccpe_SHIFT 23 471 - #define lpfc_wqe_gen_ccpe_MASK 0x00000001 472 - #define lpfc_wqe_gen_ccpe_WORD word10 473 - #define lpfc_wqe_gen_pv_SHIFT 19 474 - #define lpfc_wqe_gen_pv_MASK 0x00000001 475 - #define lpfc_wqe_gen_pv_WORD word10 476 - #define lpfc_wqe_gen_pri_SHIFT 16 477 - #define lpfc_wqe_gen_pri_MASK 0x00000007 478 - #define lpfc_wqe_gen_pri_WORD word10 479 - uint32_t word11; 480 - #define lpfc_wqe_gen_cq_id_SHIFT 16 481 - #define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF 482 - #define lpfc_wqe_gen_cq_id_WORD word11 483 - #define LPFC_WQE_CQ_ID_DEFAULT 0xffff 484 - #define lpfc_wqe_gen_wqec_SHIFT 7 485 - #define lpfc_wqe_gen_wqec_MASK 0x00000001 486 - #define lpfc_wqe_gen_wqec_WORD word11 487 - #define ELS_ID_FLOGI 3 488 - #define ELS_ID_FDISC 2 489 - #define ELS_ID_LOGO 1 490 - #define ELS_ID_DEFAULT 0 491 - #define lpfc_wqe_gen_els_id_SHIFT 4 492 - #define lpfc_wqe_gen_els_id_MASK 0x00000003 493 - #define lpfc_wqe_gen_els_id_WORD word11 494 - #define lpfc_wqe_gen_cmd_type_SHIFT 0 495 - #define lpfc_wqe_gen_cmd_type_MASK 0x0000000F 496 - #define lpfc_wqe_gen_cmd_type_WORD word11 497 - uint32_t payload[4]; 498 - }; 499 - 500 427 struct lpfc_rqe { 501 428 uint32_t address_hi; 502 429 uint32_t address_lo; ··· 2206 2279 #define wqe_reqtag_MASK 0x0000FFFF 2207 2280 #define wqe_reqtag_WORD word9 2208 2281 #define wqe_rcvoxid_SHIFT 16 2209 - #define wqe_rcvoxid_MASK 0x0000FFFF 2210 - #define wqe_rcvoxid_WORD word9 2282 + #define wqe_rcvoxid_MASK 0x0000FFFF 2283 + #define wqe_rcvoxid_WORD word9 2211 2284 uint32_t word10; 2285 + #define wqe_ebde_cnt_SHIFT 0 2286 + #define wqe_ebde_cnt_MASK 0x00000007 2287 + #define wqe_ebde_cnt_WORD word10 2288 + #define wqe_lenloc_SHIFT 7 2289 + #define wqe_lenloc_MASK 0x00000003 2290 + #define 
wqe_lenloc_WORD word10 2291 + #define LPFC_WQE_LENLOC_NONE 0 2292 + #define LPFC_WQE_LENLOC_WORD3 1 2293 + #define LPFC_WQE_LENLOC_WORD12 2 2294 + #define LPFC_WQE_LENLOC_WORD4 3 2295 + #define wqe_qosd_SHIFT 9 2296 + #define wqe_qosd_MASK 0x00000001 2297 + #define wqe_qosd_WORD word10 2298 + #define wqe_xbl_SHIFT 11 2299 + #define wqe_xbl_MASK 0x00000001 2300 + #define wqe_xbl_WORD word10 2301 + #define wqe_iod_SHIFT 13 2302 + #define wqe_iod_MASK 0x00000001 2303 + #define wqe_iod_WORD word10 2304 + #define LPFC_WQE_IOD_WRITE 0 2305 + #define LPFC_WQE_IOD_READ 1 2306 + #define wqe_dbde_SHIFT 14 2307 + #define wqe_dbde_MASK 0x00000001 2308 + #define wqe_dbde_WORD word10 2309 + #define wqe_wqes_SHIFT 15 2310 + #define wqe_wqes_MASK 0x00000001 2311 + #define wqe_wqes_WORD word10 2212 2312 #define wqe_pri_SHIFT 16 2213 2313 #define wqe_pri_MASK 0x00000007 2214 2314 #define wqe_pri_WORD word10 ··· 2249 2295 #define wqe_ccpe_MASK 0x00000001 2250 2296 #define wqe_ccpe_WORD word10 2251 2297 #define wqe_ccp_SHIFT 24 2252 - #define wqe_ccp_MASK 0x000000ff 2253 - #define wqe_ccp_WORD word10 2298 + #define wqe_ccp_MASK 0x000000ff 2299 + #define wqe_ccp_WORD word10 2254 2300 uint32_t word11; 2255 - #define wqe_cmd_type_SHIFT 0 2256 - #define wqe_cmd_type_MASK 0x0000000f 2257 - #define wqe_cmd_type_WORD word11 2258 - #define wqe_wqec_SHIFT 7 2259 - #define wqe_wqec_MASK 0x00000001 2260 - #define wqe_wqec_WORD word11 2261 - #define wqe_cqid_SHIFT 16 2262 - #define wqe_cqid_MASK 0x0000ffff 2263 - #define wqe_cqid_WORD word11 2301 + #define wqe_cmd_type_SHIFT 0 2302 + #define wqe_cmd_type_MASK 0x0000000f 2303 + #define wqe_cmd_type_WORD word11 2304 + #define wqe_els_id_SHIFT 4 2305 + #define wqe_els_id_MASK 0x00000003 2306 + #define wqe_els_id_WORD word11 2307 + #define LPFC_ELS_ID_FLOGI 3 2308 + #define LPFC_ELS_ID_FDISC 2 2309 + #define LPFC_ELS_ID_LOGO 1 2310 + #define LPFC_ELS_ID_DEFAULT 0 2311 + #define wqe_wqec_SHIFT 7 2312 + #define wqe_wqec_MASK 0x00000001 2313 + #define wqe_wqec_WORD word11 2314 + #define wqe_cqid_SHIFT 16 2315 + #define wqe_cqid_MASK 0x0000ffff 2316 + #define wqe_cqid_WORD word11 2317 + #define LPFC_WQE_CQ_ID_DEFAULT 0xffff 2264 2318 }; 2265 2319 2266 2320 struct wqe_did { ··· 2285 2323 #define wqe_xmit_bls_xo_SHIFT 31 2286 2324 #define wqe_xmit_bls_xo_MASK 0x00000001 2287 2325 #define wqe_xmit_bls_xo_WORD word5 2326 + }; 2327 + 2328 + struct lpfc_wqe_generic{ 2329 + struct ulp_bde64 bde; 2330 + uint32_t word3; 2331 + uint32_t word4; 2332 + uint32_t word5; 2333 + struct wqe_common wqe_com; 2334 + uint32_t payload[4]; 2288 2335 }; 2289 2336 2290 2337 struct els_request64_wqe { ··· 2327 2356 2328 2357 struct xmit_els_rsp64_wqe { 2329 2358 struct ulp_bde64 bde; 2330 - uint32_t rsvd3; 2359 + uint32_t response_payload_len; 2331 2360 uint32_t rsvd4; 2332 - struct wqe_did wqe_dest; 2361 + struct wqe_did wqe_dest; 2333 2362 struct wqe_common wqe_com; /* words 6-11 */ 2334 2363 uint32_t rsvd_12_15[4]; 2335 2364 }; ··· 2398 2427 2399 2428 struct xmit_seq64_wqe { 2400 2429 struct ulp_bde64 bde; 2401 - uint32_t paylaod_offset; 2430 + uint32_t rsvd3; 2402 2431 uint32_t relative_offset; 2403 2432 struct wqe_rctl_dfctl wge_ctl; 2404 2433 struct wqe_common wqe_com; /* words 6-11 */ ··· 2408 2437 }; 2409 2438 struct xmit_bcast64_wqe { 2410 2439 struct ulp_bde64 bde; 2411 - uint32_t paylaod_len; 2440 + uint32_t seq_payload_len; 2412 2441 uint32_t rsvd4; 2413 2442 struct wqe_rctl_dfctl wge_ctl; /* word 5 */ 2414 2443 struct wqe_common wqe_com; /* words 6-11 */ ··· 2417 2446 2418 2447 struct 
gen_req64_wqe { 2419 2448 struct ulp_bde64 bde; 2420 - uint32_t command_len; 2421 - uint32_t payload_len; 2449 + uint32_t request_payload_len; 2450 + uint32_t relative_offset; 2422 2451 struct wqe_rctl_dfctl wge_ctl; /* word 5 */ 2423 2452 struct wqe_common wqe_com; /* words 6-11 */ 2424 2453 uint32_t rsvd_12_15[4]; ··· 2451 2480 2452 2481 struct fcp_iwrite64_wqe { 2453 2482 struct ulp_bde64 bde; 2454 - uint32_t payload_len; 2483 + uint32_t payload_offset_len; 2455 2484 uint32_t total_xfer_len; 2456 2485 uint32_t initial_xfer_len; 2457 2486 struct wqe_common wqe_com; /* words 6-11 */ ··· 2460 2489 2461 2490 struct fcp_iread64_wqe { 2462 2491 struct ulp_bde64 bde; 2463 - uint32_t payload_len; /* word 3 */ 2492 + uint32_t payload_offset_len; /* word 3 */ 2464 2493 uint32_t total_xfer_len; /* word 4 */ 2465 2494 uint32_t rsrvd5; /* word 5 */ 2466 2495 struct wqe_common wqe_com; /* words 6-11 */ ··· 2468 2497 }; 2469 2498 2470 2499 struct fcp_icmnd64_wqe { 2471 - struct ulp_bde64 bde; /* words 0-2 */ 2472 - uint32_t rsrvd[3]; /* words 3-5 */ 2500 + struct ulp_bde64 bde; /* words 0-2 */ 2501 + uint32_t rsrvd3; /* word 3 */ 2502 + uint32_t rsrvd4; /* word 4 */ 2503 + uint32_t rsrvd5; /* word 5 */ 2473 2504 struct wqe_common wqe_com; /* words 6-11 */ 2474 - uint32_t rsvd_12_15[4]; /* word 12-15 */ 2505 + uint32_t rsvd_12_15[4]; /* word 12-15 */ 2475 2506 }; 2476 2507 2477 2508
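The net effect of this header rework is that the per-command lpfc_wqe_gen_* accessors are gone: every WQE layout now embeds struct wqe_common, and word10/word11 bits such as dbde, iod, lenloc, els_id and cqid are set through one shared macro family. Those macros boil down to a read-modify-write on the named word; below is a generic standalone version of what a bf_set()-style macro expands to, reusing one SHIFT/MASK pair from the header above.

#include <stdint.h>
#include <stdio.h>

#define wqe_lenloc_SHIFT      7
#define wqe_lenloc_MASK       0x00000003
#define LPFC_WQE_LENLOC_WORD4 3

/* read-modify-write of a masked field at a given bit offset */
static void bf_set_u32(uint32_t *word, int shift, uint32_t mask, uint32_t val)
{
    *word = (*word & ~(mask << shift)) | ((val & mask) << shift);
}

int main(void)
{
    uint32_t word10 = 0;

    bf_set_u32(&word10, wqe_lenloc_SHIFT, wqe_lenloc_MASK,
               LPFC_WQE_LENLOC_WORD4);
    printf("word10 = 0x%08x\n", word10);  /* 0x00000180 */
    return 0;
}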
+75 -27
drivers/scsi/lpfc/lpfc_init.c
··· 813 813 814 814 return 0; 815 815 } 816 + 816 817 /** 817 818 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 818 819 * @phba: pointer to lpfc HBA data structure. ··· 2235 2234 void 2236 2235 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2237 2236 { 2238 - /* Clear pending FCF rediscovery wait and failover in progress flags */ 2239 - phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND | 2240 - FCF_DEAD_DISC | 2241 - FCF_ACVL_DISC); 2237 + /* Clear pending FCF rediscovery wait flag */ 2238 + phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2239 + 2242 2240 /* Now, try to stop the timer */ 2243 2241 del_timer(&phba->fcf.redisc_wait); 2244 2242 } ··· 2261 2261 return; 2262 2262 } 2263 2263 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2264 + /* Clear failover in progress flags */ 2265 + phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2264 2266 spin_unlock_irq(&phba->hbalock); 2265 2267 } 2266 2268 ··· 2937 2935 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 2938 2936 spin_unlock_irq(&phba->hbalock); 2939 2937 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2940 - "2776 FCF rediscover wait timer expired, post " 2941 - "a worker thread event for FCF table scan\n"); 2938 + "2776 FCF rediscover quiescent timer expired\n"); 2942 2939 /* wake up worker thread */ 2943 2940 lpfc_worker_wake_up(phba); 2944 2941 } ··· 3312 3311 if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) 3313 3312 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3314 3313 LOG_DISCOVERY, 3315 - "2546 New FCF found event: " 3316 - "evt_tag:x%x, fcf_index:x%x\n", 3314 + "2546 New FCF event, evt_tag:x%x, " 3315 + "index:x%x\n", 3317 3316 acqe_fcoe->event_tag, 3318 3317 acqe_fcoe->index); 3319 3318 else 3320 3319 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3321 3320 LOG_DISCOVERY, 3322 - "2788 FCF parameter modified event: " 3323 - "evt_tag:x%x, fcf_index:x%x\n", 3321 + "2788 FCF param modified event, " 3322 + "evt_tag:x%x, index:x%x\n", 3324 3323 acqe_fcoe->event_tag, 3325 3324 acqe_fcoe->index); 3326 3325 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3327 3326 /* 3328 3327 * During period of FCF discovery, read the FCF 3329 3328 * table record indexed by the event to update 3330 - * FCF round robin failover eligible FCF bmask. 3329 + * FCF roundrobin failover eligible FCF bmask. 3331 3330 */ 3332 3331 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3333 3332 LOG_DISCOVERY, 3334 - "2779 Read new FCF record with " 3335 - "fcf_index:x%x for updating FCF " 3336 - "round robin failover bmask\n", 3333 + "2779 Read FCF (x%x) for updating " 3334 + "roundrobin FCF failover bmask\n", 3337 3335 acqe_fcoe->index); 3338 3336 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); 3339 3337 } 3340 3338 3341 3339 /* If the FCF discovery is in progress, do nothing. 
*/ 3342 3340 spin_lock_irq(&phba->hbalock); 3343 - if (phba->hba_flag & FCF_DISC_INPROGRESS) { 3341 + if (phba->hba_flag & FCF_TS_INPROG) { 3344 3342 spin_unlock_irq(&phba->hbalock); 3345 3343 break; 3346 3344 } ··· 3358 3358 3359 3359 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3360 3360 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3361 - "2770 Start FCF table scan due to new FCF " 3362 - "event: evt_tag:x%x, fcf_index:x%x\n", 3361 + "2770 Start FCF table scan per async FCF " 3362 + "event, evt_tag:x%x, index:x%x\n", 3363 3363 acqe_fcoe->event_tag, acqe_fcoe->index); 3364 3364 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3365 3365 LPFC_FCOE_FCF_GET_FIRST); 3366 3366 if (rc) 3367 3367 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3368 3368 "2547 Issue FCF scan read FCF mailbox " 3369 - "command failed 0x%x\n", rc); 3369 + "command failed (x%x)\n", rc); 3370 3370 break; 3371 3371 3372 3372 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: ··· 3378 3378 3379 3379 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3380 3380 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3381 - "2549 FCF disconnected from network index 0x%x" 3382 - " tag 0x%x\n", acqe_fcoe->index, 3383 - acqe_fcoe->event_tag); 3381 + "2549 FCF (x%x) disconnected from network, " 3382 + "tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); 3384 3383 /* 3385 3384 * If we are in the middle of FCF failover process, clear 3386 3385 * the corresponding FCF bit in the roundrobin bitmap. ··· 3493 3494 spin_unlock_irq(&phba->hbalock); 3494 3495 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3495 3496 LOG_DISCOVERY, 3496 - "2773 Start FCF fast failover due " 3497 - "to CVL event: evt_tag:x%x\n", 3498 - acqe_fcoe->event_tag); 3497 + "2773 Start FCF failover per CVL, " 3498 + "evt_tag:x%x\n", acqe_fcoe->event_tag); 3499 3499 rc = lpfc_sli4_redisc_fcf_table(phba); 3500 3500 if (rc) { 3501 3501 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | ··· 3644 3646 3645 3647 /* Scan FCF table from the first entry to re-discover SAN */ 3646 3648 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3647 - "2777 Start FCF table scan after FCF " 3648 - "rediscovery quiescent period over\n"); 3649 + "2777 Start post-quiescent FCF table scan\n"); 3649 3650 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 3650 3651 if (rc) 3651 3652 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, ··· 4162 4165 goto out_free_active_sgl; 4163 4166 } 4164 4167 4165 - /* Allocate eligible FCF bmask memory for FCF round robin failover */ 4168 + /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 4166 4169 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4167 4170 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4168 4171 GFP_KERNEL); ··· 7268 7271 } 7269 7272 7270 7273 /** 7274 + * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 7275 + * @phba: Pointer to HBA context object. 7276 + * 7277 + * This function is called in the SLI4 code path to wait for completion 7278 + * of device's XRIs exchange busy. It will check the XRI exchange busy 7279 + * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 7280 + * that, it will check the XRI exchange busy on outstanding FCP and ELS 7281 + * I/Os every 30 seconds, log error message, and wait forever. 
Only when
7282 + * all XRI exchange busy conditions complete, the driver unload shall proceed with
7283 + * invoking the function reset ioctl mailbox command to the CNA and
7284 + * the rest of the driver unload resource release.
7285 + **/
7286 + static void
7287 + lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
7288 + {
7289 + int wait_time = 0;
7290 + int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7291 + int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7292 +
7293 + while (!fcp_xri_cmpl || !els_xri_cmpl) {
7294 + if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
7295 + if (!fcp_xri_cmpl)
7296 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7297 + "2877 FCP XRI exchange busy "
7298 + "wait time: %d seconds.\n",
7299 + wait_time/1000);
7300 + if (!els_xri_cmpl)
7301 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7302 + "2878 ELS XRI exchange busy "
7303 + "wait time: %d seconds.\n",
7304 + wait_time/1000);
7305 + msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
7306 + wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
7307 + } else {
7308 + msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
7309 + wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
7310 + }
7311 + fcp_xri_cmpl =
7312 + list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7313 + els_xri_cmpl =
7314 + list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7315 + }
7316 + }
7317 +
7318 + /**
7271 7319 * lpfc_sli4_hba_unset - Unset the fcoe hba
7272 7320 * @phba: Pointer to HBA context object.
7273 7321 *
···
7356 7314 phba->sli.mbox_active = NULL;
7357 7315 spin_unlock_irq(&phba->hbalock);
7358 7316 }
7317 +
7318 + /* Abort all iocbs associated with the hba */
7319 + lpfc_sli_hba_iocb_abort(phba);
7320 +
7321 + /* Wait for completion of device XRI exchange busy */
7322 + lpfc_sli4_xri_exchange_busy_wait(phba);
7359 7323
7360 7324 /* Disable PCI subsystem interrupt */
7361 7325 lpfc_sli4_disable_intr(phba);
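The new unload quiesce is a two-phase poll: check the abort lists every LPFC_XRI_EXCH_BUSY_WAIT_T1 ms until the LPFC_XRI_EXCH_BUSY_WAIT_TMO budget is spent, then drop to one check per LPFC_XRI_EXCH_BUSY_WAIT_T2 ms with an error log on every slow pass, waiting as long as it takes. A user-space model of that loop, where busy() stands in for the two list_empty() checks and the constants mirror lpfc_sli4.h:

#include <stdio.h>
#include <unistd.h>

#define WAIT_TMO 10000   /* ms, LPFC_XRI_EXCH_BUSY_WAIT_TMO */
#define WAIT_T1     10   /* ms, LPFC_XRI_EXCH_BUSY_WAIT_T1  */
#define WAIT_T2  30000   /* ms, LPFC_XRI_EXCH_BUSY_WAIT_T2  */

static int busy_polls = 3;  /* pretend three polls still find busy XRIs */
static int busy(void) { return busy_polls-- > 0; }

int main(void)
{
    int wait_time = 0;

    while (busy()) {
        if (wait_time > WAIT_TMO) {   /* budget spent: slow phase, log */
            fprintf(stderr, "XRI busy, waited %d seconds\n",
                    wait_time / 1000);
            usleep(WAIT_T2 * 1000);
            wait_time += WAIT_T2;
        } else {                      /* fast phase */
            usleep(WAIT_T1 * 1000);
            wait_time += WAIT_T1;
        }
    }
    printf("all XRIs quiesced after %d ms\n", wait_time);
    return 0;
}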
+28
drivers/scsi/lpfc/lpfc_mbox.c
··· 797 797 }
798 798
799 799 /**
800 + * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
801 + * @vport: pointer to a vport object.
802 + *
803 + * This routine sends a mailbox command to unregister all active RPIs for
804 + * a vport.
805 + **/
806 + void
807 + lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
808 + {
809 + struct lpfc_hba *phba = vport->phba;
810 + LPFC_MBOXQ_t *mbox;
811 + int rc;
812 +
813 + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
814 + if (mbox) {
815 + lpfc_unreg_login(phba, vport->vpi,
816 + vport->vpi + phba->vpi_base, mbox);
817 + mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
818 + mbox->vport = vport;
819 + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
820 + mbox->context1 = NULL;
821 + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
822 + if (rc == MBX_NOT_FINISHED)
823 + mempool_free(mbox, phba->mbox_mem_pool);
824 + }
825 + }
826 +
827 + /**
800 828 * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
801 829 * @phba: pointer to lpfc hba data structure.
802 830 * @vpi: virtual N_Port identifier.
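lpfc_sli4_unreg_all_rpis() follows the driver's usual fire-and-forget mailbox shape: allocate from the mempool, fill in UNREG_LOGIN with the rsvd1 = 0x4000 all-RPIs-on-this-VPI modifier, issue with MBX_NOWAIT, and free the buffer only when the issue itself fails, because on success the completion handler owns it. That ownership rule, modeled standalone with stand-in names:

#include <stdio.h>
#include <stdlib.h>

enum { MBX_SUCCESS, MBX_NOT_FINISHED };

struct mbox {
    unsigned int vpi;
    unsigned int rsvd1;
};

static int issue_mbox(struct mbox *m)
{
    printf("UNREG_LOGIN vpi=%u rsvd1=0x%x\n", m->vpi, m->rsvd1);
    return MBX_SUCCESS;      /* queued; completion path owns the buffer */
}

int main(void)
{
    struct mbox *m = malloc(sizeof(*m));

    if (!m)
        return 1;
    m->vpi = 1;
    m->rsvd1 = 0x4000;       /* "unreg every RPI on this VPI" modifier */
    if (issue_mbox(m) == MBX_NOT_FINISHED)
        free(m);             /* issue failed: caller still owns it */
    else
        free(m);             /* stands in for the completion handler's free */
    return 0;
}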
+7 -2
drivers/scsi/lpfc/lpfc_scsi.c
··· 169 169 spin_lock_irqsave(shost->host_lock, flags); 170 170 if (!vport->stat_data_enabled || 171 171 vport->stat_data_blocked || 172 + !pnode || 172 173 !pnode->lat_data || 173 174 (phba->bucket_type == LPFC_NO_BUCKET)) { 174 175 spin_unlock_irqrestore(shost->host_lock, flags); ··· 2041 2040 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; 2042 2041 unsigned long flags; 2043 2042 2043 + if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 2044 + return; 2045 + 2044 2046 /* If there is queuefull or busy condition send a scsi event */ 2045 2047 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || 2046 2048 (cmnd->result == SAM_STAT_BUSY)) { ··· 3230 3226 struct lpfc_scsi_buf *lpfc_cmd; 3231 3227 struct lpfc_iocbq *iocbq; 3232 3228 struct lpfc_iocbq *iocbqrsp; 3229 + struct lpfc_nodelist *pnode = rdata->pnode; 3233 3230 int ret; 3234 3231 int status; 3235 3232 3236 - if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) 3233 + if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 3237 3234 return FAILED; 3238 3235 3239 3236 lpfc_cmd = lpfc_get_scsi_buf(phba); ··· 3261 3256 "0702 Issue %s to TGT %d LUN %d " 3262 3257 "rpi x%x nlp_flag x%x\n", 3263 3258 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 3264 - rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); 3259 + pnode->nlp_rpi, pnode->nlp_flag); 3265 3260 3266 3261 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 3267 3262 iocbq, iocbqrsp, lpfc_cmd->timeout);
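Each of the scsi.c fixes closes the same race: rdata->pnode can be cleared out from under the routine, so the code now snapshots the pointer once, validates the snapshot (NULL check plus NLP_CHK_NODE_ACT), and never re-reads the shared field. The pattern in miniature, with illustrative standalone types:

#include <stdio.h>

struct node {
    int rpi;
    int active;
};

struct rdata {
    struct node *pnode;  /* may be cleared by a rival thread */
};

static int send_tmf(struct rdata *rdata)
{
    struct node *pnode = rdata->pnode;  /* one snapshot, used throughout */

    if (!pnode || !pnode->active)       /* mirrors !pnode ||
                                           !NLP_CHK_NODE_ACT(pnode) */
        return -1;
    printf("TMF to rpi x%x\n", pnode->rpi);
    return 0;
}

int main(void)
{
    struct node n = { 0x11, 1 };
    struct rdata rd = { &n };

    send_tmf(&rd);
    rd.pnode = NULL;                    /* node went away */
    printf("rc=%d\n", send_tmf(&rd));   /* fails safely, no dereference */
    return 0;
}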
+276 -184
drivers/scsi/lpfc/lpfc_sli.c
··· 95 95 return -ENOMEM; 96 96 /* set consumption flag every once in a while */ 97 97 if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL)) 98 - bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1); 98 + bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); 99 99 100 100 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); 101 101 ··· 1735 1735 struct lpfc_vport *vport = pmb->vport; 1736 1736 struct lpfc_dmabuf *mp; 1737 1737 struct lpfc_nodelist *ndlp; 1738 + struct Scsi_Host *shost; 1738 1739 uint16_t rpi, vpi; 1739 1740 int rc; 1740 1741 ··· 1747 1746 } 1748 1747 1749 1748 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && 1750 - (phba->sli_rev == LPFC_SLI_REV4)) 1749 + (phba->sli_rev == LPFC_SLI_REV4) && 1750 + (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0)) 1751 1751 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); 1752 1752 1753 1753 /* ··· 1767 1765 return; 1768 1766 } 1769 1767 1770 - /* Unreg VPI, if the REG_VPI succeed after VLink failure */ 1771 1768 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 1772 1769 !(phba->pport->load_flag & FC_UNLOADING) && 1773 1770 !pmb->u.mb.mbxStatus) { 1774 - lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb); 1775 - pmb->vport = vport; 1776 - pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1777 - rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1778 - if (rc != MBX_NOT_FINISHED) 1779 - return; 1771 + shost = lpfc_shost_from_vport(vport); 1772 + spin_lock_irq(shost->host_lock); 1773 + vport->vpi_state |= LPFC_VPI_REGISTERED; 1774 + vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 1775 + spin_unlock_irq(shost->host_lock); 1780 1776 } 1781 1777 1782 1778 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { ··· 5921 5921 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 5922 5922 * @phba: Pointer to HBA context object. 5923 5923 * 5924 - * This routine performs a round robin SCSI command to SLI4 FCP WQ index 5924 + * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index 5925 5925 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 5926 5926 * held. 5927 5927 * ··· 5965 5965 uint16_t abrt_iotag; 5966 5966 struct lpfc_iocbq *abrtiocbq; 5967 5967 struct ulp_bde64 *bpl = NULL; 5968 - uint32_t els_id = ELS_ID_DEFAULT; 5968 + uint32_t els_id = LPFC_ELS_ID_DEFAULT; 5969 5969 int numBdes, i; 5970 5970 struct ulp_bde64 bde; 5971 5971 ··· 5982 5982 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 5983 5983 abort_tag = (uint32_t) iocbq->iotag; 5984 5984 xritag = iocbq->sli4_xritag; 5985 - wqe->words[7] = 0; /* The ct field has moved so reset */ 5985 + wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 5986 5986 /* words0-2 bpl convert bde */ 5987 5987 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 5988 5988 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / ··· 6033 6033 * contains the FCFI and remote N_Port_ID is 6034 6034 * in word 5. 
6035 6035 */ 6036 - 6037 6036 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 6038 - bf_set(lpfc_wqe_gen_context, &wqe->generic, 6039 - iocbq->iocb.ulpContext); 6040 - 6041 - bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); 6042 - bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 6037 + bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 6038 + iocbq->iocb.ulpContext); 6039 + bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 6040 + bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 6043 6041 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 6044 - 6045 6042 if (command_type == ELS_COMMAND_FIP) { 6046 6043 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 6047 6044 >> LPFC_FIP_ELS_ID_SHIFT); 6048 6045 } 6049 - bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id); 6050 - 6046 + bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 6047 + bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 6048 + bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 6049 + bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 6050 + bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 6051 + bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 6051 6052 break; 6052 6053 case CMD_XMIT_SEQUENCE64_CX: 6053 - bf_set(lpfc_wqe_gen_context, &wqe->generic, 6054 - iocbq->iocb.un.ulpWord[3]); 6055 - wqe->generic.word3 = 0; 6056 - bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); 6054 + bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 6055 + iocbq->iocb.un.ulpWord[3]); 6056 + bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 6057 + iocbq->iocb.ulpContext); 6057 6058 /* The entire sequence is transmitted for this IOCB */ 6058 6059 xmit_len = total_len; 6059 6060 cmnd = CMD_XMIT_SEQUENCE64_CR; 6060 6061 case CMD_XMIT_SEQUENCE64_CR: 6061 - /* word3 iocb=io_tag32 wqe=payload_offset */ 6062 - /* payload offset used for multilpe outstanding 6063 - * sequences on the same exchange 6064 - */ 6065 - wqe->words[3] = 0; 6062 + /* word3 iocb=io_tag32 wqe=reserved */ 6063 + wqe->xmit_sequence.rsvd3 = 0; 6066 6064 /* word4 relative_offset memcpy */ 6067 6065 /* word5 r_ctl/df_ctl memcpy */ 6068 - bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 6066 + bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 6067 + bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 6068 + bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 6069 + LPFC_WQE_IOD_WRITE); 6070 + bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 6071 + LPFC_WQE_LENLOC_WORD12); 6072 + bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 6069 6073 wqe->xmit_sequence.xmit_len = xmit_len; 6070 6074 command_type = OTHER_COMMAND; 6071 6075 break; 6072 6076 case CMD_XMIT_BCAST64_CN: 6073 - /* word3 iocb=iotag32 wqe=payload_len */ 6074 - wqe->words[3] = 0; /* no definition for this in wqe */ 6077 + /* word3 iocb=iotag32 wqe=seq_payload_len */ 6078 + wqe->xmit_bcast64.seq_payload_len = xmit_len; 6075 6079 /* word4 iocb=rsvd wqe=rsvd */ 6076 6080 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 6077 6081 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 6078 - bf_set(lpfc_wqe_gen_ct, &wqe->generic, 6082 + bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 6079 6083 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6084 + bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 6085 + bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 6086 + bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 6087 + LPFC_WQE_LENLOC_WORD3); 6088 + bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 6080 6089 break; 6081 6090 case CMD_FCP_IWRITE64_CR: 6082 6091 command_type = FCP_COMMAND_DATA_OUT; 6083 - /* The struct for wqe fcp_iwrite 
has 3 fields that are somewhat 6084 - * confusing. 6085 - * word3 is payload_len: byte offset to the sgl entry for the 6086 - * fcp_command. 6087 - * word4 is total xfer len, same as the IOCB->ulpParameter. 6088 - * word5 is initial xfer len 0 = wait for xfer-ready 6089 - */ 6090 - 6091 - /* Always wait for xfer-ready before sending data */ 6092 - wqe->fcp_iwrite.initial_xfer_len = 0; 6093 - /* word 4 (xfer length) should have been set on the memcpy */ 6094 - 6095 - /* allow write to fall through to read */ 6096 - case CMD_FCP_IREAD64_CR: 6097 - /* FCP_CMD is always the 1st sgl entry */ 6098 - wqe->fcp_iread.payload_len = 6092 + /* word3 iocb=iotag wqe=payload_offset_len */ 6093 + /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 6094 + wqe->fcp_iwrite.payload_offset_len = 6099 6095 xmit_len + sizeof(struct fcp_rsp); 6100 - 6101 - /* word 4 (xfer length) should have been set on the memcpy */ 6102 - 6103 - bf_set(lpfc_wqe_gen_erp, &wqe->generic, 6104 - iocbq->iocb.ulpFCP2Rcvy); 6105 - bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS); 6106 - /* The XC bit and the XS bit are similar. The driver never 6107 - * tracked whether or not the exchange was previouslly open. 6108 - * XC = Exchange create, 0 is create. 1 is already open. 6109 - * XS = link cmd: 1 do not close the exchange after command. 6110 - * XS = 0 close exchange when command completes. 6111 - * The only time we would not set the XC bit is when the XS bit 6112 - * is set and we are sending our 2nd or greater command on 6113 - * this exchange. 6114 - */ 6096 + /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 6097 + /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 6098 + bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 6099 + iocbq->iocb.ulpFCP2Rcvy); 6100 + bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 6101 + /* Always open the exchange */ 6102 + bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); 6103 + bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 6104 + bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 6105 + bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 6106 + LPFC_WQE_LENLOC_WORD4); 6107 + bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 6108 + bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 6109 + break; 6110 + case CMD_FCP_IREAD64_CR: 6111 + /* word3 iocb=iotag wqe=payload_offset_len */ 6112 + /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 6113 + wqe->fcp_iread.payload_offset_len = 6114 + xmit_len + sizeof(struct fcp_rsp); 6115 + /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 6116 + /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 6117 + bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 6118 + iocbq->iocb.ulpFCP2Rcvy); 6119 + bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 6115 6120 /* Always open the exchange */ 6116 6121 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 6117 - 6118 - wqe->words[10] &= 0xffff0000; /* zero out ebde count */ 6119 - bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); 6120 - break; 6122 + bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 6123 + bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 6124 + bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 6125 + LPFC_WQE_LENLOC_WORD4); 6126 + bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 6127 + bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 6128 + break; 6121 6129 case CMD_FCP_ICMND64_CR: 6130 + /* word3 iocb=IO_TAG wqe=reserved */ 6131 + wqe->fcp_icmd.rsrvd3 = 0; 6132 + bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 6122 6133 /* 
Always open the exchange */ 6123 - bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 6124 - 6125 - wqe->words[4] = 0; 6126 - wqe->words[10] &= 0xffff0000; /* zero out ebde count */ 6127 - bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 6134 + bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); 6135 + bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 6136 + bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 6137 + bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 6138 + bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 6139 + LPFC_WQE_LENLOC_NONE); 6140 + bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 6128 6141 break; 6129 6142 case CMD_GEN_REQUEST64_CR: 6130 - /* word3 command length is described as byte offset to the 6131 - * rsp_data. Would always be 16, sizeof(struct sli4_sge) 6132 - * sgl[0] = cmnd 6133 - * sgl[1] = rsp. 6134 - * 6135 - */ 6136 - wqe->gen_req.command_len = xmit_len; 6137 - /* Word4 parameter copied in the memcpy */ 6138 - /* Word5 [rctl, type, df_ctl, la] copied in memcpy */ 6143 + /* word3 iocb=IO_TAG wqe=request_payload_len */ 6144 + wqe->gen_req.request_payload_len = xmit_len; 6145 + /* word4 iocb=parameter wqe=relative_offset memcpy */ 6146 + /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 6139 6147 /* word6 context tag copied in memcpy */ 6140 6148 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 6141 6149 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); ··· 6152 6144 ct, iocbq->iocb.ulpCommand); 6153 6145 return IOCB_ERROR; 6154 6146 } 6155 - bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0); 6156 - bf_set(wqe_tmo, &wqe->gen_req.wqe_com, 6157 - iocbq->iocb.ulpTimeout); 6158 - 6159 - bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); 6147 + bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 6148 + bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 6149 + bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 6150 + bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 6151 + bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 6152 + bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 6153 + bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 6154 + bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 6160 6155 command_type = OTHER_COMMAND; 6161 6156 break; 6162 6157 case CMD_XMIT_ELS_RSP64_CX: 6163 6158 /* words0-2 BDE memcpy */ 6164 - /* word3 iocb=iotag32 wqe=rsvd */ 6165 - wqe->words[3] = 0; 6159 + /* word3 iocb=iotag32 wqe=response_payload_len */ 6160 + wqe->xmit_els_rsp.response_payload_len = xmit_len; 6166 6161 /* word4 iocb=did wge=rsvd. 
*/ 6167 - wqe->words[4] = 0; 6162 + wqe->xmit_els_rsp.rsvd4 = 0; 6168 6163 /* word5 iocb=rsvd wge=did */ 6169 6164 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 6170 6165 iocbq->iocb.un.elsreq64.remoteID); 6171 - 6172 - bf_set(lpfc_wqe_gen_ct, &wqe->generic, 6173 - ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6174 - 6175 - bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); 6176 - bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); 6166 + bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 6167 + ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6168 + bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 6169 + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6170 + iocbq->iocb.ulpContext); 6177 6171 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 6178 - bf_set(lpfc_wqe_gen_context, &wqe->generic, 6172 + bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 6179 6173 iocbq->vport->vpi + phba->vpi_base); 6174 + bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 6175 + bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 6176 + bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 6177 + bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 6178 + LPFC_WQE_LENLOC_WORD3); 6179 + bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 6180 6180 command_type = OTHER_COMMAND; 6181 6181 break; 6182 6182 case CMD_CLOSE_XRI_CN: ··· 6209 6193 else 6210 6194 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 6211 6195 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 6212 - wqe->words[5] = 0; 6213 - bf_set(lpfc_wqe_gen_ct, &wqe->generic, 6196 + /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 6197 + wqe->abort_cmd.rsrvd5 = 0; 6198 + bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 6214 6199 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6215 6200 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 6216 6201 /* 6217 6202 * The abort handler will send us CMD_ABORT_XRI_CN or 6218 6203 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 6219 6204 */ 6220 - bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX); 6205 + bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 6206 + bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 6207 + bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 6208 + LPFC_WQE_LENLOC_NONE); 6221 6209 cmnd = CMD_ABORT_XRI_CX; 6222 6210 command_type = OTHER_COMMAND; 6223 6211 xritag = 0; ··· 6255 6235 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 6256 6236 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 6257 6237 iocbq->iocb.ulpContext); 6238 + bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 6239 + bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 6240 + LPFC_WQE_LENLOC_NONE); 6258 6241 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 6259 6242 command_type = OTHER_COMMAND; 6260 6243 break; 6261 6244 case CMD_XRI_ABORTED_CX: 6262 6245 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 6263 - /* words0-2 are all 0's no bde */ 6264 - /* word3 and word4 are rsvrd */ 6265 - wqe->words[3] = 0; 6266 - wqe->words[4] = 0; 6267 - /* word5 iocb=rsvd wge=did */ 6268 - /* There is no remote port id in the IOCB? 
*/ 6269 - /* Let this fall through and fail */ 6270 6246 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 6271 6247 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 6272 6248 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ ··· 6273 6257 iocbq->iocb.ulpCommand); 6274 6258 return IOCB_ERROR; 6275 6259 break; 6276 - 6277 6260 } 6278 - bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag); 6279 - bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag); 6280 - wqe->generic.abort_tag = abort_tag; 6281 - bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type); 6282 - bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd); 6283 - bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass); 6284 - bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT); 6285 - 6261 + bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 6262 + bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 6263 + wqe->generic.wqe_com.abort_tag = abort_tag; 6264 + bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 6265 + bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 6266 + bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 6267 + bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 6286 6268 return 0; 6287 6269 } 6288 6270 ··· 7271 7257 } 7272 7258 7273 7259 /** 7274 - * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 7260 + * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 7275 7261 * @phba: Pointer to HBA context object. 7276 7262 * @pring: Pointer to driver SLI ring object. 7277 7263 * @cmdiocb: Pointer to driver command iocb object. 7278 7264 * 7279 - * This function issues an abort iocb for the provided command 7280 - * iocb. This function is called with hbalock held. 7281 - * The function returns 0 when it fails due to memory allocation 7282 - * failure or when the command iocb is an abort request. 7265 + * This function issues an abort iocb for the provided command iocb down to 7266 + * the port. Other than the case the outstanding command iocb is an abort 7267 + * request, this function issues abort out unconditionally. This function is 7268 + * called with hbalock held. The function returns 0 when it fails due to 7269 + * memory allocation failure or when the command iocb is an abort request. 7283 7270 **/ 7284 - int 7285 - lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7271 + static int 7272 + lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7286 7273 struct lpfc_iocbq *cmdiocb) 7287 7274 { 7288 7275 struct lpfc_vport *vport = cmdiocb->vport; 7289 7276 struct lpfc_iocbq *abtsiocbp; 7290 7277 IOCB_t *icmd = NULL; 7291 7278 IOCB_t *iabt = NULL; 7292 - int retval = IOCB_ERROR; 7279 + int retval; 7293 7280 7294 7281 /* 7295 7282 * There are certain command types we don't want to abort. And we ··· 7302 7287 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 7303 7288 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 7304 7289 return 0; 7305 - 7306 - /* If we're unloading, don't abort iocb on the ELS ring, but change the 7307 - * callback so that nothing happens when it finishes. 
7308 - */
7309 - if ((vport->load_flag & FC_UNLOADING) &&
7310 - (pring->ringno == LPFC_ELS_RING)) {
7311 - if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
7312 - cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
7313 - else
7314 - cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
7315 - goto abort_iotag_exit;
7316 - }
7317 7290
7318 7291 /* issue ABTS for this IOCB based on iotag */
7319 7292 abtsiocbp = __lpfc_sli_get_iocbq(phba);
···
7347 7344
7348 7345 if (retval)
7349 7346 __lpfc_sli_release_iocbq(phba, abtsiocbp);
7347 +
7348 + /*
7349 + * Caller to this routine should check for IOCB_ERROR
7350 + * and handle it properly. This routine no longer removes
7351 + * iocb off txcmplq and call compl in case of IOCB_ERROR.
7352 + */
7353 + return retval;
7354 + }
7355 +
7356 + /**
7357 + * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
7358 + * @phba: Pointer to HBA context object.
7359 + * @pring: Pointer to driver SLI ring object.
7360 + * @cmdiocb: Pointer to driver command iocb object.
7361 + *
7362 + * This function issues an abort iocb for the provided command iocb. In case
7363 + * of unloading, the abort iocb will not be issued to commands on the ELS
7364 + * ring. Instead, the callback function shall be changed for those commands
7365 + * so that nothing happens when they finish. This function is called with
7366 + * hbalock held. The function returns 0 when the command iocb is an abort
7367 + * request.
7368 + **/
7369 + int
7370 + lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7371 + struct lpfc_iocbq *cmdiocb)
7372 + {
7373 + struct lpfc_vport *vport = cmdiocb->vport;
7374 + int retval = IOCB_ERROR;
7375 + IOCB_t *icmd = NULL;
7376 +
7377 + /*
7378 + * There are certain command types we don't want to abort. And we
7379 + * don't want to abort commands that are already in the process of
7380 + * being aborted.
7381 + */
7382 + icmd = &cmdiocb->iocb;
7383 + if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
7384 + icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
7385 + (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
7386 + return 0;
7387 +
7388 + /*
7389 + * If we're unloading, don't abort iocb on the ELS ring, but change
7390 + * the callback so that nothing happens when it finishes.
7391 + */
7392 + if ((vport->load_flag & FC_UNLOADING) &&
7393 + (pring->ringno == LPFC_ELS_RING)) {
7394 + if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
7395 + cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
7396 + else
7397 + cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
7398 + goto abort_iotag_exit;
7399 + }
7400 +
7401 + /* Now, we try to issue the abort to the cmdiocb out */
7402 + retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
7403 +
7350 7404 abort_iotag_exit:
7351 7405 /*
7352 7406 * Caller to this routine should check for IOCB_ERROR
···
7411 7351 * iocb off txcmplq and call compl in case of IOCB_ERROR.
7412 7352 */
7413 7353 return retval;
7354 + }
7355 +
7356 + /**
7357 + * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
7358 + * @phba: Pointer to HBA context object.
7359 + * @pring: Pointer to driver SLI ring object.
7360 + *
7361 + * This function aborts all iocbs in the given ring and frees all the iocb
7362 + * objects in txq. This function issues abort iocbs unconditionally for all
7363 + * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
7364 + * to complete before the return of this function. The caller is not required
7365 + * to hold any locks.
7366 + **/ 7367 + static void 7368 + lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 7369 + { 7370 + LIST_HEAD(completions); 7371 + struct lpfc_iocbq *iocb, *next_iocb; 7372 + 7373 + if (pring->ringno == LPFC_ELS_RING) 7374 + lpfc_fabric_abort_hba(phba); 7375 + 7376 + spin_lock_irq(&phba->hbalock); 7377 + 7378 + /* Take off all the iocbs on txq for cancelling */ 7379 + list_splice_init(&pring->txq, &completions); 7380 + pring->txq_cnt = 0; 7381 + 7382 + /* Next issue ABTS for everything on the txcmplq */ 7383 + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 7384 + lpfc_sli_abort_iotag_issue(phba, pring, iocb); 7385 + 7386 + spin_unlock_irq(&phba->hbalock); 7387 + 7388 + /* Cancel all the IOCBs from the completions list */ 7389 + lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 7390 + IOERR_SLI_ABORTED); 7391 + } 7392 + 7393 + /** 7394 + * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 7395 + * @phba: pointer to lpfc HBA data structure. 7396 + * 7397 + * This routine will abort all pending and outstanding iocbs to an HBA. 7398 + **/ 7399 + void 7400 + lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 7401 + { 7402 + struct lpfc_sli *psli = &phba->sli; 7403 + struct lpfc_sli_ring *pring; 7404 + int i; 7405 + 7406 + for (i = 0; i < psli->num_rings; i++) { 7407 + pring = &psli->ring[i]; 7408 + lpfc_sli_iocb_ring_abort(phba, pring); 7409 + } 7414 7410 } 7415 7411 7416 7412 /** ··· 12358 12242 /* Issue the mailbox command asynchronously */ 12359 12243 mboxq->vport = phba->pport; 12360 12244 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 12245 + 12246 + spin_lock_irq(&phba->hbalock); 12247 + phba->hba_flag |= FCF_TS_INPROG; 12248 + spin_unlock_irq(&phba->hbalock); 12249 + 12361 12250 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12362 12251 if (rc == MBX_NOT_FINISHED) 12363 12252 error = -EIO; 12364 12253 else { 12365 - spin_lock_irq(&phba->hbalock); 12366 - phba->hba_flag |= FCF_DISC_INPROGRESS; 12367 - spin_unlock_irq(&phba->hbalock); 12368 12254 /* Reset eligible FCF count for new scan */ 12369 12255 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 12370 12256 phba->fcf.eligible_fcf_cnt = 0; ··· 12376 12258 if (error) { 12377 12259 if (mboxq) 12378 12260 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12379 - /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */ 12261 + /* FCF scan failed, clear FCF_TS_INPROG flag */ 12380 12262 spin_lock_irq(&phba->hbalock); 12381 - phba->hba_flag &= ~FCF_DISC_INPROGRESS; 12263 + phba->hba_flag &= ~FCF_TS_INPROG; 12382 12264 spin_unlock_irq(&phba->hbalock); 12383 12265 } 12384 12266 return error; 12385 12267 } 12386 12268 12387 12269 /** 12388 - * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf. 12270 + * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 12389 12271 * @phba: pointer to lpfc hba data structure. 12390 12272 * @fcf_index: FCF table entry offset. 12391 12273 * 12392 12274 * This routine is invoked to read an FCF record indicated by @fcf_index 12393 - * and to use it for FLOGI round robin FCF failover. 12275 + * and to use it for FLOGI roundrobin FCF failover. 12394 12276 * 12395 12277 * Return 0 if the mailbox command is submitted sucessfully, none 0 12396 12278 * otherwise. ··· 12436 12318 * @fcf_index: FCF table entry offset. 12437 12319 * 12438 12320 * This routine is invoked to read an FCF record indicated by @fcf_index to 12439 - * determine whether it's eligible for FLOGI round robin failover list. 
12321 + * determine whether it's eligible for FLOGI roundrobin failover list. 12440 12322 * 12441 12323 * Return 0 if the mailbox command is submitted sucessfully, none 0 12442 12324 * otherwise. ··· 12482 12364 * 12483 12365 * This routine is to get the next eligible FCF record index in a round 12484 12366 * robin fashion. If the next eligible FCF record index equals to the 12485 - * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 12367 + * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 12486 12368 * shall be returned, otherwise, the next eligible FCF record's index 12487 12369 * shall be returned. 12488 12370 **/ ··· 12510 12392 return LPFC_FCOE_FCF_NEXT_NONE; 12511 12393 } 12512 12394 12513 - /* Check roundrobin failover index bmask stop condition */ 12514 - if (next_fcf_index == phba->fcf.fcf_rr_init_indx) { 12515 - if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) { 12516 - lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 12517 - "2847 Round robin failover FCF index " 12518 - "search hit stop condition:x%x\n", 12519 - next_fcf_index); 12520 - return LPFC_FCOE_FCF_NEXT_NONE; 12521 - } 12522 - /* The roundrobin failover index bmask updated, start over */ 12523 - lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12524 - "2848 Round robin failover FCF index bmask " 12525 - "updated, start over\n"); 12526 - spin_lock_irq(&phba->hbalock); 12527 - phba->fcf.fcf_flag &= ~FCF_REDISC_RRU; 12528 - spin_unlock_irq(&phba->hbalock); 12529 - return phba->fcf.fcf_rr_init_indx; 12530 - } 12531 - 12532 12395 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12533 - "2845 Get next round robin failover " 12534 - "FCF index x%x\n", next_fcf_index); 12396 + "2845 Get next roundrobin failover FCF (x%x)\n", 12397 + next_fcf_index); 12398 + 12535 12399 return next_fcf_index; 12536 12400 } 12537 12401 ··· 12522 12422 * @phba: pointer to lpfc hba data structure. 12523 12423 * 12524 12424 * This routine sets the FCF record index in to the eligible bmask for 12525 - * round robin failover search. It checks to make sure that the index 12425 + * roundrobin failover search. It checks to make sure that the index 12526 12426 * does not go beyond the range of the driver allocated bmask dimension 12527 12427 * before setting the bit. 12528 12428 * ··· 12534 12434 { 12535 12435 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12536 12436 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12537 - "2610 HBA FCF index reached driver's " 12538 - "book keeping dimension: fcf_index:%d, " 12539 - "driver_bmask_max:%d\n", 12437 + "2610 FCF (x%x) reached driver's book " 12438 + "keeping dimension:x%x\n", 12540 12439 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12541 12440 return -EINVAL; 12542 12441 } 12543 12442 /* Set the eligible FCF record index bmask */ 12544 12443 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12545 12444 12546 - /* Set the roundrobin index bmask updated */ 12547 - spin_lock_irq(&phba->hbalock); 12548 - phba->fcf.fcf_flag |= FCF_REDISC_RRU; 12549 - spin_unlock_irq(&phba->hbalock); 12550 - 12551 12445 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12552 - "2790 Set FCF index x%x to round robin failover " 12446 + "2790 Set FCF (x%x) to roundrobin FCF failover " 12553 12447 "bmask\n", fcf_index); 12554 12448 12555 12449 return 0; ··· 12554 12460 * @phba: pointer to lpfc hba data structure. 12555 12461 * 12556 12462 * This routine clears the FCF record index from the eligible bmask for 12557 - * round robin failover search. It checks to make sure that the index 12463 + * roundrobin failover search. 
It checks to make sure that the index 12558 12464 * does not go beyond the range of the driver allocated bmask dimension 12559 12465 * before clearing the bit. 12560 12466 **/ ··· 12563 12469 { 12564 12470 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12565 12471 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12566 - "2762 HBA FCF index goes beyond driver's " 12567 - "book keeping dimension: fcf_index:%d, " 12568 - "driver_bmask_max:%d\n", 12472 + "2762 FCF (x%x) reached driver's book " 12473 + "keeping dimension:x%x\n", 12569 12474 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12570 12475 return; 12571 12476 } ··· 12572 12479 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12573 12480 12574 12481 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12575 - "2791 Clear FCF index x%x from round robin failover " 12482 + "2791 Clear FCF (x%x) from roundrobin failover " 12576 12483 "bmask\n", fcf_index); 12577 12484 } 12578 12485 ··· 12623 12530 } 12624 12531 } else { 12625 12532 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12626 - "2775 Start FCF rediscovery quiescent period " 12627 - "wait timer before scaning FCF table\n"); 12533 + "2775 Start FCF rediscover quiescent timer\n"); 12628 12534 /* 12629 12535 * Start FCF rediscovery wait timer for pending FCF 12630 12536 * before rescan FCF record table.
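The abort rework in lpfc_sli.c separates mechanism from policy: lpfc_sli_abort_iotag_issue() unconditionally builds and sends the ABTS, lpfc_sli_issue_abort_iotag() keeps the unload-time exception for the ELS ring, and lpfc_sli_iocb_ring_abort()/lpfc_sli_hba_iocb_abort() walk every ring, cancelling the txq and aborting whatever is outstanding on the txcmplq. The call shape, reduced to a toy standalone model with counters in place of the iocb lists:

#include <stdio.h>

#define NRINGS 4

struct ring {
    int txq_cnt;      /* queued, never sent: cancel outright */
    int txcmplq_cnt;  /* outstanding at the port: needs ABTS  */
};

static void abort_iotag_issue(struct ring *r)
{
    r->txcmplq_cnt--;             /* one ABTS issued unconditionally */
}

static void iocb_ring_abort(struct ring *r)
{
    r->txq_cnt = 0;               /* splice txq off for cancelling */
    while (r->txcmplq_cnt > 0)    /* ABTS everything on the txcmplq */
        abort_iotag_issue(r);
}

static void hba_iocb_abort(struct ring *rings)
{
    int i;

    for (i = 0; i < NRINGS; i++)  /* walk every ring on the HBA */
        iocb_ring_abort(&rings[i]);
}

int main(void)
{
    struct ring rings[NRINGS] = { { 2, 3 }, { 0, 1 }, { 4, 0 }, { 1, 1 } };

    hba_iocb_abort(rings);
    printf("all rings drained\n");
    return 0;
}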
+7 -2
drivers/scsi/lpfc/lpfc_sli4.h
··· 19 19 *******************************************************************/ 20 20 21 21 #define LPFC_ACTIVE_MBOX_WAIT_CNT 100 22 + #define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000 23 + #define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 24 + #define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000 22 25 #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 23 26 #define LPFC_GET_QE_REL_INT 32 24 27 #define LPFC_RPI_LOW_WATER_MARK 10 28 + 29 + #define LPFC_UNREG_FCF 1 30 + #define LPFC_SKIP_UNREG_FCF 0 25 31 26 32 /* Amount of time in seconds for waiting FCF rediscovery to complete */ 27 33 #define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */ ··· 169 163 #define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ 170 164 #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ 171 165 #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ 172 - #define FCF_REDISC_RRU 0x400 /* Roundrobin bitmap updated */ 166 + #define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT) 173 167 uint32_t addr_mode; 174 - uint16_t fcf_rr_init_indx; 175 168 uint32_t eligible_fcf_cnt; 176 169 struct lpfc_fcf_rec current_rec; 177 170 struct lpfc_fcf_rec failover_rec;
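With fcf_rr_init_indx and FCF_REDISC_RRU retired, the only new state in this header is the FCF_REDISC_PROG composite, which lets callers test both rediscovery bits with a single mask, as this trivial standalone check shows:

#include <stdio.h>

#define FCF_REDISC_PEND 0x80   /* FCF rediscovery pending */
#define FCF_REDISC_EVT  0x100  /* FCF rediscovery event posted */
#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)

int main(void)
{
    unsigned int fcf_flag = FCF_REDISC_EVT;

    if (fcf_flag & FCF_REDISC_PROG)  /* either bit means "in progress" */
        printf("rediscovery in progress (fcf_flag=0x%x)\n", fcf_flag);
    return 0;
}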
+1 -1
drivers/scsi/lpfc/lpfc_version.h
··· 18 18 * included with this package. * 19 19 *******************************************************************/ 20 20 21 - #define LPFC_DRIVER_VERSION "8.3.17" 21 + #define LPFC_DRIVER_VERSION "8.3.18" 22 22 #define LPFC_DRIVER_NAME "lpfc" 23 23 #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24 24 #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
+124 -2
drivers/scsi/megaraid/megaraid_sas.c
··· 10 10 * 2 of the License, or (at your option) any later version. 11 11 * 12 12 * FILE : megaraid_sas.c 13 - * Version : v00.00.04.17.1-rc1 13 + * Version : v00.00.04.31-rc1 14 14 * 15 15 * Authors: 16 16 * (email-id : megaraidlinux@lsi.com) ··· 55 55 module_param_named(poll_mode_io, poll_mode_io, int, 0); 56 56 MODULE_PARM_DESC(poll_mode_io, 57 57 "Complete cmds from IO path, (default=0)"); 58 + 59 + /* 60 + * Number of sectors per IO command 61 + * Will be set in megasas_init_mfi if user does not provide 62 + */ 63 + static unsigned int max_sectors; 64 + module_param_named(max_sectors, max_sectors, int, 0); 65 + MODULE_PARM_DESC(max_sectors, 66 + "Maximum number of sectors per IO command"); 58 67 59 68 MODULE_LICENSE("GPL"); 60 69 MODULE_VERSION(MEGASAS_VERSION); ··· 112 103 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); 113 104 static u32 support_poll_for_event; 114 105 static u32 megasas_dbg_lvl; 106 + static u32 support_device_change; 115 107 116 108 /* define lock for aen poll */ 117 109 spinlock_t poll_aen_lock; ··· 728 718 megasas_check_reset_gen2(struct megasas_instance *instance, 729 719 struct megasas_register_set __iomem *regs) 730 720 { 721 + if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { 722 + return 1; 723 + } 724 + 731 725 return 0; 732 726 } 733 727 ··· 944 930 mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl); 945 931 mfi_sgl->sge_skinny[i].phys_addr = 946 932 sg_dma_address(os_sgl); 933 + mfi_sgl->sge_skinny[i].flag = 0; 947 934 } 948 935 } 949 936 return sge_count; ··· 1572 1557 } 1573 1558 } 1574 1559 1560 + static void 1561 + megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 1562 + 1563 + static void 1564 + process_fw_state_change_wq(struct work_struct *work); 1565 + 1566 + void megasas_do_ocr(struct megasas_instance *instance) 1567 + { 1568 + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 1569 + (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 1570 + (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 1571 + *instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN; 1572 + } 1573 + instance->instancet->disable_intr(instance->reg_set); 1574 + instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; 1575 + instance->issuepend_done = 0; 1576 + 1577 + atomic_set(&instance->fw_outstanding, 0); 1578 + megasas_internal_reset_defer_cmds(instance); 1579 + process_fw_state_change_wq(&instance->work_init); 1580 + } 1581 + 1575 1582 /** 1576 1583 * megasas_wait_for_outstanding - Wait for all outstanding cmds 1577 1584 * @instance: Adapter soft state ··· 1611 1574 unsigned long flags; 1612 1575 struct list_head clist_local; 1613 1576 struct megasas_cmd *reset_cmd; 1577 + u32 fw_state; 1578 + u8 kill_adapter_flag; 1614 1579 1615 1580 spin_lock_irqsave(&instance->hba_lock, flags); 1616 1581 adprecovery = instance->adprecovery; ··· 1698 1659 msleep(1000); 1699 1660 } 1700 1661 1701 - if (atomic_read(&instance->fw_outstanding)) { 1662 + i = 0; 1663 + kill_adapter_flag = 0; 1664 + do { 1665 + fw_state = instance->instancet->read_fw_status_reg( 1666 + instance->reg_set) & MFI_STATE_MASK; 1667 + if ((fw_state == MFI_STATE_FAULT) && 1668 + (instance->disableOnlineCtrlReset == 0)) { 1669 + if (i == 3) { 1670 + kill_adapter_flag = 2; 1671 + break; 1672 + } 1673 + megasas_do_ocr(instance); 1674 + kill_adapter_flag = 1; 1675 + 1676 + /* wait for 1 secs to let FW finish the pending cmds */ 1677 + msleep(1000); 1678 + } 1679 + i++; 1680 + } while (i <= 3); 1681 + 1682 + if (atomic_read(&instance->fw_outstanding) && 1683 + 
!kill_adapter_flag) { 1684 + if (instance->disableOnlineCtrlReset == 0) { 1685 + 1686 + megasas_do_ocr(instance); 1687 + 1688 + /* wait for 5 secs to let FW finish the pending cmds */ 1689 + for (i = 0; i < wait_time; i++) { 1690 + int outstanding = 1691 + atomic_read(&instance->fw_outstanding); 1692 + if (!outstanding) 1693 + return SUCCESS; 1694 + msleep(1000); 1695 + } 1696 + } 1697 + } 1698 + 1699 + if (atomic_read(&instance->fw_outstanding) || 1700 + (kill_adapter_flag == 2)) { 1702 1701 printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n"); 1703 1702 /* 1704 1703 * Send signal to FW to stop processing any pending cmds. ··· 2746 2669 return -ENOMEM; 2747 2670 } 2748 2671 2672 + memset(cmd->frame, 0, total_sz); 2749 2673 cmd->frame->io.context = cmd->index; 2750 2674 cmd->frame->io.pad_0 = 0; 2751 2675 } ··· 3663 3585 instance->max_fw_cmds - MEGASAS_INT_CMDS; 3664 3586 host->this_id = instance->init_id; 3665 3587 host->sg_tablesize = instance->max_num_sge; 3588 + /* 3589 + * Check if the module parameter value for max_sectors can be used 3590 + */ 3591 + if (max_sectors && max_sectors < instance->max_sectors_per_req) 3592 + instance->max_sectors_per_req = max_sectors; 3593 + else { 3594 + if (max_sectors) { 3595 + if (((instance->pdev->device == 3596 + PCI_DEVICE_ID_LSI_SAS1078GEN2) || 3597 + (instance->pdev->device == 3598 + PCI_DEVICE_ID_LSI_SAS0079GEN2)) && 3599 + (max_sectors <= MEGASAS_MAX_SECTORS)) { 3600 + instance->max_sectors_per_req = max_sectors; 3601 + } else { 3602 + printk(KERN_INFO "megasas: max_sectors should be > 0" 3603 + "and <= %d (or < 1MB for GEN2 controller)\n", 3604 + instance->max_sectors_per_req); 3605 + } 3606 + } 3607 + } 3608 + 3666 3609 host->max_sectors = instance->max_sectors_per_req; 3667 3610 host->cmd_per_lun = 128; 3668 3611 host->max_channel = MEGASAS_MAX_CHANNELS - 1; ··· 4757 4658 static DRIVER_ATTR(support_poll_for_event, S_IRUGO, 4758 4659 megasas_sysfs_show_support_poll_for_event, NULL); 4759 4660 4661 + static ssize_t 4662 + megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf) 4663 + { 4664 + return sprintf(buf, "%u\n", support_device_change); 4665 + } 4666 + 4667 + static DRIVER_ATTR(support_device_change, S_IRUGO, 4668 + megasas_sysfs_show_support_device_change, NULL); 4669 + 4760 4670 static ssize_t 4761 4671 megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) 4762 4672 { ··· 5086 4978 MEGASAS_EXT_VERSION); 5087 4979 5088 4980 support_poll_for_event = 2; 4981 + support_device_change = 1; 5089 4982 5090 4983 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 5091 4984 ··· 5135 5026 if (rval) 5136 5027 goto err_dcf_poll_mode_io; 5137 5028 5029 + rval = driver_create_file(&megasas_pci_driver.driver, 5030 + &driver_attr_support_device_change); 5031 + if (rval) 5032 + goto err_dcf_support_device_change; 5033 + 5138 5034 return rval; 5035 + 5036 + err_dcf_support_device_change: 5037 + driver_remove_file(&megasas_pci_driver.driver, 5038 + &driver_attr_poll_mode_io); 5139 5039 5140 5040 err_dcf_poll_mode_io: 5141 5041 driver_remove_file(&megasas_pci_driver.driver, ··· 5175 5057 &driver_attr_poll_mode_io); 5176 5058 driver_remove_file(&megasas_pci_driver.driver, 5177 5059 &driver_attr_dbg_lvl); 5060 + driver_remove_file(&megasas_pci_driver.driver, 5061 + &driver_attr_support_poll_for_event); 5062 + driver_remove_file(&megasas_pci_driver.driver, 5063 + &driver_attr_support_device_change); 5178 5064 driver_remove_file(&megasas_pci_driver.driver, 5179 5065 &driver_attr_release_date); 5180 5066 
driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
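The timeout path in this hunk retries an online controller reset up to three times while the firmware stays in the FAULT state, and only kills the adapter once the retries are exhausted. A minimal userspace sketch of that retry policy, with hypothetical read_fw_state() and do_ocr() helpers standing in for the driver's register accessors:

    /* Mirrors the do/while in megasas_wait_for_outstanding(): retry
     * OCR while the FW reports FAULT, give up after the third try.
     */
    #include <stdio.h>

    #define FW_FAULT 1
    #define FW_OK    0

    static int fw_state = FW_FAULT;
    static int faults_left = 2;      /* pretend FW recovers after two OCRs */

    static int read_fw_state(void) { return fw_state; }

    static void do_ocr(void)
    {
        if (--faults_left <= 0)
            fw_state = FW_OK;        /* the reset finally worked */
    }

    int main(void)
    {
        int i = 0, kill_adapter_flag = 0;

        do {
            if (read_fw_state() == FW_FAULT) {
                if (i == 3) {            /* third retry also failed */
                    kill_adapter_flag = 2;
                    break;
                }
                do_ocr();
                kill_adapter_flag = 1;   /* at least one OCR was issued */
            }
            i++;
        } while (i <= 3);

        printf(kill_adapter_flag == 2 ? "kill adapter\n" : "FW recovered\n");
        return 0;
    }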
+4 -3
drivers/scsi/megaraid/megaraid_sas.h
··· 18 18 /* 19 19 * MegaRAID SAS Driver meta data 20 20 */ 21 - #define MEGASAS_VERSION "00.00.04.17.1-rc1" 22 - #define MEGASAS_RELDATE "Oct. 29, 2009" 23 - #define MEGASAS_EXT_VERSION "Thu. Oct. 29, 11:41:51 PST 2009" 21 + #define MEGASAS_VERSION "00.00.04.31-rc1" 22 + #define MEGASAS_RELDATE "May 3, 2010" 23 + #define MEGASAS_EXT_VERSION "Mon. May 3, 11:41:51 PST 2010" 24 24 25 25 /* 26 26 * Device IDs ··· 706 706 #define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \ 707 707 MEGASAS_MAX_DEV_PER_CHANNEL) 708 708 709 + #define MEGASAS_MAX_SECTORS (2*1024) 709 710 #define MEGASAS_DBG_LVL 1 710 711 711 712 #define MEGASAS_FW_BUSY 1
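The new MEGASAS_MAX_SECTORS bound of 2*1024 sectors corresponds to the 1MB-per-command limit the ChangeLog mentions for GEN2 controllers, assuming the standard 512-byte sector. A two-line check of that arithmetic:

    #include <stdio.h>

    #define MEGASAS_MAX_SECTORS (2*1024)

    int main(void)
    {
        /* 2048 sectors * 512 bytes = 1048576 bytes = 1 MB */
        printf("max IO size = %d bytes\n", MEGASAS_MAX_SECTORS * 512);
        return 0;
    }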
+229 -15
drivers/scsi/osd/osd_initiator.c
··· 452 452 { 453 453 struct request *rq = or->request; 454 454 455 - _osd_free_seg(or, &or->set_attr); 456 - _osd_free_seg(or, &or->enc_get_attr); 457 - _osd_free_seg(or, &or->get_attr); 458 - 459 455 if (rq) { 460 456 if (rq->next_rq) { 461 457 _put_request(rq->next_rq); ··· 460 464 461 465 _put_request(rq); 462 466 } 467 + 468 + _osd_free_seg(or, &or->get_attr); 469 + _osd_free_seg(or, &or->enc_get_attr); 470 + _osd_free_seg(or, &or->set_attr); 471 + _osd_free_seg(or, &or->cdb_cont); 472 + 463 473 _osd_request_free(or); 464 474 } 465 475 EXPORT_SYMBOL(osd_end_request); ··· 547 545 seg->buff = buff; 548 546 seg->alloc_size = max_bytes; 549 547 return 0; 548 + } 549 + 550 + static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes) 551 + { 552 + OSD_DEBUG("total_bytes=%d\n", total_bytes); 553 + return _osd_realloc_seg(or, &or->cdb_cont, total_bytes); 550 554 } 551 555 552 556 static int _alloc_set_attr_list(struct osd_request *or, ··· 893 885 } 894 886 EXPORT_SYMBOL(osd_req_read_kern); 895 887 888 + static int _add_sg_continuation_descriptor(struct osd_request *or, 889 + const struct osd_sg_entry *sglist, unsigned numentries, u64 *len) 890 + { 891 + struct osd_sg_continuation_descriptor *oscd; 892 + u32 oscd_size; 893 + unsigned i; 894 + int ret; 895 + 896 + oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]); 897 + 898 + if (!or->cdb_cont.total_bytes) { 899 + /* First time, jump over the header, we will write to: 900 + * cdb_cont.buff + cdb_cont.total_bytes 901 + */ 902 + or->cdb_cont.total_bytes = 903 + sizeof(struct osd_continuation_segment_header); 904 + } 905 + 906 + ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size); 907 + if (unlikely(ret)) 908 + return ret; 909 + 910 + oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes; 911 + oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST); 912 + oscd->hdr.pad_length = 0; 913 + oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd)); 914 + 915 + *len = 0; 916 + /* copy the sg entries and convert to network byte order */ 917 + for (i = 0; i < numentries; i++) { 918 + oscd->entries[i].offset = cpu_to_be64(sglist[i].offset); 919 + oscd->entries[i].len = cpu_to_be64(sglist[i].len); 920 + *len += sglist[i].len; 921 + } 922 + 923 + or->cdb_cont.total_bytes += oscd_size; 924 + OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n", 925 + or->cdb_cont.total_bytes, oscd_size, numentries); 926 + return 0; 927 + } 928 + 929 + static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key) 930 + { 931 + struct request_queue *req_q = osd_request_queue(or->osd_dev); 932 + struct bio *bio; 933 + struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); 934 + struct osd_continuation_segment_header *cont_seg_hdr; 935 + 936 + if (!or->cdb_cont.total_bytes) 937 + return 0; 938 + 939 + cont_seg_hdr = or->cdb_cont.buff; 940 + cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2; 941 + cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action; 942 + 943 + /* create a bio for continuation segment */ 944 + bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes, 945 + GFP_KERNEL); 946 + if (unlikely(!bio)) 947 + return -ENOMEM; 948 + 949 + bio->bi_rw |= REQ_WRITE; 950 + 951 + /* integrity check the continuation before the bio is linked 952 + * with the other data segments since the continuation 953 + * integrity is separate from the other data segments. 
954 + */ 955 + osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key); 956 + 957 + cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes); 958 + 959 + /* we can't use _req_append_segment, because we need to link in the 960 + * continuation bio to the head of the bio list - the 961 + * continuation segment (if it exists) is always the first segment in 962 + * the out data buffer. 963 + */ 964 + bio->bi_next = or->out.bio; 965 + or->out.bio = bio; 966 + or->out.total_bytes += or->cdb_cont.total_bytes; 967 + 968 + return 0; 969 + } 970 + 971 + /* osd_req_write_sg: Takes a @bio that points to the data out buffer and an 972 + * @sglist that has the scatter gather entries. Scatter-gather enables a write 973 + * of multiple none-contiguous areas of an object, in a single call. The extents 974 + * may overlap and/or be in any order. The only constrain is that: 975 + * total_bytes(sglist) >= total_bytes(bio) 976 + */ 977 + int osd_req_write_sg(struct osd_request *or, 978 + const struct osd_obj_id *obj, struct bio *bio, 979 + const struct osd_sg_entry *sglist, unsigned numentries) 980 + { 981 + u64 len; 982 + int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len); 983 + 984 + if (ret) 985 + return ret; 986 + osd_req_write(or, obj, 0, bio, len); 987 + 988 + return 0; 989 + } 990 + EXPORT_SYMBOL(osd_req_write_sg); 991 + 992 + /* osd_req_read_sg: Read multiple extents of an object into @bio 993 + * See osd_req_write_sg 994 + */ 995 + int osd_req_read_sg(struct osd_request *or, 996 + const struct osd_obj_id *obj, struct bio *bio, 997 + const struct osd_sg_entry *sglist, unsigned numentries) 998 + { 999 + u64 len; 1000 + int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len); 1001 + 1002 + if (ret) 1003 + return ret; 1004 + osd_req_read(or, obj, 0, bio, len); 1005 + 1006 + return 0; 1007 + } 1008 + EXPORT_SYMBOL(osd_req_read_sg); 1009 + 1010 + /* SG-list write/read Kern API 1011 + * 1012 + * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array 1013 + * of sg_entries. @numentries indicates how many pointers and sg_entries there 1014 + * are. By requiring an array of buff pointers. This allows a caller to do a 1015 + * single write/read and scatter into multiple buffers. 1016 + * NOTE: Each buffer + len should not cross a page boundary. 
1017 + */ 1018 + static struct bio *_create_sg_bios(struct osd_request *or, 1019 + void **buff, const struct osd_sg_entry *sglist, unsigned numentries) 1020 + { 1021 + struct request_queue *q = osd_request_queue(or->osd_dev); 1022 + struct bio *bio; 1023 + unsigned i; 1024 + 1025 + bio = bio_kmalloc(GFP_KERNEL, numentries); 1026 + if (unlikely(!bio)) { 1027 + OSD_DEBUG("Faild to allocate BIO size=%u\n", numentries); 1028 + return ERR_PTR(-ENOMEM); 1029 + } 1030 + 1031 + for (i = 0; i < numentries; i++) { 1032 + unsigned offset = offset_in_page(buff[i]); 1033 + struct page *page = virt_to_page(buff[i]); 1034 + unsigned len = sglist[i].len; 1035 + unsigned added_len; 1036 + 1037 + BUG_ON(offset + len > PAGE_SIZE); 1038 + added_len = bio_add_pc_page(q, bio, page, len, offset); 1039 + if (unlikely(len != added_len)) { 1040 + OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n", 1041 + len, added_len); 1042 + bio_put(bio); 1043 + return ERR_PTR(-ENOMEM); 1044 + } 1045 + } 1046 + 1047 + return bio; 1048 + } 1049 + 1050 + int osd_req_write_sg_kern(struct osd_request *or, 1051 + const struct osd_obj_id *obj, void **buff, 1052 + const struct osd_sg_entry *sglist, unsigned numentries) 1053 + { 1054 + struct bio *bio = _create_sg_bios(or, buff, sglist, numentries); 1055 + if (IS_ERR(bio)) 1056 + return PTR_ERR(bio); 1057 + 1058 + bio->bi_rw |= REQ_WRITE; 1059 + osd_req_write_sg(or, obj, bio, sglist, numentries); 1060 + 1061 + return 0; 1062 + } 1063 + EXPORT_SYMBOL(osd_req_write_sg_kern); 1064 + 1065 + int osd_req_read_sg_kern(struct osd_request *or, 1066 + const struct osd_obj_id *obj, void **buff, 1067 + const struct osd_sg_entry *sglist, unsigned numentries) 1068 + { 1069 + struct bio *bio = _create_sg_bios(or, buff, sglist, numentries); 1070 + if (IS_ERR(bio)) 1071 + return PTR_ERR(bio); 1072 + 1073 + osd_req_read_sg(or, obj, bio, sglist, numentries); 1074 + 1075 + return 0; 1076 + } 1077 + EXPORT_SYMBOL(osd_req_read_sg_kern); 1078 + 1079 + 1080 + 896 1081 void osd_req_get_attributes(struct osd_request *or, 897 1082 const struct osd_obj_id *obj) 898 1083 { ··· 1419 1218 or->get_attr.buff = attar_page; 1420 1219 or->get_attr.total_bytes = max_page_len; 1421 1220 1422 - or->set_attr.buff = set_one_attr->val_ptr; 1423 - or->set_attr.total_bytes = set_one_attr->len; 1424 - 1425 1221 cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id); 1426 1222 cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len); 1427 - /* ocdb->attrs_page.get_attr_offset; */ 1223 + 1224 + if (!set_one_attr || !set_one_attr->attr_page) 1225 + return 0; /* The set is optional */ 1226 + 1227 + or->set_attr.buff = set_one_attr->val_ptr; 1228 + or->set_attr.total_bytes = set_one_attr->len; 1428 1229 1429 1230 cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page); 1430 1231 cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id); 1431 1232 cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len); 1432 - /* ocdb->attrs_page.set_attr_offset; */ 1433 1233 return 0; 1434 1234 } 1435 1235 EXPORT_SYMBOL(osd_req_add_get_attr_page); ··· 1450 1248 if (ret) 1451 1249 return ret; 1452 1250 1251 + if (or->set_attr.total_bytes == 0) 1252 + return 0; 1253 + 1453 1254 /* set one value */ 1454 1255 cdbh->attrs_page.set_attr_offset = 1455 1256 osd_req_encode_offset(or, or->out.total_bytes, &out_padding); 1456 1257 1457 - ret = _req_append_segment(or, out_padding, &or->enc_get_attr, NULL, 1258 + ret = _req_append_segment(or, out_padding, &or->set_attr, NULL, 1458 1259 &or->out); 1459 1260 return ret; 
1460 1261 } ··· 1481 1276 } 1482 1277 1483 1278 static int _osd_req_finalize_data_integrity(struct osd_request *or, 1484 - bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key) 1279 + bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes, 1280 + const u8 *cap_key) 1485 1281 { 1486 1282 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or); 1487 1283 int ret; ··· 1513 1307 or->out.last_seg = NULL; 1514 1308 1515 1309 /* they are now all chained to request sign them all together */ 1516 - osd_sec_sign_data(&or->out_data_integ, or->out.req->bio, 1310 + osd_sec_sign_data(&or->out_data_integ, out_data_bio, 1517 1311 cap_key); 1518 1312 } 1519 1313 ··· 1609 1403 { 1610 1404 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); 1611 1405 bool has_in, has_out; 1406 + /* Save for data_integrity without the cdb_continuation */ 1407 + struct bio *out_data_bio = or->out.bio; 1612 1408 u64 out_data_bytes = or->out.total_bytes; 1613 1409 int ret; 1614 1410 ··· 1626 1418 osd_set_caps(&or->cdb, cap); 1627 1419 1628 1420 has_in = or->in.bio || or->get_attr.total_bytes; 1629 - has_out = or->out.bio || or->set_attr.total_bytes || 1630 - or->enc_get_attr.total_bytes; 1421 + has_out = or->out.bio || or->cdb_cont.total_bytes || 1422 + or->set_attr.total_bytes || or->enc_get_attr.total_bytes; 1631 1423 1424 + ret = _osd_req_finalize_cdb_cont(or, cap_key); 1425 + if (ret) { 1426 + OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n"); 1427 + return ret; 1428 + } 1632 1429 ret = _init_blk_request(or, has_in, has_out); 1633 1430 if (ret) { 1634 1431 OSD_DEBUG("_init_blk_request failed\n"); ··· 1671 1458 } 1672 1459 1673 1460 ret = _osd_req_finalize_data_integrity(or, has_in, has_out, 1674 - out_data_bytes, cap_key); 1461 + out_data_bio, out_data_bytes, 1462 + cap_key); 1675 1463 if (ret) 1676 1464 return ret; 1677 1465
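_add_sg_continuation_descriptor() in the hunk above does two things with the caller's sglist: it copies each (offset, len) pair into the continuation descriptor in network byte order, and it accumulates the total transfer length that osd_req_write()/osd_req_read() are later called with. A standalone sketch of that conversion, where osd_sg_entry mirrors the kernel struct and be64() is a naive byte swap equivalent to cpu_to_be64() on a little-endian host only:

    #include <stdint.h>
    #include <stdio.h>

    struct osd_sg_entry { uint64_t offset, len; };

    /* byte-swap; matches cpu_to_be64() only on little-endian hosts */
    static uint64_t be64(uint64_t v)
    {
        uint64_t r = 0;
        for (int i = 0; i < 8; i++)
            r = (r << 8) | ((v >> (8 * i)) & 0xff);
        return r;
    }

    int main(void)
    {
        struct osd_sg_entry sglist[] = { { 0, 4096 }, { 1 << 20, 8192 } };
        struct osd_sg_entry wire[2];
        uint64_t total = 0;

        for (unsigned i = 0; i < 2; i++) {
            wire[i].offset = be64(sglist[i].offset);
            wire[i].len    = be64(sglist[i].len);
            total += sglist[i].len;  /* length handed to osd_req_write() */
        }
        printf("total payload = %llu bytes\n", (unsigned long long)total);
        return 0;
    }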
+110 -19
drivers/scsi/pmcraid.c
··· 1594 1594 cfg_entry = &ccn_hcam->cfg_entry; 1595 1595 fw_version = be16_to_cpu(pinstance->inq_data->fw_version); 1596 1596 1597 - pmcraid_info 1598 - ("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n", 1597 + pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x \ 1598 + res: %x:%x:%x:%x\n", 1599 1599 pinstance->ccn.hcam->ilid, 1600 1600 pinstance->ccn.hcam->op_code, 1601 + ((pinstance->ccn.hcam->timestamp1) | 1602 + ((pinstance->ccn.hcam->timestamp2 & 0xffffffffLL) << 32)), 1601 1603 pinstance->ccn.hcam->notification_type, 1602 1604 pinstance->ccn.hcam->notification_lost, 1603 1605 pinstance->ccn.hcam->flags, ··· 1852 1850 * none 1853 1851 */ 1854 1852 static void pmcraid_initiate_reset(struct pmcraid_instance *); 1853 + static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd); 1855 1854 1856 1855 static void pmcraid_process_ldn(struct pmcraid_cmd *cmd) 1857 1856 { ··· 1883 1880 spin_unlock_irqrestore(pinstance->host->host_lock, 1884 1881 lock_flags); 1885 1882 return; 1883 + } 1884 + if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) { 1885 + pinstance->timestamp_error = 1; 1886 + pmcraid_set_timestamp(cmd); 1886 1887 } 1887 1888 } else { 1888 1889 dev_info(&pinstance->pdev->dev, ··· 3370 3363 sg_size = buflen; 3371 3364 3372 3365 for (i = 0; i < num_elem; i++) { 3373 - page = alloc_pages(GFP_KERNEL|GFP_DMA, order); 3366 + page = alloc_pages(GFP_KERNEL|GFP_DMA|__GFP_ZERO, order); 3374 3367 if (!page) { 3375 3368 for (j = i - 1; j >= 0; j--) 3376 3369 __free_pages(sg_page(&scatterlist[j]), order); ··· 3746 3739 unsigned long request_buffer; 3747 3740 unsigned long request_offset; 3748 3741 unsigned long lock_flags; 3742 + void *ioasa; 3749 3743 u32 ioasc; 3750 3744 int request_size; 3751 3745 int buffer_size; ··· 3788 3780 rc = __copy_from_user(buffer, 3789 3781 (struct pmcraid_passthrough_ioctl_buffer *) arg, 3790 3782 sizeof(struct pmcraid_passthrough_ioctl_buffer)); 3783 + 3784 + ioasa = 3785 + (void *)(arg + 3786 + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa)); 3787 + 3791 3788 if (rc) { 3792 3789 pmcraid_err("ioctl: can't copy passthrough buffer\n"); 3793 3790 rc = -EFAULT; ··· 3960 3947 } 3961 3948 3962 3949 out_handle_response: 3963 - /* If the command failed for any reason, copy entire IOASA buffer and 3964 - * return IOCTL success. If copying IOASA to user-buffer fails, return 3950 + /* copy entire IOASA buffer and return IOCTL success. 
3951 + * If copying IOASA to user-buffer fails, return 3965 3952 * EFAULT 3966 3953 */ 3967 - if (PMCRAID_IOASC_SENSE_KEY(le32_to_cpu(cmd->ioa_cb->ioasa.ioasc))) { 3968 - void *ioasa = 3969 - (void *)(arg + 3970 - offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa)); 3971 - 3972 - pmcraid_info("command failed with %x\n", 3973 - le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); 3974 - if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa, 3975 - sizeof(struct pmcraid_ioasa))) { 3976 - pmcraid_err("failed to copy ioasa buffer to user\n"); 3977 - rc = -EFAULT; 3978 - } 3954 + if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa, 3955 + sizeof(struct pmcraid_ioasa))) { 3956 + pmcraid_err("failed to copy ioasa buffer to user\n"); 3957 + rc = -EFAULT; 3979 3958 } 3980 3959 3981 3960 /* If the data transfer was from device, copy the data onto user ··· 5152 5147 pinstance->inq_data = NULL; 5153 5148 pinstance->inq_data_baddr = 0; 5154 5149 } 5150 + 5151 + if (pinstance->timestamp_data != NULL) { 5152 + pci_free_consistent(pinstance->pdev, 5153 + sizeof(struct pmcraid_timestamp_data), 5154 + pinstance->timestamp_data, 5155 + pinstance->timestamp_data_baddr); 5156 + 5157 + pinstance->timestamp_data = NULL; 5158 + pinstance->timestamp_data_baddr = 0; 5159 + } 5155 5160 } 5156 5161 5157 5162 /** ··· 5219 5204 pmcraid_release_buffers(pinstance); 5220 5205 return -ENOMEM; 5221 5206 } 5207 + 5208 + /* allocate DMAable memory for set timestamp data buffer */ 5209 + pinstance->timestamp_data = pci_alloc_consistent( 5210 + pinstance->pdev, 5211 + sizeof(struct pmcraid_timestamp_data), 5212 + &pinstance->timestamp_data_baddr); 5213 + 5214 + if (pinstance->timestamp_data == NULL) { 5215 + pmcraid_err("couldn't allocate DMA memory for \ 5216 + set time_stamp \n"); 5217 + pmcraid_release_buffers(pinstance); 5218 + return -ENOMEM; 5219 + } 5220 + 5222 5221 5223 5222 /* Initialize all the command blocks and add them to free pool. 
No 5224 5223 * need to lock (free_pool_lock) as this is done in initialization ··· 5639 5610 } 5640 5611 5641 5612 /** 5613 + * pmcraid_set_timestamp - set the timestamp to IOAFP 5614 + * 5615 + * @cmd: pointer to pmcraid_cmd structure 5616 + * 5617 + * Return Value 5618 + * 0 for success or non-zero for failure cases 5619 + */ 5620 + static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd) 5621 + { 5622 + struct pmcraid_instance *pinstance = cmd->drv_inst; 5623 + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; 5624 + __be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN); 5625 + struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl; 5626 + 5627 + struct timeval tv; 5628 + __le64 timestamp; 5629 + 5630 + do_gettimeofday(&tv); 5631 + timestamp = tv.tv_sec * 1000; 5632 + 5633 + pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp); 5634 + pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8); 5635 + pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16); 5636 + pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24); 5637 + pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32); 5638 + pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp) >> 40); 5639 + 5640 + pmcraid_reinit_cmdblk(cmd); 5641 + ioarcb->request_type = REQ_TYPE_SCSI; 5642 + ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); 5643 + ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP; 5644 + ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION; 5645 + memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len)); 5646 + 5647 + ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + 5648 + offsetof(struct pmcraid_ioarcb, 5649 + add_data.u.ioadl[0])); 5650 + ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); 5651 + ioarcb->ioarcb_bus_addr &= ~(0x1FULL); 5652 + 5653 + ioarcb->request_flags0 |= NO_LINK_DESCS; 5654 + ioarcb->request_flags0 |= TRANSFER_DIR_WRITE; 5655 + ioarcb->data_transfer_length = 5656 + cpu_to_le32(sizeof(struct pmcraid_timestamp_data)); 5657 + ioadl = &(ioarcb->add_data.u.ioadl[0]); 5658 + ioadl->flags = IOADL_FLAGS_LAST_DESC; 5659 + ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr); 5660 + ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data)); 5661 + 5662 + if (!pinstance->timestamp_error) { 5663 + pinstance->timestamp_error = 0; 5664 + pmcraid_send_cmd(cmd, pmcraid_set_supported_devs, 5665 + PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); 5666 + } else { 5667 + pmcraid_send_cmd(cmd, pmcraid_return_cmd, 5668 + PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); 5669 + return; 5670 + } 5671 + } 5672 + 5673 + 5674 + /** 5642 5675 * pmcraid_init_res_table - Initialize the resource table 5643 5676 * @cmd: pointer to pmcraid command struct 5644 5677 * ··· 5811 5720 5812 5721 /* release the resource list lock */ 5813 5722 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); 5814 - pmcraid_set_supported_devs(cmd); 5723 + pmcraid_set_timestamp(cmd); 5815 5724 } 5816 5725 5817 5726 /** ··· 6145 6054 static void __exit pmcraid_exit(void) 6146 6055 { 6147 6056 pmcraid_netlink_release(); 6148 - class_destroy(pmcraid_class); 6149 6057 unregister_chrdev_region(MKDEV(pmcraid_major, 0), 6150 6058 PMCRAID_MAX_ADAPTERS); 6151 6059 pci_unregister_driver(&pmcraid_driver); 6060 + class_destroy(pmcraid_class); 6152 6061 } 6153 6062 6154 6063 module_init(pmcraid_init);
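pmcraid_set_timestamp() above encodes the current time in milliseconds into the first six bytes of the timestamp buffer, least-significant byte first. A userspace sketch of that packing, with time() standing in for do_gettimeofday():

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        uint64_t ms = (uint64_t)time(NULL) * 1000;
        uint8_t tstamp[6];

        for (int i = 0; i < 6; i++)
            tstamp[i] = (uint8_t)(ms >> (8 * i));   /* LSB first */

        for (int i = 0; i < 6; i++)
            printf("%02x ", tstamp[i]);
        printf("\n");
        return 0;
    }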
+19 -4
drivers/scsi/pmcraid.h
··· 42 42 */ 43 43 #define PMCRAID_DRIVER_NAME "PMC MaxRAID" 44 44 #define PMCRAID_DEVFILE "pmcsas" 45 - #define PMCRAID_DRIVER_VERSION "2.0.2" 45 + #define PMCRAID_DRIVER_VERSION "2.0.3" 46 46 #define PMCRAID_DRIVER_DATE __DATE__ 47 47 48 48 #define PMCRAID_FW_VERSION_1 0x002 ··· 184 184 #define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE 0x05250000 185 185 #define PMCRAID_IOASC_AC_TERMINATED_BY_HOST 0x0B5A0000 186 186 #define PMCRAID_IOASC_UA_BUS_WAS_RESET 0x06290000 187 + #define PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC 0x06908B00 187 188 #define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER 0x06298000 188 189 189 190 /* Driver defined IOASCs */ ··· 562 561 __u8 reserved3[16]; 563 562 }; 564 563 564 + #define PMCRAID_TIMESTAMP_LEN 12 565 + #define PMCRAID_REQ_TM_STR_LEN 6 566 + #define PMCRAID_SCSI_SET_TIMESTAMP 0xA4 567 + #define PMCRAID_SCSI_SERVICE_ACTION 0x0F 568 + 569 + struct pmcraid_timestamp_data { 570 + __u8 reserved1[4]; 571 + __u8 timestamp[PMCRAID_REQ_TM_STR_LEN]; /* current time value */ 572 + __u8 reserved2[2]; 573 + }; 574 + 565 575 /* pmcraid_cmd - LLD representation of SCSI command */ 566 576 struct pmcraid_cmd { 567 577 ··· 580 568 struct pmcraid_control_block *ioa_cb; 581 569 dma_addr_t ioa_cb_bus_addr; 582 570 dma_addr_t dma_handle; 583 - u8 *sense_buffer; 584 571 585 572 /* pointer to mid layer structure of SCSI commands */ 586 573 struct scsi_cmnd *scsi_cmd; ··· 716 705 struct pmcraid_inquiry_data *inq_data; 717 706 dma_addr_t inq_data_baddr; 718 707 708 + struct pmcraid_timestamp_data *timestamp_data; 709 + dma_addr_t timestamp_data_baddr; 710 + 719 711 /* size of configuration table entry, varies based on the firmware */ 720 712 u32 config_table_entry_size; 721 713 ··· 805 791 #define SHUTDOWN_NONE 0x0 806 792 #define SHUTDOWN_NORMAL 0x1 807 793 #define SHUTDOWN_ABBREV 0x2 794 + u32 timestamp_error:1; /* indicate set timestamp for out of sync */ 808 795 809 796 }; 810 797 ··· 1071 1056 #define PMCRAID_PASSTHROUGH_IOCTL 'F' 1072 1057 1073 1058 #define DRV_IOCTL(n, size) \ 1074 - _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size)) 1059 + _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size)) 1075 1060 1076 1061 #define FMW_IOCTL(n, size) \ 1077 - _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size)) 1062 + _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size)) 1078 1063 1079 1064 /* 1080 1065 * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
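The pmcraid_timestamp_data layout adds up to exactly PMCRAID_TIMESTAMP_LEN: 4 reserved bytes, 6 timestamp bytes, 2 reserved bytes. A compile-time check of that relationship, mirroring the struct above and assuming a C11 compiler for static_assert:

    #include <assert.h>

    #define PMCRAID_TIMESTAMP_LEN  12
    #define PMCRAID_REQ_TM_STR_LEN 6

    struct pmcraid_timestamp_data {
        unsigned char reserved1[4];
        unsigned char timestamp[PMCRAID_REQ_TM_STR_LEN];
        unsigned char reserved2[2];
    };

    /* all-char members, so no padding: 4 + 6 + 2 == 12 */
    static_assert(sizeof(struct pmcraid_timestamp_data) ==
                  PMCRAID_TIMESTAMP_LEN, "timestamp buffer must be 12 bytes");

    int main(void) { return 0; }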
+4
drivers/scsi/qla2xxx/qla_attr.c
··· 1538 1538 if (!fcport) 1539 1539 return; 1540 1540 1541 + /* Now that the rport has been deleted, set the fcport state to 1542 + FCS_DEVICE_DEAD */ 1543 + atomic_set(&fcport->state, FCS_DEVICE_DEAD); 1544 + 1541 1545 /* 1542 1546 * Transport has effectively 'deleted' the rport, clear 1543 1547 * all local references.
+125
drivers/scsi/qla2xxx/qla_bsg.c
··· 1307 1307 } 1308 1308 1309 1309 static int 1310 + qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha, 1311 + uint8_t is_update) 1312 + { 1313 + uint32_t start = 0; 1314 + int valid = 0; 1315 + 1316 + bsg_job->reply->reply_payload_rcv_len = 0; 1317 + 1318 + if (unlikely(pci_channel_offline(ha->pdev))) 1319 + return -EINVAL; 1320 + 1321 + start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1322 + if (start > ha->optrom_size) 1323 + return -EINVAL; 1324 + 1325 + if (ha->optrom_state != QLA_SWAITING) 1326 + return -EBUSY; 1327 + 1328 + ha->optrom_region_start = start; 1329 + 1330 + if (is_update) { 1331 + if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 1332 + valid = 1; 1333 + else if (start == (ha->flt_region_boot * 4) || 1334 + start == (ha->flt_region_fw * 4)) 1335 + valid = 1; 1336 + else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 1337 + IS_QLA8XXX_TYPE(ha)) 1338 + valid = 1; 1339 + if (!valid) { 1340 + qla_printk(KERN_WARNING, ha, 1341 + "Invalid start region 0x%x/0x%x.\n", 1342 + start, bsg_job->request_payload.payload_len); 1343 + return -EINVAL; 1344 + } 1345 + 1346 + ha->optrom_region_size = start + 1347 + bsg_job->request_payload.payload_len > ha->optrom_size ? 1348 + ha->optrom_size - start : 1349 + bsg_job->request_payload.payload_len; 1350 + ha->optrom_state = QLA_SWRITING; 1351 + } else { 1352 + ha->optrom_region_size = start + 1353 + bsg_job->reply_payload.payload_len > ha->optrom_size ? 1354 + ha->optrom_size - start : 1355 + bsg_job->reply_payload.payload_len; 1356 + ha->optrom_state = QLA_SREADING; 1357 + } 1358 + 1359 + ha->optrom_buffer = vmalloc(ha->optrom_region_size); 1360 + if (!ha->optrom_buffer) { 1361 + qla_printk(KERN_WARNING, ha, 1362 + "Read: Unable to allocate memory for optrom retrieval " 1363 + "(%x).\n", ha->optrom_region_size); 1364 + 1365 + ha->optrom_state = QLA_SWAITING; 1366 + return -ENOMEM; 1367 + } 1368 + 1369 + memset(ha->optrom_buffer, 0, ha->optrom_region_size); 1370 + return 0; 1371 + } 1372 + 1373 + static int 1374 + qla2x00_read_optrom(struct fc_bsg_job *bsg_job) 1375 + { 1376 + struct Scsi_Host *host = bsg_job->shost; 1377 + scsi_qla_host_t *vha = shost_priv(host); 1378 + struct qla_hw_data *ha = vha->hw; 1379 + int rval = 0; 1380 + 1381 + rval = qla2x00_optrom_setup(bsg_job, ha, 0); 1382 + if (rval) 1383 + return rval; 1384 + 1385 + ha->isp_ops->read_optrom(vha, ha->optrom_buffer, 1386 + ha->optrom_region_start, ha->optrom_region_size); 1387 + 1388 + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1389 + bsg_job->reply_payload.sg_cnt, ha->optrom_buffer, 1390 + ha->optrom_region_size); 1391 + 1392 + bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size; 1393 + bsg_job->reply->result = DID_OK; 1394 + vfree(ha->optrom_buffer); 1395 + ha->optrom_buffer = NULL; 1396 + ha->optrom_state = QLA_SWAITING; 1397 + bsg_job->job_done(bsg_job); 1398 + return rval; 1399 + } 1400 + 1401 + static int 1402 + qla2x00_update_optrom(struct fc_bsg_job *bsg_job) 1403 + { 1404 + struct Scsi_Host *host = bsg_job->shost; 1405 + scsi_qla_host_t *vha = shost_priv(host); 1406 + struct qla_hw_data *ha = vha->hw; 1407 + int rval = 0; 1408 + 1409 + rval = qla2x00_optrom_setup(bsg_job, ha, 1); 1410 + if (rval) 1411 + return rval; 1412 + 1413 + sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1414 + bsg_job->request_payload.sg_cnt, ha->optrom_buffer, 1415 + ha->optrom_region_size); 1416 + 1417 + ha->isp_ops->write_optrom(vha, ha->optrom_buffer, 1418 + ha->optrom_region_start, ha->optrom_region_size); 1419 + 1420 + 
bsg_job->reply->result = DID_OK; 1421 + vfree(ha->optrom_buffer); 1422 + ha->optrom_buffer = NULL; 1423 + ha->optrom_state = QLA_SWAITING; 1424 + bsg_job->job_done(bsg_job); 1425 + return rval; 1426 + } 1427 + 1428 + static int 1310 1429 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) 1311 1430 { 1312 1431 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { ··· 1446 1327 1447 1328 case QL_VND_FCP_PRIO_CFG_CMD: 1448 1329 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job); 1330 + 1331 + case QL_VND_READ_FLASH: 1332 + return qla2x00_read_optrom(bsg_job); 1333 + 1334 + case QL_VND_UPDATE_FLASH: 1335 + return qla2x00_update_optrom(bsg_job); 1449 1336 1450 1337 default: 1451 1338 bsg_job->reply->result = (DID_ERROR << 16);
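qla2x00_optrom_setup() clamps the transfer so that start plus size never runs past the end of the option ROM; the same expression serves both the read and the update direction. The clamp in isolation, assuming start has already been validated as no larger than optrom_size:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t clamp_region(uint32_t start, uint32_t payload_len,
                                 uint32_t optrom_size)
    {
        return start + payload_len > optrom_size ?
               optrom_size - start : payload_len;
    }

    int main(void)
    {
        /* 1 MB option ROM, 128 KB requested starting 64 KB from the end */
        uint32_t size = clamp_region(0xf0000, 0x20000, 0x100000);
        printf("clamped size = 0x%x\n", size);   /* prints 0x10000 */
        return 0;
    }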
+2
drivers/scsi/qla2xxx/qla_bsg.h
··· 14 14 #define QL_VND_A84_MGMT_CMD 0x04 15 15 #define QL_VND_IIDMA 0x05 16 16 #define QL_VND_FCP_PRIO_CFG_CMD 0x06 17 + #define QL_VND_READ_FLASH 0x07 18 + #define QL_VND_UPDATE_FLASH 0x08 17 19 18 20 /* BSG definations for interpreting CommandSent field */ 19 21 #define INT_DEF_LB_LOOPBACK_CMD 0
-2
drivers/scsi/qla2xxx/qla_def.h
··· 1700 1700 atomic_t state; 1701 1701 uint32_t flags; 1702 1702 1703 - int port_login_retry_count; 1704 1703 int login_retry; 1705 - atomic_t port_down_timer; 1706 1704 1707 1705 struct fc_rport *rport, *drport; 1708 1706 u32 supported_classes;
+1
drivers/scsi/qla2xxx/qla_gbl.h
··· 92 92 extern int ql2xdbwr; 93 93 extern int ql2xdontresethba; 94 94 extern int ql2xasynctmfenable; 95 + extern int ql2xgffidenable; 95 96 extern int ql2xenabledif; 96 97 extern int ql2xenablehba_err_chk; 97 98 extern int ql2xtargetreset;
+14 -14
drivers/scsi/qla2xxx/qla_init.c
··· 71 71 struct srb_iocb *iocb = ctx->u.iocb_cmd; 72 72 struct scsi_qla_host *vha = sp->fcport->vha; 73 73 74 - del_timer_sync(&iocb->timer); 74 + del_timer(&iocb->timer); 75 75 kfree(iocb); 76 76 kfree(ctx); 77 77 mempool_free(sp, sp->fcport->vha->hw->srb_mempool); ··· 1344 1344 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for " 1345 1345 "firmware dump!!!\n", dump_size / 1024); 1346 1346 1347 + if (ha->fce) { 1348 + dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 1349 + ha->fce_dma); 1350 + ha->fce = NULL; 1351 + ha->fce_dma = 0; 1352 + } 1353 + 1347 1354 if (ha->eft) { 1348 1355 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft, 1349 1356 ha->eft_dma); ··· 1825 1818 qla2x00_init_response_q_entries(rsp); 1826 1819 } 1827 1820 1828 - spin_lock_irqsave(&ha->vport_slock, flags); 1821 + spin_lock(&ha->vport_slock); 1829 1822 /* Clear RSCN queue. */ 1830 1823 list_for_each_entry(vp, &ha->vp_list, list) { 1831 1824 vp->rscn_in_ptr = 0; 1832 1825 vp->rscn_out_ptr = 0; 1833 1826 } 1834 1827 1835 - spin_unlock_irqrestore(&ha->vport_slock, flags); 1828 + spin_unlock(&ha->vport_slock); 1836 1829 1837 1830 ha->isp_ops->config_rings(vha); 1838 1831 ··· 2923 2916 void 2924 2917 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2925 2918 { 2926 - struct qla_hw_data *ha = vha->hw; 2927 - 2928 2919 fcport->vha = vha; 2929 2920 fcport->login_retry = 0; 2930 - fcport->port_login_retry_count = ha->port_down_retry_count * 2931 - PORT_RETRY_TIME; 2932 - atomic_set(&fcport->port_down_timer, ha->port_down_retry_count * 2933 - PORT_RETRY_TIME); 2934 2921 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 2935 2922 2936 2923 qla2x00_iidma_fcport(vha, fcport); 2937 - 2938 - atomic_set(&fcport->state, FCS_ONLINE); 2939 - 2940 2924 qla2x00_reg_remote_port(vha, fcport); 2925 + atomic_set(&fcport->state, FCS_ONLINE); 2941 2926 } 2942 2927 2943 2928 /* ··· 3291 3292 continue; 3292 3293 3293 3294 /* Bypass ports whose FCP-4 type is not FCP_SCSI */ 3294 - if (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI && 3295 - new_fcport->fc4_type != FC4_TYPE_UNKNOWN) 3295 + if (ql2xgffidenable && 3296 + (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI && 3297 + new_fcport->fc4_type != FC4_TYPE_UNKNOWN)) 3296 3298 continue; 3297 3299 3298 3300 /* Locate matching device in database. */
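The GFF_ID hunk turns the port-type filter into an opt-in: a fabric port whose FCP-4 type is neither FCP_SCSI nor UNKNOWN is now skipped only when the new ql2xgffidenable parameter is set. A truth-table sketch of the combined condition; the FC4_TYPE_* values here are illustrative, not the driver's actual encodings:

    #include <stdio.h>

    #define FC4_TYPE_FCP_SCSI 1   /* illustrative values */
    #define FC4_TYPE_UNKNOWN  0
    #define FC4_TYPE_OTHER    2

    static int skip_port(int ql2xgffidenable, int fc4_type)
    {
        return ql2xgffidenable &&
               (fc4_type != FC4_TYPE_FCP_SCSI &&
                fc4_type != FC4_TYPE_UNKNOWN);
    }

    int main(void)
    {
        printf("param off, non-SCSI port: skip=%d\n",
               skip_port(0, FC4_TYPE_OTHER));   /* 0: port is kept */
        printf("param on,  non-SCSI port: skip=%d\n",
               skip_port(1, FC4_TYPE_OTHER));   /* 1: port is skipped */
        return 0;
    }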
+2 -2
drivers/scsi/qla2xxx/qla_iocb.c
··· 992 992 ha = vha->hw; 993 993 994 994 DEBUG18(printk(KERN_DEBUG 995 - "%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__, 996 - vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd))); 995 + "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__, 996 + vha->host_no, sp, scsi_get_prot_op(sp->cmd))); 997 997 998 998 cmd_pkt->vp_index = sp->fcport->vp_idx; 999 999
+6 -15
drivers/scsi/qla2xxx/qla_isr.c
··· 1240 1240 case LSC_SCODE_NPORT_USED: 1241 1241 data[0] = MBS_LOOP_ID_USED; 1242 1242 break; 1243 - case LSC_SCODE_CMD_FAILED: 1244 - if ((iop[1] & 0xff) == 0x05) { 1245 - data[0] = MBS_NOT_LOGGED_IN; 1246 - break; 1247 - } 1248 - /* Fall through. */ 1249 1243 default: 1250 1244 data[0] = MBS_COMMAND_ERROR; 1251 1245 break; ··· 1425 1431 rsp->status_srb = sp; 1426 1432 1427 1433 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 1428 - "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no, 1429 - cp->device->channel, cp->device->id, cp->device->lun, cp, 1430 - cp->serial_number)); 1434 + "cmd=%p\n", __func__, sp->fcport->vha->host_no, 1435 + cp->device->channel, cp->device->id, cp->device->lun, cp)); 1431 1436 if (sense_len) 1432 1437 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len)); 1433 1438 } ··· 1750 1757 case CS_INCOMPLETE: 1751 1758 case CS_PORT_UNAVAILABLE: 1752 1759 case CS_TIMEOUT: 1760 + case CS_RESET: 1761 + 1753 1762 /* 1754 1763 * We are going to have the fc class block the rport 1755 1764 * while we try to recover so instruct the mid layer ··· 1776 1781 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1777 1782 break; 1778 1783 1779 - case CS_RESET: 1780 - cp->result = DID_TRANSPORT_DISRUPTED << 16; 1781 - break; 1782 - 1783 1784 case CS_ABORTED: 1784 1785 cp->result = DID_RESET << 16; 1785 1786 break; ··· 1792 1801 if (logit) 1793 1802 DEBUG2(qla_printk(KERN_INFO, ha, 1794 1803 "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) " 1795 - "oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x " 1804 + "oxid=0x%x cdb=%02x%02x%02x len=0x%x " 1796 1805 "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no, 1797 1806 cp->device->id, cp->device->lun, comp_status, scsi_status, 1798 - cp->result, ox_id, cp->serial_number, cp->cmnd[0], 1807 + cp->result, ox_id, cp->cmnd[0], 1799 1808 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, 1800 1809 resid_len, fw_resid_len)); 1801 1810
+106 -107
drivers/scsi/qla2xxx/qla_os.c
··· 160 160 "Enable target reset." 161 161 "Default is 1 - use hw defaults."); 162 162 163 + int ql2xgffidenable; 164 + module_param(ql2xgffidenable, int, S_IRUGO|S_IRUSR); 165 + MODULE_PARM_DESC(ql2xgffidenable, 166 + "Enables GFF_ID checks of port type. " 167 + "Default is 0 - Do not use GFF_ID information."); 163 168 164 169 int ql2xasynctmfenable; 165 170 module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR); ··· 260 255 261 256 static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t, 262 257 struct req_que **, struct rsp_que **); 258 + static void qla2x00_free_fw_dump(struct qla_hw_data *); 263 259 static void qla2x00_mem_free(struct qla_hw_data *); 264 260 static void qla2x00_sp_free_dma(srb_t *); 265 261 ··· 545 539 srb_t *sp; 546 540 int rval; 547 541 542 + spin_unlock_irq(vha->host->host_lock); 548 543 if (ha->flags.eeh_busy) { 549 544 if (ha->flags.pci_channel_io_perm_failure) 550 545 cmd->result = DID_NO_CONNECT << 16; ··· 560 553 goto qc24_fail_command; 561 554 } 562 555 563 - /* Close window on fcport/rport state-transitioning. */ 564 - if (fcport->drport) 565 - goto qc24_target_busy; 566 - 567 556 if (!vha->flags.difdix_supported && 568 557 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 569 558 DEBUG2(qla_printk(KERN_ERR, ha, ··· 570 567 } 571 568 if (atomic_read(&fcport->state) != FCS_ONLINE) { 572 569 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 573 - atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 570 + atomic_read(&fcport->state) == FCS_DEVICE_LOST || 571 + atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 574 572 cmd->result = DID_NO_CONNECT << 16; 575 573 goto qc24_fail_command; 576 574 } 577 575 goto qc24_target_busy; 578 576 } 579 - 580 - spin_unlock_irq(vha->host->host_lock); 581 577 582 578 sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done); 583 579 if (!sp) ··· 599 597 return SCSI_MLQUEUE_HOST_BUSY; 600 598 601 599 qc24_target_busy: 600 + spin_lock_irq(vha->host->host_lock); 602 601 return SCSI_MLQUEUE_TARGET_BUSY; 603 602 604 603 qc24_fail_command: 604 + spin_lock_irq(vha->host->host_lock); 605 605 done(cmd); 606 606 607 607 return 0; ··· 828 824 { 829 825 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 830 826 srb_t *sp; 831 - int ret, i; 827 + int ret; 832 828 unsigned int id, lun; 833 - unsigned long serial; 834 829 unsigned long flags; 835 830 int wait = 0; 836 831 struct qla_hw_data *ha = vha->hw; 837 - struct req_que *req = vha->req; 838 - srb_t *spt; 839 - int got_ref = 0; 840 832 841 833 fc_block_scsi_eh(cmd); 842 834 843 835 if (!CMD_SP(cmd)) 844 836 return SUCCESS; 845 837 846 - ret = SUCCESS; 847 - 848 838 id = cmd->device->id; 849 839 lun = cmd->device->lun; 850 - serial = cmd->serial_number; 851 - spt = (srb_t *) CMD_SP(cmd); 852 - if (!spt) 853 - return SUCCESS; 854 840 855 - /* Check active list for command command. */ 856 841 spin_lock_irqsave(&ha->hardware_lock, flags); 857 - for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { 858 - sp = req->outstanding_cmds[i]; 859 - 860 - if (sp == NULL) 861 - continue; 862 - if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) && 863 - !IS_PROT_IO(sp)) 864 - continue; 865 - if (sp->cmd != cmd) 866 - continue; 867 - 868 - DEBUG2(printk("%s(%ld): aborting sp %p from RISC." 
869 - " pid=%ld.\n", __func__, vha->host_no, sp, serial)); 870 - 871 - /* Get a reference to the sp and drop the lock.*/ 872 - sp_get(sp); 873 - got_ref++; 874 - 842 + sp = (srb_t *) CMD_SP(cmd); 843 + if (!sp) { 875 844 spin_unlock_irqrestore(&ha->hardware_lock, flags); 876 - if (ha->isp_ops->abort_command(sp)) { 877 - DEBUG2(printk("%s(%ld): abort_command " 878 - "mbx failed.\n", __func__, vha->host_no)); 879 - ret = FAILED; 880 - } else { 881 - DEBUG3(printk("%s(%ld): abort_command " 882 - "mbx success.\n", __func__, vha->host_no)); 883 - wait = 1; 884 - } 885 - spin_lock_irqsave(&ha->hardware_lock, flags); 886 - break; 845 + return SUCCESS; 887 846 } 847 + 848 + DEBUG2(printk("%s(%ld): aborting sp %p from RISC.", 849 + __func__, vha->host_no, sp)); 850 + 851 + /* Get a reference to the sp and drop the lock.*/ 852 + sp_get(sp); 853 + 888 854 spin_unlock_irqrestore(&ha->hardware_lock, flags); 855 + if (ha->isp_ops->abort_command(sp)) { 856 + DEBUG2(printk("%s(%ld): abort_command " 857 + "mbx failed.\n", __func__, vha->host_no)); 858 + ret = FAILED; 859 + } else { 860 + DEBUG3(printk("%s(%ld): abort_command " 861 + "mbx success.\n", __func__, vha->host_no)); 862 + wait = 1; 863 + } 864 + qla2x00_sp_compl(ha, sp); 889 865 890 866 /* Wait for the command to be returned. */ 891 867 if (wait) { 892 868 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) { 893 869 qla_printk(KERN_ERR, ha, 894 - "scsi(%ld:%d:%d): Abort handler timed out -- %lx " 895 - "%x.\n", vha->host_no, id, lun, serial, ret); 870 + "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n", 871 + vha->host_no, id, lun, ret); 896 872 ret = FAILED; 897 873 } 898 874 } 899 875 900 - if (got_ref) 901 - qla2x00_sp_compl(ha, sp); 902 - 903 876 qla_printk(KERN_INFO, ha, 904 - "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", 905 - vha->host_no, id, lun, wait, serial, ret); 877 + "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n", 878 + vha->host_no, id, lun, wait, ret); 906 879 907 880 return ret; 908 881 } ··· 1024 1043 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 1025 1044 int ret = FAILED; 1026 1045 unsigned int id, lun; 1027 - unsigned long serial; 1028 1046 1029 1047 fc_block_scsi_eh(cmd); 1030 1048 1031 1049 id = cmd->device->id; 1032 1050 lun = cmd->device->lun; 1033 - serial = cmd->serial_number; 1034 1051 1035 1052 if (!fcport) 1036 1053 return ret; ··· 1083 1104 struct qla_hw_data *ha = vha->hw; 1084 1105 int ret = FAILED; 1085 1106 unsigned int id, lun; 1086 - unsigned long serial; 1087 1107 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1088 1108 1089 1109 fc_block_scsi_eh(cmd); 1090 1110 1091 1111 id = cmd->device->id; 1092 1112 lun = cmd->device->lun; 1093 - serial = cmd->serial_number; 1094 1113 1095 1114 if (!fcport) 1096 1115 return ret; ··· 1951 1974 ha->bars = bars; 1952 1975 ha->mem_only = mem_only; 1953 1976 spin_lock_init(&ha->hardware_lock); 1977 + spin_lock_init(&ha->vport_slock); 1954 1978 1955 1979 /* Set ISP-type information. 
*/ 1956 1980 qla2x00_set_isp_flags(ha); ··· 2320 2342 } 2321 2343 2322 2344 static void 2345 + qla2x00_shutdown(struct pci_dev *pdev) 2346 + { 2347 + scsi_qla_host_t *vha; 2348 + struct qla_hw_data *ha; 2349 + 2350 + vha = pci_get_drvdata(pdev); 2351 + ha = vha->hw; 2352 + 2353 + /* Turn-off FCE trace */ 2354 + if (ha->flags.fce_enabled) { 2355 + qla2x00_disable_fce_trace(vha, NULL, NULL); 2356 + ha->flags.fce_enabled = 0; 2357 + } 2358 + 2359 + /* Turn-off EFT trace */ 2360 + if (ha->eft) 2361 + qla2x00_disable_eft_trace(vha); 2362 + 2363 + /* Stop currently executing firmware. */ 2364 + qla2x00_try_to_stop_firmware(vha); 2365 + 2366 + /* Turn adapter off line */ 2367 + vha->flags.online = 0; 2368 + 2369 + /* turn-off interrupts on the card */ 2370 + if (ha->interrupts_on) { 2371 + vha->flags.init_done = 0; 2372 + ha->isp_ops->disable_intrs(ha); 2373 + } 2374 + 2375 + qla2x00_free_irqs(vha); 2376 + 2377 + qla2x00_free_fw_dump(ha); 2378 + } 2379 + 2380 + static void 2323 2381 qla2x00_remove_one(struct pci_dev *pdev) 2324 2382 { 2325 2383 scsi_qla_host_t *base_vha, *vha; ··· 2611 2597 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 2612 2598 continue; 2613 2599 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2600 + atomic_set(&fcport->state, FCS_DEVICE_LOST); 2614 2601 if (defer) 2615 2602 qla2x00_schedule_rport_del(vha, fcport, defer); 2616 2603 else if (vha->vp_idx == fcport->vp_idx) 2617 2604 qla2x00_schedule_rport_del(vha, fcport, defer); 2618 2605 } 2619 - atomic_set(&fcport->state, FCS_DEVICE_LOST); 2620 2606 } 2621 2607 } 2622 2608 ··· 2844 2830 } 2845 2831 2846 2832 /* 2833 + * qla2x00_free_fw_dump 2834 + * Frees fw dump stuff. 2835 + * 2836 + * Input: 2837 + * ha = adapter block pointer. 2838 + */ 2839 + static void 2840 + qla2x00_free_fw_dump(struct qla_hw_data *ha) 2841 + { 2842 + if (ha->fce) 2843 + dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 2844 + ha->fce_dma); 2845 + 2846 + if (ha->fw_dump) { 2847 + if (ha->eft) 2848 + dma_free_coherent(&ha->pdev->dev, 2849 + ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); 2850 + vfree(ha->fw_dump); 2851 + } 2852 + ha->fce = NULL; 2853 + ha->fce_dma = 0; 2854 + ha->eft = NULL; 2855 + ha->eft_dma = 0; 2856 + ha->fw_dump = NULL; 2857 + ha->fw_dumped = 0; 2858 + ha->fw_dump_reading = 0; 2859 + } 2860 + 2861 + /* 2847 2862 * qla2x00_mem_free 2848 2863 * Frees all adapter allocated memory. 
2849 2864 * ··· 2882 2839 static void 2883 2840 qla2x00_mem_free(struct qla_hw_data *ha) 2884 2841 { 2842 + qla2x00_free_fw_dump(ha); 2843 + 2885 2844 if (ha->srb_mempool) 2886 2845 mempool_destroy(ha->srb_mempool); 2887 - 2888 - if (ha->fce) 2889 - dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 2890 - ha->fce_dma); 2891 - 2892 - if (ha->fw_dump) { 2893 - if (ha->eft) 2894 - dma_free_coherent(&ha->pdev->dev, 2895 - ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); 2896 - vfree(ha->fw_dump); 2897 - } 2898 2846 2899 2847 if (ha->dcbx_tlv) 2900 2848 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, ··· 2959 2925 2960 2926 ha->srb_mempool = NULL; 2961 2927 ha->ctx_mempool = NULL; 2962 - ha->eft = NULL; 2963 - ha->eft_dma = 0; 2964 2928 ha->sns_cmd = NULL; 2965 2929 ha->sns_cmd_dma = 0; 2966 2930 ha->ct_sns = NULL; ··· 2978 2946 2979 2947 ha->gid_list = NULL; 2980 2948 ha->gid_list_dma = 0; 2981 - 2982 - ha->fw_dump = NULL; 2983 - ha->fw_dumped = 0; 2984 - ha->fw_dump_reading = 0; 2985 2949 } 2986 2950 2987 2951 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, ··· 3575 3547 qla2x00_timer(scsi_qla_host_t *vha) 3576 3548 { 3577 3549 unsigned long cpu_flags = 0; 3578 - fc_port_t *fcport; 3579 3550 int start_dpc = 0; 3580 3551 int index; 3581 3552 srb_t *sp; 3582 - int t; 3583 3553 uint16_t w; 3584 3554 struct qla_hw_data *ha = vha->hw; 3585 3555 struct req_que *req; ··· 3593 3567 /* Hardware read to raise pending EEH errors during mailbox waits. */ 3594 3568 if (!pci_channel_offline(ha->pdev)) 3595 3569 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 3596 - /* 3597 - * Ports - Port down timer. 3598 - * 3599 - * Whenever, a port is in the LOST state we start decrementing its port 3600 - * down timer every second until it reaches zero. Once it reaches zero 3601 - * the port it marked DEAD. 3602 - */ 3603 - t = 0; 3604 - list_for_each_entry(fcport, &vha->vp_fcports, list) { 3605 - if (fcport->port_type != FCT_TARGET) 3606 - continue; 3607 - 3608 - if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { 3609 - 3610 - if (atomic_read(&fcport->port_down_timer) == 0) 3611 - continue; 3612 - 3613 - if (atomic_dec_and_test(&fcport->port_down_timer) != 0) 3614 - atomic_set(&fcport->state, FCS_DEVICE_DEAD); 3615 - 3616 - DEBUG(printk("scsi(%ld): fcport-%d - port retry count: " 3617 - "%d remaining\n", 3618 - vha->host_no, 3619 - t, atomic_read(&fcport->port_down_timer))); 3620 - } 3621 - t++; 3622 - } /* End of for fcport */ 3623 - 3624 3570 3625 3571 /* Loop down handler. */ 3626 3572 if (atomic_read(&vha->loop_down_timer) > 0 && ··· 4077 4079 .id_table = qla2xxx_pci_tbl, 4078 4080 .probe = qla2x00_probe_one, 4079 4081 .remove = qla2x00_remove_one, 4082 + .shutdown = qla2x00_shutdown, 4080 4083 .err_handler = &qla2xxx_err_handler, 4081 4084 }; 4082 4085
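The new qla2x00_free_fw_dump() helper frees the FCE/EFT/fw_dump buffers and resets the pointers and flags, so the same routine can be reached from both the shutdown hook and qla2x00_mem_free() without double-freeing. A generic sketch of that free-and-reset idiom:

    #include <stdlib.h>

    struct hw { void *fce; void *fw_dump; int fw_dumped; };

    static void free_fw_dump(struct hw *ha)
    {
        free(ha->fce);
        free(ha->fw_dump);
        ha->fce = NULL;         /* reset so a second call is a no-op */
        ha->fw_dump = NULL;
        ha->fw_dumped = 0;
    }

    int main(void)
    {
        struct hw ha = { malloc(64), malloc(64), 1 };

        free_fw_dump(&ha);
        free_fw_dump(&ha);      /* safe: free(NULL) does nothing */
        return 0;
    }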
+101
drivers/scsi/qla4xxx/ql4_dbg.c
··· 30 30 printk(KERN_INFO "\n"); 31 31 } 32 32 33 + void qla4xxx_dump_registers(struct scsi_qla_host *ha) 34 + { 35 + uint8_t i; 36 + 37 + if (is_qla8022(ha)) { 38 + for (i = 1; i < MBOX_REG_COUNT; i++) 39 + printk(KERN_INFO "mailbox[%d] = 0x%08X\n", 40 + i, readl(&ha->qla4_8xxx_reg->mailbox_in[i])); 41 + return; 42 + } 43 + 44 + for (i = 0; i < MBOX_REG_COUNT; i++) { 45 + printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n", 46 + (uint8_t) offsetof(struct isp_reg, mailbox[i]), i, 47 + readw(&ha->reg->mailbox[i])); 48 + } 49 + 50 + printk(KERN_INFO "0x%02X flash_address = 0x%08X\n", 51 + (uint8_t) offsetof(struct isp_reg, flash_address), 52 + readw(&ha->reg->flash_address)); 53 + printk(KERN_INFO "0x%02X flash_data = 0x%08X\n", 54 + (uint8_t) offsetof(struct isp_reg, flash_data), 55 + readw(&ha->reg->flash_data)); 56 + printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n", 57 + (uint8_t) offsetof(struct isp_reg, ctrl_status), 58 + readw(&ha->reg->ctrl_status)); 59 + 60 + if (is_qla4010(ha)) { 61 + printk(KERN_INFO "0x%02X nvram = 0x%08X\n", 62 + (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram), 63 + readw(&ha->reg->u1.isp4010.nvram)); 64 + } else if (is_qla4022(ha) | is_qla4032(ha)) { 65 + printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n", 66 + (uint8_t) offsetof(struct isp_reg, u1.isp4022.intr_mask), 67 + readw(&ha->reg->u1.isp4022.intr_mask)); 68 + printk(KERN_INFO "0x%02X nvram = 0x%08X\n", 69 + (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram), 70 + readw(&ha->reg->u1.isp4022.nvram)); 71 + printk(KERN_INFO "0x%02X semaphore = 0x%08X\n", 72 + (uint8_t) offsetof(struct isp_reg, u1.isp4022.semaphore), 73 + readw(&ha->reg->u1.isp4022.semaphore)); 74 + } 75 + printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n", 76 + (uint8_t) offsetof(struct isp_reg, req_q_in), 77 + readw(&ha->reg->req_q_in)); 78 + printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n", 79 + (uint8_t) offsetof(struct isp_reg, rsp_q_out), 80 + readw(&ha->reg->rsp_q_out)); 81 + 82 + if (is_qla4010(ha)) { 83 + printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", 84 + (uint8_t) offsetof(struct isp_reg, u2.isp4010.ext_hw_conf), 85 + readw(&ha->reg->u2.isp4010.ext_hw_conf)); 86 + printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n", 87 + (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_ctrl), 88 + readw(&ha->reg->u2.isp4010.port_ctrl)); 89 + printk(KERN_INFO "0x%02X port_status = 0x%08X\n", 90 + (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_status), 91 + readw(&ha->reg->u2.isp4010.port_status)); 92 + printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n", 93 + (uint8_t) offsetof(struct isp_reg, u2.isp4010.req_q_out), 94 + readw(&ha->reg->u2.isp4010.req_q_out)); 95 + printk(KERN_INFO "0x%02X gp_out = 0x%08X\n", 96 + (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out), 97 + readw(&ha->reg->u2.isp4010.gp_out)); 98 + printk(KERN_INFO "0x%02X gp_in = 0x%08X\n", 99 + (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in), 100 + readw(&ha->reg->u2.isp4010.gp_in)); 101 + printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t) 102 + offsetof(struct isp_reg, u2.isp4010.port_err_status), 103 + readw(&ha->reg->u2.isp4010.port_err_status)); 104 + } else if (is_qla4022(ha) | is_qla4032(ha)) { 105 + printk(KERN_INFO "Page 0 Registers:\n"); 106 + printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", (uint8_t) 107 + offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf), 108 + readw(&ha->reg->u2.isp4022.p0.ext_hw_conf)); 109 + printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n", (uint8_t) 110 + offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl), 111 + 
readw(&ha->reg->u2.isp4022.p0.port_ctrl)); 112 + printk(KERN_INFO "0x%02X port_status = 0x%08X\n", (uint8_t) 113 + offsetof(struct isp_reg, u2.isp4022.p0.port_status), 114 + readw(&ha->reg->u2.isp4022.p0.port_status)); 115 + printk(KERN_INFO "0x%02X gp_out = 0x%08X\n", 116 + (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out), 117 + readw(&ha->reg->u2.isp4022.p0.gp_out)); 118 + printk(KERN_INFO "0x%02X gp_in = 0x%08X\n", 119 + (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in), 120 + readw(&ha->reg->u2.isp4022.p0.gp_in)); 121 + printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t) 122 + offsetof(struct isp_reg, u2.isp4022.p0.port_err_status), 123 + readw(&ha->reg->u2.isp4022.p0.port_err_status)); 124 + printk(KERN_INFO "Page 1 Registers:\n"); 125 + writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT), 126 + &ha->reg->ctrl_status); 127 + printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n", 128 + (uint8_t) offsetof(struct isp_reg, u2.isp4022.p1.req_q_out), 129 + readw(&ha->reg->u2.isp4022.p1.req_q_out)); 130 + writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT), 131 + &ha->reg->ctrl_status); 132 + } 133 + }
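qla4xxx_dump_registers() prints each register's byte offset alongside its value by pairing offsetof() with the matching readw()/readl(). A cut-down illustration of the pattern on an ordinary in-memory struct:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct isp_reg {
        uint32_t mailbox[8];
        uint32_t flash_address;
        uint32_t flash_data;
    };

    int main(void)
    {
        struct isp_reg regs = { .flash_data = 0xdeadbeef };

        printf("0x%02zX flash_data = 0x%08X\n",
               offsetof(struct isp_reg, flash_data), regs.flash_data);
        return 0;
    }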
+2 -18
drivers/scsi/qla4xxx/ql4_def.h
··· 24 24 #include <linux/delay.h> 25 25 #include <linux/interrupt.h> 26 26 #include <linux/mutex.h> 27 + #include <linux/aer.h> 27 28 28 29 #include <net/tcp.h> 29 30 #include <scsi/scsi.h> ··· 36 35 37 36 #include "ql4_dbg.h" 38 37 #include "ql4_nx.h" 39 - 40 - #if defined(CONFIG_PCIEAER) 41 - #include <linux/aer.h> 42 - #else 43 - /* AER releated */ 44 - static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) 45 - { 46 - return -EINVAL; 47 - } 48 - static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev) 49 - { 50 - return -EINVAL; 51 - } 52 - static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 53 - { 54 - return -EINVAL; 55 - } 56 - #endif 57 38 58 39 #ifndef PCI_DEVICE_ID_QLOGIC_ISP4010 59 40 #define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010 ··· 162 179 #define IOCB_TOV_MARGIN 10 163 180 #define RELOGIN_TOV 18 164 181 #define ISNS_DEREG_TOV 5 182 + #define HBA_ONLINE_TOV 30 165 183 166 184 #define MAX_RESET_HA_RETRIES 2 167 185
+3
drivers/scsi/qla4xxx/ql4_fw.h
··· 416 416 #define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C 417 417 #define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D 418 418 #define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E 419 + #define MBOX_ASTS_TXSCVR_INSERTED 0x8130 420 + #define MBOX_ASTS_TXSCVR_REMOVED 0x8131 419 421 420 422 #define ISNS_EVENT_DATA_RECEIVED 0x0000 421 423 #define ISNS_EVENT_CONNECTION_OPENED 0x0001 ··· 448 446 #define FWOPT_SESSION_MODE 0x0040 449 447 #define FWOPT_INITIATOR_MODE 0x0020 450 448 #define FWOPT_TARGET_MODE 0x0010 449 + #define FWOPT_ENABLE_CRBDB 0x8000 451 450 452 451 uint16_t exec_throttle; /* 04-05 */ 453 452 uint8_t zio_count; /* 06 */
+1
drivers/scsi/qla4xxx/ql4_glbl.h
··· 94 94 void qla4xxx_wake_dpc(struct scsi_qla_host *ha); 95 95 void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha); 96 96 void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha); 97 + void qla4xxx_dump_registers(struct scsi_qla_host *ha); 97 98 98 99 void qla4_8xxx_pci_config(struct scsi_qla_host *); 99 100 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
+8 -2
drivers/scsi/qla4xxx/ql4_init.c
··· 1207 1207 break; 1208 1208 1209 1209 DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot " 1210 - "firmware to complete... ctrl_sts=0x%x\n", 1211 - ha->host_no, __func__, ctrl_status)); 1210 + "firmware to complete... ctrl_sts=0x%x, remaining=%ld\n", 1211 + ha->host_no, __func__, ctrl_status, max_wait_time)); 1212 1212 1213 1213 msleep_interruptible(250); 1214 1214 } while (!time_after_eq(jiffies, max_wait_time)); ··· 1459 1459 exit_init_online: 1460 1460 set_bit(AF_ONLINE, &ha->flags); 1461 1461 exit_init_hba: 1462 + if (is_qla8022(ha) && (status == QLA_ERROR)) { 1463 + /* Since interrupts are registered in start_firmware for 1464 + * 82xx, release them here if initialize_adapter fails */ 1465 + qla4xxx_free_irqs(ha); 1466 + } 1467 + 1462 1468 DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no, 1463 1469 status == QLA_ERROR ? "FAILED" : "SUCCEDED")); 1464 1470 return status;
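The ql4_init.c error path now releases the IRQs that were registered earlier in start_firmware for the 82xx parts, so a failed initialize_adapter leaves nothing acquired behind. A generic sketch of that acquire-then-roll-back pattern, with hypothetical request_irqs()/free_irqs()/init_rest() helpers:

    #include <stdio.h>

    static int request_irqs(void) { puts("irqs requested"); return 0; }
    static void free_irqs(void)   { puts("irqs freed"); }
    static int init_rest(void)    { return -1; /* simulate a failure */ }

    static int initialize_adapter(void)
    {
        if (request_irqs())
            return -1;
        if (init_rest()) {
            free_irqs();          /* roll back what was acquired */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        printf("init %s\n", initialize_adapter() ? "FAILED" : "SUCCEEDED");
        return 0;
    }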
+1 -9
drivers/scsi/qla4xxx/ql4_iocb.c
··· 202 202 void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha) 203 203 { 204 204 uint32_t dbval = 0; 205 - unsigned long wtime; 206 205 207 206 dbval = 0x14 | (ha->func_num << 5); 208 207 dbval = dbval | (0 << 8) | (ha->request_in << 16); 209 - writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr); 210 - wmb(); 211 208 212 - wtime = jiffies + (2 * HZ); 213 - while (readl((void __iomem *)ha->nx_db_rd_ptr) != dbval && 214 - !time_after_eq(jiffies, wtime)) { 215 - writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr); 216 - wmb(); 217 - } 209 + qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in); 218 210 } 219 211 220 212 /**
+15 -1
drivers/scsi/qla4xxx/ql4_isr.c
··· 72 72 { 73 73 struct srb *srb = ha->status_srb; 74 74 struct scsi_cmnd *cmd; 75 - uint8_t sense_len; 75 + uint16_t sense_len; 76 76 77 77 if (srb == NULL) 78 78 return; ··· 487 487 case MBOX_ASTS_SYSTEM_ERROR: 488 488 /* Log Mailbox registers */ 489 489 ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__); 490 + qla4xxx_dump_registers(ha); 491 + 490 492 if (ql4xdontresethba) { 491 493 DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n", 492 494 ha->host_no, __func__)); ··· 621 619 mbox_sts[3])); 622 620 } 623 621 } 622 + break; 623 + 624 + case MBOX_ASTS_TXSCVR_INSERTED: 625 + DEBUG2(printk(KERN_WARNING 626 + "scsi%ld: AEN %04x Transceiver" 627 + " inserted\n", ha->host_no, mbox_sts[0])); 628 + break; 629 + 630 + case MBOX_ASTS_TXSCVR_REMOVED: 631 + DEBUG2(printk(KERN_WARNING 632 + "scsi%ld: AEN %04x Transceiver" 633 + " removed\n", ha->host_no, mbox_sts[0])); 624 634 break; 625 635 626 636 default:
+10 -1
drivers/scsi/qla4xxx/ql4_mbx.c
··· 299 299 { 300 300 memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); 301 301 memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); 302 + 303 + if (is_qla8022(ha)) 304 + qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, 0); 305 + 302 306 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE; 303 307 mbox_cmd[1] = 0; 304 308 mbox_cmd[2] = LSDW(init_fw_cb_dma); ··· 476 472 init_fw_cb->fw_options |= 477 473 __constant_cpu_to_le16(FWOPT_SESSION_MODE | 478 474 FWOPT_INITIATOR_MODE); 475 + 476 + if (is_qla8022(ha)) 477 + init_fw_cb->fw_options |= 478 + __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB); 479 + 479 480 init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); 480 481 481 482 if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) ··· 601 592 } 602 593 603 594 ql4_printk(KERN_INFO, ha, "%ld firmare IOCBs available (%d).\n", 604 - ha->host_no, mbox_cmd[2]); 595 + ha->host_no, mbox_sts[2]); 605 596 606 597 return QLA_SUCCESS; 607 598 }
+50 -39
drivers/scsi/qla4xxx/ql4_nx.c
··· 839 839 done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
840 840 if (done == 1)
841 841 break;
842 - if (timeout >= qla4_8xxx_rom_lock_timeout)
842 + if (timeout >= qla4_8xxx_rom_lock_timeout) {
843 + ql4_printk(KERN_WARNING, ha,
844 + "%s: Failed to acquire rom lock", __func__);
843 845 return -1;
846 + }
844 847
845 848 timeout++;
846 849
··· 1078 1075 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
1079 1076 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
1080 1077
1081 - return 0;
1082 - }
1083 -
1084 - static int qla4_8xxx_check_for_bad_spd(struct scsi_qla_host *ha)
1085 - {
1086 - u32 val = 0;
1087 - val = qla4_8xxx_rd_32(ha, BOOT_LOADER_DIMM_STATUS) ;
1088 - val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
1089 - if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
1090 - printk("Memory DIMM SPD not programmed. Assumed valid.\n");
1091 - return 1;
1092 - } else if (val) {
1093 - printk("Memory DIMM type incorrect. Info:%08X.\n", val);
1094 - return 2;
1095 - }
1096 1078 return 0;
1097 1079 }
1098 1080
··· 1365 1377
1366 1378 } while (--retries);
1367 1379
1368 - qla4_8xxx_check_for_bad_spd(ha);
1369 -
1370 1380 if (!retries) {
1371 1381 pegtune_val = qla4_8xxx_rd_32(ha,
1372 1382 QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
··· 1526 1540 ql4_printk(KERN_INFO, ha,
1527 1541 "FW: Attempting to load firmware from flash...\n");
1528 1542 rval = qla4_8xxx_start_firmware(ha, ha->hw.flt_region_fw);
1529 - if (rval == QLA_SUCCESS)
1530 - return rval;
1531 1543
1532 - ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash FAILED...\n");
1544 + if (rval != QLA_SUCCESS) {
1545 + ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash"
1546 + " FAILED...\n");
1547 + return rval;
1548 + }
1533 1549
1534 1550 return rval;
1551 + }
1552 +
1553 + static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
1554 + {
1555 + if (qla4_8xxx_rom_lock(ha)) {
1556 + /* Someone else is holding the lock. */
1557 + dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
1558 + }
1559 +
1560 + /*
1561 + * Either we got the lock, or someone
1562 + * else died while holding it.
1563 + * In either case, unlock.
1564 + */
1565 + qla4_8xxx_rom_unlock(ha);
1535 1566 }
1536 1567
1537 1568 /**
··· 1560 1557 static int
1561 1558 qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
1562 1559 {
1563 - int rval, i, timeout;
1560 + int rval = QLA_ERROR;
1561 + int i, timeout;
1564 1562 uint32_t old_count, count;
1563 + int need_reset = 0, peg_stuck = 1;
1565 1564
1566 - if (qla4_8xxx_need_reset(ha))
1567 - goto dev_initialize;
1565 + need_reset = qla4_8xxx_need_reset(ha);
1568 1566
1569 1567 old_count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
1570 1568
··· 1574 1570 if (timeout) {
1575 1571 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
1576 1572 QLA82XX_DEV_FAILED);
1577 - return QLA_ERROR;
1573 + return rval;
1578 1574 }
1579 1575
1580 1576 count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
1581 1577 if (count != old_count)
1578 + peg_stuck = 0;
1579 + }
1580 +
1581 + if (need_reset) {
1582 + /* We are trying to perform a recovery here. */
1583 + if (peg_stuck)
1584 + qla4_8xxx_rom_lock_recovery(ha);
1585 + goto dev_initialize;
1586 + } else {
1587 + /* Start of day for this ha context. */
1588 + if (peg_stuck) {
1589 + /* Either we are the first or recovery in progress. */
1590 + qla4_8xxx_rom_lock_recovery(ha);
1591 + goto dev_initialize;
1592 + } else {
1593 + /* Firmware already running. */
1594 + rval = QLA_SUCCESS;
1582 1595 goto dev_ready;
1596 + }
1583 1597 }
1584 1598
1585 1599 dev_initialize:
··· 1623 1601 ql4_printk(KERN_INFO, ha, "HW State: READY\n");
1624 1602 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
1625 1603
1626 - return QLA_SUCCESS;
1604 + return rval;
1627 1605 }
1628 1606
1629 1607 /**
··· 1786 1764 int retval;
1787 1765 retval = qla4_8xxx_device_state_handler(ha);
1788 1766
1789 - if (retval == QLA_SUCCESS &&
1790 - !test_bit(AF_INIT_DONE, &ha->flags)) {
1767 + if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
1791 1768 retval = qla4xxx_request_irqs(ha);
1792 - if (retval != QLA_SUCCESS) {
1793 - ql4_printk(KERN_WARNING, ha,
1794 - "Failed to reserve interrupt %d already in use.\n",
1795 - ha->pdev->irq);
1796 - } else {
1797 - set_bit(AF_IRQ_ATTACHED, &ha->flags);
1798 - ha->host->irq = ha->pdev->irq;
1799 - ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
1800 - __func__, ha->pdev->irq);
1801 - }
1802 - }
1769 +
1803 1770 return retval;
1804 1771 }
1805 1772
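A note on the qla4_8xxx_device_bootstrap() rework above: the function now reduces to a decision table over two inputs, whether the IDC state requests a reset (need_reset) and whether the firmware's peg-alive counter stopped incrementing (peg_stuck). A minimal sketch of that table, with hypothetical enum and function names that are not driver symbols:

/* Sketch only: a compact restatement of the bootstrap decision above. */
enum bootstrap_action {
	BOOT_FW_READY,		/* firmware already running: goto dev_ready */
	BOOT_LOAD_FW,		/* (re)load firmware: goto dev_initialize */
	BOOT_RECOVER_LOCK,	/* qla4_8xxx_rom_lock_recovery(), then load */
};

static enum bootstrap_action bootstrap_decision(int need_reset, int peg_stuck)
{
	if (peg_stuck)		/* dead peg: the rom lock may be orphaned */
		return BOOT_RECOVER_LOCK;
	return need_reset ? BOOT_LOAD_FW : BOOT_FW_READY;
}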
+2 -3
drivers/scsi/qla4xxx/ql4_nx.h
··· 24 24 25 25 #define CRB_CMDPEG_STATE QLA82XX_REG(0x50) 26 26 #define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) 27 - #define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54) 28 27 #define CRB_DMA_SHIFT QLA82XX_REG(0xcc) 29 28 30 29 #define QLA82XX_HW_H0_CH_HUB_ADR 0x05 ··· 528 529 # define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000) 529 530 # define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg)) 530 531 531 - #define QLA82XX_PEG_TUNE_MN_SPD_ZEROED 0x80000000 532 - #define QLA82XX_BOOT_LOADER_MN_ISSUE 0xff00ffff 533 532 #define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24)) 534 533 #define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8)) 535 534 #define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac)) 536 535 #define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0)) 536 + #define QLA82XX_CAM_RAM_DB1 (QLA82XX_CAM_RAM(0x1b0)) 537 + #define QLA82XX_CAM_RAM_DB2 (QLA82XX_CAM_RAM(0x1b4)) 537 538 538 539 #define HALT_STATUS_UNRECOVERABLE 0x80000000 539 540 #define HALT_STATUS_RECOVERABLE 0x40000000
+57 -52
drivers/scsi/qla4xxx/ql4_os.c
··· 167 167 "of (%d) secs exhausted, marking device DEAD.\n",
168 168 ha->host_no, __func__, ddb_entry->fw_ddb_index,
169 169 QL4_SESS_RECOVERY_TMO));
170 -
171 - qla4xxx_wake_dpc(ha);
172 170 }
173 171 }
174 172
··· 571 573 if (ha->nx_pcibase)
572 574 iounmap(
573 575 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
574 -
575 - if (ha->nx_db_wr_ptr)
576 - iounmap(
577 - (struct device_reg_82xx __iomem *)ha->nx_db_wr_ptr);
578 576 } else if (ha->reg)
579 577 iounmap(ha->reg);
580 578 pci_release_regions(ha->pdev);
··· 686 692 qla4xxx_wake_dpc(ha);
687 693 qla4xxx_mailbox_premature_completion(ha);
688 694 }
689 - }
695 + } else
696 + ha->seconds_since_last_heartbeat = 0;
697 +
690 698 ha->fw_heartbeat_counter = fw_heartbeat_counter;
691 699 }
692 700
··· 881 885 /* Find a command that hasn't completed. */
882 886 for (index = 0; index < ha->host->can_queue; index++) {
883 887 cmd = scsi_host_find_tag(ha->host, index);
884 - if (cmd != NULL)
888 + /*
889 + * We cannot just check if the index is valid,
890 + * becase if we are run from the scsi eh, then
891 + * the scsi/block layer is going to prevent
892 + * the tag from being released.
893 + */
894 + if (cmd != NULL && CMD_SP(cmd))
885 895 break;
886 896 }
887 897 spin_unlock_irqrestore(&ha->hardware_lock, flags);
··· 939 937 {
940 938 uint32_t max_wait_time;
941 939 unsigned long flags = 0;
942 - int status = QLA_ERROR;
940 + int status;
943 941 uint32_t ctrl_status;
944 942
945 - qla4xxx_hw_reset(ha);
943 + status = qla4xxx_hw_reset(ha);
944 + if (status != QLA_SUCCESS)
945 + return status;
946 946
947 + status = QLA_ERROR;
947 948 /* Wait until the Network Reset Intr bit is cleared */
948 949 max_wait_time = RESET_INTR_TOV;
949 950 do {
··· 1106 1101 ha->host_no, __func__));
1107 1102 status = ha->isp_ops->reset_firmware(ha);
1108 1103 if (status == QLA_SUCCESS) {
1109 - qla4xxx_cmd_wait(ha);
1104 + if (!test_bit(AF_FW_RECOVERY, &ha->flags))
1105 + qla4xxx_cmd_wait(ha);
1110 1106 ha->isp_ops->disable_intrs(ha);
1111 1107 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
1112 1108 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
··· 1124 1118 * or if stop_firmware fails for ISP-82xx.
1125 1119 * This is the default case for ISP-4xxx */
1126 1120 if (!is_qla8022(ha) || reset_chip) {
1127 - qla4xxx_cmd_wait(ha);
1121 + if (!test_bit(AF_FW_RECOVERY, &ha->flags))
1122 + qla4xxx_cmd_wait(ha);
1128 1123 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
1129 1124 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
1130 1125 DEBUG2(ql4_printk(KERN_INFO, ha,
··· 1478 1471 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
1479 1472 db_len = pci_resource_len(pdev, 4);
1480 1473
1481 - /* mapping of doorbell write pointer */
1482 - ha->nx_db_wr_ptr = (unsigned long)ioremap(db_base +
1483 - (ha->pdev->devfn << 12), 4);
1484 - if (!ha->nx_db_wr_ptr) {
1485 - printk(KERN_ERR
1486 - "cannot remap MMIO doorbell-write (%s), aborting\n",
1487 - pci_name(pdev));
1488 - goto iospace_error_exit;
1489 - }
1490 - /* mapping of doorbell read pointer */
1491 - ha->nx_db_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
1492 - (ha->pdev->devfn * 8);
1493 - if (!ha->nx_db_rd_ptr)
1494 - printk(KERN_ERR
1495 - "cannot remap MMIO doorbell-read (%s), aborting\n",
1496 - pci_name(pdev));
1497 - return 0;
1474 + ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
1475 + QLA82XX_CAM_RAM_DB2);
1498 1476
1477 + return 0;
1499 1478 iospace_error_exit:
1500 1479 return -ENOMEM;
1501 1480 }
··· 1953 1960 {
1954 1961 unsigned long wait_online;
1955 1962
1956 - wait_online = jiffies + (30 * HZ);
1963 + wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
1957 1964 while (time_before(jiffies, wait_online)) {
1958 1965
1959 1966 if (adapter_up(ha))
1960 1967 return QLA_SUCCESS;
1961 - else if (ha->retry_reset_ha_cnt == 0)
1962 - return QLA_ERROR;
1963 1968
1964 1969 msleep(2000);
1965 1970 }
··· 2012 2021 unsigned int id = cmd->device->id;
2013 2022 unsigned int lun = cmd->device->lun;
2014 2023 unsigned long serial = cmd->serial_number;
2024 + unsigned long flags;
2015 2025 struct srb *srb = NULL;
2016 2026 int ret = SUCCESS;
2017 2027 int wait = 0;
··· 2021 2029 "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n",
2022 2030 ha->host_no, id, lun, cmd, serial);
2023 2031
2032 + spin_lock_irqsave(&ha->hardware_lock, flags);
2024 2033 srb = (struct srb *) CMD_SP(cmd);
2025 -
2026 2034 if (!srb) {
2035 + spin_unlock_irqrestore(&ha->hardware_lock, flags);
2027 2036 return SUCCESS;
2028 -
2037 + }
2029 2038 kref_get(&srb->srb_ref);
2039 + spin_unlock_irqrestore(&ha->hardware_lock, flags);
2030 2040
2031 2041 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
2032 2042 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
··· 2261 2267 qla4xxx_mailbox_premature_completion(ha);
2262 2268 qla4xxx_free_irqs(ha);
2263 2269 pci_disable_device(pdev);
2270 + /* Return back all IOs */
2271 + qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2264 2272 return PCI_ERS_RESULT_NEED_RESET;
2265 2273 case pci_channel_io_perm_failure:
2266 2274 set_bit(AF_EEH_BUSY, &ha->flags);
··· 2286 2290 if (!is_aer_supported(ha))
2287 2291 return PCI_ERS_RESULT_NONE;
2288 2292
2289 - if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
2290 - ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: firmware hang -- "
2291 - "mmio_enabled\n", ha->host_no, __func__);
2292 - return PCI_ERS_RESULT_NEED_RESET;
2293 - } else
2294 - return PCI_ERS_RESULT_RECOVERED;
2293 + return PCI_ERS_RESULT_RECOVERED;
2295 2294 }
2296 2295
2297 - uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2296 + static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2298 2297 {
2299 2298 uint32_t rval = QLA_ERROR;
2299 + uint32_t ret = 0;
2300 2300 int fn;
2301 2301 struct pci_dev *other_pdev = NULL;
2302 2302
··· 2304 2312 clear_bit(AF_ONLINE, &ha->flags);
2305 2313 qla4xxx_mark_all_devices_missing(ha);
2306 2314 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2307 - qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2308 2315 }
2309 2316
2310 2317 fn = PCI_FUNC(ha->pdev->devfn);
··· 2366 2375 /* Clear driver state register */
2367 2376 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
2368 2377 qla4_8xxx_set_drv_active(ha);
2369 - ha->isp_ops->enable_intrs(ha);
2378 + ret = qla4xxx_request_irqs(ha);
2379 + if (ret) {
2380 + ql4_printk(KERN_WARNING, ha, "Failed to "
2381 + "reserve interrupt %d already in use.\n",
2382 + ha->pdev->irq);
2383 + rval = QLA_ERROR;
2384 + } else {
2385 + ha->isp_ops->enable_intrs(ha);
2386 + rval = QLA_SUCCESS;
2387 + }
2370 2388 }
2371 2389 qla4_8xxx_idc_unlock(ha);
2372 2390 } else {
··· 2387 2387 clear_bit(AF_FW_RECOVERY, &ha->flags);
2388 2388 rval = qla4xxx_initialize_adapter(ha,
2389 2389 PRESERVE_DDB_LIST);
2390 - if (rval == QLA_SUCCESS)
2391 - ha->isp_ops->enable_intrs(ha);
2390 + if (rval == QLA_SUCCESS) {
2391 + ret = qla4xxx_request_irqs(ha);
2392 + if (ret) {
2393 + ql4_printk(KERN_WARNING, ha, "Failed to"
2394 + " reserve interrupt %d already in"
2395 + " use.\n", ha->pdev->irq);
2396 + rval = QLA_ERROR;
2397 + } else {
2398 + ha->isp_ops->enable_intrs(ha);
2399 + rval = QLA_SUCCESS;
2400 + }
2401 + }
2392 2402 qla4_8xxx_idc_lock(ha);
2393 2403 qla4_8xxx_set_drv_active(ha);
2394 2404 qla4_8xxx_idc_unlock(ha);
··· 2440 2430 goto exit_slot_reset;
2441 2431 }
2442 2432
2443 - ret = qla4xxx_request_irqs(ha);
2444 - if (ret) {
2445 - ql4_printk(KERN_WARNING, ha, "Failed to reserve interrupt %d"
2446 - " already in use.\n", pdev->irq);
2447 - goto exit_slot_reset;
2448 - }
2433 + ha->isp_ops->disable_intrs(ha);
2449 2434
2450 2435 if (is_qla8022(ha)) {
2451 2436 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
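The qla4xxx_eh_abort() change in the ql4_os.c hunk above closes a race with the completion path: CMD_SP(cmd) can be cleared at any moment, so the pointer must be sampled, and its reference taken, under the same hardware_lock the completion side holds. The pattern in isolation, as an illustrative helper rather than an actual driver function:

/* Illustrative only: pin the per-command srb before the lock drops. */
static struct srb *qla4xxx_get_srb_ref(struct scsi_qla_host *ha,
				       struct scsi_cmnd *cmd)
{
	unsigned long flags;
	struct srb *srb;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	srb = (struct srb *) CMD_SP(cmd);
	if (srb)
		kref_get(&srb->srb_ref); /* completion can no longer free it */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return srb; /* NULL means the command already completed */
}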
+1 -1
drivers/scsi/qla4xxx/ql4_version.h
··· 5 5 * See LICENSE.qla4xxx for copyright and licensing details. 6 6 */ 7 7 8 - #define QLA4XXX_DRIVER_VERSION "5.02.00-k3" 8 + #define QLA4XXX_DRIVER_VERSION "5.02.00-k4"
+2 -1
drivers/scsi/scsi_lib.c
··· 2438 2438 sdev->sdev_state = SDEV_RUNNING; 2439 2439 else if (sdev->sdev_state == SDEV_CREATED_BLOCK) 2440 2440 sdev->sdev_state = SDEV_CREATED; 2441 - else 2441 + else if (sdev->sdev_state != SDEV_CANCEL && 2442 + sdev->sdev_state != SDEV_OFFLINE) 2442 2443 return -EINVAL; 2443 2444 2444 2445 spin_lock_irqsave(q->queue_lock, flags);
+2 -1
drivers/scsi/scsi_sysfs.c
··· 964 964 list_for_each_entry(sdev, &shost->__devices, siblings) { 965 965 if (sdev->channel != starget->channel || 966 966 sdev->id != starget->id || 967 - sdev->sdev_state == SDEV_DEL) 967 + scsi_device_get(sdev)) 968 968 continue; 969 969 spin_unlock_irqrestore(shost->host_lock, flags); 970 970 scsi_remove_device(sdev); 971 + scsi_device_put(sdev); 971 972 spin_lock_irqsave(shost->host_lock, flags); 972 973 goto restart; 973 974 }
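The scsi_sysfs.c change above replaces the bare SDEV_DEL check with scsi_device_get(), which fails for devices already being torn down and, on success, holds a reference across the host_lock drop so the sdev cannot be freed while scsi_remove_device() sleeps. The generic shape of that pattern, with a hypothetical match() predicate standing in for the channel/id comparison:

/* Sketch of the pin-then-drop-lock traversal used above. */
spin_lock_irqsave(shost->host_lock, flags);
restart:
list_for_each_entry(sdev, &shost->__devices, siblings) {
	if (!match(sdev) || scsi_device_get(sdev))
		continue;	/* no match, or device already going away */
	spin_unlock_irqrestore(shost->host_lock, flags);
	scsi_remove_device(sdev);	/* may sleep */
	scsi_device_put(sdev);
	spin_lock_irqsave(shost->host_lock, flags);
	goto restart;	/* the list may have changed while unlocked */
}
spin_unlock_irqrestore(shost->host_lock, flags);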
+23
drivers/scsi/sd.c
··· 259 259 } 260 260 261 261 static ssize_t 262 + sd_show_protection_mode(struct device *dev, struct device_attribute *attr, 263 + char *buf) 264 + { 265 + struct scsi_disk *sdkp = to_scsi_disk(dev); 266 + struct scsi_device *sdp = sdkp->device; 267 + unsigned int dif, dix; 268 + 269 + dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); 270 + dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type); 271 + 272 + if (!dix && scsi_host_dix_capable(sdp->host, SD_DIF_TYPE0_PROTECTION)) { 273 + dif = 0; 274 + dix = 1; 275 + } 276 + 277 + if (!dif && !dix) 278 + return snprintf(buf, 20, "none\n"); 279 + 280 + return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif); 281 + } 282 + 283 + static ssize_t 262 284 sd_show_app_tag_own(struct device *dev, struct device_attribute *attr, 263 285 char *buf) 264 286 { ··· 307 285 __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop, 308 286 sd_store_manage_start_stop), 309 287 __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL), 288 + __ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL), 310 289 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL), 311 290 __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL), 312 291 __ATTR_NULL,
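The new protection_mode attribute complements protection_type: it reports what the HBA can actually do with the disk's DIF type, falling back to DIX Type 0 when the type itself is unsupported. Assuming the usual scsi_disk sysfs location (path and output illustrative, not from the patch):

$ cat /sys/class/scsi_disk/2:0:0:0/protection_mode
dix1

Here "dix1" would mean the host supports DIX data integrity extensions with a Type 1 formatted disk, "dix0" the Type 0 fallback, and "none" that neither DIF nor DIX applies.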
+9
drivers/scsi/sr_ioctl.c
··· 325 325 } 326 326 327 327 /* 328 + * SK/ASC/ASCQ of 2/4/2 means "initialization required" 329 + * Using CD_TRAY_OPEN results in an START_STOP_UNIT to close 330 + * the tray, which resolves the initialization requirement. 331 + */ 332 + if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY 333 + && sshdr.asc == 0x04 && sshdr.ascq == 0x02) 334 + return CDS_TRAY_OPEN; 335 + 336 + /* 328 337 * 0x04 is format in progress .. but there must be a disc present! 329 338 */ 330 339 if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04)
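For reference, the 2/4/2 triple tested in the sr_ioctl.c hunk above is the SPC additional sense "logical unit not ready, initializing command required"; returning CDS_TRAY_OPEN makes the CDROM layer send the START_STOP_UNIT that performs that initialization. Spelled out as a standalone predicate (illustrative only; NOT_READY comes from <scsi/scsi.h>):

/* Illustrative: the sense triple checked above. */
static int sr_needs_init_command(struct scsi_sense_hdr *sshdr)
{
	return scsi_sense_valid(sshdr) &&
	       sshdr->sense_key == NOT_READY &&	/* SK 0x02 */
	       sshdr->asc == 0x04 && sshdr->ascq == 0x02;
}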
+1 -1
include/scsi/libfc.h
··· 721 721 * struct fc_disc - Discovery context 722 722 * @retry_count: Number of retries 723 723 * @pending: 1 if discovery is pending, 0 if not 724 - * @requesting: 1 if discovery has been requested, 0 if not 724 + * @requested: 1 if discovery has been requested, 0 if not 725 725 * @seq_count: Number of sequences used for discovery 726 726 * @buf_len: Length of the discovery buffer 727 727 * @disc_id: Discovery ID
+15 -1
include/scsi/osd_initiator.h
··· 137 137 void *buff; 138 138 unsigned alloc_size; /* 0 here means: don't call kfree */ 139 139 unsigned total_bytes; 140 - } set_attr, enc_get_attr, get_attr; 140 + } cdb_cont, set_attr, enc_get_attr, get_attr; 141 141 142 142 struct _osd_io_info { 143 143 struct bio *bio; ··· 447 447 const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len); 448 448 int osd_req_read_kern(struct osd_request *or, 449 449 const struct osd_obj_id *obj, u64 offset, void *buff, u64 len); 450 + 451 + /* Scatter/Gather write/read commands */ 452 + int osd_req_write_sg(struct osd_request *or, 453 + const struct osd_obj_id *obj, struct bio *bio, 454 + const struct osd_sg_entry *sglist, unsigned numentries); 455 + int osd_req_read_sg(struct osd_request *or, 456 + const struct osd_obj_id *obj, struct bio *bio, 457 + const struct osd_sg_entry *sglist, unsigned numentries); 458 + int osd_req_write_sg_kern(struct osd_request *or, 459 + const struct osd_obj_id *obj, void **buff, 460 + const struct osd_sg_entry *sglist, unsigned numentries); 461 + int osd_req_read_sg_kern(struct osd_request *or, 462 + const struct osd_obj_id *obj, void **buff, 463 + const struct osd_sg_entry *sglist, unsigned numentries); 450 464 451 465 /* 452 466 * Root/Partition/Collection/Object Attributes commands
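A hedged usage sketch for the new kernel-buffer scatter/gather API: two discontiguous 4 KiB extents of one object written with a single WRITE command. The osd_start_request()/osd_finalize_request()/osd_execute_request()/osd_end_request() lifecycle follows existing libosd callers; the capability buffer 'caps' is assumed to be prepared by the caller as usual, and error handling is trimmed to the essentials:

#include <scsi/osd_initiator.h>
#include <scsi/osd_types.h>

/* Sketch only: write buf0 and buf1 to two extents of one object. */
static int osd_write_two_extents(struct osd_dev *od,
				 const struct osd_obj_id *obj,
				 void *buf0, void *buf1, const void *caps)
{
	struct osd_sg_entry sglist[2] = {
		{ .offset = 0,       .len = 4096 },
		{ .offset = 1 << 20, .len = 4096 },
	};
	void *buffs[2] = { buf0, buf1 };
	struct osd_request *or;
	int ret;

	or = osd_start_request(od, GFP_KERNEL);
	if (!or)
		return -ENOMEM;

	ret = osd_req_write_sg_kern(or, obj, buffs, sglist, 2);
	if (!ret)
		ret = osd_finalize_request(or, 0, caps, NULL);
	if (!ret)
		ret = osd_execute_request(or);
	osd_end_request(or);

	return ret;
}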
+42
include/scsi/osd_protocol.h
··· 631 631 put_unaligned_le16(bit_mask, &cap->permissions_bit_mask); 632 632 } 633 633 634 + /* osd2r05a sec 5.3: CDB continuation segment formats */ 635 + enum osd_continuation_segment_format { 636 + CDB_CONTINUATION_FORMAT_V2 = 0x01, 637 + }; 638 + 639 + struct osd_continuation_segment_header { 640 + u8 format; 641 + u8 reserved1; 642 + __be16 service_action; 643 + __be32 reserved2; 644 + u8 integrity_check[OSDv2_CRYPTO_KEYID_SIZE]; 645 + } __packed; 646 + 647 + /* osd2r05a sec 5.4.1: CDB continuation descriptors */ 648 + enum osd_continuation_descriptor_type { 649 + NO_MORE_DESCRIPTORS = 0x0000, 650 + SCATTER_GATHER_LIST = 0x0001, 651 + QUERY_LIST = 0x0002, 652 + USER_OBJECT = 0x0003, 653 + COPY_USER_OBJECT_SOURCE = 0x0101, 654 + EXTENSION_CAPABILITIES = 0xFFEE 655 + }; 656 + 657 + struct osd_continuation_descriptor_header { 658 + __be16 type; 659 + u8 reserved; 660 + u8 pad_length; 661 + __be32 length; 662 + } __packed; 663 + 664 + 665 + /* osd2r05a sec 5.4.2: Scatter/gather list */ 666 + struct osd_sg_list_entry { 667 + __be64 offset; 668 + __be64 len; 669 + }; 670 + 671 + struct osd_sg_continuation_descriptor { 672 + struct osd_continuation_descriptor_header hdr; 673 + struct osd_sg_list_entry entries[]; 674 + }; 675 + 634 676 #endif /* ndef __OSD_PROTOCOL_H__ */
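To make the on-wire encoding concrete, a sketch of hand-filling one scatter/gather continuation descriptor from an osd_sg_entry list. One assumption to flag: hdr.length is taken here to cover only the entry array, not the 8-byte descriptor header; check libosd's own encoder against osd2r05a sec 5.4.1 before relying on that:

/* Sketch only: encode 'numentries' extents into a descriptor. */
static void osd_sg_descriptor_fill(struct osd_sg_continuation_descriptor *sgd,
				   const struct osd_sg_entry *sglist,
				   unsigned numentries)
{
	unsigned i;

	sgd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
	sgd->hdr.reserved = 0;
	sgd->hdr.pad_length = 0;
	/* Assumption: length excludes the descriptor header itself. */
	sgd->hdr.length = cpu_to_be32(numentries * sizeof(sgd->entries[0]));

	for (i = 0; i < numentries; i++) {
		sgd->entries[i].offset = cpu_to_be64(sglist[i].offset);
		sgd->entries[i].len = cpu_to_be64(sglist[i].len);
	}
}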
+5
include/scsi/osd_types.h
··· 37 37 void *val_ptr; /* in network order */ 38 38 }; 39 39 40 + struct osd_sg_entry { 41 + u64 offset; 42 + u64 len; 43 + }; 44 + 40 45 #endif /* ndef __OSD_TYPES_H__ */