Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] fnic: Add FIP support to the fnic driver

Use libfcoe as a common FIP implementation with fcoe.
FIP or non-FIP mode is fully automatic if the firmware
supports and enables it.

Even if FIP is not supported, this uses libfcoe for the non-FIP
handling of FLOGI and its response.

Use the new lport_set_port_id() notification to capture
successful FLOGI responses and port_id resets.

While transitioning between Ethernet and FC mode, all rx and
tx FC frames are queued. In Ethernet mode, all frames are
passed to the exchange manager to capture FLOGI responses.

Change to set data_src_addr to the ctl_src_addr whenever it
would have previously been zero because we're not logged in.
This seems safer so we'll never send a frame with a 0 source MAC.
This also eliminates a special case for sending FLOGI frames.

Signed-off-by: Joe Eykholt <jeykholt@cisco.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>

Authored by Joe Eykholt; committed by James Bottomley.
78112e55 386309ce

+385 -345
+1 -1
drivers/scsi/Kconfig
··· 663 663 config FCOE_FNIC 664 664 tristate "Cisco FNIC Driver" 665 665 depends on PCI && X86 666 - select LIBFC 666 + select LIBFCOE 667 667 help 668 668 This is support for the Cisco PCI-Express FCoE HBA. 669 669
+14 -9
drivers/scsi/fnic/fnic.h
··· 22 22 #include <linux/netdevice.h> 23 23 #include <linux/workqueue.h> 24 24 #include <scsi/libfc.h> 25 + #include <scsi/libfcoe.h> 25 26 #include "fnic_io.h" 26 27 #include "fnic_res.h" 27 28 #include "vnic_dev.h" ··· 146 145 /* Per-instance private data structure */ 147 146 struct fnic { 148 147 struct fc_lport *lport; 148 + struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */ 149 149 struct vnic_dev_bar bar0; 150 150 151 151 struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX]; ··· 164 162 unsigned int wq_count; 165 163 unsigned int cq_count; 166 164 167 - u32 fcoui_mode:1; /* use fcoui address*/ 168 165 u32 vlan_hw_insert:1; /* let hw insert the tag */ 169 166 u32 in_remove:1; /* fnic device in removal */ 170 167 u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ 171 168 172 169 struct completion *remove_wait; /* device remove thread blocks */ 173 170 174 - struct fc_frame *flogi; 175 - struct fc_frame *flogi_resp; 176 - u16 flogi_oxid; 177 - unsigned long s_id; 178 171 enum fnic_state state; 179 172 spinlock_t fnic_lock; 180 173 181 174 u16 vlan_id; /* VLAN tag including priority */ 182 - u8 mac_addr[ETH_ALEN]; 183 - u8 dest_addr[ETH_ALEN]; 184 175 u8 data_src_addr[ETH_ALEN]; 185 176 u64 fcp_input_bytes; /* internal statistic */ 186 177 u64 fcp_output_bytes; /* internal statistic */ ··· 200 205 struct work_struct link_work; 201 206 struct work_struct frame_work; 202 207 struct sk_buff_head frame_queue; 208 + struct sk_buff_head tx_queue; 203 209 204 210 /* copy work queue cache line section */ 205 211 ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; ··· 220 224 ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX]; 221 225 }; 222 226 227 + static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip) 228 + { 229 + return container_of(fip, struct fnic, ctlr); 230 + } 231 + 223 232 extern struct workqueue_struct *fnic_event_queue; 224 233 extern struct device_attribute *fnic_attrs[]; 225 234 ··· 240 239 int 
fnic_rq_cmpl_handler(struct fnic *fnic, int); 241 240 int fnic_alloc_rq_frame(struct vnic_rq *rq); 242 241 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); 243 - int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp); 242 + void fnic_flush_tx(struct fnic *); 243 + void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb); 244 + void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *); 245 + void fnic_update_mac(struct fc_lport *, u8 *new); 246 + void fnic_update_mac_locked(struct fnic *, u8 *new); 244 247 245 248 int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); 246 249 int fnic_abort_cmd(struct scsi_cmnd *); ··· 257 252 void fnic_exch_mgr_reset(struct fc_lport *, u32, u32); 258 253 int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int); 259 254 int fnic_wq_cmpl_handler(struct fnic *fnic, int); 260 - int fnic_flogi_reg_handler(struct fnic *fnic); 255 + int fnic_flogi_reg_handler(struct fnic *fnic, u32); 261 256 void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, 262 257 struct fcpio_host_req *desc); 263 258 int fnic_fw_reset_handler(struct fnic *fnic);
+241 -264
drivers/scsi/fnic/fnic_fcs.c
··· 23 23 #include <linux/if_ether.h> 24 24 #include <linux/if_vlan.h> 25 25 #include <linux/workqueue.h> 26 + #include <scsi/fc/fc_fip.h> 26 27 #include <scsi/fc/fc_els.h> 27 28 #include <scsi/fc/fc_fcoe.h> 28 29 #include <scsi/fc_frame.h> ··· 34 33 #include "cq_exch_desc.h" 35 34 36 35 struct workqueue_struct *fnic_event_queue; 36 + 37 + static void fnic_set_eth_mode(struct fnic *); 37 38 38 39 void fnic_handle_link(struct work_struct *work) 39 40 { ··· 67 64 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 68 65 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 69 66 "link down\n"); 70 - fc_linkdown(fnic->lport); 67 + fcoe_ctlr_link_down(&fnic->ctlr); 71 68 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 72 69 "link up\n"); 73 - fc_linkup(fnic->lport); 70 + fcoe_ctlr_link_up(&fnic->ctlr); 74 71 } else 75 72 /* UP -> UP */ 76 73 spin_unlock_irqrestore(&fnic->fnic_lock, flags); ··· 79 76 /* DOWN -> UP */ 80 77 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 81 78 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); 82 - fc_linkup(fnic->lport); 79 + fcoe_ctlr_link_up(&fnic->ctlr); 83 80 } else { 84 81 /* UP -> DOWN */ 85 82 fnic->lport->host_stats.link_failure_count++; 86 83 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 87 84 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); 88 - fc_linkdown(fnic->lport); 85 + fcoe_ctlr_link_down(&fnic->ctlr); 89 86 } 90 87 91 88 } ··· 110 107 return; 111 108 } 112 109 fp = (struct fc_frame *)skb; 113 - /* if Flogi resp frame, register the address */ 114 - if (fr_flags(fp)) { 115 - vnic_dev_add_addr(fnic->vdev, 116 - fnic->data_src_addr); 117 - fr_flags(fp) = 0; 110 + 111 + /* 112 + * If we're in a transitional state, just re-queue and return. 113 + * The queue will be serviced when we get to a stable state. 
114 + */ 115 + if (fnic->state != FNIC_IN_FC_MODE && 116 + fnic->state != FNIC_IN_ETH_MODE) { 117 + skb_queue_head(&fnic->frame_queue, skb); 118 + spin_unlock_irqrestore(&fnic->fnic_lock, flags); 119 + return; 118 120 } 119 121 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 120 122 121 123 fc_exch_recv(lp, fp); 122 124 } 123 - 124 125 } 125 126 126 - static inline void fnic_import_rq_fc_frame(struct sk_buff *skb, 127 - u32 len, u8 sof, u8 eof) 128 - { 129 - struct fc_frame *fp = (struct fc_frame *)skb; 130 - 131 - skb_trim(skb, len); 132 - fr_eof(fp) = eof; 133 - fr_sof(fp) = sof; 134 - } 135 - 136 - 137 - static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len) 127 + /** 128 + * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. 129 + * @fnic: fnic instance. 130 + * @skb: Ethernet Frame. 131 + */ 132 + static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) 138 133 { 139 134 struct fc_frame *fp; 140 135 struct ethhdr *eh; 141 - struct vlan_ethhdr *vh; 142 136 struct fcoe_hdr *fcoe_hdr; 143 137 struct fcoe_crc_eof *ft; 144 - u32 transport_len = 0; 145 138 139 + /* 140 + * Undo VLAN encapsulation if present. 
141 + */ 146 142 eh = (struct ethhdr *)skb->data; 147 - vh = (struct vlan_ethhdr *)skb->data; 148 - if (vh->h_vlan_proto == htons(ETH_P_8021Q) && 149 - vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) { 150 - skb_pull(skb, sizeof(struct vlan_ethhdr)); 151 - transport_len += sizeof(struct vlan_ethhdr); 152 - } else if (eh->h_proto == htons(ETH_P_FCOE)) { 153 - transport_len += sizeof(struct ethhdr); 154 - skb_pull(skb, sizeof(struct ethhdr)); 155 - } else 156 - return -1; 143 + if (eh->h_proto == htons(ETH_P_8021Q)) { 144 + memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); 145 + eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN); 146 + skb_reset_mac_header(skb); 147 + } 148 + if (eh->h_proto == htons(ETH_P_FIP)) { 149 + skb_pull(skb, sizeof(*eh)); 150 + fcoe_ctlr_recv(&fnic->ctlr, skb); 151 + return 1; /* let caller know packet was used */ 152 + } 153 + if (eh->h_proto != htons(ETH_P_FCOE)) 154 + goto drop; 155 + skb_set_network_header(skb, sizeof(*eh)); 156 + skb_pull(skb, sizeof(*eh)); 157 157 158 158 fcoe_hdr = (struct fcoe_hdr *)skb->data; 159 159 if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) 160 - return -1; 160 + goto drop; 161 161 162 162 fp = (struct fc_frame *)skb; 163 163 fc_frame_init(fp); 164 164 fr_sof(fp) = fcoe_hdr->fcoe_sof; 165 165 skb_pull(skb, sizeof(struct fcoe_hdr)); 166 - transport_len += sizeof(struct fcoe_hdr); 166 + skb_reset_transport_header(skb); 167 167 168 - ft = (struct fcoe_crc_eof *)(skb->data + len - 169 - transport_len - sizeof(*ft)); 168 + ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft)); 170 169 fr_eof(fp) = ft->fcoe_eof; 171 - skb_trim(skb, len - transport_len - sizeof(*ft)); 170 + skb_trim(skb, skb->len - sizeof(*ft)); 172 171 return 0; 172 + drop: 173 + dev_kfree_skb_irq(skb); 174 + return -1; 173 175 } 174 176 175 - static inline int fnic_handle_flogi_resp(struct fnic *fnic, 176 - struct fc_frame *fp) 177 + /** 178 + * fnic_update_mac_locked() - set data MAC address and filters. 179 + * @fnic: fnic instance. 
180 + * @new: newly-assigned FCoE MAC address. 181 + * 182 + * Called with the fnic lock held. 183 + */ 184 + void fnic_update_mac_locked(struct fnic *fnic, u8 *new) 177 185 { 178 - u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC; 179 - struct ethhdr *eth_hdr; 180 - struct fc_frame_header *fh; 181 - int ret = 0; 182 - unsigned long flags; 183 - struct fc_frame *old_flogi_resp = NULL; 186 + u8 *ctl = fnic->ctlr.ctl_src_addr; 187 + u8 *data = fnic->data_src_addr; 184 188 185 - fh = (struct fc_frame_header *)fr_hdr(fp); 189 + if (is_zero_ether_addr(new)) 190 + new = ctl; 191 + if (!compare_ether_addr(data, new)) 192 + return; 193 + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); 194 + if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl)) 195 + vnic_dev_del_addr(fnic->vdev, data); 196 + memcpy(data, new, ETH_ALEN); 197 + if (compare_ether_addr(new, ctl)) 198 + vnic_dev_add_addr(fnic->vdev, new); 199 + } 186 200 187 - spin_lock_irqsave(&fnic->fnic_lock, flags); 201 + /** 202 + * fnic_update_mac() - set data MAC address and filters. 203 + * @lport: local port. 204 + * @new: newly-assigned FCoE MAC address. 205 + */ 206 + void fnic_update_mac(struct fc_lport *lport, u8 *new) 207 + { 208 + struct fnic *fnic = lport_priv(lport); 188 209 189 - if (fnic->state == FNIC_IN_ETH_MODE) { 210 + spin_lock_irq(&fnic->fnic_lock); 211 + fnic_update_mac_locked(fnic, new); 212 + spin_unlock_irq(&fnic->fnic_lock); 213 + } 190 214 191 - /* 192 - * Check if oxid matches on taking the lock. 
A new Flogi 193 - * issued by libFC might have changed the fnic cached oxid 194 - */ 195 - if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) { 196 - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 197 - "Flogi response oxid not" 198 - " matching cached oxid, dropping frame" 199 - "\n"); 200 - ret = -1; 201 - spin_unlock_irqrestore(&fnic->fnic_lock, flags); 202 - dev_kfree_skb_irq(fp_skb(fp)); 203 - goto handle_flogi_resp_end; 215 + /** 216 + * fnic_set_port_id() - set the port_ID after successful FLOGI. 217 + * @lport: local port. 218 + * @port_id: assigned FC_ID. 219 + * @fp: received frame containing the FLOGI accept or NULL. 220 + * 221 + * This is called from libfc when a new FC_ID has been assigned. 222 + * This causes us to reset the firmware to FC_MODE and setup the new MAC 223 + * address and FC_ID. 224 + * 225 + * It is also called with FC_ID 0 when we're logged off. 226 + * 227 + * If the FC_ID is due to point-to-point, fp may be NULL. 228 + */ 229 + void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp) 230 + { 231 + struct fnic *fnic = lport_priv(lport); 232 + u8 *mac; 233 + int ret; 234 + 235 + FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n", 236 + port_id, fp); 237 + 238 + /* 239 + * If we're clearing the FC_ID, change to use the ctl_src_addr. 240 + * Set ethernet mode to send FLOGI. 
241 + */ 242 + if (!port_id) { 243 + fnic_update_mac(lport, fnic->ctlr.ctl_src_addr); 244 + fnic_set_eth_mode(fnic); 245 + return; 246 + } 247 + 248 + if (fp) { 249 + mac = fr_cb(fp)->granted_mac; 250 + if (is_zero_ether_addr(mac)) { 251 + /* non-FIP - FLOGI already accepted - ignore return */ 252 + fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp); 204 253 } 254 + fnic_update_mac(lport, mac); 255 + } 205 256 206 - /* Drop older cached flogi response frame, cache this frame */ 207 - old_flogi_resp = fnic->flogi_resp; 208 - fnic->flogi_resp = fp; 209 - fnic->flogi_oxid = FC_XID_UNKNOWN; 210 - 211 - /* 212 - * this frame is part of flogi get the src mac addr from this 213 - * frame if the src mac is fcoui based then we mark the 214 - * address mode flag to use fcoui base for dst mac addr 215 - * otherwise we have to store the fcoe gateway addr 216 - */ 217 - eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp)); 218 - memcpy(mac, eth_hdr->h_source, ETH_ALEN); 219 - 220 - if (ntoh24(mac) == FC_FCOE_OUI) 221 - fnic->fcoui_mode = 1; 222 - else { 223 - fnic->fcoui_mode = 0; 224 - memcpy(fnic->dest_addr, mac, ETH_ALEN); 225 - } 226 - 227 - /* 228 - * Except for Flogi frame, all outbound frames from us have the 229 - * Eth Src address as FC_FCOE_OUI"our_sid". 
Flogi frame uses 230 - * the vnic MAC address as the Eth Src address 231 - */ 232 - fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id); 233 - 234 - /* We get our s_id from the d_id of the flogi resp frame */ 235 - fnic->s_id = ntoh24(fh->fh_d_id); 236 - 237 - /* Change state to reflect transition from Eth to FC mode */ 257 + /* Change state to reflect transition to FC mode */ 258 + spin_lock_irq(&fnic->fnic_lock); 259 + if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) 238 260 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; 239 - 240 - } else { 261 + else { 241 262 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 242 263 "Unexpected fnic state %s while" 243 264 " processing flogi resp\n", 244 265 fnic_state_to_str(fnic->state)); 245 - ret = -1; 246 - spin_unlock_irqrestore(&fnic->fnic_lock, flags); 247 - dev_kfree_skb_irq(fp_skb(fp)); 248 - goto handle_flogi_resp_end; 266 + spin_unlock_irq(&fnic->fnic_lock); 267 + return; 249 268 } 250 - 251 - spin_unlock_irqrestore(&fnic->fnic_lock, flags); 252 - 253 - /* Drop older cached frame */ 254 - if (old_flogi_resp) 255 - dev_kfree_skb_irq(fp_skb(old_flogi_resp)); 269 + spin_unlock_irq(&fnic->fnic_lock); 256 270 257 271 /* 258 - * send flogi reg request to firmware, this will put the fnic in 259 - * in FC mode 272 + * Send FLOGI registration to firmware to set up FC mode. 273 + * The new address will be set up when registration completes. 
260 274 */ 261 - ret = fnic_flogi_reg_handler(fnic); 275 + ret = fnic_flogi_reg_handler(fnic, port_id); 262 276 263 277 if (ret < 0) { 264 - int free_fp = 1; 265 - spin_lock_irqsave(&fnic->fnic_lock, flags); 266 - /* 267 - * free the frame is some other thread is not 268 - * pointing to it 269 - */ 270 - if (fnic->flogi_resp != fp) 271 - free_fp = 0; 272 - else 273 - fnic->flogi_resp = NULL; 274 - 278 + spin_lock_irq(&fnic->fnic_lock); 275 279 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) 276 280 fnic->state = FNIC_IN_ETH_MODE; 277 - spin_unlock_irqrestore(&fnic->fnic_lock, flags); 278 - if (free_fp) 279 - dev_kfree_skb_irq(fp_skb(fp)); 281 + spin_unlock_irq(&fnic->fnic_lock); 280 282 } 281 - 282 - handle_flogi_resp_end: 283 - return ret; 284 - } 285 - 286 - /* Returns 1 for a response that matches cached flogi oxid */ 287 - static inline int is_matching_flogi_resp_frame(struct fnic *fnic, 288 - struct fc_frame *fp) 289 - { 290 - struct fc_frame_header *fh; 291 - int ret = 0; 292 - u32 f_ctl; 293 - 294 - fh = fc_frame_header_get(fp); 295 - f_ctl = ntoh24(fh->fh_f_ctl); 296 - 297 - if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) && 298 - fh->fh_r_ctl == FC_RCTL_ELS_REP && 299 - (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX && 300 - fh->fh_type == FC_TYPE_ELS) 301 - ret = 1; 302 - 303 - return ret; 304 283 } 305 284 306 285 static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc ··· 311 326 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, 312 327 PCI_DMA_FROMDEVICE); 313 328 skb = buf->os_buf; 329 + fp = (struct fc_frame *)skb; 314 330 buf->os_buf = NULL; 315 331 316 332 cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); ··· 324 338 &fcoe_enc_error, &fcs_ok, &vlan_stripped, 325 339 &vlan); 326 340 eth_hdrs_stripped = 1; 341 + skb_trim(skb, fcp_bytes_written); 342 + fr_sof(fp) = sof; 343 + fr_eof(fp) = eof; 327 344 328 345 } else if (type == CQ_DESC_TYPE_RQ_ENET) { 329 346 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, 
··· 341 352 &ipv4_csum_ok, &ipv6, &ipv4, 342 353 &ipv4_fragment, &fcs_ok); 343 354 eth_hdrs_stripped = 0; 355 + skb_trim(skb, bytes_written); 356 + if (!fcs_ok) { 357 + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 358 + "fcs error. dropping packet.\n"); 359 + goto drop; 360 + } 361 + if (fnic_import_rq_eth_pkt(fnic, skb)) 362 + return; 344 363 345 364 } else { 346 365 /* wrong CQ type*/ ··· 367 370 goto drop; 368 371 } 369 372 370 - if (eth_hdrs_stripped) 371 - fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof); 372 - else if (fnic_import_rq_eth_pkt(skb, bytes_written)) 373 - goto drop; 374 - 375 - fp = (struct fc_frame *)skb; 376 - 377 - /* 378 - * If frame is an ELS response that matches the cached FLOGI OX_ID, 379 - * and is accept, issue flogi_reg_request copy wq request to firmware 380 - * to register the S_ID and determine whether FC_OUI mode or GW mode. 381 - */ 382 - if (is_matching_flogi_resp_frame(fnic, fp)) { 383 - if (!eth_hdrs_stripped) { 384 - if (fc_frame_payload_op(fp) == ELS_LS_ACC) { 385 - fnic_handle_flogi_resp(fnic, fp); 386 - return; 387 - } 388 - /* 389 - * Recd. Flogi reject. No point registering 390 - * with fw, but forward to libFC 391 - */ 392 - goto forward; 393 - } 394 - goto drop; 395 - } 396 - if (!eth_hdrs_stripped) 397 - goto drop; 398 - 399 - forward: 400 373 spin_lock_irqsave(&fnic->fnic_lock, flags); 401 374 if (fnic->stop_rx_link_events) { 402 375 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 403 376 goto drop; 404 377 } 405 - /* Use fr_flags to indicate whether succ. flogi resp or not */ 406 - fr_flags(fp) = 0; 407 378 fr_dev(fp) = fnic->lport; 408 379 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 409 380 ··· 459 494 buf->os_buf = NULL; 460 495 } 461 496 462 - static inline int is_flogi_frame(struct fc_frame_header *fh) 497 + /** 498 + * fnic_eth_send() - Send Ethernet frame. 499 + * @fip: fcoe_ctlr instance. 500 + * @skb: Ethernet Frame, FIP, without VLAN encapsulation. 
501 + */ 502 + void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb) 463 503 { 464 - return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI; 504 + struct fnic *fnic = fnic_from_ctlr(fip); 505 + struct vnic_wq *wq = &fnic->wq[0]; 506 + dma_addr_t pa; 507 + struct ethhdr *eth_hdr; 508 + struct vlan_ethhdr *vlan_hdr; 509 + unsigned long flags; 510 + 511 + if (!fnic->vlan_hw_insert) { 512 + eth_hdr = (struct ethhdr *)skb_mac_header(skb); 513 + vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, 514 + sizeof(*vlan_hdr) - sizeof(*eth_hdr)); 515 + memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN); 516 + vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); 517 + vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; 518 + vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); 519 + } 520 + 521 + pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); 522 + 523 + spin_lock_irqsave(&fnic->wq_lock[0], flags); 524 + if (!vnic_wq_desc_avail(wq)) { 525 + pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE); 526 + spin_unlock_irqrestore(&fnic->wq_lock[0], flags); 527 + kfree_skb(skb); 528 + return; 529 + } 530 + 531 + fnic_queue_wq_eth_desc(wq, skb, pa, skb->len, 532 + fnic->vlan_hw_insert, fnic->vlan_id, 1); 533 + spin_unlock_irqrestore(&fnic->wq_lock[0], flags); 465 534 } 466 535 467 - int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) 536 + /* 537 + * Send FC frame. 
538 + */ 539 + static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) 468 540 { 469 541 struct vnic_wq *wq = &fnic->wq[0]; 470 542 struct sk_buff *skb; ··· 516 514 517 515 fh = fc_frame_header_get(fp); 518 516 skb = fp_skb(fp); 517 + 518 + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && 519 + fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) 520 + return 0; 519 521 520 522 if (!fnic->vlan_hw_insert) { 521 523 eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); ··· 536 530 fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); 537 531 } 538 532 539 - if (is_flogi_frame(fh)) { 533 + if (fnic->ctlr.map_dest) 540 534 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); 541 - memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN); 542 - } else { 543 - if (fnic->fcoui_mode) 544 - fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); 545 - else 546 - memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN); 547 - memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); 548 - } 535 + else 536 + memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); 537 + memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); 549 538 550 539 tot_len = skb->len; 551 540 BUG_ON(tot_len % 4); ··· 579 578 int fnic_send(struct fc_lport *lp, struct fc_frame *fp) 580 579 { 581 580 struct fnic *fnic = lport_priv(lp); 582 - struct fc_frame_header *fh; 583 - int ret = 0; 584 - enum fnic_state old_state; 585 581 unsigned long flags; 586 - struct fc_frame *old_flogi = NULL; 587 - struct fc_frame *old_flogi_resp = NULL; 588 582 589 583 if (fnic->in_remove) { 590 584 dev_kfree_skb(fp_skb(fp)); 591 - ret = -1; 592 - goto fnic_send_end; 585 + return -1; 593 586 } 594 587 595 - fh = fc_frame_header_get(fp); 596 - /* if not an Flogi frame, send it out, this is the common case */ 597 - if (!is_flogi_frame(fh)) 598 - return fnic_send_frame(fnic, fp); 588 + /* 589 + * Queue frame if in a transitional state. 590 + * This occurs while registering the Port_ID / MAC address after FLOGI. 
591 + */ 592 + spin_lock_irqsave(&fnic->fnic_lock, flags); 593 + if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) { 594 + skb_queue_tail(&fnic->tx_queue, fp_skb(fp)); 595 + spin_unlock_irqrestore(&fnic->fnic_lock, flags); 596 + return 0; 597 + } 598 + spin_unlock_irqrestore(&fnic->fnic_lock, flags); 599 599 600 - /* Flogi frame, now enter the state machine */ 600 + return fnic_send_frame(fnic, fp); 601 + } 602 + 603 + /** 604 + * fnic_flush_tx() - send queued frames. 605 + * @fnic: fnic device 606 + * 607 + * Send frames that were waiting to go out in FC or Ethernet mode. 608 + * Whenever changing modes we purge queued frames, so these frames should 609 + * be queued for the stable mode that we're in, either FC or Ethernet. 610 + * 611 + * Called without fnic_lock held. 612 + */ 613 + void fnic_flush_tx(struct fnic *fnic) 614 + { 615 + struct sk_buff *skb; 616 + struct fc_frame *fp; 617 + 618 + while ((skb = skb_dequeue(&fnic->frame_queue))) { 619 + fp = (struct fc_frame *)skb; 620 + fnic_send_frame(fnic, fp); 621 + } 622 + } 623 + 624 + /** 625 + * fnic_set_eth_mode() - put fnic into ethernet mode. 626 + * @fnic: fnic device 627 + * 628 + * Called without fnic lock held. 
629 + */ 630 + static void fnic_set_eth_mode(struct fnic *fnic) 631 + { 632 + unsigned long flags; 633 + enum fnic_state old_state; 634 + int ret; 601 635 602 636 spin_lock_irqsave(&fnic->fnic_lock, flags); 603 637 again: 604 - /* Get any old cached frames, free them after dropping lock */ 605 - old_flogi = fnic->flogi; 606 - fnic->flogi = NULL; 607 - old_flogi_resp = fnic->flogi_resp; 608 - fnic->flogi_resp = NULL; 609 - 610 - fnic->flogi_oxid = FC_XID_UNKNOWN; 611 - 612 638 old_state = fnic->state; 613 639 switch (old_state) { 614 640 case FNIC_IN_FC_MODE: 615 641 case FNIC_IN_ETH_TRANS_FC_MODE: 616 642 default: 617 643 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; 618 - vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); 619 644 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 620 - 621 - if (old_flogi) { 622 - dev_kfree_skb(fp_skb(old_flogi)); 623 - old_flogi = NULL; 624 - } 625 - if (old_flogi_resp) { 626 - dev_kfree_skb(fp_skb(old_flogi_resp)); 627 - old_flogi_resp = NULL; 628 - } 629 645 630 646 ret = fnic_fw_reset_handler(fnic); 631 647 632 648 spin_lock_irqsave(&fnic->fnic_lock, flags); 633 649 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) 634 650 goto again; 635 - if (ret) { 651 + if (ret) 636 652 fnic->state = old_state; 637 - spin_unlock_irqrestore(&fnic->fnic_lock, flags); 638 - dev_kfree_skb(fp_skb(fp)); 639 - goto fnic_send_end; 640 - } 641 - old_flogi = fnic->flogi; 642 - fnic->flogi = fp; 643 - fnic->flogi_oxid = ntohs(fh->fh_ox_id); 644 - old_flogi_resp = fnic->flogi_resp; 645 - fnic->flogi_resp = NULL; 646 - spin_unlock_irqrestore(&fnic->fnic_lock, flags); 647 653 break; 648 654 649 655 case FNIC_IN_FC_TRANS_ETH_MODE: 650 - /* 651 - * A reset is pending with the firmware. Store the flogi 652 - * and its oxid. The transition out of this state happens 653 - * only when Firmware completes the reset, either with 654 - * success or failed. 
If success, transition to 655 - * FNIC_IN_ETH_MODE, if fail, then transition to 656 - * FNIC_IN_FC_MODE 657 - */ 658 - fnic->flogi = fp; 659 - fnic->flogi_oxid = ntohs(fh->fh_ox_id); 660 - spin_unlock_irqrestore(&fnic->fnic_lock, flags); 661 - break; 662 - 663 656 case FNIC_IN_ETH_MODE: 664 - /* 665 - * The fw/hw is already in eth mode. Store the oxid, 666 - * and send the flogi frame out. The transition out of this 667 - * state happens only we receive flogi response from the 668 - * network, and the oxid matches the cached oxid when the 669 - * flogi frame was sent out. If they match, then we issue 670 - * a flogi_reg request and transition to state 671 - * FNIC_IN_ETH_TRANS_FC_MODE 672 - */ 673 - fnic->flogi_oxid = ntohs(fh->fh_ox_id); 674 - spin_unlock_irqrestore(&fnic->fnic_lock, flags); 675 - ret = fnic_send_frame(fnic, fp); 676 657 break; 677 658 } 678 - 679 - fnic_send_end: 680 - if (old_flogi) 681 - dev_kfree_skb(fp_skb(old_flogi)); 682 - if (old_flogi_resp) 683 - dev_kfree_skb(fp_skb(old_flogi_resp)); 684 - return ret; 659 + spin_unlock_irqrestore(&fnic->fnic_lock, flags); 685 660 } 686 661 687 662 static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
+42 -29
drivers/scsi/fnic/fnic_main.c
··· 25 25 #include <linux/interrupt.h> 26 26 #include <linux/spinlock.h> 27 27 #include <linux/workqueue.h> 28 + #include <linux/if_ether.h> 29 + #include <scsi/fc/fc_fip.h> 28 30 #include <scsi/scsi_host.h> 29 31 #include <scsi/scsi_transport.h> 30 32 #include <scsi/scsi_transport_fc.h> ··· 70 68 71 69 static struct libfc_function_template fnic_transport_template = { 72 70 .frame_send = fnic_send, 71 + .lport_set_port_id = fnic_set_port_id, 73 72 .fcp_abort_io = fnic_empty_scsi_cleanup, 74 73 .fcp_cleanup = fnic_empty_scsi_cleanup, 75 74 .exch_mgr_reset = fnic_exch_mgr_reset ··· 327 324 { 328 325 unsigned int i; 329 326 int err; 330 - unsigned long flags; 331 - struct fc_frame *flogi = NULL; 332 - struct fc_frame *flogi_resp = NULL; 333 327 334 328 vnic_dev_disable(fnic->vdev); 335 329 for (i = 0; i < fnic->intr_count; i++) ··· 367 367 for (i = 0; i < fnic->intr_count; i++) 368 368 vnic_intr_clean(&fnic->intr[i]); 369 369 370 - /* 371 - * Remove cached flogi and flogi resp frames if any 372 - * These frames are not in any queue, and therefore queue 373 - * cleanup does not clean them. So clean them explicitly 374 - */ 375 - spin_lock_irqsave(&fnic->fnic_lock, flags); 376 - flogi = fnic->flogi; 377 - fnic->flogi = NULL; 378 - flogi_resp = fnic->flogi_resp; 379 - fnic->flogi_resp = NULL; 380 - spin_unlock_irqrestore(&fnic->fnic_lock, flags); 381 - 382 - if (flogi) 383 - dev_kfree_skb(fp_skb(flogi)); 384 - 385 - if (flogi_resp) 386 - dev_kfree_skb(fp_skb(flogi_resp)); 387 - 388 370 mempool_destroy(fnic->io_req_pool); 389 371 for (i = 0; i < FNIC_SGL_NUM_CACHES; i++) 390 372 mempool_destroy(fnic->io_sgl_pool[i]); ··· 389 407 struct kmem_cache *mem = pool_data; 390 408 391 409 return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA); 410 + } 411 + 412 + /** 413 + * fnic_get_mac() - get assigned data MAC address for FIP code. 414 + * @lport: local port. 
415 + */ 416 + static u8 *fnic_get_mac(struct fc_lport *lport) 417 + { 418 + struct fnic *fnic = lport_priv(lport); 419 + 420 + return fnic->data_src_addr; 392 421 } 393 422 394 423 static int __devinit fnic_probe(struct pci_dev *pdev, ··· 426 433 host = lp->host; 427 434 fnic = lport_priv(lp); 428 435 fnic->lport = lp; 436 + fnic->ctlr.lp = lp; 429 437 430 438 snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, 431 439 host->host_no); ··· 535 541 goto err_out_dev_close; 536 542 } 537 543 538 - err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr); 544 + err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); 539 545 if (err) { 540 546 shost_printk(KERN_ERR, fnic->lport->host, 541 547 "vNIC get MAC addr failed \n"); 542 548 goto err_out_dev_close; 543 549 } 550 + /* set data_src for point-to-point mode and to keep it non-zero */ 551 + memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN); 544 552 545 553 /* Get vNIC configuration */ 546 554 err = fnic_get_vnic_config(fnic); ··· 611 615 fnic->vlan_hw_insert = 1; 612 616 fnic->vlan_id = 0; 613 617 614 - fnic->flogi_oxid = FC_XID_UNKNOWN; 615 - fnic->flogi = NULL; 616 - fnic->flogi_resp = NULL; 618 + /* Initialize the FIP fcoe_ctrl struct */ 619 + fnic->ctlr.send = fnic_eth_send; 620 + fnic->ctlr.update_mac = fnic_update_mac; 621 + fnic->ctlr.get_src_addr = fnic_get_mac; 622 + fcoe_ctlr_init(&fnic->ctlr); 623 + if (fnic->config.flags & VFCF_FIP_CAPABLE) { 624 + shost_printk(KERN_INFO, fnic->lport->host, 625 + "firmware supports FIP\n"); 626 + vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); 627 + vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); 628 + } else { 629 + shost_printk(KERN_INFO, fnic->lport->host, 630 + "firmware uses non-FIP mode\n"); 631 + fnic->ctlr.mode = FIP_ST_NON_FIP; 632 + } 617 633 fnic->state = FNIC_IN_FC_MODE; 618 634 619 635 /* Enable hardware stripping of vlan header on ingress */ ··· 716 708 INIT_WORK(&fnic->link_work, fnic_handle_link); 717 709 
INIT_WORK(&fnic->frame_work, fnic_handle_frame); 718 710 skb_queue_head_init(&fnic->frame_queue); 711 + skb_queue_head_init(&fnic->tx_queue); 719 712 720 713 /* Enable all queues */ 721 714 for (i = 0; i < fnic->raw_wq_count; i++) ··· 747 738 err_out_free_exch_mgr: 748 739 fc_exch_mgr_free(lp); 749 740 err_out_remove_scsi_host: 750 - fc_remove_host(fnic->lport->host); 751 - scsi_remove_host(fnic->lport->host); 741 + fc_remove_host(lp->host); 742 + scsi_remove_host(lp->host); 752 743 err_out_free_rq_buf: 753 744 for (i = 0; i < fnic->rq_count; i++) 754 745 vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); ··· 782 773 static void __devexit fnic_remove(struct pci_dev *pdev) 783 774 { 784 775 struct fnic *fnic = pci_get_drvdata(pdev); 776 + struct fc_lport *lp = fnic->lport; 785 777 unsigned long flags; 786 778 787 779 /* ··· 804 794 */ 805 795 flush_workqueue(fnic_event_queue); 806 796 skb_queue_purge(&fnic->frame_queue); 797 + skb_queue_purge(&fnic->tx_queue); 807 798 808 799 /* 809 800 * Log off the fabric. This stops all remote ports, dns port, ··· 817 806 fnic->in_remove = 1; 818 807 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 819 808 820 - fc_lport_destroy(fnic->lport); 809 + fcoe_ctlr_destroy(&fnic->ctlr); 810 + fc_lport_destroy(lp); 821 811 822 812 /* 823 813 * This stops the fnic device, masks all interrupts. Completed ··· 828 816 fnic_cleanup(fnic); 829 817 830 818 BUG_ON(!skb_queue_empty(&fnic->frame_queue)); 819 + BUG_ON(!skb_queue_empty(&fnic->tx_queue)); 831 820 832 821 spin_lock_irqsave(&fnic_list_lock, flags); 833 822 list_del(&fnic->list); ··· 847 834 pci_release_regions(pdev); 848 835 pci_disable_device(pdev); 849 836 pci_set_drvdata(pdev, NULL); 850 - scsi_host_put(fnic->lport->host); 837 + scsi_host_put(lp->host); 851 838 } 852 839 853 840 static struct pci_driver fnic_driver = {
+2 -3
drivers/scsi/fnic/fnic_res.c
··· 144 144 c->intr_timer_type = c->intr_timer_type; 145 145 146 146 shost_printk(KERN_INFO, fnic->lport->host, 147 - "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x " 147 + "vNIC MAC addr %pM " 148 148 "wq/wq_copy/rq %d/%d/%d\n", 149 - fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2], 150 - fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5], 149 + fnic->ctlr.ctl_src_addr, 151 150 c->wq_enet_desc_count, c->wq_copy_desc_count, 152 151 c->rq_desc_count); 153 152 shost_printk(KERN_INFO, fnic->lport->host,
+50
drivers/scsi/fnic/fnic_res.h
··· 51 51 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); 52 52 } 53 53 54 + static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq, 55 + void *os_buf, dma_addr_t dma_addr, 56 + unsigned int len, 57 + int vlan_tag_insert, 58 + unsigned int vlan_tag, 59 + int cq_entry) 60 + { 61 + struct wq_enet_desc *desc = vnic_wq_next_desc(wq); 62 + 63 + wq_enet_desc_enc(desc, 64 + (u64)dma_addr | VNIC_PADDR_TARGET, 65 + (u16)len, 66 + 0, /* mss_or_csum_offset */ 67 + 0, /* fc_eof */ 68 + 0, /* offload_mode */ 69 + 1, /* eop */ 70 + (u8)cq_entry, 71 + 0, /* fcoe_encap */ 72 + (u8)vlan_tag_insert, 73 + (u16)vlan_tag, 74 + 0 /* loopback */); 75 + 76 + vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1); 77 + } 78 + 54 79 static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, 55 80 u32 req_id, 56 81 u32 lunmap_id, u8 spl_flags, ··· 159 134 desc->hdr.tag.u.req_id = req_id; /* id for this request */ 160 135 161 136 desc->u.flogi_reg.format = format; 137 + desc->u.flogi_reg._resvd = 0; 162 138 hton24(desc->u.flogi_reg.s_id, s_id); 163 139 memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN); 140 + 141 + vnic_wq_copy_post(wq); 142 + } 143 + 144 + static inline void fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq, 145 + u32 req_id, u32 s_id, 146 + u8 *fcf_mac, u8 *ha_mac, 147 + u32 r_a_tov, u32 e_d_tov) 148 + { 149 + struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); 150 + 151 + desc->hdr.type = FCPIO_FLOGI_FIP_REG; /* enum fcpio_type */ 152 + desc->hdr.status = 0; /* header status entry */ 153 + desc->hdr._resvd = 0; /* reserved */ 154 + desc->hdr.tag.u.req_id = req_id; /* id for this request */ 155 + 156 + desc->u.flogi_fip_reg._resvd0 = 0; 157 + hton24(desc->u.flogi_fip_reg.s_id, s_id); 158 + memcpy(desc->u.flogi_fip_reg.fcf_mac, fcf_mac, ETH_ALEN); 159 + desc->u.flogi_fip_reg._resvd1 = 0; 160 + desc->u.flogi_fip_reg.r_a_tov = r_a_tov; 161 + desc->u.flogi_fip_reg.e_d_tov = e_d_tov; 162 + memcpy(desc->u.flogi_fip_reg.ha_mac, ha_mac, ETH_ALEN); 
163 + desc->u.flogi_fip_reg._resvd2 = 0; 164 164 165 165 vnic_wq_copy_post(wq); 166 166 }
+34 -39
drivers/scsi/fnic/fnic_scsi.c
··· 174 174 int ret = 0; 175 175 unsigned long flags; 176 176 177 + skb_queue_purge(&fnic->frame_queue); 178 + skb_queue_purge(&fnic->tx_queue); 179 + 177 180 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); 178 181 179 182 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) ··· 203 200 * fnic_flogi_reg_handler 204 201 * Routine to send flogi register msg to fw 205 202 */ 206 - int fnic_flogi_reg_handler(struct fnic *fnic) 203 + int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) 207 204 { 208 205 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; 206 + enum fcpio_flogi_reg_format_type format; 207 + struct fc_lport *lp = fnic->lport; 209 208 u8 gw_mac[ETH_ALEN]; 210 209 int ret = 0; 211 210 unsigned long flags; ··· 222 217 goto flogi_reg_ioreq_end; 223 218 } 224 219 225 - if (fnic->fcoui_mode) 220 + if (fnic->ctlr.map_dest) { 226 221 memset(gw_mac, 0xff, ETH_ALEN); 227 - else 228 - memcpy(gw_mac, fnic->dest_addr, ETH_ALEN); 222 + format = FCPIO_FLOGI_REG_DEF_DEST; 223 + } else { 224 + memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN); 225 + format = FCPIO_FLOGI_REG_GW_DEST; 226 + } 229 227 230 - fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, 231 - FCPIO_FLOGI_REG_GW_DEST, 232 - fnic->s_id, 233 - gw_mac); 228 + if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) { 229 + fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG, 230 + fc_id, gw_mac, 231 + fnic->data_src_addr, 232 + lp->r_a_tov, lp->e_d_tov); 233 + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 234 + "FLOGI FIP reg issued fcid %x src %pM dest %pM\n", 235 + fc_id, fnic->data_src_addr, gw_mac); 236 + } else { 237 + fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, 238 + format, fc_id, gw_mac); 239 + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 240 + "FLOGI reg issued fcid %x map %d dest %pM\n", 241 + fc_id, fnic->ctlr.map_dest, gw_mac); 242 + } 234 243 235 244 flogi_reg_ioreq_end: 236 245 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); 237 - 238 - if (!ret) 239 - 
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 240 - "flog reg issued\n"); 241 - 242 246 return ret; 243 247 } 244 248 ··· 467 453 u8 hdr_status; 468 454 struct fcpio_tag tag; 469 455 int ret = 0; 470 - struct fc_frame *flogi; 471 456 unsigned long flags; 472 457 473 458 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); ··· 475 462 fnic_cleanup_io(fnic, SCSI_NO_TAG); 476 463 477 464 spin_lock_irqsave(&fnic->fnic_lock, flags); 478 - 479 - flogi = fnic->flogi; 480 - fnic->flogi = NULL; 481 465 482 466 /* fnic should be in FC_TRANS_ETH_MODE */ 483 467 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { ··· 516 506 * free the flogi frame. Else, send it out 517 507 */ 518 508 if (fnic->remove_wait || ret) { 519 - fnic->flogi_oxid = FC_XID_UNKNOWN; 520 509 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 521 - if (flogi) 522 - dev_kfree_skb_irq(fp_skb(flogi)); 510 + skb_queue_purge(&fnic->tx_queue); 523 511 goto reset_cmpl_handler_end; 524 512 } 525 513 526 514 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 527 515 528 - if (flogi) 529 - ret = fnic_send_frame(fnic, flogi); 516 + fnic_flush_tx(fnic); 530 517 531 518 reset_cmpl_handler_end: 532 519 return ret; ··· 540 533 u8 hdr_status; 541 534 struct fcpio_tag tag; 542 535 int ret = 0; 543 - struct fc_frame *flogi_resp = NULL; 544 536 unsigned long flags; 545 - struct sk_buff *skb; 546 537 547 538 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); 548 539 549 540 /* Update fnic state based on status of flogi reg completion */ 550 541 spin_lock_irqsave(&fnic->fnic_lock, flags); 551 - 552 - flogi_resp = fnic->flogi_resp; 553 - fnic->flogi_resp = NULL; 554 542 555 543 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) { 556 544 ··· 570 568 ret = -1; 571 569 } 572 570 573 - /* Successful flogi reg cmpl, pass frame to LibFC */ 574 - if (!ret && flogi_resp) { 571 + if (!ret) { 575 572 if (fnic->stop_rx_link_events) { 576 573 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 577 574 goto reg_cmpl_handler_end; 578 575 } 579 - 
skb = (struct sk_buff *)flogi_resp; 580 - /* Use fr_flags to indicate whether flogi resp or not */ 581 - fr_flags(flogi_resp) = 1; 582 - fr_dev(flogi_resp) = fnic->lport; 583 576 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 584 577 585 - skb_queue_tail(&fnic->frame_queue, skb); 578 + fnic_flush_tx(fnic); 586 579 queue_work(fnic_event_queue, &fnic->frame_work); 587 - 588 580 } else { 589 581 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 590 - if (flogi_resp) 591 - dev_kfree_skb_irq(fp_skb(flogi_resp)); 592 582 } 593 583 594 584 reg_cmpl_handler_end: ··· 902 908 break; 903 909 904 910 case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ 911 + case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ 905 912 ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc); 906 913 break; 907 914 ··· 1742 1747 fnic->remove_wait = &remove_wait; 1743 1748 old_state = fnic->state; 1744 1749 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; 1745 - vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); 1750 + fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); 1746 1751 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 1747 1752 1748 1753 err = fnic_fw_reset_handler(fnic); ··· 1782 1787 spin_lock_irqsave(&fnic->fnic_lock, flags); 1783 1788 old_state = fnic->state; 1784 1789 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; 1785 - vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); 1790 + fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); 1786 1791 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 1787 1792 1788 1793 if (fnic_fw_reset_handler(fnic)) {
+1
drivers/scsi/fnic/vnic_scsi.h
··· 95 95 96 96 #define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */ 97 97 #define VFCF_PERBI 0x2 /* persistent binding info available */ 98 + #define VFCF_FIP_CAPABLE 0x4 /* firmware can handle FIP */ 98 99 99 100 #endif /* _VNIC_SCSI_H_ */