Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (38 commits)
fusion: mptsas, fix lock imbalance
[SCSI] scsi_transport_fc: replace BUS_ID_SIZE by fixed count
sd, sr: fix Driver 'sd' needs updating message
scsi_transport_iscsi: return -EOVERFLOW for Too many iscsi targets
fc_transport: Selective return value from BSG timeout function
fc_transport: The softirq_done function registration for BSG request
sym53c8xx: ratelimit parity errors
explain the hidden scsi_wait_scan Kconfig variable
ibmvfc: Fix endless PRLI loop in discovery
ibmvfc: Process async events before command responses
libfc: Add runtime debugging with debug_logging module parameter
libfcoe: Add runtime debugging with module param debug_logging
fcoe: Add runtime debug logging with module parameter debug_logging
scsi_debug: Add support for physical block exponent and alignment
cnic: add NETDEV_1000 and NETDEVICES to Kconfig select
cnic: Fix __symbol_get() build error.
Revert "[SCSI] cnic: fix error: implicit declaration of function ‘__symbol_get’"
ipr: differentiate pci-x and pci-e based adapters
ipr: add test for MSI interrupt support
scsi_transport_spi: Blacklist Ultrium-3 tape for IU transfers
...

+1246 -605
-3
block/bsg.c
··· 315 blk_put_request(rq); 316 if (next_rq) { 317 blk_rq_unmap_user(next_rq->bio); 318 - next_rq->bio = NULL; 319 blk_put_request(next_rq); 320 } 321 return ERR_PTR(ret); ··· 448 hdr->dout_resid = rq->resid_len; 449 hdr->din_resid = rq->next_rq->resid_len; 450 blk_rq_unmap_user(bidi_bio); 451 - rq->next_rq->bio = NULL; 452 blk_put_request(rq->next_rq); 453 } else if (rq_data_dir(rq) == READ) 454 hdr->din_resid = rq->resid_len; ··· 466 blk_rq_unmap_user(bio); 467 if (rq->cmd != rq->__cmd) 468 kfree(rq->cmd); 469 - rq->bio = NULL; 470 blk_put_request(rq); 471 472 return ret;
··· 315 blk_put_request(rq); 316 if (next_rq) { 317 blk_rq_unmap_user(next_rq->bio); 318 blk_put_request(next_rq); 319 } 320 return ERR_PTR(ret); ··· 449 hdr->dout_resid = rq->resid_len; 450 hdr->din_resid = rq->next_rq->resid_len; 451 blk_rq_unmap_user(bidi_bio); 452 blk_put_request(rq->next_rq); 453 } else if (rq_data_dir(rq) == READ) 454 hdr->din_resid = rq->resid_len; ··· 468 blk_rq_unmap_user(bio); 469 if (rq->cmd != rq->__cmd) 470 kfree(rq->cmd); 471 blk_put_request(rq); 472 473 return ret;
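For context, the bsg fix above drops the manual rq->bio = NULL hints before blk_put_request(). A minimal hedged sketch of the resulting map/unmap/put pattern; this is not the bsg code itself, and example_map_and_put(), q, ubuf and len are made-up names:

    #include <linux/blkdev.h>

    /* Hedged sketch of the block-layer pattern the fix relies on. */
    static int example_map_and_put(struct request_queue *q,
                                   void __user *ubuf, unsigned long len)
    {
            struct request *rq;
            struct bio *bio;
            int ret;

            rq = blk_get_request(q, READ, GFP_KERNEL);
            if (!rq)
                    return -ENOMEM;

            ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
            if (ret) {
                    blk_put_request(rq);
                    return ret;
            }
            bio = rq->bio;          /* saved for the later unmap */

            /* ... submit the request and wait for completion here ... */

            ret = blk_rq_unmap_user(bio);
            blk_put_request(rq);    /* no rq->bio = NULL needed first */
            return ret;
    }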
+2 -2
drivers/message/fusion/mptsas.c
··· 3518 } else 3519 mptsas_volume_delete(ioc, sas_info->fw.id); 3520 } 3521 - mutex_lock(&ioc->sas_device_info_mutex); 3522 3523 /* expanders */ 3524 mutex_lock(&ioc->sas_topology_mutex); ··· 3549 goto redo_expander_scan; 3550 } 3551 } 3552 - mutex_lock(&ioc->sas_topology_mutex); 3553 } 3554 3555 /**
··· 3518 } else 3519 mptsas_volume_delete(ioc, sas_info->fw.id); 3520 } 3521 + mutex_unlock(&ioc->sas_device_info_mutex); 3522 3523 /* expanders */ 3524 mutex_lock(&ioc->sas_topology_mutex); ··· 3549 goto redo_expander_scan; 3550 } 3551 } 3552 + mutex_unlock(&ioc->sas_topology_mutex); 3553 } 3554 3555 /**
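The mptsas bug was a copy-paste slip: the rescan path took sas_device_info_mutex (and later sas_topology_mutex) a second time where it meant to release them, so the next acquisition would deadlock. A trivial hedged sketch of the balanced pattern, with made-up names:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_info_mutex);        /* made-up stand-in */

    static void example_rescan(void)
    {
            mutex_lock(&example_info_mutex);
            /* ... walk the protected device list ... */
            mutex_unlock(&example_info_mutex);      /* not mutex_lock() again */
    }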
+2 -4
drivers/net/cnic.c
··· 25 #include <linux/delay.h> 26 #include <linux/ethtool.h> 27 #include <linux/if_vlan.h> 28 - #include <linux/module.h> 29 - 30 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 31 #define BCM_VLAN 1 32 #endif ··· 2519 struct cnic_dev *cdev; 2520 struct cnic_local *cp; 2521 struct cnic_eth_dev *ethdev = NULL; 2522 - struct cnic_eth_dev *(*probe)(void *) = NULL; 2523 2524 - probe = __symbol_get("bnx2_cnic_probe"); 2525 if (probe) { 2526 ethdev = (*probe)(dev); 2527 symbol_put_addr(probe);
··· 25 #include <linux/delay.h> 26 #include <linux/ethtool.h> 27 #include <linux/if_vlan.h> 28 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 29 #define BCM_VLAN 1 30 #endif ··· 2521 struct cnic_dev *cdev; 2522 struct cnic_local *cp; 2523 struct cnic_eth_dev *ethdev = NULL; 2524 + struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; 2525 2526 + probe = symbol_get(bnx2_cnic_probe); 2527 if (probe) { 2528 ethdev = (*probe)(dev); 2529 symbol_put_addr(probe);
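__symbol_get() takes a string, returns an untyped pointer, and is only available when CONFIG_MODULES is enabled, which is what broke the build. The typed symbol_get() macro used above resolves the symbol by name, takes a reference on the owning module, and yields a correctly typed function pointer; symbol_put_addr() drops that reference. A hedged restatement of the pattern from the hunk, where example_probe() is a made-up wrapper:

    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include "cnic_if.h"    /* declares bnx2_cnic_probe(), as added below */

    static struct cnic_eth_dev *example_probe(struct net_device *dev)
    {
            struct cnic_eth_dev *(*probe)(struct net_device *);
            struct cnic_eth_dev *ethdev = NULL;

            probe = symbol_get(bnx2_cnic_probe);    /* takes a ref on bnx2 */
            if (probe) {
                    ethdev = probe(dev);
                    symbol_put_addr(probe);         /* drops that ref */
            }
            return ethdev;
    }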
+2
drivers/net/cnic_if.h
··· 296 297 extern int cnic_unregister_driver(int ulp_type); 298 299 #endif
··· 296 297 extern int cnic_unregister_driver(int ulp_type); 298 299 + extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev); 300 + 301 #endif
+12 -1
drivers/scsi/Kconfig
··· 258 or async on the kernel's command line. 259 260 config SCSI_WAIT_SCAN 261 - tristate 262 default m 263 depends on SCSI 264 depends on MODULES 265 266 menu "SCSI Transports" 267 depends on SCSI
··· 258 or async on the kernel's command line. 259 260 config SCSI_WAIT_SCAN 261 + tristate # No prompt here, this is an invisible symbol. 262 default m 263 depends on SCSI 264 depends on MODULES 265 + # scsi_wait_scan is a loadable module which waits until all the async scans are 266 + # complete. The idea is to use it in initrd/ initramfs scripts. You modprobe 267 + # it after all the modprobes of the root SCSI drivers and it will wait until 268 + # they have all finished scanning their buses before allowing the boot to 269 + # proceed. (This method is not applicable if targets boot independently in 270 + # parallel with the initiator, or with transports with non-deterministic target 271 + # discovery schemes, or if a transport driver does not support scsi_wait_scan.) 272 + # 273 + # This symbol is not exposed as a prompt because little is to be gained by 274 + # disabling it, whereas people who accidentally switch it off may wonder why 275 + # their mkinitrd gets into trouble. 276 277 menu "SCSI Transports" 278 depends on SCSI
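For reference, the module body that the new comment describes is tiny; a hedged sketch from memory of drivers/scsi/scsi_wait_scan.c at the time, where all the heavy lifting is in scsi_complete_async_scans():

    #include <linux/module.h>
    #include <linux/device.h>
    #include "scsi_priv.h"  /* declares scsi_complete_async_scans() */

    /* Loading the module blocks here until all async scans have finished. */
    static int __init wait_scan_init(void)
    {
            scsi_complete_async_scans();
            return 0;
    }

    static void __exit wait_scan_exit(void)
    {
    }

    MODULE_DESCRIPTION("SCSI wait until async scans are completed");
    MODULE_LICENSE("GPL");

    module_init(wait_scan_init);
    module_exit(wait_scan_exit);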
+2
drivers/scsi/bnx2i/Kconfig
··· 1 config SCSI_BNX2_ISCSI 2 tristate "Broadcom NetXtreme II iSCSI support" 3 select SCSI_ISCSI_ATTRS 4 select CNIC 5 depends on PCI 6 ---help---
··· 1 config SCSI_BNX2_ISCSI 2 tristate "Broadcom NetXtreme II iSCSI support" 3 select SCSI_ISCSI_ATTRS 4 + select NETDEVICES 5 + select NETDEV_1000 6 select CNIC 7 depends on PCI 8 ---help---
+69 -21
drivers/scsi/cxgb3i/cxgb3i_ddp.c
··· 206 return DDP_PGIDX_MAX; 207 } 208 209 static inline void ddp_gl_unmap(struct pci_dev *pdev, 210 struct cxgb3i_gather_list *gl) 211 { ··· 623 * release all the resource held by the ddp pagepod manager for a given 624 * adapter if needed 625 */ 626 void cxgb3i_ddp_cleanup(struct t3cdev *tdev) 627 { 628 - int i = 0; 629 struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi; 630 631 ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp); 632 - 633 - if (ddp) { 634 - tdev->ulp_iscsi = NULL; 635 - while (i < ddp->nppods) { 636 - struct cxgb3i_gather_list *gl = ddp->gl_map[i]; 637 - if (gl) { 638 - int npods = (gl->nelem + PPOD_PAGES_MAX - 1) 639 - >> PPOD_PAGES_SHIFT; 640 - ddp_log_info("t3dev 0x%p, ddp %d + %d.\n", 641 - tdev, i, npods); 642 - kfree(gl); 643 - ddp_free_gl_skb(ddp, i, npods); 644 - i += npods; 645 - } else 646 - i++; 647 - } 648 - cxgb3i_free_big_mem(ddp); 649 - } 650 } 651 652 /** ··· 666 */ 667 static void ddp_init(struct t3cdev *tdev) 668 { 669 - struct cxgb3i_ddp_info *ddp; 670 struct ulp_iscsi_info uinfo; 671 unsigned int ppmax, bits; 672 int i, err; 673 674 - if (tdev->ulp_iscsi) { 675 ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n", 676 tdev, tdev->ulp_iscsi); 677 return; ··· 706 ppmax * 707 sizeof(struct cxgb3i_gather_list *)); 708 spin_lock_init(&ddp->map_lock); 709 710 ddp->tdev = tdev; 711 ddp->pdev = uinfo.pdev; ··· 752 { 753 if (page_idx == DDP_PGIDX_MAX) { 754 page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); 755 ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n", 756 PAGE_SIZE, page_idx); 757 }
··· 206 return DDP_PGIDX_MAX; 207 } 208 209 + /** 210 + * cxgb3i_ddp_adjust_page_table - adjust page table with PAGE_SIZE 211 + * return the ddp page index, if no match is found return DDP_PGIDX_MAX. 212 + */ 213 + int cxgb3i_ddp_adjust_page_table(void) 214 + { 215 + int i; 216 + unsigned int base_order, order; 217 + 218 + if (PAGE_SIZE < (1UL << ddp_page_shift[0])) { 219 + ddp_log_info("PAGE_SIZE 0x%lx too small, min. 0x%lx.\n", 220 + PAGE_SIZE, 1UL << ddp_page_shift[0]); 221 + return -EINVAL; 222 + } 223 + 224 + base_order = get_order(1UL << ddp_page_shift[0]); 225 + order = get_order(1 << PAGE_SHIFT); 226 + for (i = 0; i < DDP_PGIDX_MAX; i++) { 227 + /* first is the kernel page size, then just doubling the size */ 228 + ddp_page_order[i] = order - base_order + i; 229 + ddp_page_shift[i] = PAGE_SHIFT + i; 230 + } 231 + return 0; 232 + } 233 + 234 static inline void ddp_gl_unmap(struct pci_dev *pdev, 235 struct cxgb3i_gather_list *gl) 236 { ··· 598 * release all the resource held by the ddp pagepod manager for a given 599 * adapter if needed 600 */ 601 + 602 + static void ddp_cleanup(struct kref *kref) 603 + { 604 + struct cxgb3i_ddp_info *ddp = container_of(kref, 605 + struct cxgb3i_ddp_info, 606 + refcnt); 607 + int i = 0; 608 + 609 + ddp_log_info("kref release ddp 0x%p, t3dev 0x%p.\n", ddp, ddp->tdev); 610 + 611 + ddp->tdev->ulp_iscsi = NULL; 612 + while (i < ddp->nppods) { 613 + struct cxgb3i_gather_list *gl = ddp->gl_map[i]; 614 + if (gl) { 615 + int npods = (gl->nelem + PPOD_PAGES_MAX - 1) 616 + >> PPOD_PAGES_SHIFT; 617 + ddp_log_info("t3dev 0x%p, ddp %d + %d.\n", 618 + ddp->tdev, i, npods); 619 + kfree(gl); 620 + ddp_free_gl_skb(ddp, i, npods); 621 + i += npods; 622 + } else 623 + i++; 624 + } 625 + cxgb3i_free_big_mem(ddp); 626 + } 627 + 628 void cxgb3i_ddp_cleanup(struct t3cdev *tdev) 629 { 630 struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi; 631 632 ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp); 633 + if (ddp) 634 + kref_put(&ddp->refcnt, ddp_cleanup); 635 } 636 637 /** ··· 631 */ 632 static void ddp_init(struct t3cdev *tdev) 633 { 634 + struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi; 635 struct ulp_iscsi_info uinfo; 636 unsigned int ppmax, bits; 637 int i, err; 638 639 + if (ddp) { 640 + kref_get(&ddp->refcnt); 641 ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n", 642 tdev, tdev->ulp_iscsi); 643 return; ··· 670 ppmax * 671 sizeof(struct cxgb3i_gather_list *)); 672 spin_lock_init(&ddp->map_lock); 673 + kref_init(&ddp->refcnt); 674 675 ddp->tdev = tdev; 676 ddp->pdev = uinfo.pdev; ··· 715 { 716 if (page_idx == DDP_PGIDX_MAX) { 717 page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); 718 + 719 + if (page_idx == DDP_PGIDX_MAX) { 720 + ddp_log_info("system PAGE_SIZE %lu, update hw.\n", 721 + PAGE_SIZE); 722 + if (cxgb3i_ddp_adjust_page_table() < 0) { 723 + ddp_log_info("PAGE_SIZE %lu, ddp disabled.\n", 724 + PAGE_SIZE); 725 + return; 726 + } 727 + page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); 728 + } 729 ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n", 730 PAGE_SIZE, page_idx); 731 }
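The cxgb3i change converts open-coded teardown into the standard kref idiom: kref_init() starts the count at 1 in ddp_init(), a repeat ddp_init() on the same t3cdev just kref_get()s, and cxgb3i_ddp_cleanup() kref_put()s so the release callback runs only when the last user is gone. A generic hedged sketch of that idiom, with example_obj and friends made up:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct example_obj {            /* made-up refcounted object */
            struct kref refcnt;
            /* ... payload ... */
    };

    static void example_release(struct kref *kref)
    {
            struct example_obj *obj =
                    container_of(kref, struct example_obj, refcnt);

            kfree(obj);     /* runs only when the last reference drops */
    }

    static struct example_obj *example_create(void)
    {
            struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            if (obj)
                    kref_init(&obj->refcnt);        /* count starts at 1 */
            return obj;
    }

    static void example_put(struct example_obj *obj)
    {
            kref_put(&obj->refcnt, example_release);
    }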
+2
drivers/scsi/cxgb3i/cxgb3i_ddp.h
··· 54 * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload 55 * 56 * @list: list head to link elements 57 * @tdev: pointer to t3cdev used by cxgb3 driver 58 * @max_txsz: max tx packet size for ddp 59 * @max_rxsz: max rx packet size for ddp ··· 71 */ 72 struct cxgb3i_ddp_info { 73 struct list_head list; 74 struct t3cdev *tdev; 75 struct pci_dev *pdev; 76 unsigned int max_txsz;
··· 54 * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload 55 * 56 * @list: list head to link elements 57 + * @refcnt: ref. count 58 * @tdev: pointer to t3cdev used by cxgb3 driver 59 * @max_txsz: max tx packet size for ddp 60 * @max_rxsz: max rx packet size for ddp ··· 70 */ 71 struct cxgb3i_ddp_info { 72 struct list_head list; 73 + struct kref refcnt; 74 struct t3cdev *tdev; 75 struct pci_dev *pdev; 76 unsigned int max_txsz;
+53 -55
drivers/scsi/fcoe/fcoe.c
··· 45 46 #include "fcoe.h" 47 48 - static int debug_fcoe; 49 - 50 MODULE_AUTHOR("Open-FCoE.org"); 51 MODULE_DESCRIPTION("FCoE"); 52 MODULE_LICENSE("GPL v2"); ··· 303 #ifdef NETIF_F_FCOE_CRC 304 if (netdev->features & NETIF_F_FCOE_CRC) { 305 lp->crc_offload = 1; 306 - printk(KERN_DEBUG "fcoe:%s supports FCCRC offload\n", 307 - netdev->name); 308 } 309 #endif 310 #ifdef NETIF_F_FSO 311 if (netdev->features & NETIF_F_FSO) { 312 lp->seq_offload = 1; 313 lp->lso_max = netdev->gso_max_size; 314 - printk(KERN_DEBUG "fcoe:%s supports LSO for max len 0x%x\n", 315 - netdev->name, lp->lso_max); 316 } 317 #endif 318 if (netdev->fcoe_ddp_xid) { 319 lp->lro_enabled = 1; 320 lp->lro_xid = netdev->fcoe_ddp_xid; 321 - printk(KERN_DEBUG "fcoe:%s supports LRO for max xid 0x%x\n", 322 - netdev->name, lp->lro_xid); 323 } 324 skb_queue_head_init(&fc->fcoe_pending_queue); 325 fc->fcoe_pending_queue_active = 0; ··· 404 /* add the new host to the SCSI-ml */ 405 rc = scsi_add_host(lp->host, dev); 406 if (rc) { 407 - FC_DBG("fcoe_shost_config:error on scsi_add_host\n"); 408 return rc; 409 } 410 sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", ··· 446 447 BUG_ON(!netdev); 448 449 - printk(KERN_DEBUG "fcoe_if_destroy:interface on %s\n", 450 - netdev->name); 451 452 lp = fcoe_hostlist_lookup(netdev); 453 if (!lp) ··· 557 558 BUG_ON(!netdev); 559 560 - printk(KERN_DEBUG "fcoe_if_create:interface on %s\n", 561 - netdev->name); 562 563 lp = fcoe_hostlist_lookup(netdev); 564 if (lp) ··· 566 shost = libfc_host_alloc(&fcoe_shost_template, 567 sizeof(struct fcoe_softc)); 568 if (!shost) { 569 - FC_DBG("Could not allocate host structure\n"); 570 return -ENOMEM; 571 } 572 lp = shost_priv(shost); ··· 575 /* configure fc_lport, e.g., em */ 576 rc = fcoe_lport_config(lp); 577 if (rc) { 578 - FC_DBG("Could not configure lport\n"); 579 goto out_host_put; 580 } 581 ··· 590 /* configure lport network properties */ 591 rc = fcoe_netdev_config(lp, netdev); 592 if (rc) { 593 - FC_DBG("Could not configure netdev for the interface\n"); 594 goto out_netdev_cleanup; 595 } 596 597 /* configure lport scsi host properties */ 598 rc = fcoe_shost_config(lp, shost, &netdev->dev); 599 if (rc) { 600 - FC_DBG("Could not configure shost for lport\n"); 601 goto out_netdev_cleanup; 602 } 603 604 /* lport exch manager allocation */ 605 rc = fcoe_em_config(lp); 606 if (rc) { 607 - FC_DBG("Could not configure em for lport\n"); 608 goto out_netdev_cleanup; 609 } 610 611 /* Initialize the library */ 612 rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ); 613 if (rc) { 614 - FC_DBG("Could not configure libfc for lport!\n"); 615 goto out_lp_destroy; 616 } 617 ··· 654 fc_attach_transport(&fcoe_transport_function); 655 656 if (!scsi_transport_fcoe_sw) { 657 - printk(KERN_ERR "fcoe_init:fc_attach_transport() failed\n"); 658 return -ENODEV; 659 } 660 ··· 715 unsigned targ_cpu = smp_processor_id(); 716 #endif /* CONFIG_SMP */ 717 718 - printk(KERN_DEBUG "fcoe: Destroying receive thread for CPU %d\n", cpu); 719 720 /* Prevent any new skbs from being queued for this CPU. 
*/ 721 p = &per_cpu(fcoe_percpu, cpu); ··· 737 p0 = &per_cpu(fcoe_percpu, targ_cpu); 738 spin_lock_bh(&p0->fcoe_rx_list.lock); 739 if (p0->thread) { 740 - FC_DBG("Moving frames from CPU %d to CPU %d\n", 741 - cpu, targ_cpu); 742 743 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) 744 __skb_queue_tail(&p0->fcoe_rx_list, skb); ··· 804 switch (action) { 805 case CPU_ONLINE: 806 case CPU_ONLINE_FROZEN: 807 - FC_DBG("CPU %x online: Create Rx thread\n", cpu); 808 fcoe_percpu_thread_create(cpu); 809 break; 810 case CPU_DEAD: 811 case CPU_DEAD_FROZEN: 812 - FC_DBG("CPU %x offline: Remove Rx thread\n", cpu); 813 fcoe_percpu_thread_destroy(cpu); 814 break; 815 default: ··· 847 fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type); 848 lp = fc->ctlr.lp; 849 if (unlikely(lp == NULL)) { 850 - FC_DBG("cannot find hba structure"); 851 goto err2; 852 } 853 if (!lp->link_up) 854 goto err2; 855 856 - if (unlikely(debug_fcoe)) { 857 - FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p " 858 - "end:%p sum:%d dev:%s", skb->len, skb->data_len, 859 - skb->head, skb->data, skb_tail_pointer(skb), 860 - skb_end_pointer(skb), skb->csum, 861 - skb->dev ? skb->dev->name : "<NULL>"); 862 - 863 - } 864 865 /* check for FCOE packet type */ 866 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { 867 - FC_DBG("wrong FC type frame"); 868 goto err; 869 } 870 ··· 899 * the first CPU now. For non-SMP systems this 900 * will check the same CPU twice. 901 */ 902 - FC_DBG("CPU is online, but no receive thread ready " 903 - "for incoming skb- using first online CPU.\n"); 904 905 spin_unlock_bh(&fps->fcoe_rx_list.lock); 906 cpu = first_cpu(cpu_online_map); ··· 1200 fr = fcoe_dev_from_skb(skb); 1201 lp = fr->fr_dev; 1202 if (unlikely(lp == NULL)) { 1203 - FC_DBG("invalid HBA Structure"); 1204 kfree_skb(skb); 1205 continue; 1206 } 1207 1208 - if (unlikely(debug_fcoe)) { 1209 - FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p " 1210 - "tail:%p end:%p sum:%d dev:%s", 1211 - skb->len, skb->data_len, 1212 - skb->head, skb->data, skb_tail_pointer(skb), 1213 - skb_end_pointer(skb), skb->csum, 1214 - skb->dev ? skb->dev->name : "<NULL>"); 1215 - } 1216 1217 /* 1218 * Save source MAC address before discarding header. 
··· 1230 stats = fc_lport_get_stats(lp); 1231 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { 1232 if (stats->ErrorFrames < 5) 1233 - printk(KERN_WARNING "FCoE version " 1234 "mismatch: The frame has " 1235 "version %x, but the " 1236 "initiator supports version " ··· 1283 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { 1284 if (le32_to_cpu(fr_crc(fp)) != 1285 ~crc32(~0, skb->data, fr_len)) { 1286 - if (debug_fcoe || stats->InvalidCRCCount < 5) 1287 printk(KERN_WARNING "fcoe: dropping " 1288 "frame with CRC error\n"); 1289 stats->InvalidCRCCount++; ··· 1429 case NETDEV_REGISTER: 1430 break; 1431 default: 1432 - FC_DBG("Unknown event %ld from netdev netlink\n", event); 1433 } 1434 if (link_possible && !fcoe_link_ok(lp)) 1435 fcoe_ctlr_link_up(&fc->ctlr); ··· 1503 1504 owner = fcoe_netdev_to_module_owner(netdev); 1505 if (owner) { 1506 - printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n", 1507 - module_name(owner), netdev->name); 1508 return try_module_get(owner); 1509 } 1510 return -ENODEV; ··· 1525 1526 owner = fcoe_netdev_to_module_owner(netdev); 1527 if (owner) { 1528 - printk(KERN_DEBUG "fcoe:release driver module %s for %s\n", 1529 - module_name(owner), netdev->name); 1530 module_put(owner); 1531 return 0; 1532 } ··· 1557 } 1558 rc = fcoe_if_destroy(netdev); 1559 if (rc) { 1560 - printk(KERN_ERR "fcoe: fcoe_if_destroy(%s) failed\n", 1561 netdev->name); 1562 rc = -EIO; 1563 goto out_putdev; ··· 1596 1597 rc = fcoe_if_create(netdev); 1598 if (rc) { 1599 - printk(KERN_ERR "fcoe: fcoe_if_create(%s) failed\n", 1600 netdev->name); 1601 fcoe_ethdrv_put(netdev); 1602 rc = -EIO;
··· 45 46 #include "fcoe.h" 47 48 MODULE_AUTHOR("Open-FCoE.org"); 49 MODULE_DESCRIPTION("FCoE"); 50 MODULE_LICENSE("GPL v2"); ··· 305 #ifdef NETIF_F_FCOE_CRC 306 if (netdev->features & NETIF_F_FCOE_CRC) { 307 lp->crc_offload = 1; 308 + FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n"); 309 } 310 #endif 311 #ifdef NETIF_F_FSO 312 if (netdev->features & NETIF_F_FSO) { 313 lp->seq_offload = 1; 314 lp->lso_max = netdev->gso_max_size; 315 + FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n", 316 + lp->lso_max); 317 } 318 #endif 319 if (netdev->fcoe_ddp_xid) { 320 lp->lro_enabled = 1; 321 lp->lro_xid = netdev->fcoe_ddp_xid; 322 + FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n", 323 + lp->lro_xid); 324 } 325 skb_queue_head_init(&fc->fcoe_pending_queue); 326 fc->fcoe_pending_queue_active = 0; ··· 407 /* add the new host to the SCSI-ml */ 408 rc = scsi_add_host(lp->host, dev); 409 if (rc) { 410 + FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: " 411 + "error on scsi_add_host\n"); 412 return rc; 413 } 414 sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", ··· 448 449 BUG_ON(!netdev); 450 451 + FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); 452 453 lp = fcoe_hostlist_lookup(netdev); 454 if (!lp) ··· 560 561 BUG_ON(!netdev); 562 563 + FCOE_NETDEV_DBG(netdev, "Create Interface\n"); 564 565 lp = fcoe_hostlist_lookup(netdev); 566 if (lp) ··· 570 shost = libfc_host_alloc(&fcoe_shost_template, 571 sizeof(struct fcoe_softc)); 572 if (!shost) { 573 + FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); 574 return -ENOMEM; 575 } 576 lp = shost_priv(shost); ··· 579 /* configure fc_lport, e.g., em */ 580 rc = fcoe_lport_config(lp); 581 if (rc) { 582 + FCOE_NETDEV_DBG(netdev, "Could not configure lport for the " 583 + "interface\n"); 584 goto out_host_put; 585 } 586 ··· 593 /* configure lport network properties */ 594 rc = fcoe_netdev_config(lp, netdev); 595 if (rc) { 596 + FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the " 597 + "interface\n"); 598 goto out_netdev_cleanup; 599 } 600 601 /* configure lport scsi host properties */ 602 rc = fcoe_shost_config(lp, shost, &netdev->dev); 603 if (rc) { 604 + FCOE_NETDEV_DBG(netdev, "Could not configure shost for the " 605 + "interface\n"); 606 goto out_netdev_cleanup; 607 } 608 609 /* lport exch manager allocation */ 610 rc = fcoe_em_config(lp); 611 if (rc) { 612 + FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the " 613 + "interface\n"); 614 goto out_netdev_cleanup; 615 } 616 617 /* Initialize the library */ 618 rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ); 619 if (rc) { 620 + FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the " 621 + "interface\n"); 622 goto out_lp_destroy; 623 } 624 ··· 653 fc_attach_transport(&fcoe_transport_function); 654 655 if (!scsi_transport_fcoe_sw) { 656 + printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); 657 return -ENODEV; 658 } 659 ··· 714 unsigned targ_cpu = smp_processor_id(); 715 #endif /* CONFIG_SMP */ 716 717 + FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); 718 719 /* Prevent any new skbs from being queued for this CPU. 
*/ 720 p = &per_cpu(fcoe_percpu, cpu); ··· 736 p0 = &per_cpu(fcoe_percpu, targ_cpu); 737 spin_lock_bh(&p0->fcoe_rx_list.lock); 738 if (p0->thread) { 739 + FCOE_DBG("Moving frames from CPU %d to CPU %d\n", 740 + cpu, targ_cpu); 741 742 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) 743 __skb_queue_tail(&p0->fcoe_rx_list, skb); ··· 803 switch (action) { 804 case CPU_ONLINE: 805 case CPU_ONLINE_FROZEN: 806 + FCOE_DBG("CPU %x online: Create Rx thread\n", cpu); 807 fcoe_percpu_thread_create(cpu); 808 break; 809 case CPU_DEAD: 810 case CPU_DEAD_FROZEN: 811 + FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu); 812 fcoe_percpu_thread_destroy(cpu); 813 break; 814 default: ··· 846 fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type); 847 lp = fc->ctlr.lp; 848 if (unlikely(lp == NULL)) { 849 + FCOE_NETDEV_DBG(dev, "Cannot find hba structure"); 850 goto err2; 851 } 852 if (!lp->link_up) 853 goto err2; 854 855 + FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p " 856 + "data:%p tail:%p end:%p sum:%d dev:%s", 857 + skb->len, skb->data_len, skb->head, skb->data, 858 + skb_tail_pointer(skb), skb_end_pointer(skb), 859 + skb->csum, skb->dev ? skb->dev->name : "<NULL>"); 860 861 /* check for FCOE packet type */ 862 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { 863 + FCOE_NETDEV_DBG(dev, "Wrong FC type frame"); 864 goto err; 865 } 866 ··· 901 * the first CPU now. For non-SMP systems this 902 * will check the same CPU twice. 903 */ 904 + FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread " 905 + "ready for incoming skb- using first online " 906 + "CPU.\n"); 907 908 spin_unlock_bh(&fps->fcoe_rx_list.lock); 909 cpu = first_cpu(cpu_online_map); ··· 1201 fr = fcoe_dev_from_skb(skb); 1202 lp = fr->fr_dev; 1203 if (unlikely(lp == NULL)) { 1204 + FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure"); 1205 kfree_skb(skb); 1206 continue; 1207 } 1208 1209 + FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " 1210 + "head:%p data:%p tail:%p end:%p sum:%d dev:%s", 1211 + skb->len, skb->data_len, 1212 + skb->head, skb->data, skb_tail_pointer(skb), 1213 + skb_end_pointer(skb), skb->csum, 1214 + skb->dev ? skb->dev->name : "<NULL>"); 1215 1216 /* 1217 * Save source MAC address before discarding header. 
··· 1233 stats = fc_lport_get_stats(lp); 1234 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { 1235 if (stats->ErrorFrames < 5) 1236 + printk(KERN_WARNING "fcoe: FCoE version " 1237 "mismatch: The frame has " 1238 "version %x, but the " 1239 "initiator supports version " ··· 1286 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { 1287 if (le32_to_cpu(fr_crc(fp)) != 1288 ~crc32(~0, skb->data, fr_len)) { 1289 + if (stats->InvalidCRCCount < 5) 1290 printk(KERN_WARNING "fcoe: dropping " 1291 "frame with CRC error\n"); 1292 stats->InvalidCRCCount++; ··· 1432 case NETDEV_REGISTER: 1433 break; 1434 default: 1435 + FCOE_NETDEV_DBG(real_dev, "Unknown event %ld " 1436 + "from netdev netlink\n", event); 1437 } 1438 if (link_possible && !fcoe_link_ok(lp)) 1439 fcoe_ctlr_link_up(&fc->ctlr); ··· 1505 1506 owner = fcoe_netdev_to_module_owner(netdev); 1507 if (owner) { 1508 + FCOE_NETDEV_DBG(netdev, "Hold driver module %s\n", 1509 + module_name(owner)); 1510 return try_module_get(owner); 1511 } 1512 return -ENODEV; ··· 1527 1528 owner = fcoe_netdev_to_module_owner(netdev); 1529 if (owner) { 1530 + FCOE_NETDEV_DBG(netdev, "Release driver module %s\n", 1531 + module_name(owner)); 1532 module_put(owner); 1533 return 0; 1534 } ··· 1559 } 1560 rc = fcoe_if_destroy(netdev); 1561 if (rc) { 1562 + printk(KERN_ERR "fcoe: Failed to destroy interface (%s)\n", 1563 netdev->name); 1564 rc = -EIO; 1565 goto out_putdev; ··· 1598 1599 rc = fcoe_if_create(netdev); 1600 if (rc) { 1601 + printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", 1602 netdev->name); 1603 fcoe_ethdrv_put(netdev); 1604 rc = -EIO;
+24
drivers/scsi/fcoe/fcoe.h
··· 40 #define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */ 41 #define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */ 42 43 /* 44 * this percpu struct for fcoe 45 */
··· 40 #define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */ 41 #define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */ 42 43 + unsigned int fcoe_debug_logging; 44 + module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); 45 + MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); 46 + 47 + #define FCOE_LOGGING 0x01 /* General logging, not categorized */ 48 + #define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */ 49 + 50 + #define FCOE_CHECK_LOGGING(LEVEL, CMD) \ 51 + do { \ 52 + if (unlikely(fcoe_debug_logging & LEVEL)) \ 53 + do { \ 54 + CMD; \ 55 + } while (0); \ 56 + } while (0); 57 + 58 + #define FCOE_DBG(fmt, args...) \ 59 + FCOE_CHECK_LOGGING(FCOE_LOGGING, \ 60 + printk(KERN_INFO "fcoe: " fmt, ##args);) 61 + 62 + #define FCOE_NETDEV_DBG(netdev, fmt, args...) \ 63 + FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \ 64 + printk(KERN_INFO "fcoe: %s" fmt, \ 65 + netdev->name, ##args);) 66 + 67 /* 68 * this percpu struct for fcoe 69 */
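Usage is then a one-liner per call site; a hedged sketch follows, where fcoe_example_event() and the messages are made up. Because the parameter is registered with S_IRUGO|S_IWUSR, the mask should also be adjustable at runtime via the usual module-parameter sysfs path (/sys/module/fcoe/parameters/debug_logging).

    /* Hedged usage sketch for the macros above; names are made up. */
    static void fcoe_example_event(struct net_device *netdev, u16 xid)
    {
            FCOE_DBG("general event, xid 0x%x\n", xid);               /* bit 0x01 */
            FCOE_NETDEV_DBG(netdev, "netdev event, xid 0x%x\n", xid); /* bit 0x02 */
    }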
+54 -40
drivers/scsi/fcoe/libfcoe.c
··· 56 57 static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; 58 59 - static u32 fcoe_ctlr_debug; /* 1 for basic, 2 for noisy debug */ 60 61 - #define FIP_DBG_LVL(level, fmt, args...) \ 62 do { \ 63 - if (fcoe_ctlr_debug >= (level)) \ 64 - FC_DBG(fmt, ##args); \ 65 - } while (0) 66 67 - #define FIP_DBG(fmt, args...) FIP_DBG_LVL(1, fmt, ##args) 68 69 /* 70 * Return non-zero if FCF fcoe_size has been validated. ··· 256 fip->last_link = 1; 257 fip->link = 1; 258 spin_unlock_bh(&fip->lock); 259 - FIP_DBG("%s", "setting AUTO mode.\n"); 260 fc_linkup(fip->lp); 261 fcoe_ctlr_solicit(fip, NULL); 262 } else ··· 627 ((struct fip_mac_desc *)desc)->fd_mac, 628 ETH_ALEN); 629 if (!is_valid_ether_addr(fcf->fcf_mac)) { 630 - FIP_DBG("invalid MAC addr in FIP adv\n"); 631 return -EINVAL; 632 } 633 break; ··· 661 case FIP_DT_LOGO: 662 case FIP_DT_ELP: 663 default: 664 - FIP_DBG("unexpected descriptor type %x in FIP adv\n", 665 - desc->fip_dtype); 666 /* standard says ignore unknown descriptors >= 128 */ 667 if (desc->fip_dtype < FIP_DT_VENDOR_BASE) 668 return -EINVAL; ··· 678 return 0; 679 680 len_err: 681 - FIP_DBG("FIP length error in descriptor type %x len %zu\n", 682 - desc->fip_dtype, dlen); 683 return -EINVAL; 684 } 685 ··· 742 } 743 mtu_valid = fcoe_ctlr_mtu_valid(fcf); 744 fcf->time = jiffies; 745 - FIP_DBG_LVL(found ? 2 : 1, "%s FCF for fab %llx map %x val %d\n", 746 - found ? "old" : "new", 747 - fcf->fabric_name, fcf->fc_map, mtu_valid); 748 749 /* 750 * If this advertisement is not solicited and our max receive size ··· 822 ((struct fip_mac_desc *)desc)->fd_mac, 823 ETH_ALEN); 824 if (!is_valid_ether_addr(granted_mac)) { 825 - FIP_DBG("invalid MAC addrs in FIP ELS\n"); 826 goto drop; 827 } 828 break; ··· 841 els_dtype = desc->fip_dtype; 842 break; 843 default: 844 - FIP_DBG("unexpected descriptor type %x " 845 - "in FIP adv\n", desc->fip_dtype); 846 /* standard says ignore unknown descriptors >= 128 */ 847 if (desc->fip_dtype < FIP_DT_VENDOR_BASE) 848 goto drop; ··· 883 return; 884 885 len_err: 886 - FIP_DBG("FIP length error in descriptor type %x len %zu\n", 887 - desc->fip_dtype, dlen); 888 drop: 889 kfree_skb(skb); 890 } ··· 910 struct fc_lport *lp = fip->lp; 911 u32 desc_mask; 912 913 - FIP_DBG("Clear Virtual Link received\n"); 914 if (!fcf) 915 return; 916 if (!fcf || !fc_host_port_id(lp->host)) ··· 968 * reset only if all required descriptors were present and valid. 969 */ 970 if (desc_mask) { 971 - FIP_DBG("missing descriptors mask %x\n", desc_mask); 972 } else { 973 - FIP_DBG("performing Clear Virtual Link\n"); 974 fcoe_ctlr_reset(fip, FIP_ST_ENABLED); 975 } 976 } ··· 1018 op = ntohs(fiph->fip_op); 1019 sub = fiph->fip_subcode; 1020 1021 - FIP_DBG_LVL(2, "ver %x op %x/%x dl %x fl %x\n", 1022 - FIP_VER_DECAPS(fiph->fip_ver), op, sub, 1023 - ntohs(fiph->fip_dl_len), ntohs(fiph->fip_flags)); 1024 - 1025 if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) 1026 goto drop; 1027 if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) ··· 1029 fip->map_dest = 0; 1030 fip->state = FIP_ST_ENABLED; 1031 state = FIP_ST_ENABLED; 1032 - FIP_DBG("using FIP mode\n"); 1033 } 1034 spin_unlock_bh(&fip->lock); 1035 if (state != FIP_ST_ENABLED) ··· 1064 struct fcoe_fcf *best = NULL; 1065 1066 list_for_each_entry(fcf, &fip->fcfs, list) { 1067 - FIP_DBG("consider FCF for fab %llx VFID %d map %x val %d\n", 1068 - fcf->fabric_name, fcf->vfid, 1069 - fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); 1070 if (!fcoe_ctlr_fcf_usable(fcf)) { 1071 - FIP_DBG("FCF for fab %llx map %x %svalid %savailable\n", 1072 - fcf->fabric_name, fcf->fc_map, 1073 - (fcf->flags & FIP_FL_SOL) ? "" : "in", 1074 - (fcf->flags & FIP_FL_AVAIL) ? "" : "un"); 1075 continue; 1076 } 1077 if (!best) { ··· 1082 if (fcf->fabric_name != best->fabric_name || 1083 fcf->vfid != best->vfid || 1084 fcf->fc_map != best->fc_map) { 1085 - FIP_DBG("conflicting fabric, VFID, or FC-MAP\n"); 1086 return; 1087 } 1088 if (fcf->pri < best->pri) ··· 1127 if (sel != fcf) { 1128 fcf = sel; /* the old FCF may have been freed */ 1129 if (sel) { 1130 - printk(KERN_INFO "host%d: FIP selected " 1131 "Fibre-Channel Forwarder MAC %s\n", 1132 fip->lp->host->host_no, 1133 print_mac(buf, sel->fcf_mac)); ··· 1137 fip->ctlr_ka_time = jiffies + sel->fka_period; 1138 fip->link = 1; 1139 } else { 1140 - printk(KERN_NOTICE "host%d: " 1141 "FIP Fibre-Channel Forwarder timed out. " 1142 "Starting FCF discovery.\n", 1143 fip->lp->host->host_no); ··· 1261 return -EINVAL; 1262 } 1263 fip->state = FIP_ST_NON_FIP; 1264 - FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n"); 1265 1266 /* 1267 * FLOGI accepted. ··· 1290 memcpy(fip->dest_addr, sa, ETH_ALEN); 1291 fip->map_dest = 0; 1292 if (fip->state == FIP_ST_NON_FIP) 1293 - FIP_DBG("received FLOGI REQ, " 1294 "using non-FIP mode\n"); 1295 fip->state = FIP_ST_NON_FIP; 1296 }
··· 56 57 static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; 58 59 + unsigned int libfcoe_debug_logging; 60 + module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); 61 + MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); 62 63 + #define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ 64 + #define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ 65 66 + #define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ 67 + do { \ 68 + if (unlikely(libfcoe_debug_logging & LEVEL)) \ 69 do { \ 70 + CMD; \ 71 + } while (0); \ 72 + } while (0); 73 74 + #define LIBFCOE_DBG(fmt, args...) \ 75 + LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ 76 + printk(KERN_INFO "libfcoe: " fmt, ##args);) 77 78 + #define LIBFCOE_FIP_DBG(fmt, args...) \ 79 + LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ 80 + printk(KERN_INFO "fip: " fmt, ##args);) 81 82 /* 83 * Return non-zero if FCF fcoe_size has been validated. ··· 243 fip->last_link = 1; 244 fip->link = 1; 245 spin_unlock_bh(&fip->lock); 246 + LIBFCOE_FIP_DBG("%s", "setting AUTO mode.\n"); 247 fc_linkup(fip->lp); 248 fcoe_ctlr_solicit(fip, NULL); 249 } else ··· 614 ((struct fip_mac_desc *)desc)->fd_mac, 615 ETH_ALEN); 616 if (!is_valid_ether_addr(fcf->fcf_mac)) { 617 + LIBFCOE_FIP_DBG("Invalid MAC address " 618 + "in FIP adv\n"); 619 return -EINVAL; 620 } 621 break; ··· 647 case FIP_DT_LOGO: 648 case FIP_DT_ELP: 649 default: 650 + LIBFCOE_FIP_DBG("unexpected descriptor type %x " 651 + "in FIP adv\n", desc->fip_dtype); 652 /* standard says ignore unknown descriptors >= 128 */ 653 if (desc->fip_dtype < FIP_DT_VENDOR_BASE) 654 return -EINVAL; ··· 664 return 0; 665 666 len_err: 667 + LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n", 668 + desc->fip_dtype, dlen); 669 return -EINVAL; 670 } 671 ··· 728 } 729 mtu_valid = fcoe_ctlr_mtu_valid(fcf); 730 fcf->time = jiffies; 731 + if (!found) { 732 + LIBFCOE_FIP_DBG("New FCF for fab %llx map %x val %d\n", 733 + fcf->fabric_name, fcf->fc_map, mtu_valid); 734 + } 735 736 /* 737 * If this advertisement is not solicited and our max receive size ··· 807 ((struct fip_mac_desc *)desc)->fd_mac, 808 ETH_ALEN); 809 if (!is_valid_ether_addr(granted_mac)) { 810 + LIBFCOE_FIP_DBG("Invalid MAC address " 811 + "in FIP ELS\n"); 812 goto drop; 813 } 814 break; ··· 825 els_dtype = desc->fip_dtype; 826 break; 827 default: 828 + LIBFCOE_FIP_DBG("unexpected descriptor type %x " 829 + "in FIP adv\n", desc->fip_dtype); 830 /* standard says ignore unknown descriptors >= 128 */ 831 if (desc->fip_dtype < FIP_DT_VENDOR_BASE) 832 goto drop; ··· 867 return; 868 869 len_err: 870 + LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n", 871 + desc->fip_dtype, dlen); 872 drop: 873 kfree_skb(skb); 874 } ··· 894 struct fc_lport *lp = fip->lp; 895 u32 desc_mask; 896 897 + LIBFCOE_FIP_DBG("Clear Virtual Link received\n"); 898 if (!fcf) 899 return; 900 if (!fcf || !fc_host_port_id(lp->host)) ··· 952 * reset only if all required descriptors were present and valid. 953 */ 954 if (desc_mask) { 955 + LIBFCOE_FIP_DBG("missing descriptors mask %x\n", desc_mask); 956 } else { 957 + LIBFCOE_FIP_DBG("performing Clear Virtual Link\n"); 958 fcoe_ctlr_reset(fip, FIP_ST_ENABLED); 959 } 960 } ··· 1002 op = ntohs(fiph->fip_op); 1003 sub = fiph->fip_subcode; 1004 1005 if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) 1006 goto drop; 1007 if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) ··· 1017 fip->map_dest = 0; 1018 fip->state = FIP_ST_ENABLED; 1019 state = FIP_ST_ENABLED; 1020 + LIBFCOE_FIP_DBG("Using FIP mode\n"); 1021 } 1022 spin_unlock_bh(&fip->lock); 1023 if (state != FIP_ST_ENABLED) ··· 1052 struct fcoe_fcf *best = NULL; 1053 1054 list_for_each_entry(fcf, &fip->fcfs, list) { 1055 + LIBFCOE_FIP_DBG("consider FCF for fab %llx VFID %d map %x " 1056 + "val %d\n", fcf->fabric_name, fcf->vfid, 1057 + fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); 1058 if (!fcoe_ctlr_fcf_usable(fcf)) { 1059 + LIBFCOE_FIP_DBG("FCF for fab %llx map %x %svalid " 1060 + "%savailable\n", fcf->fabric_name, 1061 + fcf->fc_map, (fcf->flags & FIP_FL_SOL) 1062 + ? "" : "in", (fcf->flags & FIP_FL_AVAIL) 1063 + ? "" : "un"); 1064 continue; 1065 } 1066 if (!best) { ··· 1069 if (fcf->fabric_name != best->fabric_name || 1070 fcf->vfid != best->vfid || 1071 fcf->fc_map != best->fc_map) { 1072 + LIBFCOE_FIP_DBG("Conflicting fabric, VFID, " 1073 + "or FC-MAP\n"); 1074 return; 1075 } 1076 if (fcf->pri < best->pri) ··· 1113 if (sel != fcf) { 1114 fcf = sel; /* the old FCF may have been freed */ 1115 if (sel) { 1116 + printk(KERN_INFO "libfcoe: host%d: FIP selected " 1117 "Fibre-Channel Forwarder MAC %s\n", 1118 fip->lp->host->host_no, 1119 print_mac(buf, sel->fcf_mac)); ··· 1123 fip->ctlr_ka_time = jiffies + sel->fka_period; 1124 fip->link = 1; 1125 } else { 1126 + printk(KERN_NOTICE "libfcoe: host%d: " 1127 "FIP Fibre-Channel Forwarder timed out. " 1128 "Starting FCF discovery.\n", 1129 fip->lp->host->host_no); ··· 1247 return -EINVAL; 1248 } 1249 fip->state = FIP_ST_NON_FIP; 1250 + LIBFCOE_FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n"); 1251 1252 /* 1253 * FLOGI accepted. ··· 1276 memcpy(fip->dest_addr, sa, ETH_ALEN); 1277 fip->map_dest = 0; 1278 if (fip->state == FIP_ST_NON_FIP) 1279 + LIBFCOE_FIP_DBG("received FLOGI REQ, " 1280 "using non-FIP mode\n"); 1281 fip->state = FIP_ST_NON_FIP; 1282 }
+6 -2
drivers/scsi/hosts.c
··· 40 #include "scsi_logging.h" 41 42 43 - static int scsi_host_next_hn; /* host_no for next new host */ 44 45 46 static void scsi_host_cls_release(struct device *dev) ··· 333 334 mutex_init(&shost->scan_mutex); 335 336 - shost->host_no = scsi_host_next_hn++; /* XXX(hch): still racy */ 337 shost->dma_channel = 0xff; 338 339 /* These three are default values which can be overridden */
··· 40 #include "scsi_logging.h" 41 42 43 + static atomic_t scsi_host_next_hn; /* host_no for next new host */ 44 45 46 static void scsi_host_cls_release(struct device *dev) ··· 333 334 mutex_init(&shost->scan_mutex); 335 336 + /* 337 + * subtract one because we increment first then return, but we need to 338 + * know what the next host number was before increment 339 + */ 340 + shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1; 341 shost->dma_channel = 0xff; 342 343 /* These three are default values which can be overridden */
+22 -14
drivers/scsi/ibmvscsi/ibmvfc.c
··· 2254 continue; 2255 if (crq->node_name && tgt->ids.node_name != crq->node_name) 2256 continue; 2257 - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2258 } 2259 - 2260 - ibmvfc_reinit_host(vhost); 2261 break; 2262 case IBMVFC_AE_LINK_DOWN: 2263 case IBMVFC_AE_ADAPTER_FAILED: ··· 2786 2787 spin_lock_irqsave(vhost->host->host_lock, flags); 2788 while (!done) { 2789 - /* Pull all the valid messages off the CRQ */ 2790 - while ((crq = ibmvfc_next_crq(vhost)) != NULL) { 2791 - ibmvfc_handle_crq(crq, vhost); 2792 - crq->valid = 0; 2793 - } 2794 - 2795 /* Pull all the valid messages off the async CRQ */ 2796 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { 2797 ibmvfc_handle_async(async, vhost); 2798 async->valid = 0; 2799 } 2800 2801 - vio_enable_interrupts(vdev); 2802 - if ((crq = ibmvfc_next_crq(vhost)) != NULL) { 2803 - vio_disable_interrupts(vdev); 2804 ibmvfc_handle_crq(crq, vhost); 2805 crq->valid = 0; 2806 - } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { 2807 vio_disable_interrupts(vdev); 2808 ibmvfc_handle_async(async, vhost); 2809 async->valid = 0; 2810 } else 2811 done = 1; 2812 } ··· 2930 break; 2931 case IBMVFC_MAD_FAILED: 2932 default: 2933 - if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 2934 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); 2935 else 2936 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); ··· 3061 return; 3062 3063 kref_get(&tgt->kref); 3064 evt = ibmvfc_get_event(vhost); 3065 vhost->discovery_threads++; 3066 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
··· 2254 continue; 2255 if (crq->node_name && tgt->ids.node_name != crq->node_name) 2256 continue; 2257 + if (tgt->need_login && crq->event == IBMVFC_AE_ELS_LOGO) 2258 + tgt->logo_rcvd = 1; 2259 + if (!tgt->need_login || crq->event == IBMVFC_AE_ELS_PLOGI) { 2260 + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2261 + ibmvfc_reinit_host(vhost); 2262 + } 2263 } 2264 break; 2265 case IBMVFC_AE_LINK_DOWN: 2266 case IBMVFC_AE_ADAPTER_FAILED: ··· 2783 2784 spin_lock_irqsave(vhost->host->host_lock, flags); 2785 while (!done) { 2786 /* Pull all the valid messages off the async CRQ */ 2787 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { 2788 ibmvfc_handle_async(async, vhost); 2789 async->valid = 0; 2790 } 2791 2792 + /* Pull all the valid messages off the CRQ */ 2793 + while ((crq = ibmvfc_next_crq(vhost)) != NULL) { 2794 ibmvfc_handle_crq(crq, vhost); 2795 crq->valid = 0; 2796 + } 2797 + 2798 + vio_enable_interrupts(vdev); 2799 + if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { 2800 vio_disable_interrupts(vdev); 2801 ibmvfc_handle_async(async, vhost); 2802 async->valid = 0; 2803 + } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) { 2804 + vio_disable_interrupts(vdev); 2805 + ibmvfc_handle_crq(crq, vhost); 2806 + crq->valid = 0; 2807 } else 2808 done = 1; 2809 } ··· 2927 break; 2928 case IBMVFC_MAD_FAILED: 2929 default: 2930 + if ((rsp->status & IBMVFC_VIOS_FAILURE) && rsp->error == IBMVFC_PLOGI_REQUIRED) 2931 + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); 2932 + else if (tgt->logo_rcvd) 2933 + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); 2934 + else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 2935 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); 2936 else 2937 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); ··· 3054 return; 3055 3056 kref_get(&tgt->kref); 3057 + tgt->logo_rcvd = 0; 3058 evt = ibmvfc_get_event(vhost); 3059 vhost->discovery_threads++; 3060 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+1
drivers/scsi/ibmvscsi/ibmvfc.h
··· 605 int need_login; 606 int add_rport; 607 int init_retries; 608 u32 cancel_key; 609 struct ibmvfc_service_parms service_parms; 610 struct ibmvfc_service_parms service_parms_change;
··· 605 int need_login; 606 int add_rport; 607 int init_retries; 608 + int logo_rcvd; 609 u32 cancel_key; 610 struct ibmvfc_service_parms service_parms; 611 struct ibmvfc_service_parms service_parms_change;
+117 -21
drivers/scsi/ipr.c
··· 131 }; 132 133 static const struct ipr_chip_t ipr_chip[] = { 134 - { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] }, 135 - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] }, 136 - { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] }, 137 - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] }, 138 - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] }, 139 - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] }, 140 - { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] } 141 }; 142 143 static int ipr_max_bus_speeds [] = { ··· 7367 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 7368 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); 7369 init_waitqueue_head(&ioa_cfg->reset_wait_q); 7370 ioa_cfg->sdt_state = INACTIVE; 7371 if (ipr_enable_cache) 7372 ioa_cfg->cache_state = CACHE_ENABLED; ··· 7399 } 7400 7401 /** 7402 - * ipr_get_chip_cfg - Find adapter chip configuration 7403 * @dev_id: PCI device id struct 7404 * 7405 * Return value: 7406 - * ptr to chip config on success / NULL on failure 7407 **/ 7408 - static const struct ipr_chip_cfg_t * __devinit 7409 - ipr_get_chip_cfg(const struct pci_device_id *dev_id) 7410 { 7411 int i; 7412 7413 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) 7414 if (ipr_chip[i].vendor == dev_id->vendor && 7415 ipr_chip[i].device == dev_id->device) 7416 - return ipr_chip[i].cfg; 7417 return NULL; 7418 } 7419 7420 /** ··· 7525 goto out; 7526 } 7527 7528 - if (!(rc = pci_enable_msi(pdev))) 7529 - dev_info(&pdev->dev, "MSI enabled\n"); 7530 - else if (ipr_debug) 7531 - dev_info(&pdev->dev, "Cannot enable MSI\n"); 7532 - 7533 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); 7534 7535 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); ··· 7540 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, 7541 sata_port_info.flags, &ipr_sata_ops); 7542 7543 - ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id); 7544 7545 - if (!ioa_cfg->chip_cfg) { 7546 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", 7547 dev_id->vendor, dev_id->device); 7548 goto out_scsi_host_put; 7549 } 7550 7551 if (ipr_transop_timeout) 7552 ioa_cfg->transop_timeout = ipr_transop_timeout; ··· 7600 goto cleanup_nomem; 7601 } 7602 7603 /* Save away PCI config space for use following IOA reset */ 7604 rc = pci_save_state(pdev); 7605 ··· 7649 ioa_cfg->ioa_unit_checked = 1; 7650 7651 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 7652 - rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg); 7653 7654 if (rc) { 7655 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", ··· 7678 ipr_free_mem(ioa_cfg); 7679 cleanup_nomem: 7680 iounmap(ipr_regs); 7681 out_release_regions: 7682 pci_release_regions(pdev); 7683 out_scsi_host_put: 7684 scsi_host_put(host); 7685 out_disable: 7686 - pci_disable_msi(pdev); 7687 pci_disable_device(pdev); 7688 goto out; 7689 }
··· 131 }; 132 133 static const struct ipr_chip_t ipr_chip[] = { 134 + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] }, 135 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] }, 136 + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, 137 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, 138 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] }, 139 + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] }, 140 + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] } 141 }; 142 143 static int ipr_max_bus_speeds [] = { ··· 7367 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 7368 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); 7369 init_waitqueue_head(&ioa_cfg->reset_wait_q); 7370 + init_waitqueue_head(&ioa_cfg->msi_wait_q); 7371 ioa_cfg->sdt_state = INACTIVE; 7372 if (ipr_enable_cache) 7373 ioa_cfg->cache_state = CACHE_ENABLED; ··· 7398 } 7399 7400 /** 7401 + * ipr_get_chip_info - Find adapter chip information 7402 * @dev_id: PCI device id struct 7403 * 7404 * Return value: 7405 + * ptr to chip information on success / NULL on failure 7406 **/ 7407 + static const struct ipr_chip_t * __devinit 7408 + ipr_get_chip_info(const struct pci_device_id *dev_id) 7409 { 7410 int i; 7411 7412 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) 7413 if (ipr_chip[i].vendor == dev_id->vendor && 7414 ipr_chip[i].device == dev_id->device) 7415 + return &ipr_chip[i]; 7416 return NULL; 7417 + } 7418 + 7419 + /** 7420 + * ipr_test_intr - Handle the interrupt generated in ipr_test_msi(). 7421 + * @pdev: PCI device struct 7422 + * 7423 + * Description: Simply set the msi_received flag to 1 indicating that 7424 + * Message Signaled Interrupts are supported. 7425 + * 7426 + * Return value: 7427 + * 0 on success / non-zero on failure 7428 + **/ 7429 + static irqreturn_t __devinit ipr_test_intr(int irq, void *devp) 7430 + { 7431 + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; 7432 + unsigned long lock_flags = 0; 7433 + irqreturn_t rc = IRQ_HANDLED; 7434 + 7435 + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 7436 + 7437 + ioa_cfg->msi_received = 1; 7438 + wake_up(&ioa_cfg->msi_wait_q); 7439 + 7440 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 7441 + return rc; 7442 + } 7443 + 7444 + /** 7445 + * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support. 7446 + * @pdev: PCI device struct 7447 + * 7448 + * Description: The return value from pci_enable_msi() can not always be 7449 + * trusted. This routine sets up and initiates a test interrupt to determine 7450 + * if the interrupt is received via the ipr_test_intr() service routine. 7451 + * If the tests fails, the driver will fall back to LSI. 7452 + * 7453 + * Return value: 7454 + * 0 on success / non-zero on failure 7455 + **/ 7456 + static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, 7457 + struct pci_dev *pdev) 7458 + { 7459 + int rc; 7460 + volatile u32 int_reg; 7461 + unsigned long lock_flags = 0; 7462 + 7463 + ENTER; 7464 + 7465 + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 7466 + init_waitqueue_head(&ioa_cfg->msi_wait_q); 7467 + ioa_cfg->msi_received = 0; 7468 + ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 7469 + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg); 7470 + int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7471 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 7472 + 7473 + rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); 7474 + if (rc) { 7475 + dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); 7476 + return rc; 7477 + } else if (ipr_debug) 7478 + dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq); 7479 + 7480 + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg); 7481 + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 7482 + wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); 7483 + ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 7484 + 7485 + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 7486 + if (!ioa_cfg->msi_received) { 7487 + /* MSI test failed */ 7488 + dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n"); 7489 + rc = -EOPNOTSUPP; 7490 + } else if (ipr_debug) 7491 + dev_info(&pdev->dev, "MSI test succeeded.\n"); 7492 + 7493 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 7494 + 7495 + free_irq(pdev->irq, ioa_cfg); 7496 + 7497 + LEAVE; 7498 + 7499 + return rc; 7500 } 7501 7502 /** ··· 7441 goto out; 7442 } 7443 7444 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); 7445 7446 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); ··· 7461 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, 7462 sata_port_info.flags, &ipr_sata_ops); 7463 7464 + ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); 7465 7466 + if (!ioa_cfg->ipr_chip) { 7467 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", 7468 dev_id->vendor, dev_id->device); 7469 goto out_scsi_host_put; 7470 } 7471 + 7472 + ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; 7473 7474 if (ipr_transop_timeout) 7475 ioa_cfg->transop_timeout = ipr_transop_timeout; ··· 7519 goto cleanup_nomem; 7520 } 7521 7522 + /* Enable MSI style interrupts if they are supported. */ 7523 + if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) { 7524 + rc = ipr_test_msi(ioa_cfg, pdev); 7525 + if (rc == -EOPNOTSUPP) 7526 + pci_disable_msi(pdev); 7527 + else if (rc) 7528 + goto out_msi_disable; 7529 + else 7530 + dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq); 7531 + } else if (ipr_debug) 7532 + dev_info(&pdev->dev, "Cannot enable MSI.\n"); 7533 + 7534 /* Save away PCI config space for use following IOA reset */ 7535 rc = pci_save_state(pdev); 7536 ··· 7556 ioa_cfg->ioa_unit_checked = 1; 7557 7558 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 7559 + rc = request_irq(pdev->irq, ipr_isr, 7560 + ioa_cfg->msi_received ? 0 : IRQF_SHARED, 7561 + IPR_NAME, ioa_cfg); 7562 7563 if (rc) { 7564 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", ··· 7583 ipr_free_mem(ioa_cfg); 7584 cleanup_nomem: 7585 iounmap(ipr_regs); 7586 + out_msi_disable: 7587 + pci_disable_msi(pdev); 7588 out_release_regions: 7589 pci_release_regions(pdev); 7590 out_scsi_host_put: 7591 scsi_host_put(host); 7592 out_disable: 7593 pci_disable_device(pdev); 7594 goto out; 7595 }
+8 -2
drivers/scsi/ipr.h
··· 37 /* 38 * Literals 39 */ 40 - #define IPR_DRIVER_VERSION "2.4.2" 41 - #define IPR_DRIVER_DATE "(January 21, 2009)" 42 43 /* 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding ··· 1025 struct ipr_chip_t { 1026 u16 vendor; 1027 u16 device; 1028 const struct ipr_chip_cfg_t *cfg; 1029 }; 1030 ··· 1097 u8 needs_hard_reset:1; 1098 u8 dual_raid:1; 1099 u8 needs_warm_reset:1; 1100 1101 u8 revid; 1102 ··· 1163 1164 unsigned int transop_timeout; 1165 const struct ipr_chip_cfg_t *chip_cfg; 1166 1167 void __iomem *hdw_dma_regs; /* iomapped PCI memory space */ 1168 unsigned long hdw_dma_regs_pci; /* raw PCI memory space */ ··· 1184 struct work_struct work_q; 1185 1186 wait_queue_head_t reset_wait_q; 1187 1188 struct ipr_dump *dump; 1189 enum ipr_sdt_state sdt_state;
··· 37 /* 38 * Literals 39 */ 40 + #define IPR_DRIVER_VERSION "2.4.3" 41 + #define IPR_DRIVER_DATE "(June 10, 2009)" 42 43 /* 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding ··· 1025 struct ipr_chip_t { 1026 u16 vendor; 1027 u16 device; 1028 + u16 intr_type; 1029 + #define IPR_USE_LSI 0x00 1030 + #define IPR_USE_MSI 0x01 1031 const struct ipr_chip_cfg_t *cfg; 1032 }; 1033 ··· 1094 u8 needs_hard_reset:1; 1095 u8 dual_raid:1; 1096 u8 needs_warm_reset:1; 1097 + u8 msi_received:1; 1098 1099 u8 revid; 1100 ··· 1159 1160 unsigned int transop_timeout; 1161 const struct ipr_chip_cfg_t *chip_cfg; 1162 + const struct ipr_chip_t *ipr_chip; 1163 1164 void __iomem *hdw_dma_regs; /* iomapped PCI memory space */ 1165 unsigned long hdw_dma_regs_pci; /* raw PCI memory space */ ··· 1179 struct work_struct work_q; 1180 1181 wait_queue_head_t reset_wait_q; 1182 + wait_queue_head_t msi_wait_q; 1183 1184 struct ipr_dump *dump; 1185 enum ipr_sdt_state sdt_state;
+9 -5
drivers/scsi/iscsi_tcp.c
··· 253 254 if (r < 0) { 255 iscsi_tcp_segment_unmap(segment); 256 - if (copied || r == -EAGAIN) 257 - break; 258 return r; 259 } 260 copied += r; ··· 273 274 while (1) { 275 rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment); 276 - if (rc < 0) { 277 rc = ISCSI_ERR_XMIT_FAILED; 278 goto error; 279 - } 280 - if (rc == 0) 281 break; 282 283 consumed += rc;
··· 253 254 if (r < 0) { 255 iscsi_tcp_segment_unmap(segment); 256 return r; 257 } 258 copied += r; ··· 275 276 while (1) { 277 rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment); 278 + /* 279 + * We may not have been able to send data because the conn 280 + * is getting stopped. libiscsi will know so propogate err 281 + * for it to do the right thing. 282 + */ 283 + if (rc == -EAGAIN) 284 + return rc; 285 + else if (rc < 0) { 286 rc = ISCSI_ERR_XMIT_FAILED; 287 goto error; 288 + } else if (rc == 0) 289 break; 290 291 consumed += rc;
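The iscsi_tcp change distinguishes a transient -EAGAIN (the connection is being stopped or the socket is temporarily full) from real transmit failures, instead of folding both into ISCSI_ERR_XMIT_FAILED. A hedged sketch of the control flow in isolation, using made-up names rather than the libiscsi API:

    #include <errno.h>

    struct example_conn;                    /* opaque made-up handle */
    int example_xmit_segment(struct example_conn *conn);
    #define EXAMPLE_ERR_XMIT_FAILED 1       /* made-up error code */

    static int example_flush(struct example_conn *conn)
    {
            int rc;

            while (1) {
                    rc = example_xmit_segment(conn);
                    if (rc == -EAGAIN)
                            return rc;      /* transient: caller retries later */
                    else if (rc < 0)
                            return EXAMPLE_ERR_XMIT_FAILED; /* fatal */
                    else if (rc == 0)
                            break;          /* segment fully sent */
                    /* rc > 0: bytes consumed, keep going */
            }
            return 0;
    }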
+35 -48
drivers/scsi/libfc/fc_disc.c
··· 45 46 #define FC_DISC_DELAY 3 47 48 - static int fc_disc_debug; 49 - 50 - #define FC_DEBUG_DISC(fmt...) \ 51 - do { \ 52 - if (fc_disc_debug) \ 53 - FC_DBG(fmt); \ 54 - } while (0) 55 - 56 static void fc_disc_gpn_ft_req(struct fc_disc *); 57 static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); 58 static int fc_disc_new_target(struct fc_disc *, struct fc_rport *, ··· 129 struct fc_rport_libfc_priv *rdata = rport->dd_data; 130 struct fc_disc *disc = &lport->disc; 131 132 - FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event, 133 - rport->port_id); 134 135 switch (event) { 136 case RPORT_EV_CREATED: ··· 183 184 lport = disc->lport; 185 186 - FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n", 187 - fc_host_port_id(lport->host)); 188 189 /* make sure the frame contains an RSCN message */ 190 rp = fc_frame_payload_get(fp, sizeof(*rp)); ··· 216 */ 217 switch (fmt) { 218 case ELS_ADDR_FMT_PORT: 219 - FC_DEBUG_DISC("Port address format for port (%6x)\n", 220 - ntoh24(pp->rscn_fid)); 221 dp = kzalloc(sizeof(*dp), GFP_KERNEL); 222 if (!dp) { 223 redisc = 1; ··· 234 case ELS_ADDR_FMT_DOM: 235 case ELS_ADDR_FMT_FAB: 236 default: 237 - FC_DEBUG_DISC("Address format is (%d)\n", fmt); 238 redisc = 1; 239 break; 240 } 241 } 242 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); 243 if (redisc) { 244 - FC_DEBUG_DISC("RSCN received: rediscovering\n"); 245 fc_disc_restart(disc); 246 } else { 247 - FC_DEBUG_DISC("RSCN received: not rediscovering. " 248 - "redisc %d state %d in_prog %d\n", 249 - redisc, lport->state, disc->pending); 250 list_for_each_entry_safe(dp, next, &disc_ports, peers) { 251 list_del(&dp->peers); 252 rport = lport->tt.rport_lookup(lport, dp->ids.port_id); ··· 261 fc_frame_free(fp); 262 return; 263 reject: 264 - FC_DEBUG_DISC("Received a bad RSCN frame\n"); 265 rjt_data.fp = NULL; 266 rjt_data.reason = ELS_RJT_LOGIC; 267 rjt_data.explan = ELS_EXPL_NONE; ··· 293 mutex_unlock(&disc->disc_mutex); 294 break; 295 default: 296 - FC_DBG("Received an unsupported request. opcode (%x)\n", op); 297 break; 298 } 299 } ··· 312 struct fc_rport_libfc_priv *rdata, *next; 313 struct fc_lport *lport = disc->lport; 314 315 - FC_DEBUG_DISC("Restarting discovery for port (%6x)\n", 316 - fc_host_port_id(lport->host)); 317 318 list_for_each_entry_safe(rdata, next, &disc->rports, peers) { 319 rport = PRIV_TO_RPORT(rdata); 320 - FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id); 321 list_del(&rdata->peers); 322 lport->tt.rport_logoff(rport); 323 } ··· 475 struct fc_lport *lport = disc->lport; 476 enum fc_disc_event event; 477 478 - FC_DEBUG_DISC("Discovery complete for port (%6x)\n", 479 - fc_host_port_id(lport->host)); 480 481 event = disc->event; 482 disc->event = DISC_EV_NONE; ··· 499 { 500 struct fc_lport *lport = disc->lport; 501 unsigned long delay = 0; 502 - if (fc_disc_debug) 503 - FC_DBG("Error %ld, retries %d/%d\n", 504 - PTR_ERR(fp), disc->retry_count, 505 - FC_DISC_RETRY_LIMIT); 506 507 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { 508 /* ··· 638 &disc->rogue_rports); 639 lport->tt.rport_login(rport); 640 } else 641 - FC_DBG("Failed to allocate memory for " 642 - "the newly discovered port (%6x)\n", 643 - dp.ids.port_id); 644 } 645 646 if (np->fp_flags & FC_NS_FID_LAST) { ··· 660 */ 661 if (error == 0 && len > 0 && len < sizeof(*np)) { 662 if (np != &disc->partial_buf) { 663 - FC_DEBUG_DISC("Partial buffer remains " 664 - "for discovery by (%6x)\n", 665 - fc_host_port_id(lport->host)); 666 memcpy(&disc->partial_buf, np, len); 667 } 668 disc->buf_len = (unsigned char) len; ··· 709 int error; 710 711 mutex_lock(&disc->disc_mutex); 712 - FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n", 713 - fc_host_port_id(disc->lport->host)); 714 715 if (IS_ERR(fp)) { 716 fc_disc_error(disc, fp); ··· 725 disc->seq_count == 0) { 726 cp = fc_frame_payload_get(fp, sizeof(*cp)); 727 if (!cp) { 728 - FC_DBG("GPN_FT response too short, len %d\n", 729 - fr_len(fp)); 730 } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { 731 732 /* Accepted, parse the response. */ 733 buf = cp + 1; 734 len -= sizeof(*cp); 735 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { 736 - FC_DBG("GPN_FT rejected reason %x exp %x " 737 - "(check zoning)\n", cp->ct_reason, 738 - cp->ct_explan); 739 disc->event = DISC_EV_FAILED; 740 fc_disc_done(disc); 741 } else { 742 - FC_DBG("GPN_FT unexpected response code %x\n", 743 - ntohs(cp->ct_cmd)); 744 } 745 } else if (fr_sof(fp) == FC_SOF_N3 && 746 seq_cnt == disc->seq_count) { 747 buf = fh + 1; 748 } else { 749 - FC_DBG("GPN_FT unexpected frame - out of sequence? " 750 - "seq_cnt %x expected %x sof %x eof %x\n", 751 - seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp)); 752 } 753 if (buf) { 754 error = fc_disc_gpn_ft_parse(disc, buf, len);
··· 45 46 #define FC_DISC_DELAY 3 47 48 static void fc_disc_gpn_ft_req(struct fc_disc *); 49 static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); 50 static int fc_disc_new_target(struct fc_disc *, struct fc_rport *, ··· 137 struct fc_rport_libfc_priv *rdata = rport->dd_data; 138 struct fc_disc *disc = &lport->disc; 139 140 + FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event, 141 + rport->port_id); 142 143 switch (event) { 144 case RPORT_EV_CREATED: ··· 191 192 lport = disc->lport; 193 194 + FC_DISC_DBG(disc, "Received an RSCN event\n"); 195 196 /* make sure the frame contains an RSCN message */ 197 rp = fc_frame_payload_get(fp, sizeof(*rp)); ··· 225 */ 226 switch (fmt) { 227 case ELS_ADDR_FMT_PORT: 228 + FC_DISC_DBG(disc, "Port address format for port " 229 + "(%6x)\n", ntoh24(pp->rscn_fid)); 230 dp = kzalloc(sizeof(*dp), GFP_KERNEL); 231 if (!dp) { 232 redisc = 1; ··· 243 case ELS_ADDR_FMT_DOM: 244 case ELS_ADDR_FMT_FAB: 245 default: 246 + FC_DISC_DBG(disc, "Address format is (%d)\n", fmt); 247 redisc = 1; 248 break; 249 } 250 } 251 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); 252 if (redisc) { 253 + FC_DISC_DBG(disc, "RSCN received: rediscovering\n"); 254 fc_disc_restart(disc); 255 } else { 256 + FC_DISC_DBG(disc, "RSCN received: not rediscovering. " 257 + "redisc %d state %d in_prog %d\n", 258 + redisc, lport->state, disc->pending); 259 list_for_each_entry_safe(dp, next, &disc_ports, peers) { 260 list_del(&dp->peers); 261 rport = lport->tt.rport_lookup(lport, dp->ids.port_id); ··· 270 fc_frame_free(fp); 271 return; 272 reject: 273 + FC_DISC_DBG(disc, "Received a bad RSCN frame\n"); 274 rjt_data.fp = NULL; 275 rjt_data.reason = ELS_RJT_LOGIC; 276 rjt_data.explan = ELS_EXPL_NONE; ··· 302 mutex_unlock(&disc->disc_mutex); 303 break; 304 default: 305 + FC_DISC_DBG(disc, "Received an unsupported request, " 306 + "the opcode is (%x)\n", op); 307 break; 308 } 309 } ··· 320 struct fc_rport_libfc_priv *rdata, *next; 321 struct fc_lport *lport = disc->lport; 322 323 + FC_DISC_DBG(disc, "Restarting discovery\n"); 324 325 list_for_each_entry_safe(rdata, next, &disc->rports, peers) { 326 rport = PRIV_TO_RPORT(rdata); 327 list_del(&rdata->peers); 328 lport->tt.rport_logoff(rport); 329 } ··· 485 struct fc_lport *lport = disc->lport; 486 enum fc_disc_event event; 487 488 + FC_DISC_DBG(disc, "Discovery complete\n"); 489 490 event = disc->event; 491 disc->event = DISC_EV_NONE; ··· 510 { 511 struct fc_lport *lport = disc->lport; 512 unsigned long delay = 0; 513 + 514 + FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n", 515 + PTR_ERR(fp), disc->retry_count, 516 + FC_DISC_RETRY_LIMIT); 517 518 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { 519 /* ··· 649 &disc->rogue_rports); 650 lport->tt.rport_login(rport); 651 } else 652 + printk(KERN_WARNING "libfc: Failed to allocate " 653 + "memory for the newly discovered port " 654 + "(%6x)\n", dp.ids.port_id); 655 } 656 657 if (np->fp_flags & FC_NS_FID_LAST) { ··· 671 */ 672 if (error == 0 && len > 0 && len < sizeof(*np)) { 673 if (np != &disc->partial_buf) { 674 + FC_DISC_DBG(disc, "Partial buffer remains " 675 + "for discovery\n"); 676 memcpy(&disc->partial_buf, np, len); 677 } 678 disc->buf_len = (unsigned char) len; ··· 721 int error; 722 723 mutex_lock(&disc->disc_mutex); 724 + FC_DISC_DBG(disc, "Received a GPN_FT response\n"); 725 726 if (IS_ERR(fp)) { 727 fc_disc_error(disc, fp); ··· 738 disc->seq_count == 0) { 739 cp = fc_frame_payload_get(fp, sizeof(*cp)); 740 if (!cp) { 741 + FC_DISC_DBG(disc, "GPN_FT response too 
short, len %d\n", 742 + fr_len(fp)); 743 } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { 744 745 /* Accepted, parse the response. */ 746 buf = cp + 1; 747 len -= sizeof(*cp); 748 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { 749 + FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x " 750 + "(check zoning)\n", cp->ct_reason, 751 + cp->ct_explan); 752 disc->event = DISC_EV_FAILED; 753 fc_disc_done(disc); 754 } else { 755 + FC_DISC_DBG(disc, "GPN_FT unexpected response code " 756 + "%x\n", ntohs(cp->ct_cmd)); 757 } 758 } else if (fr_sof(fp) == FC_SOF_N3 && 759 seq_cnt == disc->seq_count) { 760 buf = fh + 1; 761 } else { 762 + FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? " 763 + "seq_cnt %x expected %x sof %x eof %x\n", 764 + seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp)); 765 } 766 if (buf) { 767 error = fc_disc_gpn_ft_parse(disc, buf, len);
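The FC_DISC_DBG() calls that replace the old fc_disc_debug/FC_DEBUG_DISC pair are gated on the module-wide fc_debug_logging bitmask that fc_fcp.c declares further down. A minimal sketch of the pattern, assuming one bit per logging area and a printk prefix carrying the local port id (the bit value and prefix format here are illustrative assumptions, not the real libfc definitions):

/* Sketch only: gate a printk on one bit of the debug_logging mask. */
extern unsigned int fc_debug_logging;		/* debug_logging module param */

#define FC_DISC_LOGGING	0x02			/* assumed: discovery bit */

#define FC_CHECK_LOGGING(LEVEL, CMD)			\
do {							\
	if (unlikely(fc_debug_logging & LEVEL)) {	\
		CMD;					\
	}						\
} while (0)

#define FC_DISC_DBG(disc, fmt, args...)					\
	FC_CHECK_LOGGING(FC_DISC_LOGGING,				\
			 printk(KERN_INFO "disc: %6x: " fmt,		\
				fc_host_port_id((disc)->lport->host),	\
				##args))

Because the macro supplies the port id itself, converted call sites such as fc_disc_restart() drop the explicit fc_host_port_id(lport->host) argument.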
+25 -33
drivers/scsi/libfc/fc_exch.c
··· 32 #include <scsi/libfc.h> 33 #include <scsi/fc_encode.h> 34 35 - /* 36 - * fc_exch_debug can be set in debugger or at compile time to get more logs. 37 - */ 38 - static int fc_exch_debug; 39 - 40 - #define FC_DEBUG_EXCH(fmt...) \ 41 - do { \ 42 - if (fc_exch_debug) \ 43 - FC_DBG(fmt); \ 44 - } while (0) 45 - 46 - static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ 47 48 /* 49 * Structure and function definitions for managing Fibre Channel Exchanges ··· 322 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) 323 return; 324 325 - FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n", 326 - ep->xid); 327 if (schedule_delayed_work(&ep->timeout_work, 328 msecs_to_jiffies(timer_msec))) 329 fc_exch_hold(ep); /* hold for timer */ ··· 534 /* alloc a new xid */ 535 xid = fc_em_alloc_xid(mp, fp); 536 if (!xid) { 537 - printk(KERN_ERR "fc_em_alloc_xid() failed\n"); 538 goto err; 539 } 540 } ··· 809 struct fc_exch *ep = fc_seq_exch(sp); 810 811 sp = fc_seq_alloc(ep, ep->seq_id++); 812 - FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x\n", 813 - ep->xid, ep->f_ctl, sp->id); 814 return sp; 815 } 816 /* ··· 890 fc_exch_els_rec(sp, els_data->fp); 891 break; 892 default: 893 - FC_DBG("Invalid ELS CMD:%x\n", els_cmd); 894 } 895 } 896 EXPORT_SYMBOL(fc_seq_els_rsp_send); ··· 1123 lp->tt.lport_recv(lp, sp, fp); 1124 fc_exch_release(ep); /* release from lookup */ 1125 } else { 1126 - FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject); 1127 fc_frame_free(fp); 1128 } 1129 } ··· 1231 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ 1232 if (!sp) { 1233 atomic_inc(&mp->stats.xid_not_found); 1234 - FC_DEBUG_EXCH("seq lookup failed\n"); 1235 } else { 1236 atomic_inc(&mp->stats.non_bls_resp); 1237 - FC_DEBUG_EXCH("non-BLS response to sequence"); 1238 } 1239 fc_frame_free(fp); 1240 } ··· 1255 int rc = 1, has_rec = 0; 1256 1257 fh = fc_frame_header_get(fp); 1258 - FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n", 1259 - fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl)); 1260 1261 if (cancel_delayed_work_sync(&ep->timeout_work)) 1262 fc_exch_release(ep); /* release from pending timer hold */ ··· 1348 case FC_RCTL_ACK_0: 1349 break; 1350 default: 1351 - FC_DEBUG_EXCH("BLS rctl %x - %s received", 1352 - fh->fh_r_ctl, 1353 - fc_exch_rctl_name(fh->fh_r_ctl)); 1354 break; 1355 } 1356 fc_frame_free(fp); ··· 1588 1589 if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT) 1590 goto cleanup; 1591 - FC_DBG("Cannot process RRQ, because of frame error %d\n", err); 1592 return; 1593 } 1594 ··· 1598 1599 switch (op) { 1600 case ELS_LS_RJT: 1601 - FC_DBG("LS_RJT for RRQ"); 1602 /* fall through */ 1603 case ELS_LS_ACC: 1604 goto cleanup; 1605 default: 1606 - FC_DBG("unexpected response op %x for RRQ", op); 1607 return; 1608 } 1609 ··· 1731 size_t len; 1732 1733 if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) { 1734 - FC_DBG("Invalid min_xid 0x:%x and max_xid 0x:%x\n", 1735 - min_xid, max_xid); 1736 return NULL; 1737 } 1738 ··· 1869 1870 /* lport lock ? */ 1871 if (!lp || !mp || (lp->state == LPORT_ST_NONE)) { 1872 - FC_DBG("fc_lport or EM is not allocated and configured"); 1873 fc_frame_free(fp); 1874 return; 1875 } ··· 1896 fc_exch_recv_req(lp, mp, fp); 1897 break; 1898 default: 1899 - FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp)); 1900 fc_frame_free(fp); 1901 break; 1902 }
··· 32 #include <scsi/libfc.h> 33 #include <scsi/fc_encode.h> 34 35 + static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ 36 37 /* 38 * Structure and function definitions for managing Fibre Channel Exchanges ··· 333 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) 334 return; 335 336 + FC_EXCH_DBG(ep, "Exchange timed out, notifying the upper layer\n"); 337 + 338 if (schedule_delayed_work(&ep->timeout_work, 339 msecs_to_jiffies(timer_msec))) 340 fc_exch_hold(ep); /* hold for timer */ ··· 545 /* alloc a new xid */ 546 xid = fc_em_alloc_xid(mp, fp); 547 if (!xid) { 548 + printk(KERN_WARNING "libfc: Failed to allocate an exhange\n"); 549 goto err; 550 } 551 } ··· 820 struct fc_exch *ep = fc_seq_exch(sp); 821 822 sp = fc_seq_alloc(ep, ep->seq_id++); 823 + FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n", 824 + ep->f_ctl, sp->id); 825 return sp; 826 } 827 /* ··· 901 fc_exch_els_rec(sp, els_data->fp); 902 break; 903 default: 904 + FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd); 905 } 906 } 907 EXPORT_SYMBOL(fc_seq_els_rsp_send); ··· 1134 lp->tt.lport_recv(lp, sp, fp); 1135 fc_exch_release(ep); /* release from lookup */ 1136 } else { 1137 + FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject); 1138 fc_frame_free(fp); 1139 } 1140 } ··· 1242 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ 1243 if (!sp) { 1244 atomic_inc(&mp->stats.xid_not_found); 1245 + FC_EM_DBG(mp, "seq lookup failed\n"); 1246 } else { 1247 atomic_inc(&mp->stats.non_bls_resp); 1248 + FC_EM_DBG(mp, "non-BLS response to sequence"); 1249 } 1250 fc_frame_free(fp); 1251 } ··· 1266 int rc = 1, has_rec = 0; 1267 1268 fh = fc_frame_header_get(fp); 1269 + FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl, 1270 + fc_exch_rctl_name(fh->fh_r_ctl)); 1271 1272 if (cancel_delayed_work_sync(&ep->timeout_work)) 1273 fc_exch_release(ep); /* release from pending timer hold */ ··· 1359 case FC_RCTL_ACK_0: 1360 break; 1361 default: 1362 + FC_EXCH_DBG(ep, "BLS rctl %x - %s received", 1363 + fh->fh_r_ctl, 1364 + fc_exch_rctl_name(fh->fh_r_ctl)); 1365 break; 1366 } 1367 fc_frame_free(fp); ··· 1599 1600 if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT) 1601 goto cleanup; 1602 + FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, " 1603 + "frame error %d\n", err); 1604 return; 1605 } 1606 ··· 1608 1609 switch (op) { 1610 case ELS_LS_RJT: 1611 + FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ"); 1612 /* fall through */ 1613 case ELS_LS_ACC: 1614 goto cleanup; 1615 default: 1616 + FC_EXCH_DBG(aborted_ep, "unexpected response op %x " 1617 + "for RRQ", op); 1618 return; 1619 } 1620 ··· 1740 size_t len; 1741 1742 if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) { 1743 + FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", 1744 + min_xid, max_xid); 1745 return NULL; 1746 } 1747 ··· 1878 1879 /* lport lock ? */ 1880 if (!lp || !mp || (lp->state == LPORT_ST_NONE)) { 1881 + FC_LPORT_DBG(lp, "Receiving frames for an lport that " 1882 + "has not been initialized correctly\n"); 1883 fc_frame_free(fp); 1884 return; 1885 } ··· 1904 fc_exch_recv_req(lp, mp, fp); 1905 break; 1906 default: 1907 + FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp)); 1908 fc_frame_free(fp); 1909 break; 1910 }
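fc_exch.c gets two granularities: FC_EXCH_DBG() for messages about one exchange and FC_EM_DBG() for the exchange manager as a whole, which is why the explicit ep->xid arguments vanish from the converted call sites. A hedged sketch along the same lines as above (the xid-based prefix and bit values are assumptions):

/* Sketch: same FC_CHECK_LOGGING gate as above; bit values assumed. */
#define FC_EM_LOGGING	0x10			/* assumed: exchange manager bit */
#define FC_EXCH_LOGGING	0x20			/* assumed: exchange bit */

#define FC_EXCH_DBG(exch, fmt, args...)				\
	FC_CHECK_LOGGING(FC_EXCH_LOGGING,			\
			 printk(KERN_INFO "xid %4x: " fmt,	\
				(exch)->xid, ##args))

#define FC_EM_DBG(em, fmt, args...)				\
	FC_CHECK_LOGGING(FC_EM_LOGGING,				\
			 printk(KERN_INFO "em: " fmt, ##args))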
+50 -47
drivers/scsi/libfc/fc_fcp.c
··· 43 MODULE_DESCRIPTION("libfc"); 44 MODULE_LICENSE("GPL v2"); 45 46 - static int fc_fcp_debug; 47 - 48 - #define FC_DEBUG_FCP(fmt...) \ 49 - do { \ 50 - if (fc_fcp_debug) \ 51 - FC_DBG(fmt); \ 52 - } while (0) 53 54 static struct kmem_cache *scsi_pkt_cachep; 55 ··· 343 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && 344 fc_frame_crc_check(fp)) 345 goto crc_err; 346 - FC_DEBUG_FCP("data received past end. len %zx offset %zx " 347 - "data_len %x\n", len, offset, fsp->data_len); 348 fc_fcp_retry_cmd(fsp); 349 return; 350 } ··· 407 stats->ErrorFrames++; 408 /* FIXME - per cpu count, not total count! */ 409 if (stats->InvalidCRCCount++ < 5) 410 - printk(KERN_WARNING "CRC error on data frame for port (%6x)\n", 411 fc_host_port_id(lp->host)); 412 /* 413 * Assume the frame is total garbage. ··· 472 WARN_ON(seq_blen <= 0); 473 if (unlikely(offset + seq_blen > fsp->data_len)) { 474 /* this should never happen */ 475 - FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n", 476 - seq_blen, offset); 477 fc_fcp_send_abort(fsp); 478 return 0; 479 } else if (offset != fsp->xfer_len) { 480 /* Out of Order Data Request - no problem, but unexpected. */ 481 - FC_DEBUG_FCP("xfer-ready non-contiguous. " 482 - "seq_blen %zx offset %zx\n", seq_blen, offset); 483 } 484 485 /* ··· 490 t_blen = fsp->max_payload; 491 if (lp->seq_offload) { 492 t_blen = min(seq_blen, (size_t)lp->lso_max); 493 - FC_DEBUG_FCP("fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", 494 fsp, seq_blen, lp->lso_max, t_blen); 495 } 496 ··· 691 if (!can_queue) 692 can_queue = 1; 693 lp->host->can_queue = can_queue; 694 - shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n" 695 "Reducing can_queue to %d.\n", can_queue); 696 done: 697 spin_unlock_irqrestore(lp->host->host_lock, flags); ··· 765 766 fc_fcp_resp(fsp, fp); 767 } else { 768 - FC_DBG("unexpected frame. r_ctl %x\n", r_ctl); 769 } 770 unlock: 771 fc_fcp_unlock_pkt(fsp); ··· 874 return; 875 } 876 fsp->status_code = FC_DATA_OVRRUN; 877 - FC_DBG("tgt %6x xfer len %zx greater than expected len %x. " 878 - "data len %x\n", 879 - fsp->rport->port_id, 880 - fsp->xfer_len, expected_len, fsp->data_len); 881 } 882 fc_fcp_complete_locked(fsp); 883 return; 884 885 len_err: 886 - FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n", 887 - flags, fr_len(fp), respl, snsl); 888 err: 889 fsp->status_code = FC_ERROR; 890 fc_fcp_complete_locked(fsp); ··· 1104 if (fc_fcp_lock_pkt(fsp)) 1105 return; 1106 1107 - switch (error) { 1108 - case -FC_EX_CLOSED: 1109 fc_fcp_retry_cmd(fsp); 1110 goto unlock; 1111 - default: 1112 - FC_DBG("unknown error %ld\n", PTR_ERR(fp)); 1113 } 1114 /* 1115 * clear abort pending, because the lower layer 1116 * decided to force completion. 
··· 1140 fsp->wait_for_comp = 0; 1141 1142 if (!rc) { 1143 - FC_DBG("target abort cmd failed\n"); 1144 rc = FAILED; 1145 } else if (fsp->state & FC_SRB_ABORTED) { 1146 - FC_DBG("target abort cmd passed\n"); 1147 rc = SUCCESS; 1148 fc_fcp_complete_locked(fsp); 1149 } ··· 1208 spin_unlock_bh(&fsp->scsi_pkt_lock); 1209 1210 if (!rc) { 1211 - FC_DBG("lun reset failed\n"); 1212 return FAILED; 1213 } 1214 ··· 1216 if (fsp->cdb_status != FCP_TMF_CMPL) 1217 return FAILED; 1218 1219 - FC_DBG("lun reset to lun %u completed\n", lun); 1220 fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); 1221 return SUCCESS; 1222 } ··· 1383 rjt = fc_frame_payload_get(fp, sizeof(*rjt)); 1384 switch (rjt->er_reason) { 1385 default: 1386 - FC_DEBUG_FCP("device %x unexpected REC reject " 1387 - "reason %d expl %d\n", 1388 - fsp->rport->port_id, rjt->er_reason, 1389 - rjt->er_explan); 1390 /* fall through */ 1391 case ELS_RJT_UNSUP: 1392 - FC_DEBUG_FCP("device does not support REC\n"); 1393 rp = fsp->rport->dd_data; 1394 /* 1395 * if we do not spport RECs or got some bogus ··· 1509 break; 1510 1511 default: 1512 - FC_DBG("REC %p fid %x error unexpected error %d\n", 1513 - fsp, fsp->rport->port_id, error); 1514 fsp->status_code = FC_CMD_PLOGO; 1515 /* fall through */ 1516 ··· 1519 * Assume REC or LS_ACC was lost. 1520 * The exchange manager will have aborted REC, so retry. 1521 */ 1522 - FC_DBG("REC fid %x error error %d retry %d/%d\n", 1523 - fsp->rport->port_id, error, fsp->recov_retry, 1524 - FC_MAX_RECOV_RETRY); 1525 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1526 fc_fcp_rec(fsp); 1527 else ··· 2006 if (lp->state != LPORT_ST_READY) 2007 return rc; 2008 2009 fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); 2010 if (fsp == NULL) { 2011 - FC_DBG("could not allocate scsi_pkt\n"); 2012 sc_cmd->result = DID_NO_CONNECT << 16; 2013 goto out; 2014 } ··· 2045 struct fc_lport *lp = shost_priv(shost); 2046 unsigned long wait_tmo; 2047 2048 lp->tt.lport_reset(lp); 2049 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; 2050 while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) 2051 msleep(1000); 2052 2053 if (fc_fcp_lport_queue_ready(lp)) { 2054 - shost_printk(KERN_INFO, shost, "Host reset succeeded.\n"); 2055 return SUCCESS; 2056 } else { 2057 - shost_printk(KERN_INFO, shost, "Host reset failed. " 2058 - "lport not ready.\n"); 2059 return FAILED; 2060 } 2061 } ··· 2118 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 2119 2120 if (!list_empty(&si->scsi_pkt_queue)) 2121 - printk(KERN_ERR "Leaked scsi packets.\n"); 2122 2123 mempool_destroy(si->scsi_pkt_pool); 2124 kfree(si); ··· 2168 sizeof(struct fc_fcp_pkt), 2169 0, SLAB_HWCACHE_ALIGN, NULL); 2170 if (scsi_pkt_cachep == NULL) { 2171 - FC_DBG("Unable to allocate SRB cache...module load failed!"); 2172 return -ENOMEM; 2173 } 2174
··· 43 MODULE_DESCRIPTION("libfc"); 44 MODULE_LICENSE("GPL v2"); 45 46 + unsigned int fc_debug_logging; 47 + module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); 48 + MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); 49 50 static struct kmem_cache *scsi_pkt_cachep; 51 ··· 347 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && 348 fc_frame_crc_check(fp)) 349 goto crc_err; 350 + FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx " 351 + "data_len %x\n", len, offset, fsp->data_len); 352 fc_fcp_retry_cmd(fsp); 353 return; 354 } ··· 411 stats->ErrorFrames++; 412 /* FIXME - per cpu count, not total count! */ 413 if (stats->InvalidCRCCount++ < 5) 414 + printk(KERN_WARNING "libfc: CRC error on data " 415 + "frame for port (%6x)\n", 416 fc_host_port_id(lp->host)); 417 /* 418 * Assume the frame is total garbage. ··· 475 WARN_ON(seq_blen <= 0); 476 if (unlikely(offset + seq_blen > fsp->data_len)) { 477 /* this should never happen */ 478 + FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx " 479 + "offset %zx\n", seq_blen, offset); 480 fc_fcp_send_abort(fsp); 481 return 0; 482 } else if (offset != fsp->xfer_len) { 483 /* Out of Order Data Request - no problem, but unexpected. */ 484 + FC_FCP_DBG(fsp, "xfer-ready non-contiguous. " 485 + "seq_blen %zx offset %zx\n", seq_blen, offset); 486 } 487 488 /* ··· 493 t_blen = fsp->max_payload; 494 if (lp->seq_offload) { 495 t_blen = min(seq_blen, (size_t)lp->lso_max); 496 + FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", 497 fsp, seq_blen, lp->lso_max, t_blen); 498 } 499 ··· 694 if (!can_queue) 695 can_queue = 1; 696 lp->host->can_queue = can_queue; 697 + shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n" 698 "Reducing can_queue to %d.\n", can_queue); 699 done: 700 spin_unlock_irqrestore(lp->host->host_lock, flags); ··· 768 769 fc_fcp_resp(fsp, fp); 770 } else { 771 + FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl); 772 } 773 unlock: 774 fc_fcp_unlock_pkt(fsp); ··· 877 return; 878 } 879 fsp->status_code = FC_DATA_OVRRUN; 880 + FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, " 881 + "len %x, data len %x\n", 882 + fsp->rport->port_id, 883 + fsp->xfer_len, expected_len, fsp->data_len); 884 } 885 fc_fcp_complete_locked(fsp); 886 return; 887 888 len_err: 889 + FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u " 890 + "snsl %u\n", flags, fr_len(fp), respl, snsl); 891 err: 892 fsp->status_code = FC_ERROR; 893 fc_fcp_complete_locked(fsp); ··· 1107 if (fc_fcp_lock_pkt(fsp)) 1108 return; 1109 1110 + if (error == -FC_EX_CLOSED) { 1111 fc_fcp_retry_cmd(fsp); 1112 goto unlock; 1113 } 1114 + 1115 /* 1116 * clear abort pending, because the lower layer 1117 * decided to force completion. 
··· 1145 fsp->wait_for_comp = 0; 1146 1147 if (!rc) { 1148 + FC_FCP_DBG(fsp, "target abort cmd failed\n"); 1149 rc = FAILED; 1150 } else if (fsp->state & FC_SRB_ABORTED) { 1151 + FC_FCP_DBG(fsp, "target abort cmd passed\n"); 1152 rc = SUCCESS; 1153 fc_fcp_complete_locked(fsp); 1154 } ··· 1213 spin_unlock_bh(&fsp->scsi_pkt_lock); 1214 1215 if (!rc) { 1216 + FC_SCSI_DBG(lp, "lun reset failed\n"); 1217 return FAILED; 1218 } 1219 ··· 1221 if (fsp->cdb_status != FCP_TMF_CMPL) 1222 return FAILED; 1223 1224 + FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun); 1225 fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); 1226 return SUCCESS; 1227 } ··· 1388 rjt = fc_frame_payload_get(fp, sizeof(*rjt)); 1389 switch (rjt->er_reason) { 1390 default: 1391 + FC_FCP_DBG(fsp, "device %x unexpected REC reject " 1392 + "reason %d expl %d\n", 1393 + fsp->rport->port_id, rjt->er_reason, 1394 + rjt->er_explan); 1395 /* fall through */ 1396 case ELS_RJT_UNSUP: 1397 + FC_FCP_DBG(fsp, "device does not support REC\n"); 1398 rp = fsp->rport->dd_data; 1399 /* 1400 * if we do not spport RECs or got some bogus ··· 1514 break; 1515 1516 default: 1517 + FC_FCP_DBG(fsp, "REC %p fid %x error unexpected error %d\n", 1518 + fsp, fsp->rport->port_id, error); 1519 fsp->status_code = FC_CMD_PLOGO; 1520 /* fall through */ 1521 ··· 1524 * Assume REC or LS_ACC was lost. 1525 * The exchange manager will have aborted REC, so retry. 1526 */ 1527 + FC_FCP_DBG(fsp, "REC fid %x error error %d retry %d/%d\n", 1528 + fsp->rport->port_id, error, fsp->recov_retry, 1529 + FC_MAX_RECOV_RETRY); 1530 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1531 fc_fcp_rec(fsp); 1532 else ··· 2011 if (lp->state != LPORT_ST_READY) 2012 return rc; 2013 2014 + FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id); 2015 + 2016 fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); 2017 if (fsp == NULL) { 2018 + printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); 2019 sc_cmd->result = DID_NO_CONNECT << 16; 2020 goto out; 2021 } ··· 2048 struct fc_lport *lp = shost_priv(shost); 2049 unsigned long wait_tmo; 2050 2051 + FC_SCSI_DBG(lp, "Resetting host\n"); 2052 + 2053 lp->tt.lport_reset(lp); 2054 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; 2055 while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) 2056 msleep(1000); 2057 2058 if (fc_fcp_lport_queue_ready(lp)) { 2059 + shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " 2060 + "on port (%6x)\n", fc_host_port_id(lp->host)); 2061 return SUCCESS; 2062 } else { 2063 + shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " 2064 + "port (%6x) is not ready.\n", 2065 + fc_host_port_id(lp->host)); 2066 return FAILED; 2067 } 2068 } ··· 2117 struct fc_fcp_internal *si = fc_get_scsi_internal(lp); 2118 2119 if (!list_empty(&si->scsi_pkt_queue)) 2120 + printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " 2121 + "port (%6x)\n", fc_host_port_id(lp->host)); 2122 2123 mempool_destroy(si->scsi_pkt_pool); 2124 kfree(si); ··· 2166 sizeof(struct fc_fcp_pkt), 2167 0, SLAB_HWCACHE_ALIGN, NULL); 2168 if (scsi_pkt_cachep == NULL) { 2169 + printk(KERN_ERR "libfc: Unable to allocate SRB cache, " 2170 + "module load failed!"); 2171 return -ENOMEM; 2172 } 2173
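fc_fcp.c is where the shared mask actually lives: debug_logging is declared with S_IRUGO|S_IWUSR, so the bits can be flipped at runtime through /sys/module/libfc/parameters/debug_logging as well as set at load time. The per-I/O FC_FCP_DBG() presumably tags each message with the packet's remote port; in sketch form (the struct fields used are taken from the call sites above, the prefix is assumed):

/* Sketch: per-fc_fcp_pkt wrapper over the same gate. */
#define FC_FCP_LOGGING	0x08			/* assumed: FCP I/O path bit */

#define FC_FCP_DBG(pkt, fmt, args...)				\
	FC_CHECK_LOGGING(FC_FCP_LOGGING,			\
			 printk(KERN_INFO "fcp: %6x: " fmt,	\
				(pkt)->rport->port_id, ##args))

FC_SCSI_DBG(), used by the reset handlers above, takes the lport instead, since error handling runs against the host rather than a single packet.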
+75 -81
drivers/scsi/libfc/fc_lport.c
··· 101 102 #define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/ 103 104 - static int fc_lport_debug; 105 - 106 - #define FC_DEBUG_LPORT(fmt...) \ 107 - do { \ 108 - if (fc_lport_debug) \ 109 - FC_DBG(fmt); \ 110 - } while (0) 111 - 112 static void fc_lport_error(struct fc_lport *, struct fc_frame *); 113 114 static void fc_lport_enter_reset(struct fc_lport *); ··· 143 struct fc_rport *rport, 144 enum fc_rport_event event) 145 { 146 - FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event, 147 - rport->port_id); 148 149 switch (event) { 150 case RPORT_EV_CREATED: ··· 154 lport->dns_rp = rport; 155 fc_lport_enter_rpn_id(lport); 156 } else { 157 - FC_DEBUG_LPORT("Received an CREATED event on " 158 - "port (%6x) for the directory " 159 - "server, but the lport is not " 160 - "in the DNS state, it's in the " 161 - "%d state", rport->port_id, 162 - lport->state); 163 lport->tt.rport_logoff(rport); 164 } 165 mutex_unlock(&lport->lp_mutex); 166 } else 167 - FC_DEBUG_LPORT("Received an event for port (%6x) " 168 - "which is not the directory server\n", 169 - rport->port_id); 170 break; 171 case RPORT_EV_LOGO: 172 case RPORT_EV_FAILED: ··· 177 mutex_unlock(&lport->lp_mutex); 178 179 } else 180 - FC_DEBUG_LPORT("Received an event for port (%6x) " 181 - "which is not the directory server\n", 182 - rport->port_id); 183 break; 184 case RPORT_EV_NONE: 185 break; ··· 355 static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, 356 struct fc_lport *lport) 357 { 358 - FC_DEBUG_LPORT("Received RLIR request while in state %s\n", 359 - fc_lport_state(lport)); 360 361 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); 362 fc_frame_free(fp); ··· 381 void *dp; 382 u32 f_ctl; 383 384 - FC_DEBUG_LPORT("Received RLIR request while in state %s\n", 385 - fc_lport_state(lport)); 386 387 len = fr_len(in_fp) - sizeof(struct fc_frame_header); 388 pp = fc_frame_payload_get(in_fp, len); ··· 429 size_t len; 430 u32 f_ctl; 431 432 - FC_DEBUG_LPORT("Received RNID request while in state %s\n", 433 - fc_lport_state(lport)); 434 435 req = fc_frame_payload_get(in_fp, sizeof(*req)); 436 if (!req) { ··· 490 size_t len; 491 u32 f_ctl; 492 493 - FC_DEBUG_LPORT("Received ADISC request while in state %s\n", 494 - fc_lport_state(lport)); 495 496 req = fc_frame_payload_get(in_fp, sizeof(*req)); 497 if (!req) { ··· 566 */ 567 void fc_linkup(struct fc_lport *lport) 568 { 569 - FC_DEBUG_LPORT("Link is up for port (%6x)\n", 570 - fc_host_port_id(lport->host)); 571 572 mutex_lock(&lport->lp_mutex); 573 if (!lport->link_up) { ··· 587 void fc_linkdown(struct fc_lport *lport) 588 { 589 mutex_lock(&lport->lp_mutex); 590 - FC_DEBUG_LPORT("Link is down for port (%6x)\n", 591 - fc_host_port_id(lport->host)); 592 593 if (lport->link_up) { 594 lport->link_up = 0; ··· 693 { 694 switch (event) { 695 case DISC_EV_SUCCESS: 696 - FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n", 697 - fc_host_port_id(lport->host)); 698 break; 699 case DISC_EV_FAILED: 700 - FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n", 701 - fc_host_port_id(lport->host)); 702 mutex_lock(&lport->lp_mutex); 703 fc_lport_enter_reset(lport); 704 mutex_unlock(&lport->lp_mutex); ··· 717 */ 718 static void fc_lport_enter_ready(struct fc_lport *lport) 719 { 720 - FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n", 721 - fc_host_port_id(lport->host), fc_lport_state(lport)); 722 723 fc_lport_state_enter(lport, LPORT_ST_READY); 724 ··· 753 u32 local_fid; 754 u32 f_ctl; 755 756 - FC_DEBUG_LPORT("Received FLOGI request while in state %s\n", 757 - 
fc_lport_state(lport)); 758 759 fh = fc_frame_header_get(rx_fp); 760 remote_fid = ntoh24(fh->fh_s_id); ··· 763 goto out; 764 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); 765 if (remote_wwpn == lport->wwpn) { 766 - FC_DBG("FLOGI from port with same WWPN %llx " 767 - "possible configuration error\n", 768 - (unsigned long long)remote_wwpn); 769 goto out; 770 } 771 - FC_DBG("FLOGI from port WWPN %llx\n", (unsigned long long)remote_wwpn); 772 773 /* 774 * XXX what is the right thing to do for FIDs? ··· 899 } 900 } 901 } else { 902 - FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp)); 903 fc_frame_free(fp); 904 } 905 mutex_unlock(&lport->lp_mutex); ··· 938 */ 939 static void fc_lport_enter_reset(struct fc_lport *lport) 940 { 941 - FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n", 942 - fc_host_port_id(lport->host), fc_lport_state(lport)); 943 944 fc_lport_state_enter(lport, LPORT_ST_RESET); 945 ··· 973 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) 974 { 975 unsigned long delay = 0; 976 - FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n", 977 - PTR_ERR(fp), fc_lport_state(lport), 978 - lport->retry_count); 979 980 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { 981 /* ··· 1031 1032 mutex_lock(&lport->lp_mutex); 1033 1034 - FC_DEBUG_LPORT("Received a RFT_ID response\n"); 1035 1036 if (lport->state != LPORT_ST_RFT_ID) { 1037 - FC_DBG("Received a RFT_ID response, but in state %s\n", 1038 - fc_lport_state(lport)); 1039 if (IS_ERR(fp)) 1040 goto err; 1041 goto out; ··· 1085 1086 mutex_lock(&lport->lp_mutex); 1087 1088 - FC_DEBUG_LPORT("Received a RPN_ID response\n"); 1089 1090 if (lport->state != LPORT_ST_RPN_ID) { 1091 - FC_DBG("Received a RPN_ID response, but in state %s\n", 1092 - fc_lport_state(lport)); 1093 if (IS_ERR(fp)) 1094 goto err; 1095 goto out; ··· 1137 1138 mutex_lock(&lport->lp_mutex); 1139 1140 - FC_DEBUG_LPORT("Received a SCR response\n"); 1141 1142 if (lport->state != LPORT_ST_SCR) { 1143 - FC_DBG("Received a SCR response, but in state %s\n", 1144 - fc_lport_state(lport)); 1145 if (IS_ERR(fp)) 1146 goto err; 1147 goto out; ··· 1175 { 1176 struct fc_frame *fp; 1177 1178 - FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n", 1179 - fc_host_port_id(lport->host), fc_lport_state(lport)); 1180 1181 fc_lport_state_enter(lport, LPORT_ST_SCR); 1182 ··· 1204 struct fc_ns_fts *lps; 1205 int i; 1206 1207 - FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n", 1208 - fc_host_port_id(lport->host), fc_lport_state(lport)); 1209 1210 fc_lport_state_enter(lport, LPORT_ST_RFT_ID); 1211 ··· 1244 { 1245 struct fc_frame *fp; 1246 1247 - FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n", 1248 - fc_host_port_id(lport->host), fc_lport_state(lport)); 1249 1250 fc_lport_state_enter(lport, LPORT_ST_RPN_ID); 1251 ··· 1285 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; 1286 dp.lp = lport; 1287 1288 - FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n", 1289 - fc_host_port_id(lport->host), fc_lport_state(lport)); 1290 1291 fc_lport_state_enter(lport, LPORT_ST_DNS); 1292 ··· 1365 1366 mutex_lock(&lport->lp_mutex); 1367 1368 - FC_DEBUG_LPORT("Received a LOGO response\n"); 1369 1370 if (lport->state != LPORT_ST_LOGO) { 1371 - FC_DBG("Received a LOGO response, but in state %s\n", 1372 - fc_lport_state(lport)); 1373 if (IS_ERR(fp)) 1374 goto err; 1375 goto out; ··· 1404 struct fc_frame *fp; 1405 struct fc_els_logo *logo; 1406 1407 - FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n", 1408 - fc_host_port_id(lport->host), 
fc_lport_state(lport)); 1409 1410 fc_lport_state_enter(lport, LPORT_ST_LOGO); 1411 ··· 1447 1448 mutex_lock(&lport->lp_mutex); 1449 1450 - FC_DEBUG_LPORT("Received a FLOGI response\n"); 1451 1452 if (lport->state != LPORT_ST_FLOGI) { 1453 - FC_DBG("Received a FLOGI response, but in state %s\n", 1454 - fc_lport_state(lport)); 1455 if (IS_ERR(fp)) 1456 goto err; 1457 goto out; ··· 1466 did = ntoh24(fh->fh_d_id); 1467 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { 1468 1469 - FC_DEBUG_LPORT("Assigned fid %x\n", did); 1470 fc_host_port_id(lport->host) = did; 1471 1472 flp = fc_frame_payload_get(fp, sizeof(*flp)); ··· 1486 if (e_d_tov > lport->e_d_tov) 1487 lport->e_d_tov = e_d_tov; 1488 lport->r_a_tov = 2 * e_d_tov; 1489 - FC_DBG("Point-to-Point mode\n"); 1490 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), 1491 get_unaligned_be64( 1492 &flp->fl_wwpn), ··· 1510 } 1511 } 1512 } else { 1513 - FC_DBG("bad FLOGI response\n"); 1514 } 1515 1516 out: ··· 1530 { 1531 struct fc_frame *fp; 1532 1533 - FC_DEBUG_LPORT("Processing FLOGI state\n"); 1534 1535 fc_lport_state_enter(lport, LPORT_ST_FLOGI); 1536
··· 101 102 #define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/ 103 104 static void fc_lport_error(struct fc_lport *, struct fc_frame *); 105 106 static void fc_lport_enter_reset(struct fc_lport *); ··· 151 struct fc_rport *rport, 152 enum fc_rport_event event) 153 { 154 + FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event, 155 + rport->port_id); 156 157 switch (event) { 158 case RPORT_EV_CREATED: ··· 162 lport->dns_rp = rport; 163 fc_lport_enter_rpn_id(lport); 164 } else { 165 + FC_LPORT_DBG(lport, "Received an CREATED event " 166 + "on port (%6x) for the directory " 167 + "server, but the lport is not " 168 + "in the DNS state, it's in the " 169 + "%d state", rport->port_id, 170 + lport->state); 171 lport->tt.rport_logoff(rport); 172 } 173 mutex_unlock(&lport->lp_mutex); 174 } else 175 + FC_LPORT_DBG(lport, "Received an event for port (%6x) " 176 + "which is not the directory server\n", 177 + rport->port_id); 178 break; 179 case RPORT_EV_LOGO: 180 case RPORT_EV_FAILED: ··· 185 mutex_unlock(&lport->lp_mutex); 186 187 } else 188 + FC_LPORT_DBG(lport, "Received an event for port (%6x) " 189 + "which is not the directory server\n", 190 + rport->port_id); 191 break; 192 case RPORT_EV_NONE: 193 break; ··· 363 static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, 364 struct fc_lport *lport) 365 { 366 + FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", 367 + fc_lport_state(lport)); 368 369 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); 370 fc_frame_free(fp); ··· 389 void *dp; 390 u32 f_ctl; 391 392 + FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", 393 + fc_lport_state(lport)); 394 395 len = fr_len(in_fp) - sizeof(struct fc_frame_header); 396 pp = fc_frame_payload_get(in_fp, len); ··· 437 size_t len; 438 u32 f_ctl; 439 440 + FC_LPORT_DBG(lport, "Received RNID request while in state %s\n", 441 + fc_lport_state(lport)); 442 443 req = fc_frame_payload_get(in_fp, sizeof(*req)); 444 if (!req) { ··· 498 size_t len; 499 u32 f_ctl; 500 501 + FC_LPORT_DBG(lport, "Received ADISC request while in state %s\n", 502 + fc_lport_state(lport)); 503 504 req = fc_frame_payload_get(in_fp, sizeof(*req)); 505 if (!req) { ··· 574 */ 575 void fc_linkup(struct fc_lport *lport) 576 { 577 + printk(KERN_INFO "libfc: Link up on port (%6x)\n", 578 + fc_host_port_id(lport->host)); 579 580 mutex_lock(&lport->lp_mutex); 581 if (!lport->link_up) { ··· 595 void fc_linkdown(struct fc_lport *lport) 596 { 597 mutex_lock(&lport->lp_mutex); 598 + printk(KERN_INFO "libfc: Link down on port (%6x)\n", 599 + fc_host_port_id(lport->host)); 600 601 if (lport->link_up) { 602 lport->link_up = 0; ··· 701 { 702 switch (event) { 703 case DISC_EV_SUCCESS: 704 + FC_LPORT_DBG(lport, "Discovery succeeded\n"); 705 break; 706 case DISC_EV_FAILED: 707 + printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n", 708 + fc_host_port_id(lport->host)); 709 mutex_lock(&lport->lp_mutex); 710 fc_lport_enter_reset(lport); 711 mutex_unlock(&lport->lp_mutex); ··· 726 */ 727 static void fc_lport_enter_ready(struct fc_lport *lport) 728 { 729 + FC_LPORT_DBG(lport, "Entered READY from state %s\n", 730 + fc_lport_state(lport)); 731 732 fc_lport_state_enter(lport, LPORT_ST_READY); 733 ··· 762 u32 local_fid; 763 u32 f_ctl; 764 765 + FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n", 766 + fc_lport_state(lport)); 767 768 fh = fc_frame_header_get(rx_fp); 769 remote_fid = ntoh24(fh->fh_s_id); ··· 772 goto out; 773 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); 774 if 
(remote_wwpn == lport->wwpn) { 775 + printk(KERN_WARNING "libfc: Received FLOGI from port " 776 + "with same WWPN %llx\n", remote_wwpn); 777 goto out; 778 } 779 + FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn); 780 781 /* 782 * XXX what is the right thing to do for FIDs? ··· 909 } 910 } 911 } else { 912 + FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n", 913 + fr_eof(fp)); 914 fc_frame_free(fp); 915 } 916 mutex_unlock(&lport->lp_mutex); ··· 947 */ 948 static void fc_lport_enter_reset(struct fc_lport *lport) 949 { 950 + FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", 951 + fc_lport_state(lport)); 952 953 fc_lport_state_enter(lport, LPORT_ST_RESET); 954 ··· 982 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) 983 { 984 unsigned long delay = 0; 985 + FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n", 986 + PTR_ERR(fp), fc_lport_state(lport), 987 + lport->retry_count); 988 989 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { 990 /* ··· 1040 1041 mutex_lock(&lport->lp_mutex); 1042 1043 + FC_LPORT_DBG(lport, "Received a RFT_ID response\n"); 1044 1045 if (lport->state != LPORT_ST_RFT_ID) { 1046 + FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state " 1047 + "%s\n", fc_lport_state(lport)); 1048 if (IS_ERR(fp)) 1049 goto err; 1050 goto out; ··· 1094 1095 mutex_lock(&lport->lp_mutex); 1096 1097 + FC_LPORT_DBG(lport, "Received a RPN_ID response\n"); 1098 1099 if (lport->state != LPORT_ST_RPN_ID) { 1100 + FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state " 1101 + "%s\n", fc_lport_state(lport)); 1102 if (IS_ERR(fp)) 1103 goto err; 1104 goto out; ··· 1146 1147 mutex_lock(&lport->lp_mutex); 1148 1149 + FC_LPORT_DBG(lport, "Received a SCR response\n"); 1150 1151 if (lport->state != LPORT_ST_SCR) { 1152 + FC_LPORT_DBG(lport, "Received a SCR response, but in state " 1153 + "%s\n", fc_lport_state(lport)); 1154 if (IS_ERR(fp)) 1155 goto err; 1156 goto out; ··· 1184 { 1185 struct fc_frame *fp; 1186 1187 + FC_LPORT_DBG(lport, "Entered SCR state from %s state\n", 1188 + fc_lport_state(lport)); 1189 1190 fc_lport_state_enter(lport, LPORT_ST_SCR); 1191 ··· 1213 struct fc_ns_fts *lps; 1214 int i; 1215 1216 + FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n", 1217 + fc_lport_state(lport)); 1218 1219 fc_lport_state_enter(lport, LPORT_ST_RFT_ID); 1220 ··· 1253 { 1254 struct fc_frame *fp; 1255 1256 + FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n", 1257 + fc_lport_state(lport)); 1258 1259 fc_lport_state_enter(lport, LPORT_ST_RPN_ID); 1260 ··· 1294 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; 1295 dp.lp = lport; 1296 1297 + FC_LPORT_DBG(lport, "Entered DNS state from %s state\n", 1298 + fc_lport_state(lport)); 1299 1300 fc_lport_state_enter(lport, LPORT_ST_DNS); 1301 ··· 1374 1375 mutex_lock(&lport->lp_mutex); 1376 1377 + FC_LPORT_DBG(lport, "Received a LOGO response\n"); 1378 1379 if (lport->state != LPORT_ST_LOGO) { 1380 + FC_LPORT_DBG(lport, "Received a LOGO response, but in state " 1381 + "%s\n", fc_lport_state(lport)); 1382 if (IS_ERR(fp)) 1383 goto err; 1384 goto out; ··· 1413 struct fc_frame *fp; 1414 struct fc_els_logo *logo; 1415 1416 + FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n", 1417 + fc_lport_state(lport)); 1418 1419 fc_lport_state_enter(lport, LPORT_ST_LOGO); 1420 ··· 1456 1457 mutex_lock(&lport->lp_mutex); 1458 1459 + FC_LPORT_DBG(lport, "Received a FLOGI response\n"); 1460 1461 if (lport->state != LPORT_ST_FLOGI) { 1462 + FC_LPORT_DBG(lport, "Received a FLOGI response, but in state " 1463 + "%s\n", 
fc_lport_state(lport)); 1464 if (IS_ERR(fp)) 1465 goto err; 1466 goto out; ··· 1475 did = ntoh24(fh->fh_d_id); 1476 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { 1477 1478 + printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n", 1479 + did); 1480 fc_host_port_id(lport->host) = did; 1481 1482 flp = fc_frame_payload_get(fp, sizeof(*flp)); ··· 1494 if (e_d_tov > lport->e_d_tov) 1495 lport->e_d_tov = e_d_tov; 1496 lport->r_a_tov = 2 * e_d_tov; 1497 + printk(KERN_INFO "libfc: Port (%6x) entered " 1498 + "point to point mode\n", did); 1499 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), 1500 get_unaligned_be64( 1501 &flp->fl_wwpn), ··· 1517 } 1518 } 1519 } else { 1520 + FC_LPORT_DBG(lport, "Bad FLOGI response\n"); 1521 } 1522 1523 out: ··· 1537 { 1538 struct fc_frame *fp; 1539 1540 + FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n", 1541 + fc_lport_state(lport)); 1542 1543 fc_lport_state_enter(lport, LPORT_ST_FLOGI); 1544
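The lport conversion also separates developer tracing from operator-visible events: state-machine chatter moves under FC_LPORT_DBG(), while link up/down, discovery failure, FID assignment and point-to-point mode are promoted to unconditional printk() with a "libfc:" prefix. FC_LPORT_DBG() carries the lport, so the port id drops out of nearly every format string; a sketch under the same assumptions as the blocks above:

#define FC_LPORT_LOGGING	0x01		/* assumed: lport state machine bit */

#define FC_LPORT_DBG(lport, fmt, args...)				\
	FC_CHECK_LOGGING(FC_LPORT_LOGGING,				\
			 printk(KERN_INFO "lport: %6x: " fmt,		\
				fc_host_port_id((lport)->host), ##args))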
+50 -70
drivers/scsi/libfc/fc_rport.c
··· 55 #include <scsi/libfc.h> 56 #include <scsi/fc_encode.h> 57 58 - static int fc_rport_debug; 59 - 60 - #define FC_DEBUG_RPORT(fmt...) \ 61 - do { \ 62 - if (fc_rport_debug) \ 63 - FC_DBG(fmt); \ 64 - } while (0) 65 - 66 struct workqueue_struct *rport_event_queue; 67 68 static void fc_rport_enter_plogi(struct fc_rport *); ··· 89 static void fc_rport_rogue_destroy(struct device *dev) 90 { 91 struct fc_rport *rport = dev_to_rport(dev); 92 - FC_DEBUG_RPORT("Destroying rogue rport (%6x)\n", rport->port_id); 93 kfree(rport); 94 } 95 ··· 255 256 fc_rport_state_enter(new_rport, RPORT_ST_READY); 257 } else { 258 - FC_DBG("Failed to create the rport for port " 259 - "(%6x).\n", ids.port_id); 260 event = RPORT_EV_FAILED; 261 } 262 if (rport->port_id != FC_FID_DIR_SERV) ··· 301 302 mutex_lock(&rdata->rp_mutex); 303 304 - FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id); 305 306 fc_rport_enter_plogi(rport); 307 ··· 321 int fc_rport_logoff(struct fc_rport *rport) 322 { 323 struct fc_rport_libfc_priv *rdata = rport->dd_data; 324 - struct fc_lport *lport = rdata->local_port; 325 326 mutex_lock(&rdata->rp_mutex); 327 328 - FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id); 329 330 if (rdata->rp_state == RPORT_ST_NONE) { 331 - FC_DEBUG_RPORT("(%6x): Port (%6x) in NONE state," 332 - " not removing", fc_host_port_id(lport->host), 333 - rport->port_id); 334 mutex_unlock(&rdata->rp_mutex); 335 goto out; 336 } ··· 368 369 fc_rport_state_enter(rport, RPORT_ST_READY); 370 371 - FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id); 372 373 rdata->event = RPORT_EV_CREATED; 374 queue_work(rport_event_queue, &rdata->event_work); ··· 425 { 426 struct fc_rport_libfc_priv *rdata = rport->dd_data; 427 428 - FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n", 429 - PTR_ERR(fp), fc_rport_state(rport), rdata->retries); 430 431 switch (rdata->rp_state) { 432 case RPORT_ST_PLOGI: ··· 468 return fc_rport_error(rport, fp); 469 470 if (rdata->retries < rdata->local_port->max_rport_retry_count) { 471 - FC_DEBUG_RPORT("Error %ld in state %s, retrying\n", 472 - PTR_ERR(fp), fc_rport_state(rport)); 473 rdata->retries++; 474 /* no additional delay on exchange timeouts */ 475 if (PTR_ERR(fp) == -FC_EX_TIMEOUT) ··· 506 507 mutex_lock(&rdata->rp_mutex); 508 509 - FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n", 510 - rport->port_id); 511 512 if (rdata->rp_state != RPORT_ST_PLOGI) { 513 - FC_DBG("Received a PLOGI response, but in state %s\n", 514 - fc_rport_state(rport)); 515 if (IS_ERR(fp)) 516 goto err; 517 goto out; ··· 571 struct fc_lport *lport = rdata->local_port; 572 struct fc_frame *fp; 573 574 - FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n", 575 - rport->port_id, fc_rport_state(rport)); 576 577 fc_rport_state_enter(rport, RPORT_ST_PLOGI); 578 ··· 616 617 mutex_lock(&rdata->rp_mutex); 618 619 - FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n", 620 - rport->port_id); 621 622 if (rdata->rp_state != RPORT_ST_PRLI) { 623 - FC_DBG("Received a PRLI response, but in state %s\n", 624 - fc_rport_state(rport)); 625 if (IS_ERR(fp)) 626 goto err; 627 goto out; ··· 650 fc_rport_enter_rtv(rport); 651 652 } else { 653 - FC_DBG("Bad ELS response\n"); 654 rdata->event = RPORT_EV_FAILED; 655 fc_rport_state_enter(rport, RPORT_ST_NONE); 656 queue_work(rport_event_queue, &rdata->event_work); ··· 682 683 mutex_lock(&rdata->rp_mutex); 684 685 - FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n", 686 - rport->port_id); 687 688 if (rdata->rp_state != RPORT_ST_LOGO) { 689 - 
FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n", 690 - fc_rport_state(rport)); 691 if (IS_ERR(fp)) 692 goto err; 693 goto out; ··· 701 if (op == ELS_LS_ACC) { 702 fc_rport_enter_rtv(rport); 703 } else { 704 - FC_DBG("Bad ELS response\n"); 705 rdata->event = RPORT_EV_LOGO; 706 fc_rport_state_enter(rport, RPORT_ST_NONE); 707 queue_work(rport_event_queue, &rdata->event_work); ··· 731 } *pp; 732 struct fc_frame *fp; 733 734 - FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n", 735 - rport->port_id, fc_rport_state(rport)); 736 737 fc_rport_state_enter(rport, RPORT_ST_PRLI); 738 ··· 770 771 mutex_lock(&rdata->rp_mutex); 772 773 - FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n", 774 - rport->port_id); 775 776 if (rdata->rp_state != RPORT_ST_RTV) { 777 - FC_DBG("Received a RTV response, but in state %s\n", 778 - fc_rport_state(rport)); 779 if (IS_ERR(fp)) 780 goto err; 781 goto out; ··· 829 struct fc_rport_libfc_priv *rdata = rport->dd_data; 830 struct fc_lport *lport = rdata->local_port; 831 832 - FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n", 833 - rport->port_id, fc_rport_state(rport)); 834 835 fc_rport_state_enter(rport, RPORT_ST_RTV); 836 ··· 860 struct fc_lport *lport = rdata->local_port; 861 struct fc_frame *fp; 862 863 - FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n", 864 - rport->port_id, fc_rport_state(rport)); 865 866 fc_rport_state_enter(rport, RPORT_ST_LOGO); 867 ··· 968 969 fh = fc_frame_header_get(fp); 970 971 - FC_DEBUG_RPORT("Received PLOGI request from port (%6x) " 972 - "while in state %s\n", ntoh24(fh->fh_s_id), 973 - fc_rport_state(rport)); 974 975 sid = ntoh24(fh->fh_s_id); 976 pl = fc_frame_payload_get(fp, sizeof(*pl)); 977 if (!pl) { 978 - FC_DBG("incoming PLOGI from %x too short\n", sid); 979 WARN_ON(1); 980 /* XXX TBD: send reject? 
*/ 981 fc_frame_free(fp); ··· 996 */ 997 switch (rdata->rp_state) { 998 case RPORT_ST_INIT: 999 - FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT " 1000 - "- reject\n", sid, (unsigned long long)wwpn); 1001 reject = ELS_RJT_UNSUP; 1002 break; 1003 case RPORT_ST_PLOGI: 1004 - FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n", 1005 - sid, rdata->rp_state); 1006 if (wwpn < lport->wwpn) 1007 reject = ELS_RJT_INPROG; 1008 break; 1009 case RPORT_ST_PRLI: 1010 case RPORT_ST_READY: 1011 - FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d " 1012 - "- ignored for now\n", sid, rdata->rp_state); 1013 /* XXX TBD - should reset */ 1014 break; 1015 case RPORT_ST_NONE: 1016 default: 1017 - FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected " 1018 - "state %d\n", sid, rdata->rp_state); 1019 fc_frame_free(fp); 1020 return; 1021 break; ··· 1099 1100 fh = fc_frame_header_get(rx_fp); 1101 1102 - FC_DEBUG_RPORT("Received PRLI request from port (%6x) " 1103 - "while in state %s\n", ntoh24(fh->fh_s_id), 1104 - fc_rport_state(rport)); 1105 1106 switch (rdata->rp_state) { 1107 case RPORT_ST_PRLI: ··· 1235 1236 fh = fc_frame_header_get(fp); 1237 1238 - FC_DEBUG_RPORT("Received PRLO request from port (%6x) " 1239 - "while in state %s\n", ntoh24(fh->fh_s_id), 1240 - fc_rport_state(rport)); 1241 1242 if (rdata->rp_state == RPORT_ST_NONE) { 1243 fc_frame_free(fp); ··· 1268 1269 fh = fc_frame_header_get(fp); 1270 1271 - FC_DEBUG_RPORT("Received LOGO request from port (%6x) " 1272 - "while in state %s\n", ntoh24(fh->fh_s_id), 1273 - fc_rport_state(rport)); 1274 1275 if (rdata->rp_state == RPORT_ST_NONE) { 1276 fc_frame_free(fp); ··· 1288 { 1289 flush_workqueue(rport_event_queue); 1290 } 1291 - 1292 1293 int fc_rport_init(struct fc_lport *lport) 1294 {
··· 55 #include <scsi/libfc.h> 56 #include <scsi/fc_encode.h> 57 58 struct workqueue_struct *rport_event_queue; 59 60 static void fc_rport_enter_plogi(struct fc_rport *); ··· 97 static void fc_rport_rogue_destroy(struct device *dev) 98 { 99 struct fc_rport *rport = dev_to_rport(dev); 100 + FC_RPORT_DBG(rport, "Destroying rogue rport\n"); 101 kfree(rport); 102 } 103 ··· 263 264 fc_rport_state_enter(new_rport, RPORT_ST_READY); 265 } else { 266 + printk(KERN_WARNING "libfc: Failed to allocate " 267 + " memory for rport (%6x)\n", ids.port_id); 268 event = RPORT_EV_FAILED; 269 } 270 if (rport->port_id != FC_FID_DIR_SERV) ··· 309 310 mutex_lock(&rdata->rp_mutex); 311 312 + FC_RPORT_DBG(rport, "Login to port\n"); 313 314 fc_rport_enter_plogi(rport); 315 ··· 329 int fc_rport_logoff(struct fc_rport *rport) 330 { 331 struct fc_rport_libfc_priv *rdata = rport->dd_data; 332 333 mutex_lock(&rdata->rp_mutex); 334 335 + FC_RPORT_DBG(rport, "Remove port\n"); 336 337 if (rdata->rp_state == RPORT_ST_NONE) { 338 + FC_RPORT_DBG(rport, "Port in NONE state, not removing\n"); 339 mutex_unlock(&rdata->rp_mutex); 340 goto out; 341 } ··· 379 380 fc_rport_state_enter(rport, RPORT_ST_READY); 381 382 + FC_RPORT_DBG(rport, "Port is Ready\n"); 383 384 rdata->event = RPORT_EV_CREATED; 385 queue_work(rport_event_queue, &rdata->event_work); ··· 436 { 437 struct fc_rport_libfc_priv *rdata = rport->dd_data; 438 439 + FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n", 440 + PTR_ERR(fp), fc_rport_state(rport), rdata->retries); 441 442 switch (rdata->rp_state) { 443 case RPORT_ST_PLOGI: ··· 479 return fc_rport_error(rport, fp); 480 481 if (rdata->retries < rdata->local_port->max_rport_retry_count) { 482 + FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n", 483 + PTR_ERR(fp), fc_rport_state(rport)); 484 rdata->retries++; 485 /* no additional delay on exchange timeouts */ 486 if (PTR_ERR(fp) == -FC_EX_TIMEOUT) ··· 517 518 mutex_lock(&rdata->rp_mutex); 519 520 + FC_RPORT_DBG(rport, "Received a PLOGI response\n"); 521 522 if (rdata->rp_state != RPORT_ST_PLOGI) { 523 + FC_RPORT_DBG(rport, "Received a PLOGI response, but in state " 524 + "%s\n", fc_rport_state(rport)); 525 if (IS_ERR(fp)) 526 goto err; 527 goto out; ··· 583 struct fc_lport *lport = rdata->local_port; 584 struct fc_frame *fp; 585 586 + FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n", 587 + fc_rport_state(rport)); 588 589 fc_rport_state_enter(rport, RPORT_ST_PLOGI); 590 ··· 628 629 mutex_lock(&rdata->rp_mutex); 630 631 + FC_RPORT_DBG(rport, "Received a PRLI response\n"); 632 633 if (rdata->rp_state != RPORT_ST_PRLI) { 634 + FC_RPORT_DBG(rport, "Received a PRLI response, but in state " 635 + "%s\n", fc_rport_state(rport)); 636 if (IS_ERR(fp)) 637 goto err; 638 goto out; ··· 663 fc_rport_enter_rtv(rport); 664 665 } else { 666 + FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n"); 667 rdata->event = RPORT_EV_FAILED; 668 fc_rport_state_enter(rport, RPORT_ST_NONE); 669 queue_work(rport_event_queue, &rdata->event_work); ··· 695 696 mutex_lock(&rdata->rp_mutex); 697 698 + FC_RPORT_DBG(rport, "Received a LOGO response\n"); 699 700 if (rdata->rp_state != RPORT_ST_LOGO) { 701 + FC_RPORT_DBG(rport, "Received a LOGO response, but in state " 702 + "%s\n", fc_rport_state(rport)); 703 if (IS_ERR(fp)) 704 goto err; 705 goto out; ··· 715 if (op == ELS_LS_ACC) { 716 fc_rport_enter_rtv(rport); 717 } else { 718 + FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n"); 719 rdata->event = RPORT_EV_LOGO; 720 fc_rport_state_enter(rport, RPORT_ST_NONE); 
721 queue_work(rport_event_queue, &rdata->event_work); ··· 745 } *pp; 746 struct fc_frame *fp; 747 748 + FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n", 749 + fc_rport_state(rport)); 750 751 fc_rport_state_enter(rport, RPORT_ST_PRLI); 752 ··· 784 785 mutex_lock(&rdata->rp_mutex); 786 787 + FC_RPORT_DBG(rport, "Received a RTV response\n"); 788 789 if (rdata->rp_state != RPORT_ST_RTV) { 790 + FC_RPORT_DBG(rport, "Received a RTV response, but in state " 791 + "%s\n", fc_rport_state(rport)); 792 if (IS_ERR(fp)) 793 goto err; 794 goto out; ··· 844 struct fc_rport_libfc_priv *rdata = rport->dd_data; 845 struct fc_lport *lport = rdata->local_port; 846 847 + FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n", 848 + fc_rport_state(rport)); 849 850 fc_rport_state_enter(rport, RPORT_ST_RTV); 851 ··· 875 struct fc_lport *lport = rdata->local_port; 876 struct fc_frame *fp; 877 878 + FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n", 879 + fc_rport_state(rport)); 880 881 fc_rport_state_enter(rport, RPORT_ST_LOGO); 882 ··· 983 984 fh = fc_frame_header_get(fp); 985 986 + FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n", 987 + fc_rport_state(rport)); 988 989 sid = ntoh24(fh->fh_s_id); 990 pl = fc_frame_payload_get(fp, sizeof(*pl)); 991 if (!pl) { 992 + FC_RPORT_DBG(rport, "Received PLOGI too short\n"); 993 WARN_ON(1); 994 /* XXX TBD: send reject? */ 995 fc_frame_free(fp); ··· 1012 */ 1013 switch (rdata->rp_state) { 1014 case RPORT_ST_INIT: 1015 + FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT " 1016 + "- reject\n", (unsigned long long)wwpn); 1017 reject = ELS_RJT_UNSUP; 1018 break; 1019 case RPORT_ST_PLOGI: 1020 + FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n", 1021 + rdata->rp_state); 1022 if (wwpn < lport->wwpn) 1023 reject = ELS_RJT_INPROG; 1024 break; 1025 case RPORT_ST_PRLI: 1026 case RPORT_ST_READY: 1027 + FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d " 1028 + "- ignored for now\n", rdata->rp_state); 1029 /* XXX TBD - should reset */ 1030 break; 1031 case RPORT_ST_NONE: 1032 default: 1033 + FC_RPORT_DBG(rport, "Received PLOGI in unexpected " 1034 + "state %d\n", rdata->rp_state); 1035 fc_frame_free(fp); 1036 return; 1037 break; ··· 1115 1116 fh = fc_frame_header_get(rx_fp); 1117 1118 + FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n", 1119 + fc_rport_state(rport)); 1120 1121 switch (rdata->rp_state) { 1122 case RPORT_ST_PRLI: ··· 1252 1253 fh = fc_frame_header_get(fp); 1254 1255 + FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n", 1256 + fc_rport_state(rport)); 1257 1258 if (rdata->rp_state == RPORT_ST_NONE) { 1259 fc_frame_free(fp); ··· 1286 1287 fh = fc_frame_header_get(fp); 1288 1289 + FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n", 1290 + fc_rport_state(rport)); 1291 1292 if (rdata->rp_state == RPORT_ST_NONE) { 1293 fc_frame_free(fp); ··· 1307 { 1308 flush_workqueue(rport_event_queue); 1309 } 1310 1311 int fc_rport_init(struct fc_lport *lport) 1312 {
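The rport conversion follows the same recipe: FC_RPORT_DBG() receives the rport and can derive the remote port id (and, through dd_data, the owning local port) itself, which is why strings like "Received a PLOGI response from port (%6x)" shrink to "Received a PLOGI response". A sketch, with the prefix format again assumed:

#define FC_RPORT_LOGGING	0x04		/* assumed: rport state machine bit */

#define FC_RPORT_DBG(rport, fmt, args...)			\
	FC_CHECK_LOGGING(FC_RPORT_LOGGING,			\
			 printk(KERN_INFO "rport %6x: " fmt,	\
				(rport)->port_id, ##args))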
+109 -52
drivers/scsi/libiscsi.c
··· 38 #include <scsi/scsi_transport_iscsi.h> 39 #include <scsi/libiscsi.h> 40 41 - static int iscsi_dbg_lib; 42 - module_param_named(debug_libiscsi, iscsi_dbg_lib, int, S_IRUGO | S_IWUSR); 43 - MODULE_PARM_DESC(debug_libiscsi, "Turn on debugging for libiscsi module. " 44 - "Set to 1 to turn on, and zero to turn off. Default " 45 - "is off."); 46 47 #define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \ 48 do { \ 49 - if (iscsi_dbg_lib) \ 50 iscsi_conn_printk(KERN_INFO, _conn, \ 51 "%s " dbg_fmt, \ 52 __func__, ##arg); \ ··· 69 70 #define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \ 71 do { \ 72 - if (iscsi_dbg_lib) \ 73 iscsi_session_printk(KERN_INFO, _session, \ 74 "%s " dbg_fmt, \ 75 __func__, ##arg); \ ··· 977 task = iscsi_itt_to_ctask(conn, hdr->itt); 978 if (!task) 979 return ISCSI_ERR_BAD_ITT; 980 break; 981 case ISCSI_OP_R2T: 982 /* ··· 1216 spin_unlock_bh(&conn->session->lock); 1217 rc = conn->session->tt->xmit_task(task); 1218 spin_lock_bh(&conn->session->lock); 1219 - __iscsi_put_task(task); 1220 - if (!rc) 1221 /* done with this task */ 1222 conn->task = NULL; 1223 return rc; 1224 } 1225 ··· 1387 task->state = ISCSI_TASK_PENDING; 1388 task->conn = conn; 1389 task->sc = sc; 1390 INIT_LIST_HEAD(&task->running); 1391 return task; 1392 } ··· 1584 spin_lock_bh(&session->lock); 1585 if (session->state == ISCSI_STATE_TERMINATE) { 1586 failed: 1587 - iscsi_session_printk(KERN_INFO, session, 1588 - "failing target reset: Could not log " 1589 - "back into target [age %d]\n", 1590 - session->age); 1591 spin_unlock_bh(&session->lock); 1592 mutex_unlock(&session->eh_mutex); 1593 return FAILED; ··· 1601 */ 1602 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1603 1604 - ISCSI_DBG_SESSION(session, "wait for relogin\n"); 1605 wait_event_interruptible(conn->ehwait, 1606 session->state == ISCSI_STATE_TERMINATE || 1607 session->state == ISCSI_STATE_LOGGED_IN || ··· 1611 1612 mutex_lock(&session->eh_mutex); 1613 spin_lock_bh(&session->lock); 1614 - if (session->state == ISCSI_STATE_LOGGED_IN) 1615 - iscsi_session_printk(KERN_INFO, session, 1616 - "target reset succeeded\n"); 1617 - else 1618 goto failed; 1619 spin_unlock_bh(&session->lock); 1620 mutex_unlock(&session->eh_mutex); ··· 1630 spin_lock(&session->lock); 1631 if (conn->tmf_state == TMF_QUEUED) { 1632 conn->tmf_state = TMF_TIMEDOUT; 1633 - ISCSI_DBG_SESSION(session, "tmf timedout\n"); 1634 /* unblock eh_abort() */ 1635 wake_up(&conn->ehwait); 1636 } ··· 1650 spin_unlock_bh(&session->lock); 1651 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1652 spin_lock_bh(&session->lock); 1653 - ISCSI_DBG_SESSION(session, "tmf exec failure\n"); 1654 return -EPERM; 1655 } 1656 conn->tmfcmd_pdus_cnt++; ··· 1658 conn->tmf_timer.function = iscsi_tmf_timedout; 1659 conn->tmf_timer.data = (unsigned long)conn; 1660 add_timer(&conn->tmf_timer); 1661 - ISCSI_DBG_SESSION(session, "tmf set timeout\n"); 1662 1663 spin_unlock_bh(&session->lock); 1664 mutex_unlock(&session->eh_mutex); ··· 1745 return 0; 1746 } 1747 1748 - static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) 1749 { 1750 struct iscsi_cls_session *cls_session; 1751 struct iscsi_session *session; 1752 struct iscsi_conn *conn; 1753 - enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED; 1754 1755 - cls_session = starget_to_session(scsi_target(scmd->device)); 1756 session = cls_session->dd_data; 1757 1758 - ISCSI_DBG_SESSION(session, "scsi cmd %p timedout\n", scmd); 1759 1760 spin_lock(&session->lock); 1761 if (session->state != ISCSI_STATE_LOGGED_IN) { ··· 1775 goto done; 1776 } 1777 
1778 if (!conn->recv_timeout && !conn->ping_timeout) 1779 goto done; 1780 /* ··· 1805 rc = BLK_EH_RESET_TIMER; 1806 goto done; 1807 } 1808 /* 1809 - * if we are about to check the transport then give the command 1810 - * more time 1811 */ 1812 - if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), 1813 - jiffies)) { 1814 rc = BLK_EH_RESET_TIMER; 1815 goto done; 1816 } 1817 1818 - /* if in the middle of checking the transport then give us more time */ 1819 - if (conn->ping_task) 1820 - rc = BLK_EH_RESET_TIMER; 1821 done: 1822 spin_unlock(&session->lock); 1823 - ISCSI_DBG_SESSION(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? 1824 - "timer reset" : "nh"); 1825 return rc; 1826 } 1827 ··· 1900 cls_session = starget_to_session(scsi_target(sc->device)); 1901 session = cls_session->dd_data; 1902 1903 - ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc); 1904 1905 mutex_lock(&session->eh_mutex); 1906 spin_lock_bh(&session->lock); ··· 1909 * got the command. 1910 */ 1911 if (!sc->SCp.ptr) { 1912 - ISCSI_DBG_SESSION(session, "sc never reached iscsi layer or " 1913 - "it completed.\n"); 1914 spin_unlock_bh(&session->lock); 1915 mutex_unlock(&session->eh_mutex); 1916 return SUCCESS; ··· 1924 sc->SCp.phase != session->age) { 1925 spin_unlock_bh(&session->lock); 1926 mutex_unlock(&session->eh_mutex); 1927 - ISCSI_DBG_SESSION(session, "failing abort due to dropped " 1928 "session.\n"); 1929 return FAILED; 1930 } ··· 1934 age = session->age; 1935 1936 task = (struct iscsi_task *)sc->SCp.ptr; 1937 - ISCSI_DBG_SESSION(session, "aborting [sc %p itt 0x%x]\n", 1938 - sc, task->itt); 1939 1940 /* task completed before time out */ 1941 if (!task->sc) { 1942 - ISCSI_DBG_SESSION(session, "sc completed while abort in " 1943 - "progress\n"); 1944 goto success; 1945 } 1946 ··· 1988 if (!sc->SCp.ptr) { 1989 conn->tmf_state = TMF_INITIAL; 1990 /* task completed before tmf abort response */ 1991 - ISCSI_DBG_SESSION(session, "sc completed while abort " 1992 - "in progress\n"); 1993 goto success; 1994 } 1995 /* fall through */ ··· 2001 success: 2002 spin_unlock_bh(&session->lock); 2003 success_unlocked: 2004 - ISCSI_DBG_SESSION(session, "abort success [sc %p itt 0x%x]\n", 2005 - sc, task->itt); 2006 mutex_unlock(&session->eh_mutex); 2007 return SUCCESS; 2008 2009 failed: 2010 spin_unlock_bh(&session->lock); 2011 failed_unlocked: 2012 - ISCSI_DBG_SESSION(session, "abort failed [sc %p itt 0x%x]\n", sc, 2013 - task ? task->itt : 0); 2014 mutex_unlock(&session->eh_mutex); 2015 return FAILED; 2016 } ··· 2037 cls_session = starget_to_session(scsi_target(sc->device)); 2038 session = cls_session->dd_data; 2039 2040 - ISCSI_DBG_SESSION(session, "LU Reset [sc %p lun %u]\n", 2041 - sc, sc->device->lun); 2042 2043 mutex_lock(&session->eh_mutex); 2044 spin_lock_bh(&session->lock); ··· 2091 unlock: 2092 spin_unlock_bh(&session->lock); 2093 done: 2094 - ISCSI_DBG_SESSION(session, "dev reset result = %s\n", 2095 - rc == SUCCESS ? "SUCCESS" : "FAILED"); 2096 mutex_unlock(&session->eh_mutex); 2097 return rc; 2098 }
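The libiscsi update that follows splits the single debug_libiscsi switch into three parameters (debug_libiscsi_conn, debug_libiscsi_session and debug_libiscsi_eh, the last backing a new ISCSI_DBG_EH macro) and reworks iscsi_eh_cmd_timed_out() around per-task progress tracking: each task now records last_xfer and last_timeout timestamps plus a have_checked_conn flag. The heart of the new timeout logic, lifted from the replacement hunk below:

/* A command that has moved data since its last timeout just gets
 * more time; the connection is only probed with a nop-out (at most
 * once per timeout, via have_checked_conn) after progress stalls. */
if (time_after_eq(task->last_xfer, task->last_timeout)) {
	task->have_checked_conn = false;
	rc = BLK_EH_RESET_TIMER;
	goto done;
}

This keeps a slow but live device from being escalated to the SCSI error handler merely because the transport check fired too eagerly.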
··· 38 #include <scsi/scsi_transport_iscsi.h> 39 #include <scsi/libiscsi.h> 40 41 + static int iscsi_dbg_lib_conn; 42 + module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int, 43 + S_IRUGO | S_IWUSR); 44 + MODULE_PARM_DESC(debug_libiscsi_conn, 45 + "Turn on debugging for connections in libiscsi module. " 46 + "Set to 1 to turn on, and zero to turn off. Default is off."); 47 + 48 + static int iscsi_dbg_lib_session; 49 + module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int, 50 + S_IRUGO | S_IWUSR); 51 + MODULE_PARM_DESC(debug_libiscsi_session, 52 + "Turn on debugging for sessions in libiscsi module. " 53 + "Set to 1 to turn on, and zero to turn off. Default is off."); 54 + 55 + static int iscsi_dbg_lib_eh; 56 + module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int, 57 + S_IRUGO | S_IWUSR); 58 + MODULE_PARM_DESC(debug_libiscsi_eh, 59 + "Turn on debugging for error handling in libiscsi module. " 60 + "Set to 1 to turn on, and zero to turn off. Default is off."); 61 62 #define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \ 63 do { \ 64 + if (iscsi_dbg_lib_conn) \ 65 iscsi_conn_printk(KERN_INFO, _conn, \ 66 "%s " dbg_fmt, \ 67 __func__, ##arg); \ ··· 54 55 #define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \ 56 do { \ 57 + if (iscsi_dbg_lib_session) \ 58 + iscsi_session_printk(KERN_INFO, _session, \ 59 + "%s " dbg_fmt, \ 60 + __func__, ##arg); \ 61 + } while (0); 62 + 63 + #define ISCSI_DBG_EH(_session, dbg_fmt, arg...) \ 64 + do { \ 65 + if (iscsi_dbg_lib_eh) \ 66 iscsi_session_printk(KERN_INFO, _session, \ 67 "%s " dbg_fmt, \ 68 __func__, ##arg); \ ··· 954 task = iscsi_itt_to_ctask(conn, hdr->itt); 955 if (!task) 956 return ISCSI_ERR_BAD_ITT; 957 + task->last_xfer = jiffies; 958 break; 959 case ISCSI_OP_R2T: 960 /* ··· 1192 spin_unlock_bh(&conn->session->lock); 1193 rc = conn->session->tt->xmit_task(task); 1194 spin_lock_bh(&conn->session->lock); 1195 + if (!rc) { 1196 /* done with this task */ 1197 + task->last_xfer = jiffies; 1198 conn->task = NULL; 1199 + } 1200 + __iscsi_put_task(task); 1201 return rc; 1202 } 1203 ··· 1361 task->state = ISCSI_TASK_PENDING; 1362 task->conn = conn; 1363 task->sc = sc; 1364 + task->have_checked_conn = false; 1365 + task->last_timeout = jiffies; 1366 + task->last_xfer = jiffies; 1367 INIT_LIST_HEAD(&task->running); 1368 return task; 1369 } ··· 1555 spin_lock_bh(&session->lock); 1556 if (session->state == ISCSI_STATE_TERMINATE) { 1557 failed: 1558 + ISCSI_DBG_EH(session, 1559 + "failing target reset: Could not log back into " 1560 + "target [age %d]\n", 1561 + session->age); 1562 spin_unlock_bh(&session->lock); 1563 mutex_unlock(&session->eh_mutex); 1564 return FAILED; ··· 1572 */ 1573 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1574 1575 + ISCSI_DBG_EH(session, "wait for relogin\n"); 1576 wait_event_interruptible(conn->ehwait, 1577 session->state == ISCSI_STATE_TERMINATE || 1578 session->state == ISCSI_STATE_LOGGED_IN || ··· 1582 1583 mutex_lock(&session->eh_mutex); 1584 spin_lock_bh(&session->lock); 1585 + if (session->state == ISCSI_STATE_LOGGED_IN) { 1586 + ISCSI_DBG_EH(session, 1587 + "target reset succeeded\n"); 1588 + } else 1589 goto failed; 1590 spin_unlock_bh(&session->lock); 1591 mutex_unlock(&session->eh_mutex); ··· 1601 spin_lock(&session->lock); 1602 if (conn->tmf_state == TMF_QUEUED) { 1603 conn->tmf_state = TMF_TIMEDOUT; 1604 + ISCSI_DBG_EH(session, "tmf timedout\n"); 1605 /* unblock eh_abort() */ 1606 wake_up(&conn->ehwait); 1607 } ··· 1621 spin_unlock_bh(&session->lock); 1622 iscsi_conn_failure(conn, 
ISCSI_ERR_CONN_FAILED);
1623 spin_lock_bh(&session->lock);
1624 + ISCSI_DBG_EH(session, "tmf exec failure\n");
1625 return -EPERM;
1626 }
1627 conn->tmfcmd_pdus_cnt++;
···
1629 conn->tmf_timer.function = iscsi_tmf_timedout;
1630 conn->tmf_timer.data = (unsigned long)conn;
1631 add_timer(&conn->tmf_timer);
1632 + ISCSI_DBG_EH(session, "tmf set timeout\n");
1633
1634 spin_unlock_bh(&session->lock);
1635 mutex_unlock(&session->eh_mutex);
···
1716 return 0;
1717 }
1718
1719 + static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1720 {
1721 + enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
1722 + struct iscsi_task *task = NULL;
1723 struct iscsi_cls_session *cls_session;
1724 struct iscsi_session *session;
1725 struct iscsi_conn *conn;
1726
1727 + cls_session = starget_to_session(scsi_target(sc->device));
1728 session = cls_session->dd_data;
1729
1730 + ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
1731
1732 spin_lock(&session->lock);
1733 if (session->state != ISCSI_STATE_LOGGED_IN) {
···
1745 goto done;
1746 }
1747
1748 + task = (struct iscsi_task *)sc->SCp.ptr;
1749 + if (!task)
1750 + goto done;
1751 + /*
1752 + * If we have sent (at least queued to the network layer) a pdu or
1753 + * recvd one for the task since the last timeout, ask for
1754 + * more time. If on the next timeout we have not made progress
1755 + * we can check if it is the task or connection when we send the
1756 + * nop as a ping.
1757 + */
1758 + if (time_after_eq(task->last_xfer, task->last_timeout)) {
1759 + ISCSI_DBG_EH(session, "Command making progress. Asking "
1760 + "scsi-ml for more time to complete. "
1761 + "Last data recv at %lu. Last timeout was at "
1762 + "%lu.\n", task->last_xfer, task->last_timeout);
1763 + task->have_checked_conn = false;
1764 + rc = BLK_EH_RESET_TIMER;
1765 + goto done;
1766 + }
1767 +
1768 if (!conn->recv_timeout && !conn->ping_timeout)
1769 goto done;
1770 /*
···
1755 rc = BLK_EH_RESET_TIMER;
1756 goto done;
1757 }
1758 +
1759 + /* Assumes nop timeout is shorter than scsi cmd timeout */
1760 + if (task->have_checked_conn)
1761 + goto done;
1762 +
1763 /*
1764 + * Checking the transport already or nop from a cmd timeout still
1765 + * running
1766 */
1767 + if (conn->ping_task) {
1768 + task->have_checked_conn = true;
1769 rc = BLK_EH_RESET_TIMER;
1770 goto done;
1771 }
1772
1773 + /* Make sure there is a transport check done */
1774 + iscsi_send_nopout(conn, NULL);
1775 + task->have_checked_conn = true;
1776 + rc = BLK_EH_RESET_TIMER;
1777 +
1778 done:
1779 + if (task)
1780 + task->last_timeout = jiffies;
1781 spin_unlock(&session->lock);
1782 + ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
1783 + "timer reset" : "nh");
1784 return rc;
1785 }
1786
···
1841 cls_session = starget_to_session(scsi_target(sc->device));
1842 session = cls_session->dd_data;
1843
1844 + ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
1845
1846 mutex_lock(&session->eh_mutex);
1847 spin_lock_bh(&session->lock);
···
1850 * got the command.
1851 */ 1852 if (!sc->SCp.ptr) { 1853 + ISCSI_DBG_EH(session, "sc never reached iscsi layer or " 1854 + "it completed.\n"); 1855 spin_unlock_bh(&session->lock); 1856 mutex_unlock(&session->eh_mutex); 1857 return SUCCESS; ··· 1865 sc->SCp.phase != session->age) { 1866 spin_unlock_bh(&session->lock); 1867 mutex_unlock(&session->eh_mutex); 1868 + ISCSI_DBG_EH(session, "failing abort due to dropped " 1869 "session.\n"); 1870 return FAILED; 1871 } ··· 1875 age = session->age; 1876 1877 task = (struct iscsi_task *)sc->SCp.ptr; 1878 + ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", 1879 + sc, task->itt); 1880 1881 /* task completed before time out */ 1882 if (!task->sc) { 1883 + ISCSI_DBG_EH(session, "sc completed while abort in progress\n"); 1884 goto success; 1885 } 1886 ··· 1930 if (!sc->SCp.ptr) { 1931 conn->tmf_state = TMF_INITIAL; 1932 /* task completed before tmf abort response */ 1933 + ISCSI_DBG_EH(session, "sc completed while abort in " 1934 + "progress\n"); 1935 goto success; 1936 } 1937 /* fall through */ ··· 1943 success: 1944 spin_unlock_bh(&session->lock); 1945 success_unlocked: 1946 + ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n", 1947 + sc, task->itt); 1948 mutex_unlock(&session->eh_mutex); 1949 return SUCCESS; 1950 1951 failed: 1952 spin_unlock_bh(&session->lock); 1953 failed_unlocked: 1954 + ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc, 1955 + task ? task->itt : 0); 1956 mutex_unlock(&session->eh_mutex); 1957 return FAILED; 1958 } ··· 1979 cls_session = starget_to_session(scsi_target(sc->device)); 1980 session = cls_session->dd_data; 1981 1982 + ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun); 1983 1984 mutex_lock(&session->eh_mutex); 1985 spin_lock_bh(&session->lock); ··· 2034 unlock: 2035 spin_unlock_bh(&session->lock); 2036 done: 2037 + ISCSI_DBG_EH(session, "dev reset result = %s\n", 2038 + rc == SUCCESS ? "SUCCESS" : "FAILED"); 2039 mutex_unlock(&session->eh_mutex); 2040 return rc; 2041 }
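The behavioural core of the libiscsi change above is the per-task progress check in iscsi_eh_cmd_timed_out(): a command that has moved data since its previous timeout simply gets its timer reset, while a stalled command triggers at most one nopout probe of the connection before scsi-ml error handling is allowed to run. Below is a minimal userspace model of that decision flow; demo_task, cmd_timed_out and the plain >= comparison (standing in for the wraparound-safe time_after_eq()) are illustrative inventions, not kernel code.

/* Compile with: cc -std=c99 -o demo demo.c */
#include <stdbool.h>
#include <stdio.h>

struct demo_task {
    unsigned long last_xfer;    /* "jiffies" of last data movement */
    unsigned long last_timeout; /* "jiffies" of the previous timeout */
    bool have_checked_conn;     /* nopout already sent for this cmd */
};

enum eh_ret { EH_NOT_HANDLED, EH_RESET_TIMER };

static enum eh_ret cmd_timed_out(struct demo_task *t, unsigned long now)
{
    enum eh_ret rc = EH_NOT_HANDLED;

    if (t->last_xfer >= t->last_timeout) {
        /* progress since the last check: just ask for more time */
        t->have_checked_conn = false;
        rc = EH_RESET_TIMER;
    } else if (!t->have_checked_conn) {
        /* stalled: probe the transport once with a nopout */
        printf("sending nopout to test the connection\n");
        t->have_checked_conn = true;
        rc = EH_RESET_TIMER;
    }
    /* else: still stalled after a probe -> hand over to scsi-ml */

    t->last_timeout = now;
    return rc;
}

int main(void)
{
    struct demo_task t = { .last_xfer = 100, .last_timeout = 50 };

    printf("%d\n", cmd_timed_out(&t, 200)); /* progress    -> 1 */
    printf("%d\n", cmd_timed_out(&t, 300)); /* stalled     -> 1 */
    printf("%d\n", cmd_timed_out(&t, 400)); /* still stuck -> 0 */
    return 0;
}

The libiscsi_tcp.c hunk that follows is the other half of the scheme: it stamps task->last_xfer whenever a data-in or R2T PDU is processed, so the check above sees real wire activity.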
+4 -2
drivers/scsi/libiscsi_tcp.c
··· 686 "offset=%d, datalen=%d)\n", 687 tcp_task->data_offset, 688 tcp_conn->in.datalen); 689 rc = iscsi_segment_seek_sg(&tcp_conn->in.segment, 690 sdb->table.sgl, 691 sdb->table.nents, ··· 714 rc = ISCSI_ERR_BAD_ITT; 715 else if (ahslen) 716 rc = ISCSI_ERR_AHSLEN; 717 - else if (task->sc->sc_data_direction == DMA_TO_DEVICE) 718 rc = iscsi_tcp_r2t_rsp(conn, task); 719 - else 720 rc = ISCSI_ERR_PROTO; 721 spin_unlock(&conn->session->lock); 722 break;
··· 686 "offset=%d, datalen=%d)\n", 687 tcp_task->data_offset, 688 tcp_conn->in.datalen); 689 + task->last_xfer = jiffies; 690 rc = iscsi_segment_seek_sg(&tcp_conn->in.segment, 691 sdb->table.sgl, 692 sdb->table.nents, ··· 713 rc = ISCSI_ERR_BAD_ITT; 714 else if (ahslen) 715 rc = ISCSI_ERR_AHSLEN; 716 + else if (task->sc->sc_data_direction == DMA_TO_DEVICE) { 717 + task->last_xfer = jiffies; 718 rc = iscsi_tcp_r2t_rsp(conn, task); 719 + } else 720 rc = ISCSI_ERR_PROTO; 721 spin_unlock(&conn->session->lock); 722 break;
+1 -1
drivers/scsi/qla2xxx/qla_dbg.c
··· 216 217 static int 218 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram, 219 - uint16_t ram_words, void **nxt) 220 { 221 int rval; 222 uint32_t cnt, stat, timer, words, idx;
··· 216 217 static int 218 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram, 219 + uint32_t ram_words, void **nxt) 220 { 221 int rval; 222 uint32_t cnt, stat, timer, words, idx;
+1 -1
drivers/scsi/qla2xxx/qla_init.c
··· 2301 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; 2302 char *link_speed; 2303 int rval; 2304 - uint16_t mb[6]; 2305 struct qla_hw_data *ha = vha->hw; 2306 2307 if (!IS_IIDMA_CAPABLE(ha))
··· 2301 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; 2302 char *link_speed; 2303 int rval; 2304 + uint16_t mb[4]; 2305 struct qla_hw_data *ha = vha->hw; 2306 2307 if (!IS_IIDMA_CAPABLE(ha))
+17 -11
drivers/scsi/qla2xxx/qla_mbx.c
··· 1267 1268 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1269 mcp->out_mb = MBX_0; 1270 - mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1271 mcp->tov = MBX_TOV_SECONDS; 1272 mcp->flags = 0; 1273 rval = qla2x00_mailbox_command(vha, mcp); 1274 1275 /* Return firmware states. */ 1276 states[0] = mcp->mb[1]; 1277 - states[1] = mcp->mb[2]; 1278 - states[2] = mcp->mb[3]; 1279 - states[3] = mcp->mb[4]; 1280 - states[4] = mcp->mb[5]; 1281 1282 if (rval != QLA_SUCCESS) { 1283 /*EMPTY*/ ··· 2702 mcp->mb[0] = MBC_PORT_PARAMS; 2703 mcp->mb[1] = loop_id; 2704 mcp->mb[2] = BIT_0; 2705 - mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); 2706 - mcp->mb[4] = mcp->mb[5] = 0; 2707 - mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2708 - mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 2709 mcp->tov = MBX_TOV_SECONDS; 2710 mcp->flags = 0; 2711 rval = qla2x00_mailbox_command(vha, mcp); ··· 2718 mb[0] = mcp->mb[0]; 2719 mb[1] = mcp->mb[1]; 2720 mb[3] = mcp->mb[3]; 2721 - mb[4] = mcp->mb[4]; 2722 - mb[5] = mcp->mb[5]; 2723 } 2724 2725 if (rval != QLA_SUCCESS) {
··· 1267 1268 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1269 mcp->out_mb = MBX_0; 1270 + if (IS_FWI2_CAPABLE(vha->hw)) 1271 + mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1272 + else 1273 + mcp->in_mb = MBX_1|MBX_0; 1274 mcp->tov = MBX_TOV_SECONDS; 1275 mcp->flags = 0; 1276 rval = qla2x00_mailbox_command(vha, mcp); 1277 1278 /* Return firmware states. */ 1279 states[0] = mcp->mb[1]; 1280 + if (IS_FWI2_CAPABLE(vha->hw)) { 1281 + states[1] = mcp->mb[2]; 1282 + states[2] = mcp->mb[3]; 1283 + states[3] = mcp->mb[4]; 1284 + states[4] = mcp->mb[5]; 1285 + } 1286 1287 if (rval != QLA_SUCCESS) { 1288 /*EMPTY*/ ··· 2697 mcp->mb[0] = MBC_PORT_PARAMS; 2698 mcp->mb[1] = loop_id; 2699 mcp->mb[2] = BIT_0; 2700 + if (IS_QLA81XX(vha->hw)) 2701 + mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); 2702 + else 2703 + mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); 2704 + mcp->mb[9] = vha->vp_idx; 2705 + mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 2706 + mcp->in_mb = MBX_3|MBX_1|MBX_0; 2707 mcp->tov = MBX_TOV_SECONDS; 2708 mcp->flags = 0; 2709 rval = qla2x00_mailbox_command(vha, mcp); ··· 2710 mb[0] = mcp->mb[0]; 2711 mb[1] = mcp->mb[1]; 2712 mb[3] = mcp->mb[3]; 2713 } 2714 2715 if (rval != QLA_SUCCESS) {
+1 -1
drivers/scsi/qla2xxx/qla_os.c
··· 1663 /* queue 0 uses two msix vectors */ 1664 if (ql2xmultique_tag) { 1665 cpus = num_online_cpus(); 1666 - ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ? 1667 (cpus + 1) : (ha->msix_count - 1); 1668 ha->max_req_queues = 2; 1669 } else if (ql2xmaxqueues > 1) {
··· 1663 /* queue 0 uses two msix vectors */ 1664 if (ql2xmultique_tag) { 1665 cpus = num_online_cpus(); 1666 + ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ? 1667 (cpus + 1) : (ha->msix_count - 1); 1668 ha->max_req_queues = 2; 1669 } else if (ql2xmaxqueues > 1) {
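The qla_os.c fix above is worth spelling out, since the broken and fixed expressions differ by a single operator: the old code used the difference "msix_count - 1 - cpus" as a truth value, which is false only when the two sides are exactly equal, so a host with fewer MSI-X vectors than CPUs still took the "cpus + 1" branch. A standalone illustration with invented numbers:

#include <stdio.h>

int main(void)
{
    int msix_count = 4, cpus = 8;   /* fewer vectors than CPUs */

    /* old: the difference is nonzero, so this wrongly yields cpus + 1 = 9 */
    int broken = (msix_count - 1 - cpus) ? (cpus + 1) : (msix_count - 1);

    /* fixed: 3 > 8 is false, so this correctly yields msix_count - 1 = 3 */
    int fixed = (msix_count - 1 > cpus) ? (cpus + 1) : (msix_count - 1);

    printf("broken=%d fixed=%d\n", broken, fixed);  /* broken=9 fixed=3 */
    return 0;
}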
+1 -1
drivers/scsi/qla2xxx/qla_version.h
··· 7 /* 8 * Driver version 9 */ 10 - #define QLA2XXX_VERSION "8.03.01-k3" 11 12 #define QLA_DRIVER_MAJOR_VER 8 13 #define QLA_DRIVER_MINOR_VER 3
··· 7 /* 8 * Driver version 9 */ 10 + #define QLA2XXX_VERSION "8.03.01-k4" 11 12 #define QLA_DRIVER_MAJOR_VER 8 13 #define QLA_DRIVER_MINOR_VER 3
+29 -1
drivers/scsi/scsi_debug.c
··· 101 #define DEF_DIF 0 102 #define DEF_GUARD 0 103 #define DEF_ATO 1 104 105 /* bit mask values for scsi_debug_opts */ 106 #define SCSI_DEBUG_OPT_NOISE 1 ··· 158 static int scsi_debug_dif = DEF_DIF; 159 static int scsi_debug_guard = DEF_GUARD; 160 static int scsi_debug_ato = DEF_ATO; 161 162 static int scsi_debug_cmnd_count = 0; 163 ··· 661 662 static int inquiry_evpd_b0(unsigned char * arr) 663 { 664 memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); 665 if (sdebug_store_sectors > 0x400) { 666 arr[4] = (sdebug_store_sectors >> 24) & 0xff; 667 arr[5] = (sdebug_store_sectors >> 16) & 0xff; ··· 954 arr[9] = (scsi_debug_sector_size >> 16) & 0xff; 955 arr[10] = (scsi_debug_sector_size >> 8) & 0xff; 956 arr[11] = scsi_debug_sector_size & 0xff; 957 958 if (scsi_debug_dif) { 959 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */ ··· 2392 module_param_named(dif, scsi_debug_dif, int, S_IRUGO); 2393 module_param_named(guard, scsi_debug_guard, int, S_IRUGO); 2394 module_param_named(ato, scsi_debug_ato, int, S_IRUGO); 2395 2396 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 2397 MODULE_DESCRIPTION("SCSI debug adapter driver"); ··· 2415 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); 2416 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); 2417 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); 2418 - MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)"); 2419 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); 2420 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); 2421 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); ··· 2887 2888 if (scsi_debug_ato > 1) { 2889 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n"); 2890 return -EINVAL; 2891 } 2892
··· 101 #define DEF_DIF 0 102 #define DEF_GUARD 0 103 #define DEF_ATO 1 104 + #define DEF_PHYSBLK_EXP 0 105 + #define DEF_LOWEST_ALIGNED 0 106 107 /* bit mask values for scsi_debug_opts */ 108 #define SCSI_DEBUG_OPT_NOISE 1 ··· 156 static int scsi_debug_dif = DEF_DIF; 157 static int scsi_debug_guard = DEF_GUARD; 158 static int scsi_debug_ato = DEF_ATO; 159 + static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; 160 + static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; 161 162 static int scsi_debug_cmnd_count = 0; 163 ··· 657 658 static int inquiry_evpd_b0(unsigned char * arr) 659 { 660 + unsigned int gran; 661 + 662 memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); 663 + gran = 1 << scsi_debug_physblk_exp; 664 + arr[2] = (gran >> 8) & 0xff; 665 + arr[3] = gran & 0xff; 666 if (sdebug_store_sectors > 0x400) { 667 arr[4] = (sdebug_store_sectors >> 24) & 0xff; 668 arr[5] = (sdebug_store_sectors >> 16) & 0xff; ··· 945 arr[9] = (scsi_debug_sector_size >> 16) & 0xff; 946 arr[10] = (scsi_debug_sector_size >> 8) & 0xff; 947 arr[11] = scsi_debug_sector_size & 0xff; 948 + arr[13] = scsi_debug_physblk_exp & 0xf; 949 + arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; 950 + arr[15] = scsi_debug_lowest_aligned & 0xff; 951 952 if (scsi_debug_dif) { 953 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */ ··· 2380 module_param_named(dif, scsi_debug_dif, int, S_IRUGO); 2381 module_param_named(guard, scsi_debug_guard, int, S_IRUGO); 2382 module_param_named(ato, scsi_debug_ato, int, S_IRUGO); 2383 + module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); 2384 + module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); 2385 2386 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 2387 MODULE_DESCRIPTION("SCSI debug adapter driver"); ··· 2401 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); 2402 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); 2403 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); 2404 + MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); 2405 + MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); 2406 + MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); 2407 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); 2408 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); 2409 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); ··· 2871 2872 if (scsi_debug_ato > 1) { 2873 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n"); 2874 + return -EINVAL; 2875 + } 2876 + 2877 + if (scsi_debug_physblk_exp > 15) { 2878 + printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n", 2879 + scsi_debug_physblk_exp); 2880 + return -EINVAL; 2881 + } 2882 + 2883 + if (scsi_debug_lowest_aligned > 0x3fff) { 2884 + printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n", 2885 + scsi_debug_lowest_aligned); 2886 return -EINVAL; 2887 } 2888
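The scsi_debug hunks above and the sd.c hunks further down are the two ends of one wire format: in the READ CAPACITY(16) response, the low nibble of byte 13 carries the logical-blocks-per-physical-block exponent and bytes 14-15 (top two bits of byte 14 masked off) carry the lowest aligned LBA. A small userspace round trip of that encoding, using made-up values:

#include <stdio.h>

int main(void)
{
    unsigned char buf[16] = { 0 };
    unsigned int physblk_exp = 3;       /* 2^3 = 8 logical per physical */
    unsigned int lowest_aligned = 7;    /* in logical blocks */
    unsigned int sector_size = 512;

    /* encode, as the scsi_debug response construction above does */
    buf[13] = physblk_exp & 0xf;
    buf[14] = (lowest_aligned >> 8) & 0x3f;
    buf[15] = lowest_aligned & 0xff;

    /* decode, as the sd.c read-capacity hunk later in this series does */
    unsigned int hw_sector = (1u << (buf[13] & 0xf)) * sector_size;
    unsigned int alignment =
        ((buf[14] & 0x3f) << 8 | buf[15]) * sector_size;

    printf("physical block size: %u bytes\n", hw_sector);   /* 4096 */
    printf("alignment offset: %u bytes\n", alignment);      /* 3584 */
    return 0;
}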
+225 -22
drivers/scsi/scsi_devinfo.c
··· 24 unsigned compatible; /* for use with scsi_static_device_list entries */ 25 }; 26 27 28 static const char spaces[] = " "; /* 16 of them */ 29 static unsigned scsi_default_dev_flags; ··· 254 { NULL, NULL, NULL, 0 }, 255 }; 256 257 /* 258 * scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into 259 * devinfo vendor and model strings. ··· 319 static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, 320 char *strflags, int flags) 321 { 322 struct scsi_dev_info_list *devinfo; 323 324 devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL); 325 if (!devinfo) { ··· 371 devinfo->compatible = compatible; 372 373 if (compatible) 374 - list_add_tail(&devinfo->dev_info_list, &scsi_dev_info_list); 375 else 376 - list_add(&devinfo->dev_info_list, &scsi_dev_info_list); 377 378 return 0; 379 } 380 381 /** 382 * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. ··· 439 * @model: model name 440 * 441 * Description: 442 - * Search the scsi_dev_info_list for an entry matching @vendor and 443 - * @model, if found, return the matching flags value, else return 444 - * the host or global default settings. Called during scan time. 445 **/ 446 int scsi_get_device_flags(struct scsi_device *sdev, 447 const unsigned char *vendor, 448 const unsigned char *model) 449 { 450 struct scsi_dev_info_list *devinfo; 451 - unsigned int bflags; 452 453 - bflags = sdev->sdev_bflags; 454 - if (!bflags) 455 - bflags = scsi_default_dev_flags; 456 457 - list_for_each_entry(devinfo, &scsi_dev_info_list, dev_info_list) { 458 if (devinfo->compatible) { 459 /* 460 * Behave like the older version of get_device_flags. ··· 530 return devinfo->flags; 531 } 532 } 533 - return bflags; 534 } 535 536 #ifdef CONFIG_SCSI_PROC_FS 537 static int devinfo_seq_show(struct seq_file *m, void *v) 538 { 539 struct scsi_dev_info_list *devinfo = 540 - list_entry(v, struct scsi_dev_info_list, dev_info_list); 541 542 seq_printf(m, "'%.8s' '%.16s' 0x%x\n", 543 - devinfo->vendor, devinfo->model, devinfo->flags); 544 return 0; 545 } 546 547 - static void * devinfo_seq_start(struct seq_file *m, loff_t *pos) 548 { 549 - return seq_list_start(&scsi_dev_info_list, *pos); 550 } 551 552 - static void * devinfo_seq_next(struct seq_file *m, void *v, loff_t *pos) 553 { 554 - return seq_list_next(v, &scsi_dev_info_list, pos); 555 } 556 557 static void devinfo_seq_stop(struct seq_file *m, void *v) 558 { 559 } 560 561 static const struct seq_operations scsi_devinfo_seq_ops = { ··· 689 **/ 690 void scsi_exit_devinfo(void) 691 { 692 - struct list_head *lh, *lh_next; 693 - struct scsi_dev_info_list *devinfo; 694 - 695 #ifdef CONFIG_SCSI_PROC_FS 696 remove_proc_entry("scsi/device_info", NULL); 697 #endif 698 699 - list_for_each_safe(lh, lh_next, &scsi_dev_info_list) { 700 devinfo = list_entry(lh, struct scsi_dev_info_list, 701 dev_info_list); 702 kfree(devinfo); 703 } 704 } 705 706 /** 707 * scsi_init_devinfo - set up the dynamic device list. ··· 776 #endif 777 int error, i; 778 779 - error = scsi_dev_info_list_add_str(scsi_dev_flags); 780 if (error) 781 return error; 782 783 for (i = 0; scsi_static_device_list[i].vendor; i++) { 784 error = scsi_dev_info_list_add(1 /* compatibile */,
··· 24 unsigned compatible; /* for use with scsi_static_device_list entries */ 25 }; 26 27 + struct scsi_dev_info_list_table { 28 + struct list_head node; /* our node for being on the master list */ 29 + struct list_head scsi_dev_info_list; /* head of dev info list */ 30 + const char *name; /* name of list for /proc (NULL for global) */ 31 + int key; /* unique numeric identifier */ 32 + }; 33 + 34 35 static const char spaces[] = " "; /* 16 of them */ 36 static unsigned scsi_default_dev_flags; ··· 247 { NULL, NULL, NULL, 0 }, 248 }; 249 250 + static struct scsi_dev_info_list_table *scsi_devinfo_lookup_by_key(int key) 251 + { 252 + struct scsi_dev_info_list_table *devinfo_table; 253 + int found = 0; 254 + 255 + list_for_each_entry(devinfo_table, &scsi_dev_info_list, node) 256 + if (devinfo_table->key == key) { 257 + found = 1; 258 + break; 259 + } 260 + if (!found) 261 + return ERR_PTR(-EINVAL); 262 + 263 + return devinfo_table; 264 + } 265 + 266 /* 267 * scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into 268 * devinfo vendor and model strings. ··· 296 static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, 297 char *strflags, int flags) 298 { 299 + return scsi_dev_info_list_add_keyed(compatible, vendor, model, 300 + strflags, flags, 301 + SCSI_DEVINFO_GLOBAL); 302 + } 303 + 304 + /** 305 + * scsi_dev_info_list_add_keyed - add one dev_info list entry. 306 + * @compatible: if true, null terminate short strings. Otherwise space pad. 307 + * @vendor: vendor string 308 + * @model: model (product) string 309 + * @strflags: integer string 310 + * @flags: if strflags NULL, use this flag value 311 + * @key: specify list to use 312 + * 313 + * Description: 314 + * Create and add one dev_info entry for @vendor, @model, 315 + * @strflags or @flag in list specified by @key. If @compatible, 316 + * add to the tail of the list, do not space pad, and set 317 + * devinfo->compatible. The scsi_static_device_list entries are 318 + * added with @compatible 1 and @clfags NULL. 319 + * 320 + * Returns: 0 OK, -error on failure. 321 + **/ 322 + int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, 323 + char *strflags, int flags, int key) 324 + { 325 struct scsi_dev_info_list *devinfo; 326 + struct scsi_dev_info_list_table *devinfo_table = 327 + scsi_devinfo_lookup_by_key(key); 328 + 329 + if (IS_ERR(devinfo_table)) 330 + return PTR_ERR(devinfo_table); 331 332 devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL); 333 if (!devinfo) { ··· 317 devinfo->compatible = compatible; 318 319 if (compatible) 320 + list_add_tail(&devinfo->dev_info_list, 321 + &devinfo_table->scsi_dev_info_list); 322 else 323 + list_add(&devinfo->dev_info_list, 324 + &devinfo_table->scsi_dev_info_list); 325 326 return 0; 327 } 328 + EXPORT_SYMBOL(scsi_dev_info_list_add_keyed); 329 330 /** 331 * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. ··· 382 * @model: model name 383 * 384 * Description: 385 + * Search the global scsi_dev_info_list (specified by list zero) 386 + * for an entry matching @vendor and @model, if found, return the 387 + * matching flags value, else return the host or global default 388 + * settings. Called during scan time. 
389 **/ 390 int scsi_get_device_flags(struct scsi_device *sdev, 391 const unsigned char *vendor, 392 const unsigned char *model) 393 { 394 + return scsi_get_device_flags_keyed(sdev, vendor, model, 395 + SCSI_DEVINFO_GLOBAL); 396 + } 397 + 398 + 399 + /** 400 + * get_device_flags_keyed - get device specific flags from the dynamic device list. 401 + * @sdev: &scsi_device to get flags for 402 + * @vendor: vendor name 403 + * @model: model name 404 + * @key: list to look up 405 + * 406 + * Description: 407 + * Search the scsi_dev_info_list specified by @key for an entry 408 + * matching @vendor and @model, if found, return the matching 409 + * flags value, else return the host or global default settings. 410 + * Called during scan time. 411 + **/ 412 + int scsi_get_device_flags_keyed(struct scsi_device *sdev, 413 + const unsigned char *vendor, 414 + const unsigned char *model, 415 + int key) 416 + { 417 struct scsi_dev_info_list *devinfo; 418 + struct scsi_dev_info_list_table *devinfo_table; 419 420 + devinfo_table = scsi_devinfo_lookup_by_key(key); 421 422 + if (IS_ERR(devinfo_table)) 423 + return PTR_ERR(devinfo_table); 424 + 425 + list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list, 426 + dev_info_list) { 427 if (devinfo->compatible) { 428 /* 429 * Behave like the older version of get_device_flags. ··· 447 return devinfo->flags; 448 } 449 } 450 + /* nothing found, return nothing */ 451 + if (key != SCSI_DEVINFO_GLOBAL) 452 + return 0; 453 + 454 + /* except for the global list, where we have an exception */ 455 + if (sdev->sdev_bflags) 456 + return sdev->sdev_bflags; 457 + 458 + return scsi_default_dev_flags; 459 } 460 + EXPORT_SYMBOL(scsi_get_device_flags_keyed); 461 462 #ifdef CONFIG_SCSI_PROC_FS 463 + struct double_list { 464 + struct list_head *top; 465 + struct list_head *bottom; 466 + }; 467 + 468 static int devinfo_seq_show(struct seq_file *m, void *v) 469 { 470 + struct double_list *dl = v; 471 + struct scsi_dev_info_list_table *devinfo_table = 472 + list_entry(dl->top, struct scsi_dev_info_list_table, node); 473 struct scsi_dev_info_list *devinfo = 474 + list_entry(dl->bottom, struct scsi_dev_info_list, 475 + dev_info_list); 476 + 477 + if (devinfo_table->scsi_dev_info_list.next == dl->bottom && 478 + devinfo_table->name) 479 + seq_printf(m, "[%s]:\n", devinfo_table->name); 480 481 seq_printf(m, "'%.8s' '%.16s' 0x%x\n", 482 + devinfo->vendor, devinfo->model, devinfo->flags); 483 return 0; 484 } 485 486 + static void *devinfo_seq_start(struct seq_file *m, loff_t *ppos) 487 { 488 + struct double_list *dl = kmalloc(sizeof(*dl), GFP_KERNEL); 489 + loff_t pos = *ppos; 490 + 491 + if (!dl) 492 + return NULL; 493 + 494 + list_for_each(dl->top, &scsi_dev_info_list) { 495 + struct scsi_dev_info_list_table *devinfo_table = 496 + list_entry(dl->top, struct scsi_dev_info_list_table, 497 + node); 498 + list_for_each(dl->bottom, &devinfo_table->scsi_dev_info_list) 499 + if (pos-- == 0) 500 + return dl; 501 + } 502 + 503 + kfree(dl); 504 + return NULL; 505 } 506 507 + static void *devinfo_seq_next(struct seq_file *m, void *v, loff_t *ppos) 508 { 509 + struct double_list *dl = v; 510 + struct scsi_dev_info_list_table *devinfo_table = 511 + list_entry(dl->top, struct scsi_dev_info_list_table, node); 512 + 513 + ++*ppos; 514 + dl->bottom = dl->bottom->next; 515 + while (&devinfo_table->scsi_dev_info_list == dl->bottom) { 516 + dl->top = dl->top->next; 517 + if (dl->top == &scsi_dev_info_list) { 518 + kfree(dl); 519 + return NULL; 520 + } 521 + devinfo_table = list_entry(dl->top, 522 + 
struct scsi_dev_info_list_table, 523 + node); 524 + dl->bottom = devinfo_table->scsi_dev_info_list.next; 525 + } 526 + 527 + return dl; 528 } 529 530 static void devinfo_seq_stop(struct seq_file *m, void *v) 531 { 532 + kfree(v); 533 } 534 535 static const struct seq_operations scsi_devinfo_seq_ops = { ··· 549 **/ 550 void scsi_exit_devinfo(void) 551 { 552 #ifdef CONFIG_SCSI_PROC_FS 553 remove_proc_entry("scsi/device_info", NULL); 554 #endif 555 556 + scsi_dev_info_remove_list(SCSI_DEVINFO_GLOBAL); 557 + } 558 + 559 + /** 560 + * scsi_dev_info_add_list - add a new devinfo list 561 + * @key: key of the list to add 562 + * @name: Name of the list to add (for /proc/scsi/device_info) 563 + * 564 + * Adds the requested list, returns zero on success, -EEXIST if the 565 + * key is already registered to a list, or other error on failure. 566 + */ 567 + int scsi_dev_info_add_list(int key, const char *name) 568 + { 569 + struct scsi_dev_info_list_table *devinfo_table = 570 + scsi_devinfo_lookup_by_key(key); 571 + 572 + if (!IS_ERR(devinfo_table)) 573 + /* list already exists */ 574 + return -EEXIST; 575 + 576 + devinfo_table = kmalloc(sizeof(*devinfo_table), GFP_KERNEL); 577 + 578 + if (!devinfo_table) 579 + return -ENOMEM; 580 + 581 + INIT_LIST_HEAD(&devinfo_table->node); 582 + INIT_LIST_HEAD(&devinfo_table->scsi_dev_info_list); 583 + devinfo_table->name = name; 584 + devinfo_table->key = key; 585 + list_add_tail(&devinfo_table->node, &scsi_dev_info_list); 586 + 587 + return 0; 588 + } 589 + EXPORT_SYMBOL(scsi_dev_info_add_list); 590 + 591 + /** 592 + * scsi_dev_info_remove_list - destroy an added devinfo list 593 + * @key: key of the list to destroy 594 + * 595 + * Iterates over the entire list first, freeing all the values, then 596 + * frees the list itself. Returns 0 on success or -EINVAL if the key 597 + * can't be found. 598 + */ 599 + int scsi_dev_info_remove_list(int key) 600 + { 601 + struct list_head *lh, *lh_next; 602 + struct scsi_dev_info_list_table *devinfo_table = 603 + scsi_devinfo_lookup_by_key(key); 604 + 605 + if (IS_ERR(devinfo_table)) 606 + /* no such list */ 607 + return -EINVAL; 608 + 609 + /* remove from the master list */ 610 + list_del(&devinfo_table->node); 611 + 612 + list_for_each_safe(lh, lh_next, &devinfo_table->scsi_dev_info_list) { 613 + struct scsi_dev_info_list *devinfo; 614 + 615 devinfo = list_entry(lh, struct scsi_dev_info_list, 616 dev_info_list); 617 kfree(devinfo); 618 } 619 + kfree(devinfo_table); 620 + 621 + return 0; 622 } 623 + EXPORT_SYMBOL(scsi_dev_info_remove_list); 624 625 /** 626 * scsi_init_devinfo - set up the dynamic device list. ··· 577 #endif 578 int error, i; 579 580 + error = scsi_dev_info_add_list(SCSI_DEVINFO_GLOBAL, NULL); 581 if (error) 582 return error; 583 + 584 + error = scsi_dev_info_list_add_str(scsi_dev_flags); 585 + if (error) 586 + goto out; 587 588 for (i = 0; scsi_static_device_list[i].vendor; i++) { 589 error = scsi_dev_info_list_add(1 /* compatibile */,
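The keyed devinfo API added above is easiest to read from the consumer side; the scsi_transport_spi.c hunk later in this series is the first real user. A condensed sketch of the same pattern follows. SCSI_DEVINFO_EXAMPLE, EXAMPLE_BLIST_QUIRK and the example_* functions are invented for illustration only; a real user adds its key to the enum in scsi_priv.h, exactly as the SPI patch does.

/* Hypothetical transport module using the keyed blacklists. */
static int __init example_transport_init(void)
{
    int err;

    err = scsi_dev_info_add_list(SCSI_DEVINFO_EXAMPLE, "Example Transport");
    if (err)
        return err;

    /* compatible=1, no flag string, one private flag bit */
    err = scsi_dev_info_list_add_keyed(1, "ACME", "TURBODISK", NULL,
                                       EXAMPLE_BLIST_QUIRK,
                                       SCSI_DEVINFO_EXAMPLE);
    if (err)
        scsi_dev_info_remove_list(SCSI_DEVINFO_EXAMPLE);
    return err;
}

static void __exit example_transport_exit(void)
{
    scsi_dev_info_remove_list(SCSI_DEVINFO_EXAMPLE);
}

module_init(example_transport_init);
module_exit(example_transport_exit);

/* At scan time, e.g. from a slave_configure()-style callback: */
static void example_configure(struct scsi_device *sdev)
{
    int bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
                                             &sdev->inquiry[16],
                                             SCSI_DEVINFO_EXAMPLE);

    if (bflags & EXAMPLE_BLIST_QUIRK)
        sdev_printk(KERN_INFO, sdev, "quirk flagged by list\n");
}

Entries added with compatible set go on the tail of the list and match with the older space-padded semantics, which is what lets static tables like spi_static_device_list keep their historical behaviour.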
+1
drivers/scsi/scsi_lib.c
··· 1207 ret = scsi_setup_blk_pc_cmnd(sdev, req); 1208 return scsi_prep_return(q, req, ret); 1209 } 1210 1211 /* 1212 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
··· 1207 ret = scsi_setup_blk_pc_cmnd(sdev, req); 1208 return scsi_prep_return(q, req, ret); 1209 } 1210 + EXPORT_SYMBOL(scsi_prep_fn); 1211 1212 /* 1213 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
+16 -1
drivers/scsi/scsi_priv.h
··· 39 #endif 40 41 /* scsi_devinfo.c */ 42 extern int scsi_get_device_flags(struct scsi_device *sdev, 43 const unsigned char *vendor, 44 const unsigned char *model); 45 extern int __init scsi_init_devinfo(void); 46 extern void scsi_exit_devinfo(void); 47 ··· 87 extern void scsi_exit_queue(void); 88 struct request_queue; 89 struct request; 90 - extern int scsi_prep_fn(struct request_queue *, struct request *); 91 extern struct kmem_cache *scsi_sdb_cache; 92 93 /* scsi_proc.c */
··· 39 #endif 40 41 /* scsi_devinfo.c */ 42 + 43 + /* list of keys for the lists */ 44 + enum { 45 + SCSI_DEVINFO_GLOBAL = 0, 46 + SCSI_DEVINFO_SPI, 47 + }; 48 + 49 extern int scsi_get_device_flags(struct scsi_device *sdev, 50 const unsigned char *vendor, 51 const unsigned char *model); 52 + extern int scsi_get_device_flags_keyed(struct scsi_device *sdev, 53 + const unsigned char *vendor, 54 + const unsigned char *model, int key); 55 + extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor, 56 + char *model, char *strflags, 57 + int flags, int key); 58 + extern int scsi_dev_info_add_list(int key, const char *name); 59 + extern int scsi_dev_info_remove_list(int key); 60 + 61 extern int __init scsi_init_devinfo(void); 62 extern void scsi_exit_devinfo(void); 63 ··· 71 extern void scsi_exit_queue(void); 72 struct request_queue; 73 struct request; 74 extern struct kmem_cache *scsi_sdb_cache; 75 76 /* scsi_proc.c */
-17
drivers/scsi/scsi_sysfs.c
··· 420 return err; 421 } 422 423 - static int scsi_bus_remove(struct device *dev) 424 - { 425 - struct device_driver *drv = dev->driver; 426 - struct scsi_device *sdev = to_scsi_device(dev); 427 - int err = 0; 428 - 429 - /* reset the prep_fn back to the default since the 430 - * driver may have altered it and it's being removed */ 431 - blk_queue_prep_rq(sdev->request_queue, scsi_prep_fn); 432 - 433 - if (drv && drv->remove) 434 - err = drv->remove(dev); 435 - 436 - return 0; 437 - } 438 - 439 struct bus_type scsi_bus_type = { 440 .name = "scsi", 441 .match = scsi_bus_match, 442 .uevent = scsi_bus_uevent, 443 .suspend = scsi_bus_suspend, 444 .resume = scsi_bus_resume, 445 - .remove = scsi_bus_remove, 446 }; 447 EXPORT_SYMBOL_GPL(scsi_bus_type); 448
··· 420 return err; 421 } 422 423 struct bus_type scsi_bus_type = { 424 .name = "scsi", 425 .match = scsi_bus_match, 426 .uevent = scsi_bus_uevent, 427 .suspend = scsi_bus_suspend, 428 .resume = scsi_bus_resume, 429 }; 430 EXPORT_SYMBOL_GPL(scsi_bus_type); 431
+26 -22
drivers/scsi/scsi_transport_fc.c
···
3397 kfree(job);
3398 }
3399
3400 -
3401 /**
3402 * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
3403 * completed
···
3407 {
3408 struct request *req = job->req;
3409 struct request *rsp = req->next_rq;
3410 - unsigned long flags;
3411 int err;
3412
3413 - spin_lock_irqsave(&job->job_lock, flags);
3414 - job->state_flags |= FC_RQST_STATE_DONE;
3415 - job->ref_cnt--;
3416 - spin_unlock_irqrestore(&job->job_lock, flags);
3417 -
3418 err = job->req->errors = job->reply->result;
3419 if (err < 0)
3420 /* we're only returning the result field in the reply */
3421 job->req->sense_len = sizeof(uint32_t);
···
3427 rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
3428 rsp->resid_len);
3429 }
3430 -
3431 - blk_end_request_all(req, err);
3432 -
3433 - fc_destroy_bsgjob(job);
3434 }
3435
3436
3437 /**
3438 * fc_bsg_job_timeout - handler for when a bsg request times out
···
3479 "abort failed with status %d\n", err);
3480 }
3481
3482 - if (!done) {
3483 - spin_lock_irqsave(&job->job_lock, flags);
3484 - job->ref_cnt--;
3485 - spin_unlock_irqrestore(&job->job_lock, flags);
3486 - fc_destroy_bsgjob(job);
3487 - }
3488 -
3489 /* the blk_end_sync_io() doesn't check the error */
3490 - return BLK_EH_HANDLED;
3491 }
3492 -
3493 -
3494
3495 static int
3496 fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
···
3861 struct fc_internal *i = to_fc_internal(shost->transportt);
3862 struct request_queue *q;
3863 int err;
3864 - char bsg_name[BUS_ID_SIZE]; /*20*/
3865
3866 fc_host->rqst_q = NULL;
3867
···
3881
3882 q->queuedata = shost;
3883 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
3884 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3885 blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
3886
···
3927
3928 q->queuedata = rport;
3929 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
3930 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3931 blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
3932
···
3397 kfree(job);
3398 }
3399
3400 /**
3401 * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
3402 * completed
···
3408 {
3409 struct request *req = job->req;
3410 struct request *rsp = req->next_rq;
3411 int err;
3412
3413 err = job->req->errors = job->reply->result;
3414 +
3415 if (err < 0)
3416 /* we're only returning the result field in the reply */
3417 job->req->sense_len = sizeof(uint32_t);
···
3433 rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
3434 rsp->resid_len);
3435 }
3436 + blk_complete_request(req);
3437 }
3438
3439 + /**
3440 + * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
3441 + * @rq: BSG request that holds the job to be destroyed
3442 + */
3443 + static void fc_bsg_softirq_done(struct request *rq)
3444 + {
3445 + struct fc_bsg_job *job = rq->special;
3446 + unsigned long flags;
3447 +
3448 + spin_lock_irqsave(&job->job_lock, flags);
3449 + job->state_flags |= FC_RQST_STATE_DONE;
3450 + job->ref_cnt--;
3451 + spin_unlock_irqrestore(&job->job_lock, flags);
3452 +
3453 + blk_end_request_all(rq, rq->errors);
3454 + fc_destroy_bsgjob(job);
3455 + }
3456
3457 /**
3458 * fc_bsg_job_timeout - handler for when a bsg request times out
···
3471 "abort failed with status %d\n", err);
3472 }
3473
3474 /* the blk_end_sync_io() doesn't check the error */
3475 + if (done)
3476 + return BLK_EH_NOT_HANDLED;
3477 + else
3478 + return BLK_EH_HANDLED;
3479 }
3480
3481 static int
3482 fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
···
3859 struct fc_internal *i = to_fc_internal(shost->transportt);
3860 struct request_queue *q;
3861 int err;
3862 + char bsg_name[20];
3863
3864 fc_host->rqst_q = NULL;
3865
···
3879
3880 q->queuedata = shost;
3881 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
3882 + blk_queue_softirq_done(q, fc_bsg_softirq_done);
3883 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3884 blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
3885
···
3924
3925 q->queuedata = rport;
3926 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
3927 + blk_queue_softirq_done(q, fc_bsg_softirq_done);
3928 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3929 blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
3930
+1
drivers/scsi/scsi_transport_iscsi.c
··· 692 "Too many iscsi targets. Max " 693 "number of targets is %d.\n", 694 ISCSI_MAX_TARGET - 1); 695 goto release_host; 696 } 697 }
··· 692 "Too many iscsi targets. Max " 693 "number of targets is %d.\n", 694 ISCSI_MAX_TARGET - 1); 695 + err = -EOVERFLOW; 696 goto release_host; 697 } 698 }
+2 -2
drivers/scsi/scsi_transport_sas.c
··· 173 ret = handler(shost, rphy, req); 174 req->errors = ret; 175 176 - spin_lock_irq(q->queue_lock); 177 178 - req->end_io(req, ret); 179 } 180 } 181
··· 173 ret = handler(shost, rphy, req); 174 req->errors = ret; 175 176 + blk_end_request_all(req, ret); 177 178 + spin_lock_irq(q->queue_lock); 179 } 180 } 181
+45 -6
drivers/scsi/scsi_transport_spi.c
··· 46 #define DV_RETRIES 3 /* should only need at most 47 * two cc/ua clears */ 48 49 /* Private data accessors (keep these out of the header file) */ 50 #define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress) 51 #define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) ··· 223 { 224 struct scsi_device *sdev = to_scsi_device(dev); 225 struct scsi_target *starget = sdev->sdev_target; 226 227 /* Populate the target capability fields with the values 228 * gleaned from the device inquiry */ ··· 235 spi_support_dt(starget) = scsi_device_dt(sdev); 236 spi_support_dt_only(starget) = scsi_device_dt_only(sdev); 237 spi_support_ius(starget) = scsi_device_ius(sdev); 238 spi_support_qas(starget) = scsi_device_qas(sdev); 239 240 return 0; ··· 856 return; 857 } 858 859 - if (!scsi_device_wide(sdev)) { 860 spi_max_width(starget) = 0; 861 max_width = 0; 862 } ··· 883 return; 884 885 /* device can't handle synchronous */ 886 - if (!scsi_device_sync(sdev) && !scsi_device_dt(sdev)) 887 return; 888 889 /* len == -1 is the signal that we need to ascertain the ··· 899 900 /* try QAS requests; this should be harmless to set if the 901 * target supports it */ 902 - if (scsi_device_qas(sdev) && spi_max_qas(starget)) { 903 DV_SET(qas, 1); 904 } else { 905 DV_SET(qas, 0); 906 } 907 908 - if (scsi_device_ius(sdev) && spi_max_iu(starget) && min_period < 9) { 909 /* This u320 (or u640). Set IU transfers */ 910 DV_SET(iu, 1); 911 /* Then set the optional parameters */ ··· 926 i->f->get_signalling(shost); 927 if (spi_signalling(shost) == SPI_SIGNAL_SE || 928 spi_signalling(shost) == SPI_SIGNAL_HVD || 929 - !scsi_device_dt(sdev)) { 930 DV_SET(dt, 0); 931 } else { 932 DV_SET(dt, 1); ··· 1547 1548 static __init int spi_transport_init(void) 1549 { 1550 - int error = transport_class_register(&spi_transport_class); 1551 if (error) 1552 return error; 1553 error = anon_transport_class_register(&spi_device_class); ··· 1573 transport_class_unregister(&spi_transport_class); 1574 anon_transport_class_unregister(&spi_device_class); 1575 transport_class_unregister(&spi_host_class); 1576 } 1577 1578 MODULE_AUTHOR("Martin Hicks");
··· 46 #define DV_RETRIES 3 /* should only need at most 47 * two cc/ua clears */ 48 49 + /* Our blacklist flags */ 50 + enum { 51 + SPI_BLIST_NOIUS = 0x1, 52 + }; 53 + 54 + /* blacklist table, modelled on scsi_devinfo.c */ 55 + static struct { 56 + char *vendor; 57 + char *model; 58 + unsigned flags; 59 + } spi_static_device_list[] __initdata = { 60 + {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS }, 61 + {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS }, 62 + {NULL, NULL, 0} 63 + }; 64 + 65 /* Private data accessors (keep these out of the header file) */ 66 #define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress) 67 #define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) ··· 207 { 208 struct scsi_device *sdev = to_scsi_device(dev); 209 struct scsi_target *starget = sdev->sdev_target; 210 + unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8], 211 + &sdev->inquiry[16], 212 + SCSI_DEVINFO_SPI); 213 214 /* Populate the target capability fields with the values 215 * gleaned from the device inquiry */ ··· 216 spi_support_dt(starget) = scsi_device_dt(sdev); 217 spi_support_dt_only(starget) = scsi_device_dt_only(sdev); 218 spi_support_ius(starget) = scsi_device_ius(sdev); 219 + if (bflags & SPI_BLIST_NOIUS) { 220 + dev_info(dev, "Information Units disabled by blacklist\n"); 221 + spi_support_ius(starget) = 0; 222 + } 223 spi_support_qas(starget) = scsi_device_qas(sdev); 224 225 return 0; ··· 833 return; 834 } 835 836 + if (!spi_support_wide(starget)) { 837 spi_max_width(starget) = 0; 838 max_width = 0; 839 } ··· 860 return; 861 862 /* device can't handle synchronous */ 863 + if (!spi_support_sync(starget) && !spi_support_dt(starget)) 864 return; 865 866 /* len == -1 is the signal that we need to ascertain the ··· 876 877 /* try QAS requests; this should be harmless to set if the 878 * target supports it */ 879 + if (spi_support_qas(starget) && spi_max_qas(starget)) { 880 DV_SET(qas, 1); 881 } else { 882 DV_SET(qas, 0); 883 } 884 885 + if (spi_support_ius(starget) && spi_max_iu(starget) && 886 + min_period < 9) { 887 /* This u320 (or u640). Set IU transfers */ 888 DV_SET(iu, 1); 889 /* Then set the optional parameters */ ··· 902 i->f->get_signalling(shost); 903 if (spi_signalling(shost) == SPI_SIGNAL_SE || 904 spi_signalling(shost) == SPI_SIGNAL_HVD || 905 + !spi_support_dt(starget)) { 906 DV_SET(dt, 0); 907 } else { 908 DV_SET(dt, 1); ··· 1523 1524 static __init int spi_transport_init(void) 1525 { 1526 + int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI, 1527 + "SCSI Parallel Transport Class"); 1528 + if (!error) { 1529 + int i; 1530 + 1531 + for (i = 0; spi_static_device_list[i].vendor; i++) 1532 + scsi_dev_info_list_add_keyed(1, /* compatible */ 1533 + spi_static_device_list[i].vendor, 1534 + spi_static_device_list[i].model, 1535 + NULL, 1536 + spi_static_device_list[i].flags, 1537 + SCSI_DEVINFO_SPI); 1538 + } 1539 + 1540 + error = transport_class_register(&spi_transport_class); 1541 if (error) 1542 return error; 1543 error = anon_transport_class_register(&spi_device_class); ··· 1535 transport_class_unregister(&spi_transport_class); 1536 anon_transport_class_unregister(&spi_device_class); 1537 transport_class_unregister(&spi_host_class); 1538 + scsi_dev_info_remove_list(SCSI_DEVINFO_SPI); 1539 } 1540 1541 MODULE_AUTHOR("Martin Hicks");
+72 -2
drivers/scsi/sd.c
··· 1307 int sense_valid = 0; 1308 int the_result; 1309 int retries = 3; 1310 unsigned long long lba; 1311 unsigned sector_size; 1312 ··· 1358 sdkp->capacity = 0; 1359 return -EOVERFLOW; 1360 } 1361 1362 sdkp->capacity = lba + 1; 1363 return sector_size; ··· 1420 } 1421 1422 sdkp->capacity = lba + 1; 1423 return sector_size; 1424 } 1425 ··· 1533 string_get_size(sz, STRING_UNITS_10, cap_str_10, 1534 sizeof(cap_str_10)); 1535 1536 - if (sdkp->first_scan || old_capacity != sdkp->capacity) 1537 sd_printk(KERN_NOTICE, sdkp, 1538 - "%llu %d-byte hardware sectors: (%s/%s)\n", 1539 (unsigned long long)sdkp->capacity, 1540 sector_size, cap_str_10, cap_str_2); 1541 } 1542 1543 /* Rescale capacity to 512-byte units */ ··· 1556 else if (sector_size == 256) 1557 sdkp->capacity >>= 1; 1558 1559 sdkp->device->sector_size = sector_size; 1560 } 1561 ··· 1795 } 1796 1797 /** 1798 * sd_revalidate_disk - called the first time a new disk is seen, 1799 * performs disk spin up, read_capacity, etc. 1800 * @disk: struct gendisk we care about ··· 1877 */ 1878 if (sdkp->media_present) { 1879 sd_read_capacity(sdkp, buffer); 1880 sd_read_write_protect_flag(sdkp, buffer); 1881 sd_read_cache_type(sdkp, buffer); 1882 sd_read_app_tag_own(sdkp, buffer); ··· 2001 add_disk(gd); 2002 sd_dif_config_host(sdkp); 2003 2004 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 2005 sdp->removable ? "removable " : ""); 2006 } ··· 2123 2124 async_synchronize_full(); 2125 sdkp = dev_get_drvdata(dev); 2126 device_del(&sdkp->dev); 2127 del_gendisk(sdkp->disk); 2128 sd_shutdown(dev);
··· 1307 int sense_valid = 0; 1308 int the_result; 1309 int retries = 3; 1310 + unsigned int alignment; 1311 unsigned long long lba; 1312 unsigned sector_size; 1313 ··· 1357 sdkp->capacity = 0; 1358 return -EOVERFLOW; 1359 } 1360 + 1361 + /* Logical blocks per physical block exponent */ 1362 + sdkp->hw_sector_size = (1 << (buffer[13] & 0xf)) * sector_size; 1363 + 1364 + /* Lowest aligned logical block */ 1365 + alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; 1366 + blk_queue_alignment_offset(sdp->request_queue, alignment); 1367 + if (alignment && sdkp->first_scan) 1368 + sd_printk(KERN_NOTICE, sdkp, 1369 + "physical block alignment offset: %u\n", alignment); 1370 1371 sdkp->capacity = lba + 1; 1372 return sector_size; ··· 1409 } 1410 1411 sdkp->capacity = lba + 1; 1412 + sdkp->hw_sector_size = sector_size; 1413 return sector_size; 1414 } 1415 ··· 1521 string_get_size(sz, STRING_UNITS_10, cap_str_10, 1522 sizeof(cap_str_10)); 1523 1524 + if (sdkp->first_scan || old_capacity != sdkp->capacity) { 1525 sd_printk(KERN_NOTICE, sdkp, 1526 + "%llu %d-byte logical blocks: (%s/%s)\n", 1527 (unsigned long long)sdkp->capacity, 1528 sector_size, cap_str_10, cap_str_2); 1529 + 1530 + if (sdkp->hw_sector_size != sector_size) 1531 + sd_printk(KERN_NOTICE, sdkp, 1532 + "%u-byte physical blocks\n", 1533 + sdkp->hw_sector_size); 1534 + } 1535 } 1536 1537 /* Rescale capacity to 512-byte units */ ··· 1538 else if (sector_size == 256) 1539 sdkp->capacity >>= 1; 1540 1541 + blk_queue_physical_block_size(sdp->request_queue, sdkp->hw_sector_size); 1542 sdkp->device->sector_size = sector_size; 1543 } 1544 ··· 1776 } 1777 1778 /** 1779 + * sd_read_block_limits - Query disk device for preferred I/O sizes. 1780 + * @disk: disk to query 1781 + */ 1782 + static void sd_read_block_limits(struct scsi_disk *sdkp) 1783 + { 1784 + unsigned int sector_sz = sdkp->device->sector_size; 1785 + char *buffer; 1786 + 1787 + /* Block Limits VPD */ 1788 + buffer = scsi_get_vpd_page(sdkp->device, 0xb0); 1789 + 1790 + if (buffer == NULL) 1791 + return; 1792 + 1793 + blk_queue_io_min(sdkp->disk->queue, 1794 + get_unaligned_be16(&buffer[6]) * sector_sz); 1795 + blk_queue_io_opt(sdkp->disk->queue, 1796 + get_unaligned_be32(&buffer[12]) * sector_sz); 1797 + 1798 + kfree(buffer); 1799 + } 1800 + 1801 + /** 1802 + * sd_read_block_characteristics - Query block dev. characteristics 1803 + * @disk: disk to query 1804 + */ 1805 + static void sd_read_block_characteristics(struct scsi_disk *sdkp) 1806 + { 1807 + char *buffer; 1808 + u16 rot; 1809 + 1810 + /* Block Device Characteristics VPD */ 1811 + buffer = scsi_get_vpd_page(sdkp->device, 0xb1); 1812 + 1813 + if (buffer == NULL) 1814 + return; 1815 + 1816 + rot = get_unaligned_be16(&buffer[4]); 1817 + 1818 + if (rot == 1) 1819 + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue); 1820 + 1821 + kfree(buffer); 1822 + } 1823 + 1824 + /** 1825 * sd_revalidate_disk - called the first time a new disk is seen, 1826 * performs disk spin up, read_capacity, etc. 1827 * @disk: struct gendisk we care about ··· 1812 */ 1813 if (sdkp->media_present) { 1814 sd_read_capacity(sdkp, buffer); 1815 + sd_read_block_limits(sdkp); 1816 + sd_read_block_characteristics(sdkp); 1817 sd_read_write_protect_flag(sdkp, buffer); 1818 sd_read_cache_type(sdkp, buffer); 1819 sd_read_app_tag_own(sdkp, buffer); ··· 1934 add_disk(gd); 1935 sd_dif_config_host(sdkp); 1936 1937 + sd_revalidate_disk(gd); 1938 + 1939 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 1940 sdp->removable ? 
"removable " : ""); 1941 } ··· 2054 2055 async_synchronize_full(); 2056 sdkp = dev_get_drvdata(dev); 2057 + blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn); 2058 device_del(&sdkp->dev); 2059 del_gendisk(sdkp->disk); 2060 sd_shutdown(dev);
+1
drivers/scsi/sd.h
··· 45 unsigned int openers; /* protected by BKL for now, yuck */ 46 sector_t capacity; /* size in 512-byte sectors */ 47 u32 index; 48 u8 media_present; 49 u8 write_prot; 50 u8 protection_type;/* Data Integrity Field */
··· 45 unsigned int openers; /* protected by BKL for now, yuck */ 46 sector_t capacity; /* size in 512-byte sectors */ 47 u32 index; 48 + unsigned short hw_sector_size; 49 u8 media_present; 50 u8 write_prot; 51 u8 protection_type;/* Data Integrity Field */
+1
drivers/scsi/sr.c
··· 881 { 882 struct scsi_cd *cd = dev_get_drvdata(dev); 883 884 del_gendisk(cd->disk); 885 886 mutex_lock(&sr_ref_mutex);
··· 881 { 882 struct scsi_cd *cd = dev_get_drvdata(dev); 883 884 + blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn); 885 del_gendisk(cd->disk); 886 887 mutex_lock(&sr_ref_mutex);
+3 -2
drivers/scsi/sym53c8xx_2/sym_hipd.c
··· 2321 int phase = cmd & 7; 2322 struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); 2323 2324 - printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", 2325 - sym_name(np), hsts, dbc, sbcl); 2326 2327 /* 2328 * Check that the chip is connected to the SCSI BUS.
··· 2321 int phase = cmd & 7; 2322 struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); 2323 2324 + if (printk_ratelimit()) 2325 + printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", 2326 + sym_name(np), hsts, dbc, sbcl); 2327 2328 /* 2329 * Check that the chip is connected to the SCSI BUS.
-2
include/scsi/fc_encode.h
··· 107 break; 108 109 default: 110 - FC_DBG("Invalid op code %x \n", op); 111 return -EINVAL; 112 } 113 *r_ctl = FC_RCTL_DD_UNSOL_CTL; ··· 297 break; 298 299 default: 300 - FC_DBG("Invalid op code %x \n", op); 301 return -EINVAL; 302 } 303
··· 107 break; 108 109 default: 110 return -EINVAL; 111 } 112 *r_ctl = FC_RCTL_DD_UNSOL_CTL; ··· 298 break; 299 300 default: 301 return -EINVAL; 302 } 303
+65 -10
include/scsi/libfc.h
··· 34 35 #include <scsi/fc_frame.h> 36 37 - #define LIBFC_DEBUG 38 39 - #ifdef LIBFC_DEBUG 40 - /* Log messages */ 41 - #define FC_DBG(fmt, args...) \ 42 - do { \ 43 - printk(KERN_INFO "%s " fmt, __func__, ##args); \ 44 - } while (0) 45 - #else 46 - #define FC_DBG(fmt, args...) 47 - #endif 48 49 /* 50 * libfc error codes
··· 34 35 #include <scsi/fc_frame.h> 36 37 + #define FC_LIBFC_LOGGING 0x01 /* General logging, not categorized */ 38 + #define FC_LPORT_LOGGING 0x02 /* lport layer logging */ 39 + #define FC_DISC_LOGGING 0x04 /* discovery layer logging */ 40 + #define FC_RPORT_LOGGING 0x08 /* rport layer logging */ 41 + #define FC_FCP_LOGGING 0x10 /* I/O path logging */ 42 + #define FC_EM_LOGGING 0x20 /* Exchange Manager logging */ 43 + #define FC_EXCH_LOGGING 0x40 /* Exchange/Sequence logging */ 44 + #define FC_SCSI_LOGGING 0x80 /* SCSI logging (mostly error handling) */ 45 46 + extern unsigned int fc_debug_logging; 47 + 48 + #define FC_CHECK_LOGGING(LEVEL, CMD) \ 49 + do { \ 50 + if (unlikely(fc_debug_logging & LEVEL)) \ 51 + do { \ 52 + CMD; \ 53 + } while (0); \ 54 + } while (0); 55 + 56 + #define FC_LIBFC_DBG(fmt, args...) \ 57 + FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \ 58 + printk(KERN_INFO "libfc: " fmt, ##args);) 59 + 60 + #define FC_LPORT_DBG(lport, fmt, args...) \ 61 + FC_CHECK_LOGGING(FC_LPORT_LOGGING, \ 62 + printk(KERN_INFO "lport: %6x: " fmt, \ 63 + fc_host_port_id(lport->host), ##args);) 64 + 65 + #define FC_DISC_DBG(disc, fmt, args...) \ 66 + FC_CHECK_LOGGING(FC_DISC_LOGGING, \ 67 + printk(KERN_INFO "disc: %6x: " fmt, \ 68 + fc_host_port_id(disc->lport->host), \ 69 + ##args);) 70 + 71 + #define FC_RPORT_DBG(rport, fmt, args...) \ 72 + do { \ 73 + struct fc_rport_libfc_priv *rdata = rport->dd_data; \ 74 + struct fc_lport *lport = rdata->local_port; \ 75 + FC_CHECK_LOGGING(FC_RPORT_LOGGING, \ 76 + printk(KERN_INFO "rport: %6x: %6x: " fmt, \ 77 + fc_host_port_id(lport->host), \ 78 + rport->port_id, ##args);) \ 79 + } while (0); 80 + 81 + #define FC_FCP_DBG(pkt, fmt, args...) \ 82 + FC_CHECK_LOGGING(FC_FCP_LOGGING, \ 83 + printk(KERN_INFO "fcp: %6x: %6x: " fmt, \ 84 + fc_host_port_id(pkt->lp->host), \ 85 + pkt->rport->port_id, ##args);) 86 + 87 + #define FC_EM_DBG(em, fmt, args...) \ 88 + FC_CHECK_LOGGING(FC_EM_LOGGING, \ 89 + printk(KERN_INFO "em: %6x: " fmt, \ 90 + fc_host_port_id(em->lp->host), \ 91 + ##args);) 92 + 93 + #define FC_EXCH_DBG(exch, fmt, args...) \ 94 + FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ 95 + printk(KERN_INFO "exch: %6x: %4x: " fmt, \ 96 + fc_host_port_id(exch->lp->host), \ 97 + exch->xid, ##args);) 98 + 99 + #define FC_SCSI_DBG(lport, fmt, args...) \ 100 + FC_CHECK_LOGGING(FC_SCSI_LOGGING, \ 101 + printk(KERN_INFO "scsi: %6x: " fmt, \ 102 + fc_host_port_id(lport->host), ##args);) 103 104 /* 105 * libfc error codes
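The libfc.h block above replaces the always-on FC_DBG() with per-area macros gated by the fc_debug_logging bitmask; the extern declaration implies the mask itself lives in libfc. A sketch of a call site follows, assuming libfc defines the mask as a writable module parameter named debug_logging (an assumption here; only the extern appears in this header), with example_lport_event invented for illustration:

/* In libfc proper (shown only for context; not part of this header): */
unsigned int fc_debug_logging;
module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");

/* Emitted only when FC_LPORT_LOGGING (0x02) is set in the mask: */
static void example_lport_event(struct fc_lport *lport)
{
    FC_LPORT_DBG(lport, "link up, speed %d\n", 0);
}

With S_IWUSR set, the mask can be changed at runtime; enabling, say, both lport and rport logging means writing the OR of their bits (0x02 | 0x08 = 0x0a) to /sys/module/libfc/parameters/debug_logging.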
+4
include/scsi/libiscsi.h
··· 125 struct scsi_cmnd *sc; /* associated SCSI cmd*/ 126 struct iscsi_conn *conn; /* used connection */ 127 128 /* state set/tested under session->lock */ 129 int state; 130 atomic_t refcount;
··· 125 struct scsi_cmnd *sc; /* associated SCSI cmd*/ 126 struct iscsi_conn *conn; /* used connection */ 127 128 + /* data processing tracking */ 129 + unsigned long last_xfer; 130 + unsigned long last_timeout; 131 + bool have_checked_conn; 132 /* state set/tested under session->lock */ 133 int state; 134 atomic_t refcount;
+1
include/scsi/scsi_driver.h
··· 32 int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req); 33 int scsi_prep_state_check(struct scsi_device *sdev, struct request *req); 34 int scsi_prep_return(struct request_queue *q, struct request *req, int ret); 35 36 #endif /* _SCSI_SCSI_DRIVER_H */
··· 32 int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req); 33 int scsi_prep_state_check(struct scsi_device *sdev, struct request *req); 34 int scsi_prep_return(struct request_queue *q, struct request *req, int ret); 35 + int scsi_prep_fn(struct request_queue *, struct request *); 36 37 #endif /* _SCSI_SCSI_DRIVER_H */