Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (55 commits)
[SCSI] tcm_loop: Add multi-fabric Linux/SCSI LLD fabric module
[SCSI] qla4xxx: Use polling mode for disable interrupt mailbox completion
[SCSI] Revert "[SCSI] Retrieve the Caching mode page"
[SCSI] bnx2fc: IO completion not processed due to missed wakeup
[SCSI] qla4xxx: Update driver version to 5.02.00-k6
[SCSI] qla4xxx: masking required bits of add_fw_options during initialization
[SCSI] qla4xxx: added new function qla4xxx_relogin_all_devices
[SCSI] qla4xxx: add support for ql4xsess_recovery_tmo cmd line param
[SCSI] qla4xxx: Add support for ql4xmaxqdepth command line parameter
[SCSI] qla4xxx: cleanup function qla4xxx_process_ddb_changed
[SCSI] qla4xxx: Prevent other port reinitialization during remove_adapter
[SCSI] qla4xxx: remove unused ddb flag DF_NO_RELOGIN
[SCSI] qla4xxx: cleanup DDB relogin logic during initialization
[SCSI] qla4xxx: Do not retry ISP82XX initialization if H/W state is failed
[SCSI] qla4xxx: Do not send mbox command if FW is in failed state
[SCSI] qla4xxx: cleanup qla4xxx_initialize_ddb_list()
[SCSI] ses: add subenclosure support
[SCSI] bnx2fc: Bump version to 1.0.1
[SCSI] bnx2fc: Remove unnecessary module state checks
[SCSI] bnx2fc: Fix MTU issue by using static MTU
...

+5830 -962
+9 -9
Documentation/target/tcm_mod_builder.py
···
 	buf += "#include <target/target_core_configfs.h>\n"
 	buf += "#include <target/target_core_base.h>\n"
 	buf += "#include <target/configfs_macros.h>\n\n"
-	buf += "#include <" + fabric_mod_name + "_base.h>\n"
-	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
+	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
+	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
 
 	buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
 	buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
···
 	buf += "{\n"
 	buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
 	buf += "				struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
+	buf += "	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
 	buf += "	kfree(nacl);\n"
 	buf += "}\n\n"
···
 	buf += "#include <target/target_core_fabric_lib.h>\n"
 	buf += "#include <target/target_core_device.h>\n"
 	buf += "#include <target/target_core_tpg.h>\n"
-	buf += "#include <target/target_core_configfs.h>\n"
-	buf += "#include <" + fabric_mod_name + "_base.h>\n"
-	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
+	buf += "#include <target/target_core_configfs.h>\n\n"
+	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
+	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
 
 	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
 	buf += "{\n"
···
 def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
 
 	buf = ""
-	f = fabric_mod_dir_var + "/Kbuild"
+	f = fabric_mod_dir_var + "/Makefile"
 	print "Writing file: " + f
 
 	p = open(f, 'w')
 	if not p:
 		tcm_mod_err("Unable to open file: " + f)
 
-	buf = "EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/include/ -I$(srctree)/drivers/scsi/ -I$(srctree)/include/scsi/ -I$(srctree)/drivers/target/" + fabric_mod_name + "\n\n"
 	buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
 	buf += "			   " + fabric_mod_name + "_configfs.o\n"
 	buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
···
 
 def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
 	buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
-	kbuild = tcm_dir + "/drivers/target/Kbuild"
+	kbuild = tcm_dir + "/drivers/target/Makefile"
 
 	f = open(kbuild, 'a')
 	f.write(buf)
···
 	tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
 	tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
 
-	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kbuild..? [yes,no]: ")
+	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
 	if input == "yes" or input == "y":
 		tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
 
+1 -1
drivers/scsi/aacraid/Makefile
···
 obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
 
 aacraid-objs	:= linit.o aachba.o commctrl.o comminit.o commsup.o \
-		   dpcsup.o rx.o sa.o rkt.o nark.o
+		   dpcsup.o rx.o sa.o rkt.o nark.o src.o
 
 ccflags-y	:= -Idrivers/scsi
+5 -2
drivers/scsi/aacraid/aachba.c
···
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
···
 		dev->a_ops.adapter_write = aac_write_block;
 	}
 	dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
-	if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
+	if (dev->adapter_info.options & AAC_OPT_NEW_COMM_TYPE1)
+		dev->adapter_info.options |= AAC_OPT_NEW_COMM;
+	if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
 		/*
 		 * Worst case size that could cause sg overflow when
 		 * we break up SG elements that are larger than 64KB.
+96 -10
drivers/scsi/aacraid/aacraid.h
··· 12 12 *----------------------------------------------------------------------------*/ 13 13 14 14 #ifndef AAC_DRIVER_BUILD 15 - # define AAC_DRIVER_BUILD 26400 15 + # define AAC_DRIVER_BUILD 28000 16 16 # define AAC_DRIVER_BRANCH "-ms" 17 17 #endif 18 18 #define MAXIMUM_NUM_CONTAINERS 32 ··· 277 277 278 278 #define FsaNormal 1 279 279 280 + /* transport FIB header (PMC) */ 281 + struct aac_fib_xporthdr { 282 + u64 HostAddress; /* FIB host address w/o xport header */ 283 + u32 Size; /* FIB size excluding xport header */ 284 + u32 Handle; /* driver handle to reference the FIB */ 285 + u64 Reserved[2]; 286 + }; 287 + 288 + #define ALIGN32 32 289 + 280 290 /* 281 291 * Define the FIB. The FIB is the where all the requested data and 282 292 * command information are put to the application on the FSA adapter. ··· 404 394 AdapterMicroFib = (1<<17), 405 395 BIOSFibPath = (1<<18), 406 396 FastResponseCapable = (1<<19), 407 - ApiFib = (1<<20) // Its an API Fib. 397 + ApiFib = (1<<20), /* Its an API Fib */ 398 + /* PMC NEW COMM: There is no more AIF data pending */ 399 + NoMoreAifDataAvailable = (1<<21) 408 400 }; 409 401 410 402 /* ··· 416 404 417 405 #define ADAPTER_INIT_STRUCT_REVISION 3 418 406 #define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science 407 + #define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */ 419 408 420 409 struct aac_init 421 410 { ··· 441 428 #define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001 442 429 #define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010 443 430 #define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020 431 + #define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000041 444 432 __le32 MaxIoCommands; /* max outstanding commands */ 445 433 __le32 MaxIoSize; /* largest I/O command */ 446 434 __le32 MaxFibSize; /* largest FIB to adapter */ 435 + /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */ 436 + __le32 MaxNumAif; /* max number of aif */ 437 + /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */ 438 + __le32 HostRRQ_AddrLow; 439 + __le32 HostRRQ_AddrHigh; /* Host RRQ (response queue) for SRC */ 447 440 }; 448 441 449 442 enum aac_log_level { ··· 704 685 #define OutboundDoorbellReg MUnit.ODR 705 686 706 687 struct rx_registers { 707 - struct rx_mu_registers MUnit; /* 1300h - 1344h */ 688 + struct rx_mu_registers MUnit; /* 1300h - 1347h */ 708 689 __le32 reserved1[2]; /* 1348h - 134ch */ 709 690 struct rx_inbound IndexRegs; 710 691 }; ··· 722 703 #define rkt_inbound rx_inbound 723 704 724 705 struct rkt_registers { 725 - struct rkt_mu_registers MUnit; /* 1300h - 1344h */ 706 + struct rkt_mu_registers MUnit; /* 1300h - 1347h */ 726 707 __le32 reserved1[1006]; /* 1348h - 22fch */ 727 708 struct rkt_inbound IndexRegs; /* 2300h - */ 728 709 }; ··· 731 712 #define rkt_readl(AEP, CSR) readl(&((AEP)->regs.rkt->CSR)) 732 713 #define rkt_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rkt->CSR)) 733 714 #define rkt_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rkt->CSR)) 715 + 716 + /* 717 + * PMC SRC message unit registers 718 + */ 719 + 720 + #define src_inbound rx_inbound 721 + 722 + struct src_mu_registers { 723 + /* PCI*| Name */ 724 + __le32 reserved0[8]; /* 00h | Reserved */ 725 + __le32 IDR; /* 20h | Inbound Doorbell Register */ 726 + __le32 IISR; /* 24h | Inbound Int. Status Register */ 727 + __le32 reserved1[3]; /* 28h | Reserved */ 728 + __le32 OIMR; /* 34h | Outbound Int. 
Mask Register */ 729 + __le32 reserved2[25]; /* 38h | Reserved */ 730 + __le32 ODR_R; /* 9ch | Outbound Doorbell Read */ 731 + __le32 ODR_C; /* a0h | Outbound Doorbell Clear */ 732 + __le32 reserved3[6]; /* a4h | Reserved */ 733 + __le32 OMR; /* bch | Outbound Message Register */ 734 + __le32 IQ_L; /* c0h | Inbound Queue (Low address) */ 735 + __le32 IQ_H; /* c4h | Inbound Queue (High address) */ 736 + }; 737 + 738 + struct src_registers { 739 + struct src_mu_registers MUnit; /* 00h - c7h */ 740 + __le32 reserved1[130790]; /* c8h - 7fc5fh */ 741 + struct src_inbound IndexRegs; /* 7fc60h */ 742 + }; 743 + 744 + #define src_readb(AEP, CSR) readb(&((AEP)->regs.src.bar0->CSR)) 745 + #define src_readl(AEP, CSR) readl(&((AEP)->regs.src.bar0->CSR)) 746 + #define src_writeb(AEP, CSR, value) writeb(value, \ 747 + &((AEP)->regs.src.bar0->CSR)) 748 + #define src_writel(AEP, CSR, value) writel(value, \ 749 + &((AEP)->regs.src.bar0->CSR)) 750 + 751 + #define SRC_ODR_SHIFT 12 752 + #define SRC_IDR_SHIFT 9 734 753 735 754 typedef void (*fib_callback)(void *ctxt, struct fib *fibctx); 736 755 ··· 936 879 #define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001) 937 880 #define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002) 938 881 #define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004) 882 + #define AAC_OPTION_DOORBELL_RESET cpu_to_le32(0x00004000) 939 883 #define AAC_SIS_VERSION_V3 3 940 884 #define AAC_SIS_SLOT_UNKNOWN 0xFF 941 885 ··· 998 940 #define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16) 999 941 #define AAC_OPT_NEW_COMM cpu_to_le32(1<<17) 1000 942 #define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18) 943 + #define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28) 1001 944 1002 945 struct aac_dev 1003 946 { ··· 1011 952 */ 1012 953 unsigned max_fib_size; 1013 954 unsigned sg_tablesize; 955 + unsigned max_num_aif; 1014 956 1015 957 /* 1016 958 * Map for 128 fib objects (64k) ··· 1040 980 struct adapter_ops a_ops; 1041 981 unsigned long fsrev; /* Main driver's revision number */ 1042 982 1043 - unsigned base_size; /* Size of mapped in region */ 983 + unsigned long dbg_base; /* address of UART 984 + * debug buffer */ 985 + 986 + unsigned base_size, dbg_size; /* Size of 987 + * mapped in region */ 988 + 1044 989 struct aac_init *init; /* Holds initialization info to communicate with adapter */ 1045 990 dma_addr_t init_pa; /* Holds physical address of the init struct */ 991 + 992 + u32 *host_rrq; /* response queue 993 + * if AAC_COMM_MESSAGE_TYPE1 */ 994 + 995 + dma_addr_t host_rrq_pa; /* phys. 
address */ 996 + u32 host_rrq_idx; /* index into rrq buffer */ 1046 997 1047 998 struct pci_dev *pdev; /* Our PCI interface */ 1048 999 void * printfbuf; /* pointer to buffer used for printf's from the adapter */ ··· 1074 1003 */ 1075 1004 #ifndef AAC_MIN_FOOTPRINT_SIZE 1076 1005 # define AAC_MIN_FOOTPRINT_SIZE 8192 1006 + # define AAC_MIN_SRC_BAR0_SIZE 0x400000 1007 + # define AAC_MIN_SRC_BAR1_SIZE 0x800 1077 1008 #endif 1078 1009 union 1079 1010 { 1080 1011 struct sa_registers __iomem *sa; 1081 1012 struct rx_registers __iomem *rx; 1082 1013 struct rkt_registers __iomem *rkt; 1014 + struct { 1015 + struct src_registers __iomem *bar0; 1016 + char __iomem *bar1; 1017 + } src; 1083 1018 } regs; 1084 - volatile void __iomem *base; 1019 + volatile void __iomem *base, *dbg_base_mapped; 1085 1020 volatile struct rx_inbound __iomem *IndexRegs; 1086 1021 u32 OIMR; /* Mask Register Cache */ 1087 1022 /* ··· 1108 1031 u8 comm_interface; 1109 1032 # define AAC_COMM_PRODUCER 0 1110 1033 # define AAC_COMM_MESSAGE 1 1111 - /* macro side-effects BEWARE */ 1112 - # define raw_io_interface \ 1113 - init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) 1034 + # define AAC_COMM_MESSAGE_TYPE1 3 1035 + u8 raw_io_interface; 1114 1036 u8 raw_io_64; 1115 1037 u8 printf_enabled; 1116 1038 u8 in_reset; ··· 1865 1789 #define DoorBellAdapterNormCmdNotFull (1<<3) /* Adapter -> Host */ 1866 1790 #define DoorBellAdapterNormRespNotFull (1<<4) /* Adapter -> Host */ 1867 1791 #define DoorBellPrintfReady (1<<5) /* Adapter -> Host */ 1792 + #define DoorBellAifPending (1<<6) /* Adapter -> Host */ 1793 + 1794 + /* PMC specific outbound doorbell bits */ 1795 + #define PmDoorBellResponseSent (1<<1) /* Adapter -> Host */ 1868 1796 1869 1797 /* 1870 1798 * For FIB communication, we need all of the following things ··· 1910 1830 #define AifReqAPIJobStart 108 /* Start a job from the API */ 1911 1831 #define AifReqAPIJobUpdate 109 /* Update a job report from the API */ 1912 1832 #define AifReqAPIJobFinish 110 /* Finish a job from the API */ 1833 + 1834 + /* PMC NEW COMM: Request the event data */ 1835 + #define AifReqEvent 200 1913 1836 1914 1837 /* 1915 1838 * Adapter Initiated FIB command structures. Start with the adapter ··· 1969 1886 int aac_rkt_init(struct aac_dev *dev); 1970 1887 int aac_nark_init(struct aac_dev *dev); 1971 1888 int aac_sa_init(struct aac_dev *dev); 1889 + int aac_src_init(struct aac_dev *dev); 1972 1890 int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify); 1973 1891 unsigned int aac_response_normal(struct aac_queue * q); 1974 1892 unsigned int aac_command_normal(struct aac_queue * q); 1975 - unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); 1893 + unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index, 1894 + int isAif, int isFastResponse, 1895 + struct hw_fib *aif_fib); 1976 1896 int aac_reset_adapter(struct aac_dev * dev, int forced); 1977 1897 int aac_check_health(struct aac_dev * dev); 1978 1898 int aac_command_thread(void *data);
+2 -1
drivers/scsi/aacraid/commctrl.c
···
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
+45 -13
drivers/scsi/aacraid/comminit.c
··· 5 5 * based on the old aacraid driver that is.. 6 6 * Adaptec aacraid device driver for Linux. 7 7 * 8 - * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) 8 + * Copyright (c) 2000-2010 Adaptec, Inc. 9 + * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 9 10 * 10 11 * This program is free software; you can redistribute it and/or modify 11 12 * it under the terms of the GNU General Public License as published by ··· 53 52 unsigned long size, align; 54 53 const unsigned long fibsize = 4096; 55 54 const unsigned long printfbufsiz = 256; 55 + unsigned long host_rrq_size = 0; 56 56 struct aac_init *init; 57 57 dma_addr_t phys; 58 58 unsigned long aac_max_hostphysmempages; 59 59 60 - size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz; 61 - 60 + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) 61 + host_rrq_size = (dev->scsi_host_ptr->can_queue 62 + + AAC_NUM_MGT_FIB) * sizeof(u32); 63 + size = fibsize + sizeof(struct aac_init) + commsize + 64 + commalign + printfbufsiz + host_rrq_size; 62 65 63 66 base = pci_alloc_consistent(dev->pdev, size, &phys); 64 67 ··· 75 70 dev->comm_phys = phys; 76 71 dev->comm_size = size; 77 72 78 - dev->init = (struct aac_init *)(base + fibsize); 79 - dev->init_pa = phys + fibsize; 73 + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { 74 + dev->host_rrq = (u32 *)(base + fibsize); 75 + dev->host_rrq_pa = phys + fibsize; 76 + memset(dev->host_rrq, 0, host_rrq_size); 77 + } 78 + 79 + dev->init = (struct aac_init *)(base + fibsize + host_rrq_size); 80 + dev->init_pa = phys + fibsize + host_rrq_size; 80 81 81 82 init = dev->init; 82 83 ··· 117 106 118 107 init->InitFlags = 0; 119 108 if (dev->comm_interface == AAC_COMM_MESSAGE) { 120 - init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); 109 + init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); 121 110 dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); 111 + } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { 112 + init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6); 113 + init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_TYPE1_SUPPORTED); 114 + dprintk((KERN_WARNING 115 + "aacraid: New Comm Interface type1 enabled\n")); 122 116 } 123 117 init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | 124 118 INITFLAGS_DRIVER_SUPPORTS_PM); ··· 131 115 init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); 132 116 init->MaxFibSize = cpu_to_le32(dev->max_fib_size); 133 117 118 + init->MaxNumAif = cpu_to_le32(dev->max_num_aif); 119 + init->HostRRQ_AddrHigh = (u32)((u64)dev->host_rrq_pa >> 32); 120 + init->HostRRQ_AddrLow = (u32)(dev->host_rrq_pa & 0xffffffff); 121 + 122 + 134 123 /* 135 124 * Increment the base address by the amount already used 136 125 */ 137 - base = base + fibsize + sizeof(struct aac_init); 138 - phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init)); 126 + base = base + fibsize + host_rrq_size + sizeof(struct aac_init); 127 + phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size + 128 + sizeof(struct aac_init)); 129 + 139 130 /* 140 131 * Align the beginning of Headers to commalign 141 132 */ ··· 337 314 - sizeof(struct aac_write) + sizeof(struct sgentry)) 338 315 / sizeof(struct sgentry); 339 316 dev->comm_interface = AAC_COMM_PRODUCER; 340 - dev->raw_io_64 = 0; 317 + dev->raw_io_interface = dev->raw_io_64 = 0; 318 + 341 319 if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 342 320 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && 343 321 
(status[0] == 0x00000001)) { 344 322 if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64)) 345 323 dev->raw_io_64 = 1; 346 - if (dev->a_ops.adapter_comm && 347 - (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) 348 - dev->comm_interface = AAC_COMM_MESSAGE; 324 + if (dev->a_ops.adapter_comm) { 325 + if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1)) { 326 + dev->comm_interface = AAC_COMM_MESSAGE_TYPE1; 327 + dev->raw_io_interface = 1; 328 + } else if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)) { 329 + dev->comm_interface = AAC_COMM_MESSAGE; 330 + dev->raw_io_interface = 1; 331 + } 332 + } 349 333 if ((dev->comm_interface == AAC_COMM_MESSAGE) && 350 334 (status[2] > dev->base_size)) { 351 335 aac_adapter_ioremap(dev, 0); ··· 380 350 * status[3] & 0xFFFF maximum number FIBs outstanding 381 351 */ 382 352 host->max_sectors = (status[1] >> 16) << 1; 383 - dev->max_fib_size = status[1] & 0xFFFF; 353 + /* Multiple of 32 for PMC */ 354 + dev->max_fib_size = status[1] & 0xFFE0; 384 355 host->sg_tablesize = status[2] >> 16; 385 356 dev->sg_tablesize = status[2] & 0xFFFF; 386 357 host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB; 358 + dev->max_num_aif = status[4] & 0xFFFF; 387 359 /* 388 360 * NOTE: 389 361 * All these overrides are based on a fixed internal
+32 -9
drivers/scsi/aacraid/commsup.c
··· 5 5 * based on the old aacraid driver that is.. 6 6 * Adaptec aacraid device driver for Linux. 7 7 * 8 - * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) 8 + * Copyright (c) 2000-2010 Adaptec, Inc. 9 + * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 9 10 * 10 11 * This program is free software; you can redistribute it and/or modify 11 12 * it under the terms of the GNU General Public License as published by ··· 64 63 "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n", 65 64 dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue, 66 65 AAC_NUM_MGT_FIB, &dev->hw_fib_pa)); 67 - if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size 68 - * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), 69 - &dev->hw_fib_pa))==NULL) 66 + dev->hw_fib_va = pci_alloc_consistent(dev->pdev, 67 + (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) 68 + * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1), 69 + &dev->hw_fib_pa); 70 + if (dev->hw_fib_va == NULL) 70 71 return -ENOMEM; 71 72 return 0; 72 73 } ··· 113 110 if (i<0) 114 111 return -ENOMEM; 115 112 113 + /* 32 byte alignment for PMC */ 114 + hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1); 115 + dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + 116 + (hw_fib_pa - dev->hw_fib_pa)); 117 + dev->hw_fib_pa = hw_fib_pa; 118 + memset(dev->hw_fib_va, 0, 119 + (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) * 120 + (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); 121 + 122 + /* add Xport header */ 123 + dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + 124 + sizeof(struct aac_fib_xporthdr)); 125 + dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr); 126 + 116 127 hw_fib = dev->hw_fib_va; 117 128 hw_fib_pa = dev->hw_fib_pa; 118 - memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); 119 129 /* 120 130 * Initialise the fibs 121 131 */ ··· 145 129 hw_fib->header.XferState = cpu_to_le32(0xffffffff); 146 130 hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); 147 131 fibptr->hw_fib_pa = hw_fib_pa; 148 - hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size); 149 - hw_fib_pa = hw_fib_pa + dev->max_fib_size; 132 + hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + 133 + dev->max_fib_size + sizeof(struct aac_fib_xporthdr)); 134 + hw_fib_pa = hw_fib_pa + 135 + dev->max_fib_size + sizeof(struct aac_fib_xporthdr); 150 136 } 151 137 /* 152 138 * Add the fib chain to the free list ··· 682 664 unsigned long nointr = 0; 683 665 unsigned long qflags; 684 666 667 + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { 668 + kfree(hw_fib); 669 + return 0; 670 + } 671 + 685 672 if (hw_fib->header.XferState == 0) { 686 673 if (dev->comm_interface == AAC_COMM_MESSAGE) 687 - kfree (hw_fib); 674 + kfree(hw_fib); 688 675 return 0; 689 676 } 690 677 /* ··· 697 674 */ 698 675 if (hw_fib->header.StructType != FIB_MAGIC) { 699 676 if (dev->comm_interface == AAC_COMM_MESSAGE) 700 - kfree (hw_fib); 677 + kfree(hw_fib); 701 678 return -EINVAL; 702 679 } 703 680 /*
+74 -11
drivers/scsi/aacraid/dpcsup.c
··· 5 5 * based on the old aacraid driver that is.. 6 6 * Adaptec aacraid device driver for Linux. 7 7 * 8 - * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) 8 + * Copyright (c) 2000-2010 Adaptec, Inc. 9 + * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 9 10 * 10 11 * This program is free software; you can redistribute it and/or modify 11 12 * it under the terms of the GNU General Public License as published by ··· 229 228 return 0; 230 229 } 231 230 231 + /* 232 + * 233 + * aac_aif_callback 234 + * @context: the context set in the fib - here it is scsi cmd 235 + * @fibptr: pointer to the fib 236 + * 237 + * Handles the AIFs - new method (SRC) 238 + * 239 + */ 240 + 241 + static void aac_aif_callback(void *context, struct fib * fibptr) 242 + { 243 + struct fib *fibctx; 244 + struct aac_dev *dev; 245 + struct aac_aifcmd *cmd; 246 + int status; 247 + 248 + fibctx = (struct fib *)context; 249 + BUG_ON(fibptr == NULL); 250 + dev = fibptr->dev; 251 + 252 + if (fibptr->hw_fib_va->header.XferState & 253 + cpu_to_le32(NoMoreAifDataAvailable)) { 254 + aac_fib_complete(fibptr); 255 + aac_fib_free(fibptr); 256 + return; 257 + } 258 + 259 + aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va); 260 + 261 + aac_fib_init(fibctx); 262 + cmd = (struct aac_aifcmd *) fib_data(fibctx); 263 + cmd->command = cpu_to_le32(AifReqEvent); 264 + 265 + status = aac_fib_send(AifRequest, 266 + fibctx, 267 + sizeof(struct hw_fib)-sizeof(struct aac_fibhdr), 268 + FsaNormal, 269 + 0, 1, 270 + (fib_callback)aac_aif_callback, fibctx); 271 + } 272 + 232 273 233 274 /** 234 275 * aac_intr_normal - Handle command replies ··· 281 238 * know there is a response on our normal priority queue. We will pull off 282 239 * all QE there are and wake up all the waiters before exiting. 283 240 */ 284 - 285 - unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) 241 + unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, 242 + int isAif, int isFastResponse, struct hw_fib *aif_fib) 286 243 { 287 244 unsigned long mflags; 288 245 dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); 289 - if ((index & 0x00000002L)) { 246 + if (isAif == 1) { /* AIF - common */ 290 247 struct hw_fib * hw_fib; 291 248 struct fib * fib; 292 249 struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; 293 250 unsigned long flags; 294 251 295 - if (index == 0xFFFFFFFEL) /* Special Case */ 296 - return 0; /* Do nothing */ 297 252 /* 298 253 * Allocate a FIB. For non queued stuff we can just use 299 254 * the stack so we are happy. 
We need a fib object in order to ··· 304 263 kfree (fib); 305 264 return 1; 306 265 } 307 - memcpy(hw_fib, (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + 308 - (index & ~0x00000002L)), sizeof(struct hw_fib)); 266 + if (aif_fib != NULL) { 267 + memcpy(hw_fib, aif_fib, sizeof(struct hw_fib)); 268 + } else { 269 + memcpy(hw_fib, 270 + (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + 271 + index), sizeof(struct hw_fib)); 272 + } 309 273 INIT_LIST_HEAD(&fib->fiblink); 310 274 fib->type = FSAFS_NTC_FIB_CONTEXT; 311 275 fib->size = sizeof(struct fib); ··· 323 277 wake_up_interruptible(&q->cmdready); 324 278 spin_unlock_irqrestore(q->lock, flags); 325 279 return 1; 280 + } else if (isAif == 2) { /* AIF - new (SRC) */ 281 + struct fib *fibctx; 282 + struct aac_aifcmd *cmd; 283 + 284 + fibctx = aac_fib_alloc(dev); 285 + if (!fibctx) 286 + return 1; 287 + aac_fib_init(fibctx); 288 + 289 + cmd = (struct aac_aifcmd *) fib_data(fibctx); 290 + cmd->command = cpu_to_le32(AifReqEvent); 291 + 292 + return aac_fib_send(AifRequest, 293 + fibctx, 294 + sizeof(struct hw_fib)-sizeof(struct aac_fibhdr), 295 + FsaNormal, 296 + 0, 1, 297 + (fib_callback)aac_aif_callback, fibctx); 326 298 } else { 327 - int fast = index & 0x01; 328 - struct fib * fib = &dev->fibs[index >> 2]; 299 + struct fib *fib = &dev->fibs[index]; 329 300 struct hw_fib * hwfib = fib->hw_fib_va; 330 301 331 302 /* ··· 361 298 return 0; 362 299 } 363 300 364 - if (fast) { 301 + if (isFastResponse) { 365 302 /* 366 303 * Doctor the fib 367 304 */
+10 -5
drivers/scsi/aacraid/linit.c
···
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
···
 
 #include "aacraid.h"
 
-#define AAC_DRIVER_VERSION		"1.1-5"
+#define AAC_DRIVER_VERSION		"1.1-7"
 #ifndef AAC_DRIVER_BRANCH
 #define AAC_DRIVER_BRANCH		""
 #endif
···
 	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
 	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
 	{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
+	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Catch All */
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
···
 	{ aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
 	{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
 	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
-	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */
+	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
+	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Catch All */
 };
 
 /**
···
 	 * This adapter needs a blind reset, only do so for Adapters that
 	 * support a register, instead of a commanded, reset.
 	 */
-	if ((aac->supplement_adapter_info.SupportedOptions2 &
-	  AAC_OPTION_MU_RESET) &&
+	if (((aac->supplement_adapter_info.SupportedOptions2 &
+	  AAC_OPTION_MU_RESET) ||
+	  (aac->supplement_adapter_info.SupportedOptions2 &
+	  AAC_OPTION_DOORBELL_RESET)) &&
 	  aac_check_reset &&
 	  ((aac_check_reset != 1) ||
 	   !(aac->supplement_adapter_info.SupportedOptions2 &
+2 -1
drivers/scsi/aacraid/nark.c
···
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2006-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
+2 -1
drivers/scsi/aacraid/rkt.c
···
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
+29 -4
drivers/scsi/aacraid/rx.c
···
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
···
 
 static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
 {
+	int isAif, isFastResponse, isSpecial;
 	struct aac_dev *dev = dev_id;
 	u32 Index = rx_readl(dev, MUnit.OutboundQueue);
 	if (unlikely(Index == 0xFFFFFFFFL))
 		Index = rx_readl(dev, MUnit.OutboundQueue);
 	if (likely(Index != 0xFFFFFFFFL)) {
 		do {
-			if (unlikely(aac_intr_normal(dev, Index))) {
-				rx_writel(dev, MUnit.OutboundQueue, Index);
-				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
+			isAif = isFastResponse = isSpecial = 0;
+			if (Index & 0x00000002L) {
+				isAif = 1;
+				if (Index == 0xFFFFFFFEL)
+					isSpecial = 1;
+				Index &= ~0x00000002L;
+			} else {
+				if (Index & 0x00000001L)
+					isFastResponse = 1;
+				Index >>= 2;
+			}
+			if (!isSpecial) {
+				if (unlikely(aac_intr_normal(dev,
+						Index, isAif,
+						isFastResponse, NULL))) {
+					rx_writel(dev,
+						MUnit.OutboundQueue,
+						Index);
+					rx_writel(dev,
+						MUnit.ODR,
+						DoorBellAdapterNormRespReady);
+				}
 			}
 			Index = rx_readl(dev, MUnit.OutboundQueue);
 		} while (Index != 0xFFFFFFFFL);
···
 			name, instance);
 		goto error_iounmap;
 	}
+	dev->dbg_base = dev->scsi_host_ptr->base;
+	dev->dbg_base_mapped = dev->base;
+	dev->dbg_size = dev->base_size;
+
 	aac_adapter_enable_int(dev);
 	/*
 	 * Tell the adapter that all is configured, and it can
+6 -1
drivers/scsi/aacraid/sa.c
···
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
···
 			name, instance);
 		goto error_iounmap;
 	}
+	dev->dbg_base = dev->scsi_host_ptr->base;
+	dev->dbg_base_mapped = dev->base;
+	dev->dbg_size = dev->base_size;
+
 	aac_adapter_enable_int(dev);
 
 	/*
+594
drivers/scsi/aacraid/src.c
··· 1 + /* 2 + * Adaptec AAC series RAID controller driver 3 + * (c) Copyright 2001 Red Hat Inc. 4 + * 5 + * based on the old aacraid driver that is.. 6 + * Adaptec aacraid device driver for Linux. 7 + * 8 + * Copyright (c) 2000-2010 Adaptec, Inc. 9 + * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 10 + * 11 + * This program is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License as published by 13 + * the Free Software Foundation; either version 2, or (at your option) 14 + * any later version. 15 + * 16 + * This program is distributed in the hope that it will be useful, 17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 + * GNU General Public License for more details. 20 + * 21 + * You should have received a copy of the GNU General Public License 22 + * along with this program; see the file COPYING. If not, write to 23 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 24 + * 25 + * Module Name: 26 + * src.c 27 + * 28 + * Abstract: Hardware Device Interface for PMC SRC based controllers 29 + * 30 + */ 31 + 32 + #include <linux/kernel.h> 33 + #include <linux/init.h> 34 + #include <linux/types.h> 35 + #include <linux/pci.h> 36 + #include <linux/spinlock.h> 37 + #include <linux/slab.h> 38 + #include <linux/blkdev.h> 39 + #include <linux/delay.h> 40 + #include <linux/version.h> 41 + #include <linux/completion.h> 42 + #include <linux/time.h> 43 + #include <linux/interrupt.h> 44 + #include <scsi/scsi_host.h> 45 + 46 + #include "aacraid.h" 47 + 48 + static irqreturn_t aac_src_intr_message(int irq, void *dev_id) 49 + { 50 + struct aac_dev *dev = dev_id; 51 + unsigned long bellbits, bellbits_shifted; 52 + int our_interrupt = 0; 53 + int isFastResponse; 54 + u32 index, handle; 55 + 56 + bellbits = src_readl(dev, MUnit.ODR_R); 57 + if (bellbits & PmDoorBellResponseSent) { 58 + bellbits = PmDoorBellResponseSent; 59 + /* handle async. 
status */ 60 + our_interrupt = 1; 61 + index = dev->host_rrq_idx; 62 + if (dev->host_rrq[index] == 0) { 63 + u32 old_index = index; 64 + /* adjust index */ 65 + do { 66 + index++; 67 + if (index == dev->scsi_host_ptr->can_queue + 68 + AAC_NUM_MGT_FIB) 69 + index = 0; 70 + if (dev->host_rrq[index] != 0) 71 + break; 72 + } while (index != old_index); 73 + dev->host_rrq_idx = index; 74 + } 75 + for (;;) { 76 + isFastResponse = 0; 77 + /* remove toggle bit (31) */ 78 + handle = (dev->host_rrq[index] & 0x7fffffff); 79 + /* check fast response bit (30) */ 80 + if (handle & 0x40000000) 81 + isFastResponse = 1; 82 + handle &= 0x0000ffff; 83 + if (handle == 0) 84 + break; 85 + 86 + aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL); 87 + 88 + dev->host_rrq[index++] = 0; 89 + if (index == dev->scsi_host_ptr->can_queue + 90 + AAC_NUM_MGT_FIB) 91 + index = 0; 92 + dev->host_rrq_idx = index; 93 + } 94 + } else { 95 + bellbits_shifted = (bellbits >> SRC_ODR_SHIFT); 96 + if (bellbits_shifted & DoorBellAifPending) { 97 + our_interrupt = 1; 98 + /* handle AIF */ 99 + aac_intr_normal(dev, 0, 2, 0, NULL); 100 + } 101 + } 102 + 103 + if (our_interrupt) { 104 + src_writel(dev, MUnit.ODR_C, bellbits); 105 + return IRQ_HANDLED; 106 + } 107 + return IRQ_NONE; 108 + } 109 + 110 + /** 111 + * aac_src_disable_interrupt - Disable interrupts 112 + * @dev: Adapter 113 + */ 114 + 115 + static void aac_src_disable_interrupt(struct aac_dev *dev) 116 + { 117 + src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); 118 + } 119 + 120 + /** 121 + * aac_src_enable_interrupt_message - Enable interrupts 122 + * @dev: Adapter 123 + */ 124 + 125 + static void aac_src_enable_interrupt_message(struct aac_dev *dev) 126 + { 127 + src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8); 128 + } 129 + 130 + /** 131 + * src_sync_cmd - send a command and wait 132 + * @dev: Adapter 133 + * @command: Command to execute 134 + * @p1: first parameter 135 + * @ret: adapter status 136 + * 137 + * This routine will send a synchronous command to the adapter and wait 138 + * for its completion. 139 + */ 140 + 141 + static int src_sync_cmd(struct aac_dev *dev, u32 command, 142 + u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, 143 + u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4) 144 + { 145 + unsigned long start; 146 + int ok; 147 + 148 + /* 149 + * Write the command into Mailbox 0 150 + */ 151 + writel(command, &dev->IndexRegs->Mailbox[0]); 152 + /* 153 + * Write the parameters into Mailboxes 1 - 6 154 + */ 155 + writel(p1, &dev->IndexRegs->Mailbox[1]); 156 + writel(p2, &dev->IndexRegs->Mailbox[2]); 157 + writel(p3, &dev->IndexRegs->Mailbox[3]); 158 + writel(p4, &dev->IndexRegs->Mailbox[4]); 159 + 160 + /* 161 + * Clear the synch command doorbell to start on a clean slate. 162 + */ 163 + src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); 164 + 165 + /* 166 + * Disable doorbell interrupts 167 + */ 168 + src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); 169 + 170 + /* 171 + * Force the completion of the mask register write before issuing 172 + * the interrupt. 173 + */ 174 + src_readl(dev, MUnit.OIMR); 175 + 176 + /* 177 + * Signal that there is a new synch command 178 + */ 179 + src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT); 180 + 181 + ok = 0; 182 + start = jiffies; 183 + 184 + /* 185 + * Wait up to 30 seconds 186 + */ 187 + while (time_before(jiffies, start+30*HZ)) { 188 + /* Delay 5 microseconds to let Mon960 get info. 
*/ 189 + udelay(5); 190 + 191 + /* Mon960 will set doorbell0 bit 192 + * when it has completed the command 193 + */ 194 + if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) { 195 + /* Clear the doorbell */ 196 + src_writel(dev, 197 + MUnit.ODR_C, 198 + OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); 199 + ok = 1; 200 + break; 201 + } 202 + 203 + /* Yield the processor in case we are slow */ 204 + msleep(1); 205 + } 206 + if (unlikely(ok != 1)) { 207 + /* Restore interrupt mask even though we timed out */ 208 + aac_adapter_enable_int(dev); 209 + return -ETIMEDOUT; 210 + } 211 + 212 + /* Pull the synch status from Mailbox 0 */ 213 + if (status) 214 + *status = readl(&dev->IndexRegs->Mailbox[0]); 215 + if (r1) 216 + *r1 = readl(&dev->IndexRegs->Mailbox[1]); 217 + if (r2) 218 + *r2 = readl(&dev->IndexRegs->Mailbox[2]); 219 + if (r3) 220 + *r3 = readl(&dev->IndexRegs->Mailbox[3]); 221 + if (r4) 222 + *r4 = readl(&dev->IndexRegs->Mailbox[4]); 223 + 224 + /* Clear the synch command doorbell */ 225 + src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); 226 + 227 + /* Restore interrupt mask */ 228 + aac_adapter_enable_int(dev); 229 + return 0; 230 + 231 + } 232 + 233 + /** 234 + * aac_src_interrupt_adapter - interrupt adapter 235 + * @dev: Adapter 236 + * 237 + * Send an interrupt to the i960 and breakpoint it. 238 + */ 239 + 240 + static void aac_src_interrupt_adapter(struct aac_dev *dev) 241 + { 242 + src_sync_cmd(dev, BREAKPOINT_REQUEST, 243 + 0, 0, 0, 0, 0, 0, 244 + NULL, NULL, NULL, NULL, NULL); 245 + } 246 + 247 + /** 248 + * aac_src_notify_adapter - send an event to the adapter 249 + * @dev: Adapter 250 + * @event: Event to send 251 + * 252 + * Notify the i960 that something it probably cares about has 253 + * happened. 254 + */ 255 + 256 + static void aac_src_notify_adapter(struct aac_dev *dev, u32 event) 257 + { 258 + switch (event) { 259 + 260 + case AdapNormCmdQue: 261 + src_writel(dev, MUnit.ODR_C, 262 + INBOUNDDOORBELL_1 << SRC_ODR_SHIFT); 263 + break; 264 + case HostNormRespNotFull: 265 + src_writel(dev, MUnit.ODR_C, 266 + INBOUNDDOORBELL_4 << SRC_ODR_SHIFT); 267 + break; 268 + case AdapNormRespQue: 269 + src_writel(dev, MUnit.ODR_C, 270 + INBOUNDDOORBELL_2 << SRC_ODR_SHIFT); 271 + break; 272 + case HostNormCmdNotFull: 273 + src_writel(dev, MUnit.ODR_C, 274 + INBOUNDDOORBELL_3 << SRC_ODR_SHIFT); 275 + break; 276 + case FastIo: 277 + src_writel(dev, MUnit.ODR_C, 278 + INBOUNDDOORBELL_6 << SRC_ODR_SHIFT); 279 + break; 280 + case AdapPrintfDone: 281 + src_writel(dev, MUnit.ODR_C, 282 + INBOUNDDOORBELL_5 << SRC_ODR_SHIFT); 283 + break; 284 + default: 285 + BUG(); 286 + break; 287 + } 288 + } 289 + 290 + /** 291 + * aac_src_start_adapter - activate adapter 292 + * @dev: Adapter 293 + * 294 + * Start up processing on an i960 based AAC adapter 295 + */ 296 + 297 + static void aac_src_start_adapter(struct aac_dev *dev) 298 + { 299 + struct aac_init *init; 300 + 301 + init = dev->init; 302 + init->HostElapsedSeconds = cpu_to_le32(get_seconds()); 303 + 304 + /* We can only use a 32 bit address here */ 305 + src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, 306 + 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); 307 + } 308 + 309 + /** 310 + * aac_src_check_health 311 + * @dev: device to check if healthy 312 + * 313 + * Will attempt to determine if the specified adapter is alive and 314 + * capable of handling requests, returning 0 if alive. 
315 + */ 316 + static int aac_src_check_health(struct aac_dev *dev) 317 + { 318 + u32 status = src_readl(dev, MUnit.OMR); 319 + 320 + /* 321 + * Check to see if the board failed any self tests. 322 + */ 323 + if (unlikely(status & SELF_TEST_FAILED)) 324 + return -1; 325 + 326 + /* 327 + * Check to see if the board panic'd. 328 + */ 329 + if (unlikely(status & KERNEL_PANIC)) 330 + return (status >> 16) & 0xFF; 331 + /* 332 + * Wait for the adapter to be up and running. 333 + */ 334 + if (unlikely(!(status & KERNEL_UP_AND_RUNNING))) 335 + return -3; 336 + /* 337 + * Everything is OK 338 + */ 339 + return 0; 340 + } 341 + 342 + /** 343 + * aac_src_deliver_message 344 + * @fib: fib to issue 345 + * 346 + * Will send a fib, returning 0 if successful. 347 + */ 348 + static int aac_src_deliver_message(struct fib *fib) 349 + { 350 + struct aac_dev *dev = fib->dev; 351 + struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; 352 + unsigned long qflags; 353 + u32 fibsize; 354 + u64 address; 355 + struct aac_fib_xporthdr *pFibX; 356 + 357 + spin_lock_irqsave(q->lock, qflags); 358 + q->numpending++; 359 + spin_unlock_irqrestore(q->lock, qflags); 360 + 361 + /* Calculate the amount to the fibsize bits */ 362 + fibsize = (sizeof(struct aac_fib_xporthdr) + 363 + fib->hw_fib_va->header.Size + 127) / 128 - 1; 364 + if (fibsize > (ALIGN32 - 1)) 365 + fibsize = ALIGN32 - 1; 366 + 367 + /* Fill XPORT header */ 368 + pFibX = (struct aac_fib_xporthdr *) 369 + ((unsigned char *)fib->hw_fib_va - 370 + sizeof(struct aac_fib_xporthdr)); 371 + pFibX->Handle = fib->hw_fib_va->header.SenderData + 1; 372 + pFibX->HostAddress = fib->hw_fib_pa; 373 + pFibX->Size = fib->hw_fib_va->header.Size; 374 + address = fib->hw_fib_pa - (u64)sizeof(struct aac_fib_xporthdr); 375 + 376 + src_writel(dev, MUnit.IQ_H, (u32)(address >> 32)); 377 + src_writel(dev, MUnit.IQ_L, (u32)(address & 0xffffffff) + fibsize); 378 + return 0; 379 + } 380 + 381 + /** 382 + * aac_src_ioremap 383 + * @size: mapping resize request 384 + * 385 + */ 386 + static int aac_src_ioremap(struct aac_dev *dev, u32 size) 387 + { 388 + if (!size) { 389 + iounmap(dev->regs.src.bar0); 390 + dev->regs.src.bar0 = NULL; 391 + iounmap(dev->base); 392 + dev->base = NULL; 393 + return 0; 394 + } 395 + dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2), 396 + AAC_MIN_SRC_BAR1_SIZE); 397 + dev->base = NULL; 398 + if (dev->regs.src.bar1 == NULL) 399 + return -1; 400 + dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base, 401 + size); 402 + if (dev->base == NULL) { 403 + iounmap(dev->regs.src.bar1); 404 + dev->regs.src.bar1 = NULL; 405 + return -1; 406 + } 407 + dev->IndexRegs = &((struct src_registers __iomem *) 408 + dev->base)->IndexRegs; 409 + return 0; 410 + } 411 + 412 + static int aac_src_restart_adapter(struct aac_dev *dev, int bled) 413 + { 414 + u32 var, reset_mask; 415 + 416 + if (bled >= 0) { 417 + if (bled) 418 + printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", 419 + dev->name, dev->id, bled); 420 + bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 421 + 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL); 422 + if (bled || (var != 0x00000001)) 423 + bled = -EINVAL; 424 + if (dev->supplement_adapter_info.SupportedOptions2 & 425 + AAC_OPTION_DOORBELL_RESET) { 426 + src_writel(dev, MUnit.IDR, reset_mask); 427 + msleep(5000); /* Delay 5 seconds */ 428 + } 429 + } 430 + 431 + if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) 432 + return -ENODEV; 433 + 434 + if (startup_timeout < 300) 435 + startup_timeout = 300; 436 + 437 + return 0; 
438 + } 439 + 440 + /** 441 + * aac_src_select_comm - Select communications method 442 + * @dev: Adapter 443 + * @comm: communications method 444 + */ 445 + int aac_src_select_comm(struct aac_dev *dev, int comm) 446 + { 447 + switch (comm) { 448 + case AAC_COMM_MESSAGE: 449 + dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; 450 + dev->a_ops.adapter_intr = aac_src_intr_message; 451 + dev->a_ops.adapter_deliver = aac_src_deliver_message; 452 + break; 453 + default: 454 + return 1; 455 + } 456 + return 0; 457 + } 458 + 459 + /** 460 + * aac_src_init - initialize an Cardinal Frey Bar card 461 + * @dev: device to configure 462 + * 463 + */ 464 + 465 + int aac_src_init(struct aac_dev *dev) 466 + { 467 + unsigned long start; 468 + unsigned long status; 469 + int restart = 0; 470 + int instance = dev->id; 471 + const char *name = dev->name; 472 + 473 + dev->a_ops.adapter_ioremap = aac_src_ioremap; 474 + dev->a_ops.adapter_comm = aac_src_select_comm; 475 + 476 + dev->base_size = AAC_MIN_SRC_BAR0_SIZE; 477 + if (aac_adapter_ioremap(dev, dev->base_size)) { 478 + printk(KERN_WARNING "%s: unable to map adapter.\n", name); 479 + goto error_iounmap; 480 + } 481 + 482 + /* Failure to reset here is an option ... */ 483 + dev->a_ops.adapter_sync_cmd = src_sync_cmd; 484 + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; 485 + if ((aac_reset_devices || reset_devices) && 486 + !aac_src_restart_adapter(dev, 0)) 487 + ++restart; 488 + /* 489 + * Check to see if the board panic'd while booting. 490 + */ 491 + status = src_readl(dev, MUnit.OMR); 492 + if (status & KERNEL_PANIC) { 493 + if (aac_src_restart_adapter(dev, aac_src_check_health(dev))) 494 + goto error_iounmap; 495 + ++restart; 496 + } 497 + /* 498 + * Check to see if the board failed any self tests. 499 + */ 500 + status = src_readl(dev, MUnit.OMR); 501 + if (status & SELF_TEST_FAILED) { 502 + printk(KERN_ERR "%s%d: adapter self-test failed.\n", 503 + dev->name, instance); 504 + goto error_iounmap; 505 + } 506 + /* 507 + * Check to see if the monitor panic'd while booting. 508 + */ 509 + if (status & MONITOR_PANIC) { 510 + printk(KERN_ERR "%s%d: adapter monitor panic.\n", 511 + dev->name, instance); 512 + goto error_iounmap; 513 + } 514 + start = jiffies; 515 + /* 516 + * Wait for the adapter to be up and running. Wait up to 3 minutes 517 + */ 518 + while (!((status = src_readl(dev, MUnit.OMR)) & 519 + KERNEL_UP_AND_RUNNING)) { 520 + if ((restart && 521 + (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || 522 + time_after(jiffies, start+HZ*startup_timeout)) { 523 + printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", 524 + dev->name, instance, status); 525 + goto error_iounmap; 526 + } 527 + if (!restart && 528 + ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || 529 + time_after(jiffies, start + HZ * 530 + ((startup_timeout > 60) 531 + ? (startup_timeout - 60) 532 + : (startup_timeout / 2))))) { 533 + if (likely(!aac_src_restart_adapter(dev, 534 + aac_src_check_health(dev)))) 535 + start = jiffies; 536 + ++restart; 537 + } 538 + msleep(1); 539 + } 540 + if (restart && aac_commit) 541 + aac_commit = 1; 542 + /* 543 + * Fill in the common function dispatch table. 
544 + */ 545 + dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; 546 + dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; 547 + dev->a_ops.adapter_notify = aac_src_notify_adapter; 548 + dev->a_ops.adapter_sync_cmd = src_sync_cmd; 549 + dev->a_ops.adapter_check_health = aac_src_check_health; 550 + dev->a_ops.adapter_restart = aac_src_restart_adapter; 551 + 552 + /* 553 + * First clear out all interrupts. Then enable the one's that we 554 + * can handle. 555 + */ 556 + aac_adapter_comm(dev, AAC_COMM_MESSAGE); 557 + aac_adapter_disable_int(dev); 558 + src_writel(dev, MUnit.ODR_C, 0xffffffff); 559 + aac_adapter_enable_int(dev); 560 + 561 + if (aac_init_adapter(dev) == NULL) 562 + goto error_iounmap; 563 + if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1) 564 + goto error_iounmap; 565 + 566 + dev->msi = aac_msi && !pci_enable_msi(dev->pdev); 567 + 568 + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, 569 + IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { 570 + 571 + if (dev->msi) 572 + pci_disable_msi(dev->pdev); 573 + 574 + printk(KERN_ERR "%s%d: Interrupt unavailable.\n", 575 + name, instance); 576 + goto error_iounmap; 577 + } 578 + dev->dbg_base = pci_resource_start(dev->pdev, 2); 579 + dev->dbg_base_mapped = dev->regs.src.bar1; 580 + dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE; 581 + 582 + aac_adapter_enable_int(dev); 583 + /* 584 + * Tell the adapter that all is configured, and it can 585 + * start accepting requests 586 + */ 587 + aac_src_start_adapter(dev); 588 + 589 + return 0; 590 + 591 + error_iounmap: 592 + 593 + return -1; 594 + }
+14 -8
drivers/scsi/bnx2fc/bnx2fc.h
···
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"1.0.0"
+#define BNX2FC_VERSION		"1.0.1"
 
 #define PFX			"bnx2fc: "
 
···
 #define BNX2FC_NUM_MAX_SESS	128
 #define BNX2FC_NUM_MAX_SESS_LOG	(ilog2(BNX2FC_NUM_MAX_SESS))
 
-#define BNX2FC_MAX_OUTSTANDING_CMNDS	4096
+#define BNX2FC_MAX_OUTSTANDING_CMNDS	2048
+#define BNX2FC_CAN_QUEUE		BNX2FC_MAX_OUTSTANDING_CMNDS
+#define BNX2FC_ELSTM_XIDS		BNX2FC_CAN_QUEUE
 #define BNX2FC_MIN_PAYLOAD		256
 #define BNX2FC_MAX_PAYLOAD		2048
+#define BNX2FC_MFS			\
+			(BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header))
+#define BNX2FC_MINI_JUMBO_MTU		2500
+
 
 #define BNX2FC_RQ_BUF_SZ		256
 #define BNX2FC_RQ_BUF_LOG_SZ		(ilog2(BNX2FC_RQ_BUF_SZ))
···
 #define BNX2FC_CONFQ_WQE_SIZE		(sizeof(struct fcoe_confqe))
 #define BNX2FC_5771X_DB_PAGE_SIZE	128
 
-#define BNX2FC_MAX_TASKS		BNX2FC_MAX_OUTSTANDING_CMNDS
+#define BNX2FC_MAX_TASKS		\
+			(BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS)
 #define BNX2FC_TASK_SIZE		128
 #define BNX2FC_TASKS_PER_PAGE		(PAGE_SIZE/BNX2FC_TASK_SIZE)
 #define BNX2FC_TASK_CTX_ARR_SZ		(BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
···
 #define BNX2FC_WRITE			(1 << 0)
 
 #define BNX2FC_MIN_XID			0
-#define BNX2FC_MAX_XID			(BNX2FC_MAX_OUTSTANDING_CMNDS - 1)
-#define FCOE_MIN_XID			(BNX2FC_MAX_OUTSTANDING_CMNDS)
-#define FCOE_MAX_XID		\
-			(BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256))
+#define BNX2FC_MAX_XID			\
+			(BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
+#define FCOE_MIN_XID			(BNX2FC_MAX_XID + 1)
+#define FCOE_MAX_XID			(FCOE_MIN_XID + 4095)
 #define BNX2FC_MAX_LUN			0xFFFF
 #define BNX2FC_MAX_FCP_TGT		256
 #define BNX2FC_MAX_CMD_LEN		16
···
 
 #define BNX2FC_WAIT_CNT			120
 #define BNX2FC_FW_TIMEOUT		(3 * HZ)
-
 #define PORT_MAX			2
 
 #define CMD_SCSI_STATUS(Cmnd)		((Cmnd)->SCp.Status)
+12 -86
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 21 21 22 22 #define DRV_MODULE_NAME "bnx2fc" 23 23 #define DRV_MODULE_VERSION BNX2FC_VERSION 24 - #define DRV_MODULE_RELDATE "Jan 25, 2011" 24 + #define DRV_MODULE_RELDATE "Mar 17, 2011" 25 25 26 26 27 27 static char version[] __devinitdata = ··· 437 437 set_current_state(TASK_INTERRUPTIBLE); 438 438 while (!kthread_should_stop()) { 439 439 schedule(); 440 - set_current_state(TASK_RUNNING); 441 440 spin_lock_bh(&bg->fcoe_rx_list.lock); 442 441 while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) { 443 442 spin_unlock_bh(&bg->fcoe_rx_list.lock); 444 443 bnx2fc_recv_frame(skb); 445 444 spin_lock_bh(&bg->fcoe_rx_list.lock); 446 445 } 446 + __set_current_state(TASK_INTERRUPTIBLE); 447 447 spin_unlock_bh(&bg->fcoe_rx_list.lock); 448 - set_current_state(TASK_INTERRUPTIBLE); 449 448 } 450 - set_current_state(TASK_RUNNING); 449 + __set_current_state(TASK_RUNNING); 451 450 return 0; 452 451 } 453 452 ··· 568 569 set_current_state(TASK_INTERRUPTIBLE); 569 570 while (!kthread_should_stop()) { 570 571 schedule(); 571 - set_current_state(TASK_RUNNING); 572 572 spin_lock_bh(&p->fp_work_lock); 573 573 while (!list_empty(&p->work_list)) { 574 574 list_splice_init(&p->work_list, &work_list); ··· 581 583 582 584 spin_lock_bh(&p->fp_work_lock); 583 585 } 586 + __set_current_state(TASK_INTERRUPTIBLE); 584 587 spin_unlock_bh(&p->fp_work_lock); 585 - set_current_state(TASK_INTERRUPTIBLE); 586 588 } 587 - set_current_state(TASK_RUNNING); 589 + __set_current_state(TASK_RUNNING); 588 590 589 591 return 0; 590 592 } ··· 659 661 return 0; 660 662 } 661 663 662 - static int bnx2fc_mfs_update(struct fc_lport *lport) 663 - { 664 - struct fcoe_port *port = lport_priv(lport); 665 - struct bnx2fc_hba *hba = port->priv; 666 - struct net_device *netdev = hba->netdev; 667 - u32 mfs; 668 - u32 max_mfs; 669 - 670 - mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + 671 - sizeof(struct fcoe_crc_eof)); 672 - max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header); 673 - BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs); 674 - if (mfs > max_mfs) 675 - mfs = max_mfs; 676 - 677 - /* Adjust mfs to be a multiple of 256 bytes */ 678 - mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) * 679 - BNX2FC_MIN_PAYLOAD); 680 - mfs = mfs + sizeof(struct fc_frame_header); 681 - 682 - BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs); 683 - if (fc_set_mfs(lport, mfs)) 684 - return -EINVAL; 685 - return 0; 686 - } 687 664 static void bnx2fc_link_speed_update(struct fc_lport *lport) 688 665 { 689 666 struct fcoe_port *port = lport_priv(lport); ··· 727 754 !hba->phys_dev->ethtool_ops->get_pauseparam) 728 755 return -EOPNOTSUPP; 729 756 730 - if (bnx2fc_mfs_update(lport)) 757 + if (fc_set_mfs(lport, BNX2FC_MFS)) 731 758 return -EINVAL; 732 759 733 760 skb_queue_head_init(&port->fcoe_pending_queue); ··· 798 825 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) 799 826 printk(KERN_ERR "indicate_netevent: "\ 800 827 "adapter is not UP!!\n"); 801 - /* fall thru to update mfs if MTU has changed */ 802 - case NETDEV_CHANGEMTU: 803 - BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n"); 804 - bnx2fc_mfs_update(lport); 805 - mutex_lock(&lport->lp_mutex); 806 - list_for_each_entry(vport, &lport->vports, list) 807 - bnx2fc_mfs_update(vport); 808 - mutex_unlock(&lport->lp_mutex); 809 828 break; 810 829 811 830 case NETDEV_DOWN: ··· 1059 1094 struct net_device *physdev = hba->phys_dev; 1060 1095 struct netdev_hw_addr *ha; 1061 1096 int sel_san_mac = 0; 1062 - 1063 - /* Do not support for bonding device */ 1064 - if 
((netdev->priv_flags & IFF_MASTER_ALB) || 1065 - (netdev->priv_flags & IFF_SLAVE_INACTIVE) || 1066 - (netdev->priv_flags & IFF_MASTER_8023AD)) { 1067 - return -EOPNOTSUPP; 1068 - } 1069 1097 1070 1098 /* setup Source MAC Address */ 1071 1099 rcu_read_lock(); ··· 1390 1432 struct net_device *phys_dev; 1391 1433 int rc = 0; 1392 1434 1393 - if (!rtnl_trylock()) 1394 - return restart_syscall(); 1435 + rtnl_lock(); 1395 1436 1396 1437 mutex_lock(&bnx2fc_dev_lock); 1397 - #ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE 1398 - if (THIS_MODULE->state != MODULE_STATE_LIVE) { 1399 - rc = -ENODEV; 1400 - goto netdev_err; 1401 - } 1402 - #endif 1403 1438 /* obtain physical netdev */ 1404 1439 if (netdev->priv_flags & IFF_802_1Q_VLAN) 1405 1440 phys_dev = vlan_dev_real_dev(netdev); ··· 1756 1805 struct ethtool_drvinfo drvinfo; 1757 1806 int rc = 0; 1758 1807 1759 - if (!rtnl_trylock()) { 1760 - printk(KERN_ERR PFX "retrying for rtnl_lock\n"); 1761 - return -EIO; 1762 - } 1808 + rtnl_lock(); 1763 1809 1764 1810 mutex_lock(&bnx2fc_dev_lock); 1765 - 1766 - if (THIS_MODULE->state != MODULE_STATE_LIVE) { 1767 - rc = -ENODEV; 1768 - goto nodev; 1769 - } 1770 1811 1771 1812 /* obtain physical netdev */ 1772 1813 if (netdev->priv_flags & IFF_802_1Q_VLAN) ··· 1810 1867 struct ethtool_drvinfo drvinfo; 1811 1868 int rc = 0; 1812 1869 1813 - if (!rtnl_trylock()) { 1814 - printk(KERN_ERR PFX "retrying for rtnl_lock\n"); 1815 - return -EIO; 1816 - } 1870 + rtnl_lock(); 1817 1871 1818 1872 BNX2FC_MISC_DBG("Entered %s\n", __func__); 1819 1873 mutex_lock(&bnx2fc_dev_lock); 1820 - 1821 - if (THIS_MODULE->state != MODULE_STATE_LIVE) { 1822 - rc = -ENODEV; 1823 - goto nodev; 1824 - } 1825 1874 1826 1875 /* obtain physical netdev */ 1827 1876 if (netdev->priv_flags & IFF_802_1Q_VLAN) ··· 1877 1942 return -EIO; 1878 1943 } 1879 1944 1880 - if (!rtnl_trylock()) { 1881 - printk(KERN_ERR "trying for rtnl_lock\n"); 1882 - return -EIO; 1883 - } 1884 - mutex_lock(&bnx2fc_dev_lock); 1945 + rtnl_lock(); 1885 1946 1886 - #ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE 1887 - if (THIS_MODULE->state != MODULE_STATE_LIVE) { 1888 - rc = -ENODEV; 1889 - goto mod_err; 1890 - } 1891 - #endif 1947 + mutex_lock(&bnx2fc_dev_lock); 1892 1948 1893 1949 if (!try_module_get(THIS_MODULE)) { 1894 1950 rc = -EINVAL; ··· 2432 2506 .change_queue_type = fc_change_queue_type, 2433 2507 .this_id = -1, 2434 2508 .cmd_per_lun = 3, 2435 - .can_queue = (BNX2FC_MAX_OUTSTANDING_CMNDS/2), 2509 + .can_queue = BNX2FC_CAN_QUEUE, 2436 2510 .use_clustering = ENABLE_CLUSTERING, 2437 2511 .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, 2438 2512 .max_sectors = 512,
+12 -3
drivers/scsi/bnx2fc/bnx2fc_hwi.c
··· 87 87 fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; 88 88 fcoe_init1.task_list_pbl_addr_hi = 89 89 (u32) ((u64) hba->task_ctx_bd_dma >> 32); 90 - fcoe_init1.mtu = hba->netdev->mtu; 90 + fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU; 91 91 92 92 fcoe_init1.flags = (PAGE_SHIFT << 93 93 FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); ··· 590 590 591 591 num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; 592 592 593 + spin_lock_bh(&tgt->tgt_lock); 593 594 rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); 595 + spin_unlock_bh(&tgt->tgt_lock); 596 + 594 597 if (rq_data) { 595 598 buf = rq_data; 596 599 } else { ··· 606 603 } 607 604 608 605 for (i = 0; i < num_rq; i++) { 606 + spin_lock_bh(&tgt->tgt_lock); 609 607 rq_data = (unsigned char *) 610 608 bnx2fc_get_next_rqe(tgt, 1); 609 + spin_unlock_bh(&tgt->tgt_lock); 611 610 len = BNX2FC_RQ_BUF_SZ; 612 611 memcpy(buf1, rq_data, len); 613 612 buf1 += len; ··· 620 615 621 616 if (buf != rq_data) 622 617 kfree(buf); 618 + spin_lock_bh(&tgt->tgt_lock); 623 619 bnx2fc_return_rqe(tgt, num_rq); 620 + spin_unlock_bh(&tgt->tgt_lock); 624 621 break; 625 622 626 623 case FCOE_ERROR_DETECTION_CQE_TYPE: 627 624 /* 628 - *In case of error reporting CQE a single RQ entry 629 - * is consumes. 625 + * In case of error reporting CQE a single RQ entry 626 + * is consumed. 630 627 */ 631 628 spin_lock_bh(&tgt->tgt_lock); 632 629 num_rq = 1; ··· 712 705 *In case of warning reporting CQE a single RQ entry 713 706 * is consumes. 714 707 */ 708 + spin_lock_bh(&tgt->tgt_lock); 715 709 num_rq = 1; 716 710 err_entry = (struct fcoe_err_report_entry *) 717 711 bnx2fc_get_next_rqe(tgt, 1); ··· 725 717 err_entry->tx_buf_off, err_entry->rx_buf_off); 726 718 727 719 bnx2fc_return_rqe(tgt, 1); 720 + spin_unlock_bh(&tgt->tgt_lock); 728 721 break; 729 722 730 723 default:
+48 -25
drivers/scsi/bnx2fc/bnx2fc_io.c
··· 11 11 */ 12 12 13 13 #include "bnx2fc.h" 14 + 15 + #define RESERVE_FREE_LIST_INDEX num_possible_cpus() 16 + 14 17 static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, 15 18 int bd_index); 16 19 static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); ··· 245 242 u32 mem_size; 246 243 u16 xid; 247 244 int i; 248 - int num_ios; 245 + int num_ios, num_pri_ios; 249 246 size_t bd_tbl_sz; 247 + int arr_sz = num_possible_cpus() + 1; 250 248 251 249 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { 252 250 printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ ··· 267 263 } 268 264 269 265 cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * 270 - num_possible_cpus(), GFP_KERNEL); 266 + arr_sz, GFP_KERNEL); 271 267 if (!cmgr->free_list) { 272 268 printk(KERN_ERR PFX "failed to alloc free_list\n"); 273 269 goto mem_err; 274 270 } 275 271 276 272 cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * 277 - num_possible_cpus(), GFP_KERNEL); 273 + arr_sz, GFP_KERNEL); 278 274 if (!cmgr->free_list_lock) { 279 275 printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); 280 276 goto mem_err; ··· 283 279 cmgr->hba = hba; 284 280 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); 285 281 286 - for (i = 0; i < num_possible_cpus(); i++) { 282 + for (i = 0; i < arr_sz; i++) { 287 283 INIT_LIST_HEAD(&cmgr->free_list[i]); 288 284 spin_lock_init(&cmgr->free_list_lock[i]); 289 285 } 290 286 291 - /* Pre-allocated pool of bnx2fc_cmds */ 287 + /* 288 + * Pre-allocated pool of bnx2fc_cmds. 289 + * Last entry in the free list array is the free list 290 + * of slow path requests. 291 + */ 292 292 xid = BNX2FC_MIN_XID; 293 + num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS; 293 294 for (i = 0; i < num_ios; i++) { 294 295 io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); 295 296 ··· 307 298 INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); 308 299 309 300 io_req->xid = xid++; 310 - if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS) 311 - printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n", 312 - io_req->xid); 313 - list_add_tail(&io_req->link, 314 - &cmgr->free_list[io_req->xid % num_possible_cpus()]); 301 + if (i < num_pri_ios) 302 + list_add_tail(&io_req->link, 303 + &cmgr->free_list[io_req->xid % 304 + num_possible_cpus()]); 305 + else 306 + list_add_tail(&io_req->link, 307 + &cmgr->free_list[num_possible_cpus()]); 315 308 io_req++; 316 309 } 317 310 ··· 400 389 if (!cmgr->free_list) 401 390 goto free_cmgr; 402 391 403 - for (i = 0; i < num_possible_cpus(); i++) { 392 + for (i = 0; i < num_possible_cpus() + 1; i++) { 404 393 struct list_head *list; 405 394 struct list_head *tmp; 406 395 ··· 424 413 struct bnx2fc_cmd *io_req; 425 414 struct list_head *listp; 426 415 struct io_bdt *bd_tbl; 416 + int index = RESERVE_FREE_LIST_INDEX; 427 417 u32 max_sqes; 428 418 u16 xid; 429 419 ··· 444 432 * NOTE: Free list insertions and deletions are protected with 445 433 * cmgr lock 446 434 */ 447 - spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); 448 - if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) || 435 + spin_lock_bh(&cmd_mgr->free_list_lock[index]); 436 + if ((list_empty(&(cmd_mgr->free_list[index]))) || 449 437 (tgt->num_active_ios.counter >= max_sqes)) { 450 438 BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " 451 439 "ios(%d):sqes(%d)\n", 452 440 tgt->num_active_ios.counter, tgt->max_sqes); 453 - if (list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) 441 + if (list_empty(&(cmd_mgr->free_list[index]))) 454 442 printk(KERN_ERR PFX "elstm_alloc: 
list_empty\n"); 455 - spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); 443 + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 456 444 return NULL; 457 445 } 458 446 459 447 listp = (struct list_head *) 460 - cmd_mgr->free_list[smp_processor_id()].next; 448 + cmd_mgr->free_list[index].next; 461 449 list_del_init(listp); 462 450 io_req = (struct bnx2fc_cmd *) listp; 463 451 xid = io_req->xid; 464 452 cmd_mgr->cmds[xid] = io_req; 465 453 atomic_inc(&tgt->num_active_ios); 466 - spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); 454 + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 467 455 468 456 INIT_LIST_HEAD(&io_req->link); 469 457 ··· 491 479 struct io_bdt *bd_tbl; 492 480 u32 max_sqes; 493 481 u16 xid; 482 + int index = get_cpu(); 494 483 495 484 max_sqes = BNX2FC_SCSI_MAX_SQES; 496 485 /* 497 486 * NOTE: Free list insertions and deletions are protected with 498 487 * cmgr lock 499 488 */ 500 - spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); 501 - if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) || 489 + spin_lock_bh(&cmd_mgr->free_list_lock[index]); 490 + if ((list_empty(&cmd_mgr->free_list[index])) || 502 491 (tgt->num_active_ios.counter >= max_sqes)) { 503 - spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); 492 + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 493 + put_cpu(); 504 494 return NULL; 505 495 } 506 496 507 497 listp = (struct list_head *) 508 - cmd_mgr->free_list[smp_processor_id()].next; 498 + cmd_mgr->free_list[index].next; 509 499 list_del_init(listp); 510 500 io_req = (struct bnx2fc_cmd *) listp; 511 501 xid = io_req->xid; 512 502 cmd_mgr->cmds[xid] = io_req; 513 503 atomic_inc(&tgt->num_active_ios); 514 - spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); 504 + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 505 + put_cpu(); 515 506 516 507 INIT_LIST_HEAD(&io_req->link); 517 508 ··· 537 522 struct bnx2fc_cmd *io_req = container_of(ref, 538 523 struct bnx2fc_cmd, refcount); 539 524 struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; 525 + int index; 540 526 541 - spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); 527 + if (io_req->cmd_type == BNX2FC_SCSI_CMD) 528 + index = io_req->xid % num_possible_cpus(); 529 + else 530 + index = RESERVE_FREE_LIST_INDEX; 531 + 532 + 533 + spin_lock_bh(&cmd_mgr->free_list_lock[index]); 542 534 if (io_req->cmd_type != BNX2FC_SCSI_CMD) 543 535 bnx2fc_free_mp_resc(io_req); 544 536 cmd_mgr->cmds[io_req->xid] = NULL; ··· 553 531 list_del_init(&io_req->link); 554 532 /* Add it to the free list */ 555 533 list_add(&io_req->link, 556 - &cmd_mgr->free_list[smp_processor_id()]); 534 + &cmd_mgr->free_list[index]); 557 535 atomic_dec(&io_req->tgt->num_active_ios); 558 - spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); 536 + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); 537 + 559 538 } 560 539 561 540 static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
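
The command-manager changes above split the free lists into one list per possible CPU for fast-path SCSI commands plus one reserved list for ELS/TM requests, and pin the CPU with get_cpu()/put_cpu() while the per-CPU list is in use. A rough, self-contained sketch of that allocation scheme follows; the structure and function names are hypothetical, not the driver's actual API.

#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

struct cmd_pool {
	struct list_head *free_list;		/* num_possible_cpus() + 1 lists */
	spinlock_t	 *free_list_lock;
};

static struct list_head *cmd_pool_get(struct cmd_pool *pool, bool fast_path)
{
	struct list_head *entry = NULL;
	/*
	 * Fast path: this CPU's list (preemption disabled by get_cpu()).
	 * Slow path (ELS/TM): the extra list at index num_possible_cpus().
	 */
	int index = fast_path ? get_cpu() : num_possible_cpus();

	spin_lock_bh(&pool->free_list_lock[index]);
	if (!list_empty(&pool->free_list[index])) {
		entry = pool->free_list[index].next;
		list_del_init(entry);
	}
	spin_unlock_bh(&pool->free_list_lock[index]);
	if (fast_path)
		put_cpu();
	return entry;
}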
+2 -2
drivers/scsi/bnx2fc/bnx2fc_tgt.c
··· 304 304 " not sent to FW\n"); 305 305 306 306 /* Free session resources */ 307 - spin_lock_bh(&tgt->cq_lock); 308 307 bnx2fc_free_session_resc(hba, tgt); 309 308 bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); 310 - spin_unlock_bh(&tgt->cq_lock); 311 309 } 312 310 313 311 static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, ··· 828 830 tgt->rq = NULL; 829 831 } 830 832 /* Free CQ */ 833 + spin_lock_bh(&tgt->cq_lock); 831 834 if (tgt->cq) { 832 835 dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 833 836 tgt->cq, tgt->cq_dma); 834 837 tgt->cq = NULL; 835 838 } 839 + spin_unlock_bh(&tgt->cq_lock); 836 840 /* Free SQ */ 837 841 if (tgt->sq) { 838 842 dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+13 -2
drivers/scsi/libiscsi_tcp.c
··· 132 132 if (page_count(sg_page(sg)) >= 1 && !recv) 133 133 return; 134 134 135 - segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); 135 + if (recv) { 136 + segment->atomic_mapped = true; 137 + segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); 138 + } else { 139 + segment->atomic_mapped = false; 140 + /* the xmit path can sleep with the page mapped so use kmap */ 141 + segment->sg_mapped = kmap(sg_page(sg)); 142 + } 143 + 136 144 segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; 137 145 } 138 146 139 147 void iscsi_tcp_segment_unmap(struct iscsi_segment *segment) 140 148 { 141 149 if (segment->sg_mapped) { 142 - kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); 150 + if (segment->atomic_mapped) 151 + kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); 152 + else 153 + kunmap(sg_page(segment->sg)); 143 154 segment->sg_mapped = NULL; 144 155 segment->data = NULL; 145 156 }
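
The segment-mapping change above relies on the basic highmem rule that an atomic mapping may not be held across anything that can sleep, whereas kmap() gives a persistent mapping that may. A minimal illustrative pair of helpers is sketched below; the names are hypothetical and the KM_SOFTIRQ0 slot simply mirrors the one used in the hunk.

#include <linux/highmem.h>
#include <linux/scatterlist.h>

static void *map_sg_page(struct scatterlist *sg, bool may_sleep)
{
	if (may_sleep)
		return kmap(sg_page(sg));	/* persistent mapping, safe across sleeps */
	return kmap_atomic(sg_page(sg), KM_SOFTIRQ0);	/* must be dropped before sleeping */
}

static void unmap_sg_page(struct scatterlist *sg, void *addr, bool may_sleep)
{
	if (may_sleep)
		kunmap(sg_page(sg));
	else
		kunmap_atomic(addr, KM_SOFTIRQ0);
}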
+1 -1
drivers/scsi/lpfc/Makefile
··· 1 1 #/******************************************************************* 2 2 # * This file is part of the Emulex Linux Device Driver for * 3 3 # * Fibre Channel Host Bus Adapters. * 4 - # * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 + # * Copyright (C) 2004-2011 Emulex. All rights reserved. * 5 5 # * EMULEX and SLI are trademarks of Emulex. * 6 6 # * www.emulex.com * 7 7 # * *
+21 -6
drivers/scsi/lpfc/lpfc.h
··· 539 539 (struct lpfc_hba *, uint32_t); 540 540 int (*lpfc_hba_down_link) 541 541 (struct lpfc_hba *, uint32_t); 542 + int (*lpfc_selective_reset) 543 + (struct lpfc_hba *); 542 544 543 545 /* SLI4 specific HBA data structure */ 544 546 struct lpfc_sli4_hba sli4_hba; ··· 897 895 return; 898 896 } 899 897 900 - static inline void 898 + static inline int 899 + lpfc_readl(void __iomem *addr, uint32_t *data) 900 + { 901 + uint32_t temp; 902 + temp = readl(addr); 903 + if (temp == 0xffffffff) 904 + return -EIO; 905 + *data = temp; 906 + return 0; 907 + } 908 + 909 + static inline int 901 910 lpfc_sli_read_hs(struct lpfc_hba *phba) 902 911 { 903 912 /* ··· 917 904 */ 918 905 phba->sli.slistat.err_attn_event++; 919 906 920 - /* Save status info */ 921 - phba->work_hs = readl(phba->HSregaddr); 922 - phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 923 - phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 907 + /* Save status info and check for unplug error */ 908 + if (lpfc_readl(phba->HSregaddr, &phba->work_hs) || 909 + lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) || 910 + lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) { 911 + return -EIO; 912 + } 924 913 925 914 /* Clear chip Host Attention error bit */ 926 915 writel(HA_ERATT, phba->HAregaddr); 927 916 readl(phba->HAregaddr); /* flush */ 928 917 phba->pport->stopped = 1; 929 918 930 - return; 919 + return 0; 931 920 }
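
The new lpfc_readl() wrapper above encodes the usual surprise-removal convention: an MMIO read from a PCI device that has been hot-unplugged returns all ones, so a value of 0xffffffff is reported as -EIO instead of being handed back as register contents. A small sketch of the intended caller pattern is shown below; the helper name is hypothetical.

static int sample_read_host_attention(struct lpfc_hba *phba, uint32_t *ha)
{
	/* non-zero return: adapter likely unplugged, *ha must not be trusted */
	if (lpfc_readl(phba->HAregaddr, ha))
		return -EIO;
	return 0;
}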
+10 -4
drivers/scsi/lpfc/lpfc_attr.c
··· 685 685 * -EIO reset not configured or error posting the event 686 686 * zero for success 687 687 **/ 688 - static int 688 + int 689 689 lpfc_selective_reset(struct lpfc_hba *phba) 690 690 { 691 691 struct completion online_compl; ··· 746 746 int status = -EINVAL; 747 747 748 748 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) 749 - status = lpfc_selective_reset(phba); 749 + status = phba->lpfc_selective_reset(phba); 750 750 751 751 if (status == 0) 752 752 return strlen(buf); ··· 1224 1224 if (val & ENABLE_FCP_RING_POLLING) { 1225 1225 if ((val & DISABLE_FCP_RING_INT) && 1226 1226 !(old_val & DISABLE_FCP_RING_INT)) { 1227 - creg_val = readl(phba->HCregaddr); 1227 + if (lpfc_readl(phba->HCregaddr, &creg_val)) { 1228 + spin_unlock_irq(&phba->hbalock); 1229 + return -EINVAL; 1230 + } 1228 1231 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 1229 1232 writel(creg_val, phba->HCregaddr); 1230 1233 readl(phba->HCregaddr); /* flush */ ··· 1245 1242 spin_unlock_irq(&phba->hbalock); 1246 1243 del_timer(&phba->fcp_poll_timer); 1247 1244 spin_lock_irq(&phba->hbalock); 1248 - creg_val = readl(phba->HCregaddr); 1245 + if (lpfc_readl(phba->HCregaddr, &creg_val)) { 1246 + spin_unlock_irq(&phba->hbalock); 1247 + return -EINVAL; 1248 + } 1249 1249 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 1250 1250 writel(creg_val, phba->HCregaddr); 1251 1251 readl(phba->HCregaddr); /* flush */
+24 -12
drivers/scsi/lpfc/lpfc_bsg.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2009-2010 Emulex. All rights reserved. * 4 + * Copyright (C) 2009-2011 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * * ··· 348 348 dd_data->context_un.iocb.bmp = bmp; 349 349 350 350 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 351 - creg_val = readl(phba->HCregaddr); 351 + if (lpfc_readl(phba->HCregaddr, &creg_val)) { 352 + rc = -EIO ; 353 + goto free_cmdiocbq; 354 + } 352 355 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 353 356 writel(creg_val, phba->HCregaddr); 354 357 readl(phba->HCregaddr); /* flush */ ··· 602 599 dd_data->context_un.iocb.ndlp = ndlp; 603 600 604 601 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 605 - creg_val = readl(phba->HCregaddr); 602 + if (lpfc_readl(phba->HCregaddr, &creg_val)) { 603 + rc = -EIO; 604 + goto linkdown_err; 605 + } 606 606 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 607 607 writel(creg_val, phba->HCregaddr); 608 608 readl(phba->HCregaddr); /* flush */ ··· 619 613 else 620 614 rc = -EIO; 621 615 616 + linkdown_err: 622 617 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 623 618 job->request_payload.sg_cnt, DMA_TO_DEVICE); 624 619 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, ··· 1364 1357 dd_data->context_un.iocb.ndlp = ndlp; 1365 1358 1366 1359 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1367 - creg_val = readl(phba->HCregaddr); 1360 + if (lpfc_readl(phba->HCregaddr, &creg_val)) { 1361 + rc = -IOCB_ERROR; 1362 + goto issue_ct_rsp_exit; 1363 + } 1368 1364 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 1369 1365 writel(creg_val, phba->HCregaddr); 1370 1366 readl(phba->HCregaddr); /* flush */ ··· 2489 2479 2490 2480 from = (uint8_t *)dd_data->context_un.mbox.mb; 2491 2481 job = dd_data->context_un.mbox.set_job; 2492 - size = job->reply_payload.payload_len; 2493 - job->reply->reply_payload_rcv_len = 2494 - sg_copy_from_buffer(job->reply_payload.sg_list, 2495 - job->reply_payload.sg_cnt, 2496 - from, size); 2497 - job->reply->result = 0; 2482 + if (job) { 2483 + size = job->reply_payload.payload_len; 2484 + job->reply->reply_payload_rcv_len = 2485 + sg_copy_from_buffer(job->reply_payload.sg_list, 2486 + job->reply_payload.sg_cnt, 2487 + from, size); 2488 + job->reply->result = 0; 2498 2489 2490 + job->dd_data = NULL; 2491 + job->job_done(job); 2492 + } 2499 2493 dd_data->context_un.mbox.set_job = NULL; 2500 - job->dd_data = NULL; 2501 - job->job_done(job); 2502 2494 /* need to hold the lock until we call job done to hold off 2503 2495 * the timeout handler returning to the midlayer while 2504 2496 * we are stillprocessing the job
+3 -3
drivers/scsi/lpfc/lpfc_crtn.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2010 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2011 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * * ··· 254 254 void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, 255 255 uint32_t); 256 256 void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); 257 - 258 - void lpfc_reset_barrier(struct lpfc_hba * phba); 257 + int lpfc_selective_reset(struct lpfc_hba *); 258 + void lpfc_reset_barrier(struct lpfc_hba *); 259 259 int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); 260 260 int lpfc_sli_brdkill(struct lpfc_hba *); 261 261 int lpfc_sli_brdreset(struct lpfc_hba *);
+2 -1
drivers/scsi/lpfc/lpfc_els.c
··· 89 89 return 0; 90 90 91 91 /* Read the HBA Host Attention Register */ 92 - ha_copy = readl(phba->HAregaddr); 92 + if (lpfc_readl(phba->HAregaddr, &ha_copy)) 93 + return 1; 93 94 94 95 if (!(ha_copy & HA_LATT)) 95 96 return 0;
+13 -2
drivers/scsi/lpfc/lpfc_hw.h
··· 1344 1344 #define HS_FFER1 0x80000000 /* Bit 31 */ 1345 1345 #define HS_CRIT_TEMP 0x00000100 /* Bit 8 */ 1346 1346 #define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */ 1347 - 1347 + #define UNPLUG_ERR 0x00000001 /* Indicate pci hot unplug */ 1348 1348 /* Host Control Register */ 1349 1349 1350 1350 #define HC_REG_OFFSET 12 /* Byte offset from register base address */ ··· 1713 1713 #define pde6_apptagval_WORD word2 1714 1714 }; 1715 1715 1716 + struct lpfc_pde7 { 1717 + uint32_t word0; 1718 + #define pde7_type_SHIFT 24 1719 + #define pde7_type_MASK 0x000000ff 1720 + #define pde7_type_WORD word0 1721 + #define pde7_rsvd0_SHIFT 0 1722 + #define pde7_rsvd0_MASK 0x00ffffff 1723 + #define pde7_rsvd0_WORD word0 1724 + uint32_t addrHigh; 1725 + uint32_t addrLow; 1726 + }; 1716 1727 1717 1728 /* Structure for MB Command LOAD_SM and DOWN_LOAD */ 1718 1729 ··· 3632 3621 ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ 3633 3622 QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ 3634 3623 struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ 3635 - struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */ 3624 + struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */ 3636 3625 uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ 3637 3626 } un; 3638 3627 union {
+82 -31
drivers/scsi/lpfc/lpfc_hw4.h
··· 215 215 #define lpfc_fip_flag_WORD word0 216 216 }; 217 217 218 - struct sli4_bls_acc { 218 + struct sli4_bls_rsp { 219 219 uint32_t word0_rsvd; /* Word0 must be reserved */ 220 220 uint32_t word1; 221 221 #define lpfc_abts_orig_SHIFT 0 ··· 231 231 #define lpfc_abts_oxid_MASK 0x0000FFFF 232 232 #define lpfc_abts_oxid_WORD word2 233 233 uint32_t word3; 234 + #define lpfc_vndr_code_SHIFT 0 235 + #define lpfc_vndr_code_MASK 0x000000FF 236 + #define lpfc_vndr_code_WORD word3 237 + #define lpfc_rsn_expln_SHIFT 8 238 + #define lpfc_rsn_expln_MASK 0x000000FF 239 + #define lpfc_rsn_expln_WORD word3 240 + #define lpfc_rsn_code_SHIFT 16 241 + #define lpfc_rsn_code_MASK 0x000000FF 242 + #define lpfc_rsn_code_WORD word3 243 + 234 244 uint32_t word4; 235 245 uint32_t word5_rsvd; /* Word5 must be reserved */ 236 246 }; ··· 721 711 union lpfc_sli4_cfg_shdr { 722 712 struct { 723 713 uint32_t word6; 724 - #define lpfc_mbox_hdr_opcode_SHIFT 0 725 - #define lpfc_mbox_hdr_opcode_MASK 0x000000FF 726 - #define lpfc_mbox_hdr_opcode_WORD word6 727 - #define lpfc_mbox_hdr_subsystem_SHIFT 8 728 - #define lpfc_mbox_hdr_subsystem_MASK 0x000000FF 729 - #define lpfc_mbox_hdr_subsystem_WORD word6 730 - #define lpfc_mbox_hdr_port_number_SHIFT 16 731 - #define lpfc_mbox_hdr_port_number_MASK 0x000000FF 732 - #define lpfc_mbox_hdr_port_number_WORD word6 733 - #define lpfc_mbox_hdr_domain_SHIFT 24 734 - #define lpfc_mbox_hdr_domain_MASK 0x000000FF 735 - #define lpfc_mbox_hdr_domain_WORD word6 714 + #define lpfc_mbox_hdr_opcode_SHIFT 0 715 + #define lpfc_mbox_hdr_opcode_MASK 0x000000FF 716 + #define lpfc_mbox_hdr_opcode_WORD word6 717 + #define lpfc_mbox_hdr_subsystem_SHIFT 8 718 + #define lpfc_mbox_hdr_subsystem_MASK 0x000000FF 719 + #define lpfc_mbox_hdr_subsystem_WORD word6 720 + #define lpfc_mbox_hdr_port_number_SHIFT 16 721 + #define lpfc_mbox_hdr_port_number_MASK 0x000000FF 722 + #define lpfc_mbox_hdr_port_number_WORD word6 723 + #define lpfc_mbox_hdr_domain_SHIFT 24 724 + #define lpfc_mbox_hdr_domain_MASK 0x000000FF 725 + #define lpfc_mbox_hdr_domain_WORD word6 736 726 uint32_t timeout; 737 727 uint32_t request_length; 738 - uint32_t reserved9; 728 + uint32_t word9; 729 + #define lpfc_mbox_hdr_version_SHIFT 0 730 + #define lpfc_mbox_hdr_version_MASK 0x000000FF 731 + #define lpfc_mbox_hdr_version_WORD word9 732 + #define LPFC_Q_CREATE_VERSION_2 2 733 + #define LPFC_Q_CREATE_VERSION_1 1 734 + #define LPFC_Q_CREATE_VERSION_0 0 739 735 } request; 740 736 struct { 741 737 uint32_t word6; ··· 933 917 #define LPFC_CQ_CNT_512 0x1 934 918 #define LPFC_CQ_CNT_1024 0x2 935 919 uint32_t word1; 936 - #define lpfc_cq_eq_id_SHIFT 22 920 + #define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */ 937 921 #define lpfc_cq_eq_id_MASK 0x000000FF 938 922 #define lpfc_cq_eq_id_WORD word1 923 + #define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */ 924 + #define lpfc_cq_eq_id_2_MASK 0x0000FFFF 925 + #define lpfc_cq_eq_id_2_WORD word1 939 926 uint32_t reserved0; 940 927 uint32_t reserved1; 941 928 }; ··· 948 929 union { 949 930 struct { 950 931 uint32_t word0; 932 + #define lpfc_mbx_cq_create_page_size_SHIFT 16 /* Version 2 Only */ 933 + #define lpfc_mbx_cq_create_page_size_MASK 0x000000FF 934 + #define lpfc_mbx_cq_create_page_size_WORD word0 951 935 #define lpfc_mbx_cq_create_num_pages_SHIFT 0 952 936 #define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF 953 937 #define lpfc_mbx_cq_create_num_pages_WORD word0 ··· 991 969 struct lpfc_mbx_wq_create { 992 970 struct mbox_header header; 993 971 union { 994 - struct { 972 + struct { /* Version 0 
Request */ 995 973 uint32_t word0; 996 974 #define lpfc_mbx_wq_create_num_pages_SHIFT 0 997 975 #define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF ··· 1001 979 #define lpfc_mbx_wq_create_cq_id_WORD word0 1002 980 struct dma_address page[LPFC_MAX_WQ_PAGE]; 1003 981 } request; 982 + struct { /* Version 1 Request */ 983 + uint32_t word0; /* Word 0 is the same as in v0 */ 984 + uint32_t word1; 985 + #define lpfc_mbx_wq_create_page_size_SHIFT 0 986 + #define lpfc_mbx_wq_create_page_size_MASK 0x000000FF 987 + #define lpfc_mbx_wq_create_page_size_WORD word1 988 + #define lpfc_mbx_wq_create_wqe_size_SHIFT 8 989 + #define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F 990 + #define lpfc_mbx_wq_create_wqe_size_WORD word1 991 + #define LPFC_WQ_WQE_SIZE_64 0x5 992 + #define LPFC_WQ_WQE_SIZE_128 0x6 993 + #define lpfc_mbx_wq_create_wqe_count_SHIFT 16 994 + #define lpfc_mbx_wq_create_wqe_count_MASK 0x0000FFFF 995 + #define lpfc_mbx_wq_create_wqe_count_WORD word1 996 + uint32_t word2; 997 + struct dma_address page[LPFC_MAX_WQ_PAGE-1]; 998 + } request_1; 1004 999 struct { 1005 1000 uint32_t word0; 1006 1001 #define lpfc_mbx_wq_create_q_id_SHIFT 0 ··· 1046 1007 #define LPFC_DATA_BUF_SIZE 2048 1047 1008 struct rq_context { 1048 1009 uint32_t word0; 1049 - #define lpfc_rq_context_rq_size_SHIFT 16 1050 - #define lpfc_rq_context_rq_size_MASK 0x0000000F 1051 - #define lpfc_rq_context_rq_size_WORD word0 1010 + #define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */ 1011 + #define lpfc_rq_context_rqe_count_MASK 0x0000000F 1012 + #define lpfc_rq_context_rqe_count_WORD word0 1052 1013 #define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ 1053 1014 #define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ 1054 1015 #define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ 1055 1016 #define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ 1017 + #define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */ 1018 + #define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF 1019 + #define lpfc_rq_context_rqe_count_1_WORD word0 1020 + #define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */ 1021 + #define lpfc_rq_context_rqe_size_MASK 0x0000000F 1022 + #define lpfc_rq_context_rqe_size_WORD word0 1023 + #define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ 1024 + #define lpfc_rq_context_page_size_MASK 0x000000FF 1025 + #define lpfc_rq_context_page_size_WORD word0 1056 1026 uint32_t reserved1; 1057 1027 uint32_t word2; 1058 1028 #define lpfc_rq_context_cq_id_SHIFT 16 ··· 1070 1022 #define lpfc_rq_context_buf_size_SHIFT 0 1071 1023 #define lpfc_rq_context_buf_size_MASK 0x0000FFFF 1072 1024 #define lpfc_rq_context_buf_size_WORD word2 1073 - uint32_t reserved3; 1025 + uint32_t buffer_size; /* Version 1 Only */ 1074 1026 }; 1075 1027 1076 1028 struct lpfc_mbx_rq_create { ··· 1110 1062 1111 1063 struct mq_context { 1112 1064 uint32_t word0; 1113 - #define lpfc_mq_context_cq_id_SHIFT 22 1065 + #define lpfc_mq_context_cq_id_SHIFT 22 /* Version 0 Only */ 1114 1066 #define lpfc_mq_context_cq_id_MASK 0x000003FF 1115 1067 #define lpfc_mq_context_cq_id_WORD word0 1116 - #define lpfc_mq_context_count_SHIFT 16 1117 - #define lpfc_mq_context_count_MASK 0x0000000F 1118 - #define lpfc_mq_context_count_WORD word0 1119 - #define LPFC_MQ_CNT_16 0x5 1120 - #define LPFC_MQ_CNT_32 0x6 1121 - #define LPFC_MQ_CNT_64 0x7 1122 - #define LPFC_MQ_CNT_128 0x8 1068 + #define lpfc_mq_context_ring_size_SHIFT 16 1069 + #define lpfc_mq_context_ring_size_MASK 0x0000000F 1070 + #define lpfc_mq_context_ring_size_WORD word0 1071 + #define LPFC_MQ_RING_SIZE_16 
0x5 1072 + #define LPFC_MQ_RING_SIZE_32 0x6 1073 + #define LPFC_MQ_RING_SIZE_64 0x7 1074 + #define LPFC_MQ_RING_SIZE_128 0x8 1123 1075 uint32_t word1; 1124 1076 #define lpfc_mq_context_valid_SHIFT 31 1125 1077 #define lpfc_mq_context_valid_MASK 0x00000001 ··· 1153 1105 union { 1154 1106 struct { 1155 1107 uint32_t word0; 1156 - #define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 1157 - #define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF 1158 - #define lpfc_mbx_mq_create_ext_num_pages_WORD word0 1108 + #define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 1109 + #define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF 1110 + #define lpfc_mbx_mq_create_ext_num_pages_WORD word0 1111 + #define lpfc_mbx_mq_create_ext_cq_id_SHIFT 16 /* Version 1 Only */ 1112 + #define lpfc_mbx_mq_create_ext_cq_id_MASK 0x0000FFFF 1113 + #define lpfc_mbx_mq_create_ext_cq_id_WORD word0 1159 1114 uint32_t async_evt_bmap; 1160 1115 #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK 1161 1116 #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001
+28 -13
drivers/scsi/lpfc/lpfc_init.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2010 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2011 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 507 507 phba->hba_flag &= ~HBA_ERATT_HANDLED; 508 508 509 509 /* Enable appropriate host interrupts */ 510 - status = readl(phba->HCregaddr); 510 + if (lpfc_readl(phba->HCregaddr, &status)) { 511 + spin_unlock_irq(&phba->hbalock); 512 + return -EIO; 513 + } 511 514 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 512 515 if (psli->num_rings > 0) 513 516 status |= HC_R0INT_ENA; ··· 1225 1222 /* Wait for the ER1 bit to clear.*/ 1226 1223 while (phba->work_hs & HS_FFER1) { 1227 1224 msleep(100); 1228 - phba->work_hs = readl(phba->HSregaddr); 1225 + if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { 1226 + phba->work_hs = UNPLUG_ERR ; 1227 + break; 1228 + } 1229 1229 /* If driver is unloading let the worker thread continue */ 1230 1230 if (phba->pport->load_flag & FC_UNLOADING) { 1231 1231 phba->work_hs = 0; ··· 4480 4474 { 4481 4475 phba->lpfc_hba_init_link = lpfc_hba_init_link; 4482 4476 phba->lpfc_hba_down_link = lpfc_hba_down_link; 4477 + phba->lpfc_selective_reset = lpfc_selective_reset; 4483 4478 switch (dev_grp) { 4484 4479 case LPFC_PCI_DEV_LP: 4485 4480 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; ··· 5392 5385 int i, port_error = 0; 5393 5386 uint32_t if_type; 5394 5387 5388 + memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 5389 + memset(&reg_data, 0, sizeof(reg_data)); 5395 5390 if (!phba->sli4_hba.PSMPHRregaddr) 5396 5391 return -ENODEV; 5397 5392 5398 5393 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 5399 5394 for (i = 0; i < 3000; i++) { 5400 - portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr); 5401 - if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) { 5395 + if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 5396 + &portsmphr_reg.word0) || 5397 + (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 5402 5398 /* Port has a fatal POST error, break out */ 5403 5399 port_error = -ENODEV; 5404 5400 break; ··· 5482 5472 break; 5483 5473 case LPFC_SLI_INTF_IF_TYPE_2: 5484 5474 /* Final checks. The port status should be clean. */ 5485 - reg_data.word0 = 5486 - readl(phba->sli4_hba.u.if_type2.STATUSregaddr); 5487 - if (bf_get(lpfc_sliport_status_err, &reg_data)) { 5475 + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 5476 + &reg_data.word0) || 5477 + bf_get(lpfc_sliport_status_err, &reg_data)) { 5488 5478 phba->work_status[0] = 5489 5479 readl(phba->sli4_hba.u.if_type2. 5490 5480 ERR1regaddr); ··· 6770 6760 * the loop again. 6771 6761 */ 6772 6762 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { 6773 - reg_data.word0 = 6774 - readl(phba->sli4_hba.u.if_type2. 6775 - STATUSregaddr); 6763 + if (lpfc_readl(phba->sli4_hba.u.if_type2. 6764 + STATUSregaddr, &reg_data.word0)) { 6765 + rc = -ENODEV; 6766 + break; 6767 + } 6776 6768 if (bf_get(lpfc_sliport_status_rdy, &reg_data)) 6777 6769 break; 6778 6770 if (bf_get(lpfc_sliport_status_rn, &reg_data)) { ··· 6795 6783 } 6796 6784 6797 6785 /* Detect any port errors. */ 6798 - reg_data.word0 = readl(phba->sli4_hba.u.if_type2. 
6799 - STATUSregaddr); 6786 + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 6787 + &reg_data.word0)) { 6788 + rc = -ENODEV; 6789 + break; 6790 + } 6800 6791 if ((bf_get(lpfc_sliport_status_err, &reg_data)) || 6801 6792 (rdy_chk >= 1000)) { 6802 6793 phba->work_status[0] = readl(
+31 -12
drivers/scsi/lpfc/lpfc_scsi.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2011 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 1514 1514 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 1515 1515 struct lpfc_pde5 *pde5 = NULL; 1516 1516 struct lpfc_pde6 *pde6 = NULL; 1517 - struct ulp_bde64 *prot_bde = NULL; 1517 + struct lpfc_pde7 *pde7 = NULL; 1518 1518 dma_addr_t dataphysaddr, protphysaddr; 1519 1519 unsigned short curr_data = 0, curr_prot = 0; 1520 - unsigned int split_offset, protgroup_len; 1520 + unsigned int split_offset; 1521 + unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; 1521 1522 unsigned int protgrp_blks, protgrp_bytes; 1522 1523 unsigned int remainder, subtotal; 1523 1524 int status; ··· 1586 1585 bpl++; 1587 1586 1588 1587 /* setup the first BDE that points to protection buffer */ 1589 - prot_bde = (struct ulp_bde64 *) bpl; 1590 - protphysaddr = sg_dma_address(sgpe); 1591 - prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); 1592 - prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); 1593 - protgroup_len = sg_dma_len(sgpe); 1588 + protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 1589 + protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 1594 1590 1595 1591 /* must be integer multiple of the DIF block length */ 1596 1592 BUG_ON(protgroup_len % 8); 1597 1593 1594 + pde7 = (struct lpfc_pde7 *) bpl; 1595 + memset(pde7, 0, sizeof(struct lpfc_pde7)); 1596 + bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); 1597 + 1598 + pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); 1599 + pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); 1600 + 1598 1601 protgrp_blks = protgroup_len / 8; 1599 1602 protgrp_bytes = protgrp_blks * blksize; 1600 1603 1601 - prot_bde->tus.f.bdeSize = protgroup_len; 1602 - prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR; 1603 - prot_bde->tus.w = le32_to_cpu(bpl->tus.w); 1604 + /* check if this pde is crossing the 4K boundary; if so split */ 1605 + if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { 1606 + protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); 1607 + protgroup_offset += protgroup_remainder; 1608 + protgrp_blks = protgroup_remainder / 8; 1609 + protgrp_bytes = protgroup_remainder * blksize; 1610 + } else { 1611 + protgroup_offset = 0; 1612 + curr_prot++; 1613 + } 1604 1614 1605 - curr_prot++; 1606 1615 num_bde++; 1607 1616 1608 1617 /* setup BDE's for data blocks associated with DIF data */ ··· 1664 1653 1665 1654 } 1666 1655 1656 + if (protgroup_offset) { 1657 + /* update the reference tag */ 1658 + reftag += protgrp_blks; 1659 + bpl++; 1660 + continue; 1661 + } 1662 + 1667 1663 /* are we done ? */ 1668 1664 if (curr_prot == protcnt) { 1669 1665 alldone = 1; ··· 1693 1675 1694 1676 return num_bde; 1695 1677 } 1678 + 1696 1679 /* 1697 1680 * Given a SCSI command that supports DIF, determine composition of protection 1698 1681 * groups involved in setting up buffer lists
+356 -157
drivers/scsi/lpfc/lpfc_sli.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2011 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 3477 3477 int retval = 0; 3478 3478 3479 3479 /* Read the HBA Host Status Register */ 3480 - status = readl(phba->HSregaddr); 3480 + if (lpfc_readl(phba->HSregaddr, &status)) 3481 + return 1; 3481 3482 3482 3483 /* 3483 3484 * Check status register every 100ms for 5 retries, then every ··· 3503 3502 lpfc_sli_brdrestart(phba); 3504 3503 } 3505 3504 /* Read the HBA Host Status Register */ 3506 - status = readl(phba->HSregaddr); 3505 + if (lpfc_readl(phba->HSregaddr, &status)) { 3506 + retval = 1; 3507 + break; 3508 + } 3507 3509 } 3508 3510 3509 3511 /* Check to see if any errors occurred during init */ ··· 3588 3584 uint32_t __iomem *resp_buf; 3589 3585 uint32_t __iomem *mbox_buf; 3590 3586 volatile uint32_t mbox; 3591 - uint32_t hc_copy; 3587 + uint32_t hc_copy, ha_copy, resp_data; 3592 3588 int i; 3593 3589 uint8_t hdrtype; 3594 3590 ··· 3605 3601 resp_buf = phba->MBslimaddr; 3606 3602 3607 3603 /* Disable the error attention */ 3608 - hc_copy = readl(phba->HCregaddr); 3604 + if (lpfc_readl(phba->HCregaddr, &hc_copy)) 3605 + return; 3609 3606 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 3610 3607 readl(phba->HCregaddr); /* flush */ 3611 3608 phba->link_flag |= LS_IGNORE_ERATT; 3612 3609 3613 - if (readl(phba->HAregaddr) & HA_ERATT) { 3610 + if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3611 + return; 3612 + if (ha_copy & HA_ERATT) { 3614 3613 /* Clear Chip error bit */ 3615 3614 writel(HA_ERATT, phba->HAregaddr); 3616 3615 phba->pport->stopped = 1; ··· 3627 3620 mbox_buf = phba->MBslimaddr; 3628 3621 writel(mbox, mbox_buf); 3629 3622 3630 - for (i = 0; 3631 - readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) 3632 - mdelay(1); 3633 - 3634 - if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 3623 + for (i = 0; i < 50; i++) { 3624 + if (lpfc_readl((resp_buf + 1), &resp_data)) 3625 + return; 3626 + if (resp_data != ~(BARRIER_TEST_PATTERN)) 3627 + mdelay(1); 3628 + else 3629 + break; 3630 + } 3631 + resp_data = 0; 3632 + if (lpfc_readl((resp_buf + 1), &resp_data)) 3633 + return; 3634 + if (resp_data != ~(BARRIER_TEST_PATTERN)) { 3635 3635 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 3636 3636 phba->pport->stopped) 3637 3637 goto restore_hc; ··· 3647 3633 } 3648 3634 3649 3635 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 3650 - for (i = 0; readl(resp_buf) != mbox && i < 500; i++) 3651 - mdelay(1); 3636 + resp_data = 0; 3637 + for (i = 0; i < 500; i++) { 3638 + if (lpfc_readl(resp_buf, &resp_data)) 3639 + return; 3640 + if (resp_data != mbox) 3641 + mdelay(1); 3642 + else 3643 + break; 3644 + } 3652 3645 3653 3646 clear_errat: 3654 3647 3655 - while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) 3656 - mdelay(1); 3648 + while (++i < 500) { 3649 + if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3650 + return; 3651 + if (!(ha_copy & HA_ERATT)) 3652 + mdelay(1); 3653 + else 3654 + break; 3655 + } 3657 3656 3658 3657 if (readl(phba->HAregaddr) & HA_ERATT) { 3659 3658 writel(HA_ERATT, phba->HAregaddr); ··· 3713 3686 3714 3687 /* Disable the error attention */ 3715 3688 spin_lock_irq(&phba->hbalock); 3716 - status = readl(phba->HCregaddr); 3689 + if 
(lpfc_readl(phba->HCregaddr, &status)) { 3690 + spin_unlock_irq(&phba->hbalock); 3691 + mempool_free(pmb, phba->mbox_mem_pool); 3692 + return 1; 3693 + } 3717 3694 status &= ~HC_ERINT_ENA; 3718 3695 writel(status, phba->HCregaddr); 3719 3696 readl(phba->HCregaddr); /* flush */ ··· 3751 3720 * 3 seconds we still set HBA_ERROR state because the status of the 3752 3721 * board is now undefined. 3753 3722 */ 3754 - ha_copy = readl(phba->HAregaddr); 3755 - 3723 + if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3724 + return 1; 3756 3725 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 3757 3726 mdelay(100); 3758 - ha_copy = readl(phba->HAregaddr); 3727 + if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3728 + return 1; 3759 3729 } 3760 3730 3761 3731 del_timer_sync(&psli->mbox_tmo); ··· 4050 4018 uint32_t status, i = 0; 4051 4019 4052 4020 /* Read the HBA Host Status Register */ 4053 - status = readl(phba->HSregaddr); 4021 + if (lpfc_readl(phba->HSregaddr, &status)) 4022 + return -EIO; 4054 4023 4055 4024 /* Check status register to see what current state is */ 4056 4025 i = 0; ··· 4106 4073 lpfc_sli_brdrestart(phba); 4107 4074 } 4108 4075 /* Read the HBA Host Status Register */ 4109 - status = readl(phba->HSregaddr); 4076 + if (lpfc_readl(phba->HSregaddr, &status)) 4077 + return -EIO; 4110 4078 } 4111 4079 4112 4080 /* Check to see if any errors occurred during init */ ··· 5170 5136 MAILBOX_t *mb; 5171 5137 struct lpfc_sli *psli = &phba->sli; 5172 5138 uint32_t status, evtctr; 5173 - uint32_t ha_copy; 5139 + uint32_t ha_copy, hc_copy; 5174 5140 int i; 5175 5141 unsigned long timeout; 5176 5142 unsigned long drvr_flag = 0; ··· 5236 5202 goto out_not_finished; 5237 5203 } 5238 5204 5239 - if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 5240 - !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 5241 - spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5242 - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5205 + if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 5206 + if (lpfc_readl(phba->HCregaddr, &hc_copy) || 5207 + !(hc_copy & HC_MBINT_ENA)) { 5208 + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5209 + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5243 5210 "(%d):2528 Mailbox command x%x cannot " 5244 5211 "issue Data: x%x x%x\n", 5245 5212 pmbox->vport ? 
pmbox->vport->vpi : 0, 5246 5213 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 5247 - goto out_not_finished; 5214 + goto out_not_finished; 5215 + } 5248 5216 } 5249 5217 5250 5218 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { ··· 5444 5408 word0 = le32_to_cpu(word0); 5445 5409 } else { 5446 5410 /* First read mbox status word */ 5447 - word0 = readl(phba->MBslimaddr); 5411 + if (lpfc_readl(phba->MBslimaddr, &word0)) { 5412 + spin_unlock_irqrestore(&phba->hbalock, 5413 + drvr_flag); 5414 + goto out_not_finished; 5415 + } 5448 5416 } 5449 5417 5450 5418 /* Read the HBA Host Attention Register */ 5451 - ha_copy = readl(phba->HAregaddr); 5419 + if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 5420 + spin_unlock_irqrestore(&phba->hbalock, 5421 + drvr_flag); 5422 + goto out_not_finished; 5423 + } 5452 5424 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 5453 5425 mb->mbxCommand) * 5454 5426 1000) + jiffies; ··· 5507 5463 word0 = readl(phba->MBslimaddr); 5508 5464 } 5509 5465 /* Read the HBA Host Attention Register */ 5510 - ha_copy = readl(phba->HAregaddr); 5466 + if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 5467 + spin_unlock_irqrestore(&phba->hbalock, 5468 + drvr_flag); 5469 + goto out_not_finished; 5470 + } 5511 5471 } 5512 5472 5513 5473 if (psli->sli_flag & LPFC_SLI_ACTIVE) { ··· 6311 6263 bf_set(lpfc_sli4_sge_last, sgl, 1); 6312 6264 else 6313 6265 bf_set(lpfc_sli4_sge_last, sgl, 0); 6314 - sgl->word2 = cpu_to_le32(sgl->word2); 6315 6266 /* swap the size field back to the cpu so we 6316 6267 * can assign it to the sgl. 6317 6268 */ ··· 6330 6283 bf_set(lpfc_sli4_sge_offset, sgl, offset); 6331 6284 offset += bde.tus.f.bdeSize; 6332 6285 } 6286 + sgl->word2 = cpu_to_le32(sgl->word2); 6333 6287 bpl++; 6334 6288 sgl++; 6335 6289 } ··· 6576 6528 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 6577 6529 sizeof(struct ulp_bde64); 6578 6530 for (i = 0; i < numBdes; i++) { 6579 - if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64) 6580 - break; 6581 6531 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 6532 + if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 6533 + break; 6582 6534 xmit_len += bde.tus.f.bdeSize; 6583 6535 } 6584 6536 /* word3 iocb=IO_TAG wqe=request_payload_len */ ··· 6668 6620 xritag = 0; 6669 6621 break; 6670 6622 case CMD_XMIT_BLS_RSP64_CX: 6671 - /* As BLS ABTS-ACC WQE is very different from other WQEs, 6623 + /* As BLS ABTS RSP WQE is very different from other WQEs, 6672 6624 * we re-construct this WQE here based on information in 6673 6625 * iocbq from scratch. 6674 6626 */ 6675 6627 memset(wqe, 0, sizeof(union lpfc_wqe)); 6676 6628 /* OX_ID is invariable to who sent ABTS to CT exchange */ 6677 6629 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 6678 - bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc)); 6679 - if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) == 6630 + bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 6631 + if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 6680 6632 LPFC_ABTS_UNSOL_INT) { 6681 6633 /* ABTS sent by initiator to CT exchange, the 6682 6634 * RX_ID field will be filled with the newly ··· 6690 6642 * RX_ID from ABTS. 
6691 6643 */ 6692 6644 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 6693 - bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc)); 6645 + bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 6694 6646 } 6695 6647 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 6696 6648 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); ··· 6701 6653 LPFC_WQE_LENLOC_NONE); 6702 6654 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 6703 6655 command_type = OTHER_COMMAND; 6656 + if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 6657 + bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 6658 + bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 6659 + bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 6660 + bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 6661 + bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 6662 + bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 6663 + } 6664 + 6704 6665 break; 6705 6666 case CMD_XRI_ABORTED_CX: 6706 6667 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ ··· 6758 6701 6759 6702 if (piocb->sli4_xritag == NO_XRI) { 6760 6703 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 6761 - piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6704 + piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || 6705 + piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX) 6762 6706 sglq = NULL; 6763 6707 else { 6764 6708 if (pring->txq_cnt) { ··· 8252 8194 piocb->iocb_flag &= ~LPFC_IO_WAKE; 8253 8195 8254 8196 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 8255 - creg_val = readl(phba->HCregaddr); 8197 + if (lpfc_readl(phba->HCregaddr, &creg_val)) 8198 + return IOCB_ERROR; 8256 8199 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 8257 8200 writel(creg_val, phba->HCregaddr); 8258 8201 readl(phba->HCregaddr); /* flush */ ··· 8295 8236 } 8296 8237 8297 8238 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 8298 - creg_val = readl(phba->HCregaddr); 8239 + if (lpfc_readl(phba->HCregaddr, &creg_val)) 8240 + return IOCB_ERROR; 8299 8241 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 8300 8242 writel(creg_val, phba->HCregaddr); 8301 8243 readl(phba->HCregaddr); /* flush */ ··· 8447 8387 uint32_t ha_copy; 8448 8388 8449 8389 /* Read chip Host Attention (HA) register */ 8450 - ha_copy = readl(phba->HAregaddr); 8390 + if (lpfc_readl(phba->HAregaddr, &ha_copy)) 8391 + goto unplug_err; 8392 + 8451 8393 if (ha_copy & HA_ERATT) { 8452 8394 /* Read host status register to retrieve error event */ 8453 - lpfc_sli_read_hs(phba); 8395 + if (lpfc_sli_read_hs(phba)) 8396 + goto unplug_err; 8454 8397 8455 8398 /* Check if there is a deferred error condition is active */ 8456 8399 if ((HS_FFER1 & phba->work_hs) && ··· 8472 8409 return 1; 8473 8410 } 8474 8411 return 0; 8412 + 8413 + unplug_err: 8414 + /* Set the driver HS work bitmap */ 8415 + phba->work_hs |= UNPLUG_ERR; 8416 + /* Set the driver HA work bitmap */ 8417 + phba->work_ha |= HA_ERATT; 8418 + /* Indicate polling handles this ERATT */ 8419 + phba->hba_flag |= HBA_ERATT_HANDLED; 8420 + return 1; 8475 8421 } 8476 8422 8477 8423 /** ··· 8508 8436 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8509 8437 switch (if_type) { 8510 8438 case LPFC_SLI_INTF_IF_TYPE_0: 8511 - uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 8512 - uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 8439 + if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 8440 + &uerr_sta_lo) || 8441 + lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 8442 + &uerr_sta_hi)) { 8443 + phba->work_hs |= UNPLUG_ERR; 8444 + phba->work_ha |= 
HA_ERATT; 8445 + phba->hba_flag |= HBA_ERATT_HANDLED; 8446 + return 1; 8447 + } 8513 8448 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 8514 8449 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 8515 8450 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, ··· 8535 8456 } 8536 8457 break; 8537 8458 case LPFC_SLI_INTF_IF_TYPE_2: 8538 - portstat_reg.word0 = 8539 - readl(phba->sli4_hba.u.if_type2.STATUSregaddr); 8540 - portsmphr = readl(phba->sli4_hba.PSMPHRregaddr); 8459 + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 8460 + &portstat_reg.word0) || 8461 + lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 8462 + &portsmphr)){ 8463 + phba->work_hs |= UNPLUG_ERR; 8464 + phba->work_ha |= HA_ERATT; 8465 + phba->hba_flag |= HBA_ERATT_HANDLED; 8466 + return 1; 8467 + } 8541 8468 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 8542 8469 phba->work_status[0] = 8543 8470 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); ··· 8724 8639 return IRQ_NONE; 8725 8640 /* Need to read HA REG for slow-path events */ 8726 8641 spin_lock_irqsave(&phba->hbalock, iflag); 8727 - ha_copy = readl(phba->HAregaddr); 8642 + if (lpfc_readl(phba->HAregaddr, &ha_copy)) 8643 + goto unplug_error; 8728 8644 /* If somebody is waiting to handle an eratt don't process it 8729 8645 * here. The brdkill function will do this. 8730 8646 */ ··· 8751 8665 } 8752 8666 8753 8667 /* Clear up only attention source related to slow-path */ 8754 - hc_copy = readl(phba->HCregaddr); 8668 + if (lpfc_readl(phba->HCregaddr, &hc_copy)) 8669 + goto unplug_error; 8670 + 8755 8671 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 8756 8672 HC_LAINT_ENA | HC_ERINT_ENA), 8757 8673 phba->HCregaddr); ··· 8776 8688 */ 8777 8689 spin_lock_irqsave(&phba->hbalock, iflag); 8778 8690 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 8779 - control = readl(phba->HCregaddr); 8691 + if (lpfc_readl(phba->HCregaddr, &control)) 8692 + goto unplug_error; 8780 8693 control &= ~HC_LAINT_ENA; 8781 8694 writel(control, phba->HCregaddr); 8782 8695 readl(phba->HCregaddr); /* flush */ ··· 8797 8708 status >>= (4*LPFC_ELS_RING); 8798 8709 if (status & HA_RXMASK) { 8799 8710 spin_lock_irqsave(&phba->hbalock, iflag); 8800 - control = readl(phba->HCregaddr); 8711 + if (lpfc_readl(phba->HCregaddr, &control)) 8712 + goto unplug_error; 8801 8713 8802 8714 lpfc_debugfs_slow_ring_trc(phba, 8803 8715 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", ··· 8831 8741 } 8832 8742 spin_lock_irqsave(&phba->hbalock, iflag); 8833 8743 if (work_ha_copy & HA_ERATT) { 8834 - lpfc_sli_read_hs(phba); 8744 + if (lpfc_sli_read_hs(phba)) 8745 + goto unplug_error; 8835 8746 /* 8836 8747 * Check if there is a deferred error condition 8837 8748 * is active ··· 8963 8872 lpfc_worker_wake_up(phba); 8964 8873 } 8965 8874 return IRQ_HANDLED; 8875 + unplug_error: 8876 + spin_unlock_irqrestore(&phba->hbalock, iflag); 8877 + return IRQ_HANDLED; 8966 8878 8967 8879 } /* lpfc_sli_sp_intr_handler */ 8968 8880 ··· 9013 8919 if (lpfc_intr_state_check(phba)) 9014 8920 return IRQ_NONE; 9015 8921 /* Need to read HA REG for FCP ring and other ring events */ 9016 - ha_copy = readl(phba->HAregaddr); 8922 + if (lpfc_readl(phba->HAregaddr, &ha_copy)) 8923 + return IRQ_HANDLED; 9017 8924 /* Clear up only attention source related to fast-path */ 9018 8925 spin_lock_irqsave(&phba->hbalock, iflag); 9019 8926 /* ··· 9099 9004 return IRQ_NONE; 9100 9005 9101 9006 spin_lock(&phba->hbalock); 9102 - phba->ha_copy = readl(phba->HAregaddr); 9007 + if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 9008 + spin_unlock(&phba->hbalock); 9009 + return 
IRQ_HANDLED; 9010 + } 9011 + 9103 9012 if (unlikely(!phba->ha_copy)) { 9104 9013 spin_unlock(&phba->hbalock); 9105 9014 return IRQ_NONE; ··· 9125 9026 } 9126 9027 9127 9028 /* Clear attention sources except link and error attentions */ 9128 - hc_copy = readl(phba->HCregaddr); 9029 + if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 9030 + spin_unlock(&phba->hbalock); 9031 + return IRQ_HANDLED; 9032 + } 9129 9033 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 9130 9034 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 9131 9035 phba->HCregaddr); ··· 10505 10403 if (!phba->sli4_hba.pc_sli4_params.supported) 10506 10404 hw_page_size = SLI4_PAGE_SIZE; 10507 10405 10508 - 10509 10406 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10510 10407 if (!mbox) 10511 10408 return -ENOMEM; ··· 10514 10413 LPFC_MBOX_OPCODE_CQ_CREATE, 10515 10414 length, LPFC_SLI4_MBX_EMBED); 10516 10415 cq_create = &mbox->u.mqe.un.cq_create; 10416 + shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 10517 10417 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 10518 10418 cq->page_count); 10519 10419 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 10520 10420 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 10521 - bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); 10421 + bf_set(lpfc_mbox_hdr_version, &shdr->request, 10422 + phba->sli4_hba.pc_sli4_params.cqv); 10423 + if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 10424 + bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 10425 + (PAGE_SIZE/SLI4_PAGE_SIZE)); 10426 + bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 10427 + eq->queue_id); 10428 + } else { 10429 + bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 10430 + eq->queue_id); 10431 + } 10522 10432 switch (cq->entry_count) { 10523 10433 default: 10524 10434 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, ··· 10561 10449 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10562 10450 10563 10451 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 10564 - shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 10565 10452 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10566 10453 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10567 10454 if (shdr_status || shdr_add_status || rc) { ··· 10626 10515 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 10627 10516 switch (mq->entry_count) { 10628 10517 case 16: 10629 - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10630 - LPFC_MQ_CNT_16); 10518 + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 10519 + LPFC_MQ_RING_SIZE_16); 10631 10520 break; 10632 10521 case 32: 10633 - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10634 - LPFC_MQ_CNT_32); 10522 + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 10523 + LPFC_MQ_RING_SIZE_32); 10635 10524 break; 10636 10525 case 64: 10637 - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10638 - LPFC_MQ_CNT_64); 10526 + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 10527 + LPFC_MQ_RING_SIZE_64); 10639 10528 break; 10640 10529 case 128: 10641 - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10642 - LPFC_MQ_CNT_128); 10530 + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 10531 + LPFC_MQ_RING_SIZE_128); 10643 10532 break; 10644 10533 } 10645 10534 list_for_each_entry(dmabuf, &mq->page_list, list) { ··· 10697 10586 length, LPFC_SLI4_MBX_EMBED); 10698 10587 10699 10588 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 10589 + shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 10700 10590 bf_set(lpfc_mbx_mq_create_ext_num_pages, 10701 10591 &mq_create_ext->u.request, mq->page_count); 10702 10592 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, ··· 10710 10598 &mq_create_ext->u.request, 1); 10711 10599 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 10712 10600 &mq_create_ext->u.request, 1); 10713 - bf_set(lpfc_mq_context_cq_id, 10714 - &mq_create_ext->u.request.context, cq->queue_id); 10715 10601 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 10602 + bf_set(lpfc_mbox_hdr_version, &shdr->request, 10603 + phba->sli4_hba.pc_sli4_params.mqv); 10604 + if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 10605 + bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 10606 + cq->queue_id); 10607 + else 10608 + bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 10609 + cq->queue_id); 10716 10610 switch (mq->entry_count) { 10717 10611 default: 10718 10612 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, ··· 10728 10610 return -EINVAL; 10729 10611 /* otherwise default to smallest count (drop through) */ 10730 10612 case 16: 10731 - bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10732 - LPFC_MQ_CNT_16); 10613 + bf_set(lpfc_mq_context_ring_size, 10614 + &mq_create_ext->u.request.context, 10615 + LPFC_MQ_RING_SIZE_16); 10733 10616 break; 10734 10617 case 32: 10735 - bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10736 - LPFC_MQ_CNT_32); 10618 + bf_set(lpfc_mq_context_ring_size, 10619 + &mq_create_ext->u.request.context, 10620 + LPFC_MQ_RING_SIZE_32); 10737 10621 break; 10738 10622 case 64: 10739 - bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10740 - LPFC_MQ_CNT_64); 10623 + bf_set(lpfc_mq_context_ring_size, 10624 + &mq_create_ext->u.request.context, 10625 + LPFC_MQ_RING_SIZE_64); 10741 10626 break; 10742 10627 case 128: 10743 - bf_set(lpfc_mq_context_count, 
&mq_create_ext->u.request.context, 10744 - LPFC_MQ_CNT_128); 10628 + bf_set(lpfc_mq_context_ring_size, 10629 + &mq_create_ext->u.request.context, 10630 + LPFC_MQ_RING_SIZE_128); 10745 10631 break; 10746 10632 } 10747 10633 list_for_each_entry(dmabuf, &mq->page_list, list) { ··· 10756 10634 putPaddrHigh(dmabuf->phys); 10757 10635 } 10758 10636 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10759 - shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 10760 10637 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 10761 10638 &mq_create_ext->u.response); 10762 10639 if (rc != MBX_SUCCESS) { ··· 10832 10711 uint32_t shdr_status, shdr_add_status; 10833 10712 union lpfc_sli4_cfg_shdr *shdr; 10834 10713 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10714 + struct dma_address *page; 10835 10715 10836 10716 if (!phba->sli4_hba.pc_sli4_params.supported) 10837 10717 hw_page_size = SLI4_PAGE_SIZE; ··· 10846 10724 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 10847 10725 length, LPFC_SLI4_MBX_EMBED); 10848 10726 wq_create = &mbox->u.mqe.un.wq_create; 10727 + shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 10849 10728 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 10850 10729 wq->page_count); 10851 10730 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 10852 10731 cq->queue_id); 10732 + bf_set(lpfc_mbox_hdr_version, &shdr->request, 10733 + phba->sli4_hba.pc_sli4_params.wqv); 10734 + if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { 10735 + bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 10736 + wq->entry_count); 10737 + switch (wq->entry_size) { 10738 + default: 10739 + case 64: 10740 + bf_set(lpfc_mbx_wq_create_wqe_size, 10741 + &wq_create->u.request_1, 10742 + LPFC_WQ_WQE_SIZE_64); 10743 + break; 10744 + case 128: 10745 + bf_set(lpfc_mbx_wq_create_wqe_size, 10746 + &wq_create->u.request_1, 10747 + LPFC_WQ_WQE_SIZE_128); 10748 + break; 10749 + } 10750 + bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, 10751 + (PAGE_SIZE/SLI4_PAGE_SIZE)); 10752 + page = wq_create->u.request_1.page; 10753 + } else { 10754 + page = wq_create->u.request.page; 10755 + } 10853 10756 list_for_each_entry(dmabuf, &wq->page_list, list) { 10854 10757 memset(dmabuf->virt, 0, hw_page_size); 10855 - wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10856 - putPaddrLow(dmabuf->phys); 10857 - wq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10858 - putPaddrHigh(dmabuf->phys); 10758 + page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 10759 + page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 10859 10760 } 10860 10761 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10861 10762 /* The IOCTL status is embedded in the mailbox subheader. */ 10862 - shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 10863 10763 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10864 10764 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10865 10765 if (shdr_status || shdr_add_status || rc) { ··· 10959 10815 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 10960 10816 length, LPFC_SLI4_MBX_EMBED); 10961 10817 rq_create = &mbox->u.mqe.un.rq_create; 10962 - switch (hrq->entry_count) { 10963 - default: 10964 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10965 - "2535 Unsupported RQ count. 
(%d)\n", 10966 - hrq->entry_count); 10967 - if (hrq->entry_count < 512) 10968 - return -EINVAL; 10969 - /* otherwise default to smallest count (drop through) */ 10970 - case 512: 10971 - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10972 - LPFC_RQ_RING_SIZE_512); 10973 - break; 10974 - case 1024: 10975 - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10976 - LPFC_RQ_RING_SIZE_1024); 10977 - break; 10978 - case 2048: 10979 - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10980 - LPFC_RQ_RING_SIZE_2048); 10981 - break; 10982 - case 4096: 10983 - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10984 - LPFC_RQ_RING_SIZE_4096); 10985 - break; 10818 + shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 10819 + bf_set(lpfc_mbox_hdr_version, &shdr->request, 10820 + phba->sli4_hba.pc_sli4_params.rqv); 10821 + if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 10822 + bf_set(lpfc_rq_context_rqe_count_1, 10823 + &rq_create->u.request.context, 10824 + hrq->entry_count); 10825 + rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 10826 + } else { 10827 + switch (hrq->entry_count) { 10828 + default: 10829 + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10830 + "2535 Unsupported RQ count. (%d)\n", 10831 + hrq->entry_count); 10832 + if (hrq->entry_count < 512) 10833 + return -EINVAL; 10834 + /* otherwise default to smallest count (drop through) */ 10835 + case 512: 10836 + bf_set(lpfc_rq_context_rqe_count, 10837 + &rq_create->u.request.context, 10838 + LPFC_RQ_RING_SIZE_512); 10839 + break; 10840 + case 1024: 10841 + bf_set(lpfc_rq_context_rqe_count, 10842 + &rq_create->u.request.context, 10843 + LPFC_RQ_RING_SIZE_1024); 10844 + break; 10845 + case 2048: 10846 + bf_set(lpfc_rq_context_rqe_count, 10847 + &rq_create->u.request.context, 10848 + LPFC_RQ_RING_SIZE_2048); 10849 + break; 10850 + case 4096: 10851 + bf_set(lpfc_rq_context_rqe_count, 10852 + &rq_create->u.request.context, 10853 + LPFC_RQ_RING_SIZE_4096); 10854 + break; 10855 + } 10856 + bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 10857 + LPFC_HDR_BUF_SIZE); 10986 10858 } 10987 10859 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 10988 10860 cq->queue_id); 10989 10861 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 10990 10862 hrq->page_count); 10991 - bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 10992 - LPFC_HDR_BUF_SIZE); 10993 10863 list_for_each_entry(dmabuf, &hrq->page_list, list) { 10994 10864 memset(dmabuf->virt, 0, hw_page_size); 10995 10865 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = ··· 11013 10855 } 11014 10856 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11015 10857 /* The IOCTL status is embedded in the mailbox subheader. */ 11016 - shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 11017 10858 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11018 10859 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11019 10860 if (shdr_status || shdr_add_status || rc) { ··· 11038 10881 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 11039 10882 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 11040 10883 length, LPFC_SLI4_MBX_EMBED); 11041 - switch (drq->entry_count) { 11042 - default: 11043 - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11044 - "2536 Unsupported RQ count. 
(%d)\n", 11045 - drq->entry_count); 11046 - if (drq->entry_count < 512) 11047 - return -EINVAL; 11048 - /* otherwise default to smallest count (drop through) */ 11049 - case 512: 11050 - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 11051 - LPFC_RQ_RING_SIZE_512); 11052 - break; 11053 - case 1024: 11054 - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 11055 - LPFC_RQ_RING_SIZE_1024); 11056 - break; 11057 - case 2048: 11058 - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 11059 - LPFC_RQ_RING_SIZE_2048); 11060 - break; 11061 - case 4096: 11062 - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 11063 - LPFC_RQ_RING_SIZE_4096); 11064 - break; 10884 + bf_set(lpfc_mbox_hdr_version, &shdr->request, 10885 + phba->sli4_hba.pc_sli4_params.rqv); 10886 + if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 10887 + bf_set(lpfc_rq_context_rqe_count_1, 10888 + &rq_create->u.request.context, 10889 + hrq->entry_count); 10890 + rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; 10891 + } else { 10892 + switch (drq->entry_count) { 10893 + default: 10894 + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10895 + "2536 Unsupported RQ count. (%d)\n", 10896 + drq->entry_count); 10897 + if (drq->entry_count < 512) 10898 + return -EINVAL; 10899 + /* otherwise default to smallest count (drop through) */ 10900 + case 512: 10901 + bf_set(lpfc_rq_context_rqe_count, 10902 + &rq_create->u.request.context, 10903 + LPFC_RQ_RING_SIZE_512); 10904 + break; 10905 + case 1024: 10906 + bf_set(lpfc_rq_context_rqe_count, 10907 + &rq_create->u.request.context, 10908 + LPFC_RQ_RING_SIZE_1024); 10909 + break; 10910 + case 2048: 10911 + bf_set(lpfc_rq_context_rqe_count, 10912 + &rq_create->u.request.context, 10913 + LPFC_RQ_RING_SIZE_2048); 10914 + break; 10915 + case 4096: 10916 + bf_set(lpfc_rq_context_rqe_count, 10917 + &rq_create->u.request.context, 10918 + LPFC_RQ_RING_SIZE_4096); 10919 + break; 10920 + } 10921 + bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 10922 + LPFC_DATA_BUF_SIZE); 11065 10923 } 11066 10924 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 11067 10925 cq->queue_id); 11068 10926 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 11069 10927 drq->page_count); 11070 - bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 11071 - LPFC_DATA_BUF_SIZE); 11072 10928 list_for_each_entry(dmabuf, &drq->page_list, list) { 11073 10929 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 11074 10930 putPaddrLow(dmabuf->phys); ··· 11750 11580 static char *rctl_names[] = FC_RCTL_NAMES_INIT; 11751 11581 char *type_names[] = FC_TYPE_NAMES_INIT; 11752 11582 struct fc_vft_header *fc_vft_hdr; 11583 + uint32_t *header = (uint32_t *) fc_hdr; 11753 11584 11754 11585 switch (fc_hdr->fh_r_ctl) { 11755 11586 case FC_RCTL_DD_UNCAT: /* uncategorized information */ ··· 11799 11628 default: 11800 11629 goto drop; 11801 11630 } 11631 + 11802 11632 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11803 - "2538 Received frame rctl:%s type:%s\n", 11633 + "2538 Received frame rctl:%s type:%s " 11634 + "Frame Data:%08x %08x %08x %08x %08x %08x\n", 11804 11635 rctl_names[fc_hdr->fh_r_ctl], 11805 - type_names[fc_hdr->fh_type]); 11636 + type_names[fc_hdr->fh_type], 11637 + be32_to_cpu(header[0]), be32_to_cpu(header[1]), 11638 + be32_to_cpu(header[2]), be32_to_cpu(header[3]), 11639 + be32_to_cpu(header[4]), be32_to_cpu(header[5])); 11806 11640 return 0; 11807 11641 drop: 11808 11642 lpfc_printf_log(phba, KERN_WARNING, 
LOG_ELS, ··· 12104 11928 } 12105 11929 12106 11930 /** 12107 - * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler 11931 + * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 12108 11932 * @phba: Pointer to HBA context object. 12109 11933 * @cmd_iocbq: pointer to the command iocbq structure. 12110 11934 * @rsp_iocbq: pointer to the response iocbq structure. 12111 11935 * 12112 - * This function handles the sequence abort accept iocb command complete 11936 + * This function handles the sequence abort response iocb command complete 12113 11937 * event. It properly releases the memory allocated to the sequence abort 12114 11938 * accept iocb. 12115 11939 **/ 12116 11940 static void 12117 - lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, 11941 + lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 12118 11942 struct lpfc_iocbq *cmd_iocbq, 12119 11943 struct lpfc_iocbq *rsp_iocbq) 12120 11944 { ··· 12123 11947 } 12124 11948 12125 11949 /** 12126 - * lpfc_sli4_seq_abort_acc - Accept sequence abort 11950 + * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 12127 11951 * @phba: Pointer to HBA context object. 12128 11952 * @fc_hdr: pointer to a FC frame header. 12129 11953 * 12130 - * This function sends a basic accept to a previous unsol sequence abort 11954 + * This function sends a basic response to a previous unsol sequence abort 12131 11955 * event after aborting the sequence handling. 12132 11956 **/ 12133 11957 static void 12134 - lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, 11958 + lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, 12135 11959 struct fc_frame_header *fc_hdr) 12136 11960 { 12137 11961 struct lpfc_iocbq *ctiocb = NULL; ··· 12139 11963 uint16_t oxid, rxid; 12140 11964 uint32_t sid, fctl; 12141 11965 IOCB_t *icmd; 11966 + int rc; 12142 11967 12143 11968 if (!lpfc_is_link_up(phba)) 12144 11969 return; ··· 12160 11983 + phba->sli4_hba.max_cfg_param.xri_base)) 12161 11984 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); 12162 11985 12163 - /* Allocate buffer for acc iocb */ 11986 + /* Allocate buffer for rsp iocb */ 12164 11987 ctiocb = lpfc_sli_get_iocbq(phba); 12165 11988 if (!ctiocb) 12166 11989 return; ··· 12185 12008 12186 12009 ctiocb->iocb_cmpl = NULL; 12187 12010 ctiocb->vport = phba->pport; 12188 - ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl; 12011 + ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 12012 + ctiocb->sli4_xritag = NO_XRI; 12013 + 12014 + /* If the oxid maps to the FCP XRI range or if it is out of range, 12015 + * send a BLS_RJT. The driver no longer has that exchange. 12016 + * Override the IOCB for a BA_RJT. 12017 + */ 12018 + if (oxid > (phba->sli4_hba.max_cfg_param.max_xri + 12019 + phba->sli4_hba.max_cfg_param.xri_base) || 12020 + oxid > (lpfc_sli4_get_els_iocb_cnt(phba) + 12021 + phba->sli4_hba.max_cfg_param.xri_base)) { 12022 + icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 12023 + bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 12024 + bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 12025 + bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 12026 + } 12189 12027 12190 12028 if (fctl & FC_FC_EX_CTX) { 12191 12029 /* ABTS sent by responder to CT exchange, construction 12192 12030 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 12193 12031 * field and RX_ID from ABTS for RX_ID field. 
12194 12032 */ 12195 - bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP); 12196 - bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid); 12197 - ctiocb->sli4_xritag = oxid; 12033 + bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 12034 + bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 12198 12035 } else { 12199 12036 /* ABTS sent by initiator to CT exchange, construction 12200 12037 * of BA_ACC will need to allocate a new XRI as for the 12201 12038 * XRI_TAG and RX_ID fields. 12202 12039 */ 12203 - bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT); 12204 - bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI); 12205 - ctiocb->sli4_xritag = NO_XRI; 12040 + bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 12041 + bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI); 12206 12042 } 12207 - bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid); 12043 + bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 12208 12044 12209 - /* Xmit CT abts accept on exchange <xid> */ 12045 + /* Xmit CT abts response on exchange <xid> */ 12210 12046 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 12211 - "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n", 12212 - CMD_XMIT_BLS_RSP64_CX, phba->link_state); 12213 - lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 12047 + "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 12048 + icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 12049 + 12050 + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 12051 + if (rc == IOCB_ERROR) { 12052 + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 12053 + "2925 Failed to issue CT ABTS RSP x%x on " 12054 + "xri x%x, Data x%x\n", 12055 + icmd->un.xseq64.w5.hcsw.Rctl, oxid, 12056 + phba->link_state); 12057 + lpfc_sli_release_iocbq(phba, ctiocb); 12058 + } 12214 12059 } 12215 12060 12216 12061 /** ··· 12280 12081 lpfc_in_buf_free(phba, &dmabuf->dbuf); 12281 12082 } 12282 12083 /* Send basic accept (BA_ACC) to the abort requester */ 12283 - lpfc_sli4_seq_abort_acc(phba, &fc_hdr); 12084 + lpfc_sli4_seq_abort_rsp(phba, &fc_hdr); 12284 12085 } 12285 12086 12286 12087 /**
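The lpfc queue-create paths above (MQ, WQ and both RQs) now key off the queue-create version the firmware reports in pc_sli4_params, filling either the legacy request layout or the version-1 layout before issuing the mailbox command. A minimal sketch of the pattern for the work queue, using simplified stand-in names (req_v0/req_v1 and set_wqe_count() are not the real lpfc structures):

	struct dma_address *page;

	if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
		/* v1 request carries an explicit WQE count/size and its own page list */
		set_wqe_count(&req_v1, wq->entry_count);
		page = req_v1.page;
	} else {
		/* v0 keeps the legacy layout */
		page = req_v0.page;
	}
	/* either way, the DMA page addresses are written through one pointer */
	list_for_each_entry(dmabuf, &wq->page_list, list) {
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}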
+1 -1
drivers/scsi/lpfc/lpfc_sli4.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2009 Emulex. All rights reserved. * 4 + * Copyright (C) 2009-2011 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * *
+1 -1
drivers/scsi/lpfc/lpfc_version.h
··· 18 18 * included with this package. * 19 19 *******************************************************************/ 20 20 21 - #define LPFC_DRIVER_VERSION "8.3.21" 21 + #define LPFC_DRIVER_VERSION "8.3.22" 22 22 #define LPFC_DRIVER_NAME "lpfc" 23 23 #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24 24 #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
+49
drivers/scsi/mpt2sas/mpt2sas_base.c
··· 1748 1748 } 1749 1749 1750 1750 /** 1751 + * _base_display_hp_branding - Display branding string 1752 + * @ioc: per adapter object 1753 + * 1754 + * Return nothing. 1755 + */ 1756 + static void 1757 + _base_display_hp_branding(struct MPT2SAS_ADAPTER *ioc) 1758 + { 1759 + if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID) 1760 + return; 1761 + 1762 + switch (ioc->pdev->device) { 1763 + case MPI2_MFGPAGE_DEVID_SAS2004: 1764 + switch (ioc->pdev->subsystem_device) { 1765 + case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID: 1766 + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1767 + MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING); 1768 + break; 1769 + default: 1770 + break; 1771 + } 1772 + case MPI2_MFGPAGE_DEVID_SAS2308_2: 1773 + switch (ioc->pdev->subsystem_device) { 1774 + case MPT2SAS_HP_2_4_INTERNAL_SSDID: 1775 + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1776 + MPT2SAS_HP_2_4_INTERNAL_BRANDING); 1777 + break; 1778 + case MPT2SAS_HP_2_4_EXTERNAL_SSDID: 1779 + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1780 + MPT2SAS_HP_2_4_EXTERNAL_BRANDING); 1781 + break; 1782 + case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID: 1783 + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1784 + MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING); 1785 + break; 1786 + case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID: 1787 + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1788 + MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING); 1789 + break; 1790 + default: 1791 + break; 1792 + } 1793 + default: 1794 + break; 1795 + } 1796 + } 1797 + 1798 + /** 1751 1799 * _base_display_ioc_capabilities - Disply IOC's capabilities. 1752 1800 * @ioc: per adapter object 1753 1801 * ··· 1826 1778 1827 1779 _base_display_dell_branding(ioc); 1828 1780 _base_display_intel_branding(ioc); 1781 + _base_display_hp_branding(ioc); 1829 1782 1830 1783 printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name); 1831 1784
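_base_display_hp_branding() above maps (PCI device id, subsystem device id) pairs to a branding string with nested switches, mirroring the existing Dell and Intel helpers. Purely as an illustration, the same mapping could be expressed as a lookup table; the struct and array below are hypothetical, not part of the driver:

	struct hp_branding { u16 devid; u16 ssdid; const char *name; };

	static const struct hp_branding hp_brandings[] = {
		{ MPI2_MFGPAGE_DEVID_SAS2004, MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID,
		  MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING },
		{ MPI2_MFGPAGE_DEVID_SAS2308_2, MPT2SAS_HP_2_4_INTERNAL_SSDID,
		  MPT2SAS_HP_2_4_INTERNAL_BRANDING },
		/* ... the H221, H222 and H220i entries follow the same shape ... */
	};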
+20
drivers/scsi/mpt2sas/mpt2sas_base.h
··· 168 168 #define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E 169 169 #define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F 170 170 171 + 172 + /* 173 + * HP HBA branding 174 + */ 175 + #define MPT2SAS_HP_3PAR_SSVID 0x1590 176 + #define MPT2SAS_HP_2_4_INTERNAL_BRANDING "HP H220 Host Bus Adapter" 177 + #define MPT2SAS_HP_2_4_EXTERNAL_BRANDING "HP H221 Host Bus Adapter" 178 + #define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING "HP H222 Host Bus Adapter" 179 + #define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING "HP H220i Host Bus Adapter" 180 + #define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING "HP H210i Host Bus Adapter" 181 + 182 + /* 183 + * HO HBA SSDIDs 184 + */ 185 + #define MPT2SAS_HP_2_4_INTERNAL_SSDID 0x0041 186 + #define MPT2SAS_HP_2_4_EXTERNAL_SSDID 0x0042 187 + #define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID 0x0043 188 + #define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID 0x0044 189 + #define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046 190 + 171 191 /* 172 192 * per target private data 173 193 */
+7
drivers/scsi/mvsas/mv_init.c
··· 663 663 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, 664 664 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, 665 665 { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, 666 + { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, 667 + { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, 668 + { PCI_VDEVICE(TTI, 0x2721), chip_9480 }, 669 + { PCI_VDEVICE(TTI, 0x2722), chip_9480 }, 670 + { PCI_VDEVICE(TTI, 0x2740), chip_9480 }, 671 + { PCI_VDEVICE(TTI, 0x2744), chip_9480 }, 672 + { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, 666 673 667 674 { } /* terminate list */ 668 675 };
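The new mvsas entries use PCI_VDEVICE(), which supplies the vendor constant and wildcards the subsystem ids. For reference, the first new entry expands (per the standard macro) to:

	{ PCI_VENDOR_ID_TTI, 0x2710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, chip_9480 },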
+4 -8
drivers/scsi/qla4xxx/ql4_def.h
··· 53 53 #define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022 54 54 #endif 55 55 56 + #define ISP4XXX_PCI_FN_1 0x1 57 + #define ISP4XXX_PCI_FN_2 0x3 58 + 56 59 #define QLA_SUCCESS 0 57 60 #define QLA_ERROR 1 58 61 ··· 236 233 237 234 unsigned long flags; /* DDB Flags */ 238 235 239 - unsigned long dev_scan_wait_to_start_relogin; 240 - unsigned long dev_scan_wait_to_complete_relogin; 241 - 242 236 uint16_t fw_ddb_index; /* DDB firmware index */ 243 237 uint16_t options; 244 238 uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */ ··· 289 289 * DDB flags. 290 290 */ 291 291 #define DF_RELOGIN 0 /* Relogin to device */ 292 - #define DF_NO_RELOGIN 1 /* Do not relogin if IOCTL 293 - * logged it out */ 294 292 #define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ 295 293 #define DF_FO_MASKED 3 296 294 ··· 374 376 #define AF_LINK_UP 8 /* 0x00000100 */ 375 377 #define AF_IRQ_ATTACHED 10 /* 0x00000400 */ 376 378 #define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ 377 - #define AF_HBA_GOING_AWAY 12 /* 0x00001000 */ 379 + #define AF_HA_REMOVAL 12 /* 0x00001000 */ 378 380 #define AF_INTx_ENABLED 15 /* 0x00008000 */ 379 381 #define AF_MSI_ENABLED 16 /* 0x00010000 */ 380 382 #define AF_MSIX_ENABLED 17 /* 0x00020000 */ ··· 477 479 uint32_t timer_active; 478 480 479 481 /* Recovery Timers */ 480 - uint32_t discovery_wait; 481 482 atomic_t check_relogin_timeouts; 482 483 uint32_t retry_reset_ha_cnt; 483 484 uint32_t isp_reset_timer; /* reset test timer */ ··· 762 765 /* Defines for process_aen() */ 763 766 #define PROCESS_ALL_AENS 0 764 767 #define FLUSH_DDB_CHANGED_AENS 1 765 - #define RELOGIN_DDB_CHANGED_AENS 2 766 768 767 769 #endif /*_QLA4XXX_H */
+1
drivers/scsi/qla4xxx/ql4_fw.h
··· 455 455 uint8_t res0; /* 07 */ 456 456 uint16_t eth_mtu_size; /* 08-09 */ 457 457 uint16_t add_fw_options; /* 0A-0B */ 458 + #define SERIALIZE_TASK_MGMT 0x0400 458 459 459 460 uint8_t hb_interval; /* 0C */ 460 461 uint8_t inst_num; /* 0D */
-1
drivers/scsi/qla4xxx/ql4_glbl.h
··· 136 136 void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); 137 137 138 138 extern int ql4xextended_error_logging; 139 - extern int ql4xdiscoverywait; 140 139 extern int ql4xdontresethba; 141 140 extern int ql4xenablemsix; 142 141
+34 -173
drivers/scsi/qla4xxx/ql4_init.c
··· 723 723 return relogin; 724 724 } 725 725 726 + static void qla4xxx_flush_AENS(struct scsi_qla_host *ha) 727 + { 728 + unsigned long wtime; 729 + 730 + /* Flush the 0x8014 AEN from the firmware as a result of 731 + * Auto connect. We are basically doing get_firmware_ddb() 732 + * to determine whether we need to log back in or not. 733 + * Trying to do a set ddb before we have processed 0x8014 734 + * will result in another set_ddb() for the same ddb. In other 735 + * words there will be stale entries in the aen_q. 736 + */ 737 + wtime = jiffies + (2 * HZ); 738 + do { 739 + if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) 740 + if (ha->firmware_state & (BIT_2 | BIT_0)) 741 + return; 742 + 743 + if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) 744 + qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 745 + 746 + msleep(1000); 747 + } while (!time_after_eq(jiffies, wtime)); 748 + } 749 + 726 750 /** 727 - * qla4xxx_configure_ddbs - builds driver ddb list 751 + * qla4xxx_build_ddb_list - builds driver ddb list 728 752 * @ha: Pointer to host adapter structure. 729 753 * 730 754 * This routine searches for all valid firmware ddb entries and builds 731 755 * an internal ddb list. Ddbs that are considered valid are those with 732 756 * a device state of SESSION_ACTIVE. 757 + * A relogin (set_ddb) is issued for DDBs that are not online. 733 758 **/ 734 759 static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) 735 760 { ··· 768 743 dma_addr_t fw_ddb_entry_dma; 769 744 uint32_t ipv6_device; 770 745 uint32_t new_tgt; 746 + 747 + qla4xxx_flush_AENS(ha); 771 748 772 749 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 773 750 &fw_ddb_entry_dma, GFP_KERNEL); ··· 874 847 return status; 875 848 } 876 849 877 - struct qla4_relog_scan { 878 - int halt_wait; 879 - uint32_t conn_err; 880 - uint32_t fw_ddb_index; 881 - uint32_t next_fw_ddb_index; 882 - uint32_t fw_ddb_device_state; 883 - }; 884 - 885 - static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs) 886 - { 887 - struct ddb_entry *ddb_entry; 888 - 889 - if (qla4_is_relogin_allowed(ha, rs->conn_err)) { 890 - /* We either have a device that is in 891 - * the process of relogging in or a 892 - * device that is waiting to be 893 - * relogged in */ 894 - rs->halt_wait = 0; 895 - 896 - ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, 897 - rs->fw_ddb_index); 898 - if (ddb_entry == NULL) 899 - return QLA_ERROR; 900 - 901 - if (ddb_entry->dev_scan_wait_to_start_relogin != 0 902 - && time_after_eq(jiffies, 903 - ddb_entry-> 904 - dev_scan_wait_to_start_relogin)) 905 - { 906 - ddb_entry->dev_scan_wait_to_start_relogin = 0; 907 - qla4xxx_set_ddb_entry(ha, rs->fw_ddb_index, 0); 908 - } 909 - } 910 - return QLA_SUCCESS; 911 - } 912 - 913 - static int qla4_scan_for_relogin(struct scsi_qla_host *ha, 914 - struct qla4_relog_scan *rs) 915 - { 916 - int error; 917 - 918 - /* scan for relogins 919 - * ----------------- */ 920 - for (rs->fw_ddb_index = 0; rs->fw_ddb_index < MAX_DDB_ENTRIES; 921 - rs->fw_ddb_index = rs->next_fw_ddb_index) { 922 - if (qla4xxx_get_fwddb_entry(ha, rs->fw_ddb_index, NULL, 0, 923 - NULL, &rs->next_fw_ddb_index, 924 - &rs->fw_ddb_device_state, 925 - &rs->conn_err, NULL, NULL) 926 - == QLA_ERROR) 927 - return QLA_ERROR; 928 - 929 - if (rs->fw_ddb_device_state == DDB_DS_LOGIN_IN_PROCESS) 930 - rs->halt_wait = 0; 931 - 932 - if (rs->fw_ddb_device_state == DDB_DS_SESSION_FAILED || 933 - rs->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) { 934 - error = qla4_test_rdy(ha, rs); 935 - if (error) 
936 - return error; 937 - } 938 - 939 - /* We know we've reached the last device when 940 - * next_fw_ddb_index is 0 */ 941 - if (rs->next_fw_ddb_index == 0) 942 - break; 943 - } 944 - return QLA_SUCCESS; 945 - } 946 - 947 - /** 948 - * qla4xxx_devices_ready - wait for target devices to be logged in 949 - * @ha: pointer to adapter structure 950 - * 951 - * This routine waits up to ql4xdiscoverywait seconds 952 - * F/W database during driver load time. 953 - **/ 954 - static int qla4xxx_devices_ready(struct scsi_qla_host *ha) 955 - { 956 - int error; 957 - unsigned long discovery_wtime; 958 - struct qla4_relog_scan rs; 959 - 960 - discovery_wtime = jiffies + (ql4xdiscoverywait * HZ); 961 - 962 - DEBUG(printk("Waiting (%d) for devices ...\n", ql4xdiscoverywait)); 963 - do { 964 - /* poll for AEN. */ 965 - qla4xxx_get_firmware_state(ha); 966 - if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) { 967 - /* Set time-between-relogin timer */ 968 - qla4xxx_process_aen(ha, RELOGIN_DDB_CHANGED_AENS); 969 - } 970 - 971 - /* if no relogins active or needed, halt discvery wait */ 972 - rs.halt_wait = 1; 973 - 974 - error = qla4_scan_for_relogin(ha, &rs); 975 - 976 - if (rs.halt_wait) { 977 - DEBUG2(printk("scsi%ld: %s: Delay halted. Devices " 978 - "Ready.\n", ha->host_no, __func__)); 979 - return QLA_SUCCESS; 980 - } 981 - 982 - msleep(2000); 983 - } while (!time_after_eq(jiffies, discovery_wtime)); 984 - 985 - DEBUG3(qla4xxx_get_conn_event_log(ha)); 986 - 987 - return QLA_SUCCESS; 988 - } 989 - 990 - static void qla4xxx_flush_AENS(struct scsi_qla_host *ha) 991 - { 992 - unsigned long wtime; 993 - 994 - /* Flush the 0x8014 AEN from the firmware as a result of 995 - * Auto connect. We are basically doing get_firmware_ddb() 996 - * to determine whether we need to log back in or not. 997 - * Trying to do a set ddb before we have processed 0x8014 998 - * will result in another set_ddb() for the same ddb. In other 999 - * words there will be stale entries in the aen_q. 1000 - */ 1001 - wtime = jiffies + (2 * HZ); 1002 - do { 1003 - if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) 1004 - if (ha->firmware_state & (BIT_2 | BIT_0)) 1005 - return; 1006 - 1007 - if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) 1008 - qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 1009 - 1010 - msleep(1000); 1011 - } while (!time_after_eq(jiffies, wtime)); 1012 - 1013 - } 1014 - 1015 850 static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha) 1016 851 { 1017 852 uint16_t fw_ddb_index; ··· 885 996 886 997 for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++) 887 998 ha->fw_ddb_index_map[fw_ddb_index] = 888 - (struct ddb_entry *)INVALID_ENTRY; 999 + (struct ddb_entry *)INVALID_ENTRY; 889 1000 890 1001 ha->tot_ddbs = 0; 891 1002 892 - qla4xxx_flush_AENS(ha); 893 - 894 - /* Wait for an AEN */ 895 - qla4xxx_devices_ready(ha); 896 - 897 - /* 898 - * First perform device discovery for active 899 - * fw ddb indexes and build 900 - * ddb list. 901 - */ 902 - if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR) 903 - return status; 904 - 905 - /* 906 - * Targets can come online after the inital discovery, so processing 907 - * the aens here will catch them. 908 - */ 909 - if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) 910 - qla4xxx_process_aen(ha, PROCESS_ALL_AENS); 1003 + /* Perform device discovery and build ddb list. 
*/ 1004 + status = qla4xxx_build_ddb_list(ha); 911 1005 912 1006 return status; 913 1007 } ··· 1409 1537 uint32_t state, uint32_t conn_err) 1410 1538 { 1411 1539 struct ddb_entry * ddb_entry; 1412 - uint32_t old_fw_ddb_device_state; 1413 1540 1414 1541 /* check for out of range index */ 1415 1542 if (fw_ddb_index >= MAX_DDB_ENTRIES) ··· 1424 1553 } 1425 1554 1426 1555 /* Device already exists in our database. */ 1427 - old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; 1428 1556 DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for " 1429 1557 "index [%d]\n", ha->host_no, __func__, 1430 1558 ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); 1431 - if (old_fw_ddb_device_state == state && 1432 - state == DDB_DS_SESSION_ACTIVE) { 1433 - if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { 1434 - atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); 1435 - iscsi_unblock_session(ddb_entry->sess); 1436 - } 1437 - return QLA_SUCCESS; 1438 - } 1439 1559 1440 1560 ddb_entry->fw_ddb_device_state = state; 1441 1561 /* Device is back online. */ 1442 - if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { 1562 + if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) && 1563 + (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) { 1443 1564 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); 1444 1565 atomic_set(&ddb_entry->relogin_retry_count, 0); 1445 1566 atomic_set(&ddb_entry->relogin_timer, 0); 1446 1567 clear_bit(DF_RELOGIN, &ddb_entry->flags); 1447 - clear_bit(DF_NO_RELOGIN, &ddb_entry->flags); 1448 1568 iscsi_unblock_session(ddb_entry->sess); 1449 1569 iscsi_session_event(ddb_entry->sess, 1450 1570 ISCSI_KEVENT_CREATE_SESSION); ··· 1443 1581 * Change the lun state to READY in case the lun TIMEOUT before 1444 1582 * the device came back. 1445 1583 */ 1446 - } else { 1584 + } else if (ddb_entry->fw_ddb_device_state != DDB_DS_SESSION_ACTIVE) { 1447 1585 /* Device went away, mark device missing */ 1448 1586 if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) { 1449 1587 DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing " ··· 1460 1598 */ 1461 1599 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED && 1462 1600 !test_bit(DF_RELOGIN, &ddb_entry->flags) && 1463 - !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) && 1464 1601 qla4_is_relogin_allowed(ha, conn_err)) { 1465 1602 /* 1466 1603 * This triggers a relogin. After the relogin_timer
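With the discovery-wait machinery gone, a DDB state change now follows one simplified rule: a transition to SESSION_ACTIVE only touches entries that are not already online, anything else marks an online entry missing, and a SESSION_FAILED entry is scheduled for relogin when allowed. Condensed sketch of that flow; mark_online(), mark_missing() and schedule_relogin() are placeholders for the inline code in the hunk above:

	if (state == DDB_DS_SESSION_ACTIVE &&
	    atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
		mark_online(ddb_entry);		/* unblock session, reset relogin counters */
	} else if (state != DDB_DS_SESSION_ACTIVE) {
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			mark_missing(ddb_entry);	/* device went away */
		if (state == DDB_DS_SESSION_FAILED &&
		    !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
		    qla4_is_relogin_allowed(ha, conn_err))
			schedule_relogin(ddb_entry);	/* relogin_timer path */
	}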
+3 -28
drivers/scsi/qla4xxx/ql4_isr.c
··· 801 801 &ha->reg->ctrl_status); 802 802 readl(&ha->reg->ctrl_status); 803 803 804 - if (!test_bit(AF_HBA_GOING_AWAY, &ha->flags)) 804 + if (!test_bit(AF_HA_REMOVAL, &ha->flags)) 805 805 set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 806 806 807 807 break; ··· 1008 1008 mbox_sts[0], mbox_sts[2], 1009 1009 mbox_sts[3])); 1010 1010 break; 1011 - } else if (process_aen == RELOGIN_DDB_CHANGED_AENS) { 1012 - /* for use during init time, we only want to 1013 - * relogin non-active ddbs */ 1014 - struct ddb_entry *ddb_entry; 1015 - 1016 - ddb_entry = 1017 - /* FIXME: name length? */ 1018 - qla4xxx_lookup_ddb_by_fw_index(ha, 1019 - mbox_sts[2]); 1020 - if (!ddb_entry) 1021 - break; 1022 - 1023 - ddb_entry->dev_scan_wait_to_complete_relogin = 1024 - 0; 1025 - ddb_entry->dev_scan_wait_to_start_relogin = 1026 - jiffies + 1027 - ((ddb_entry->default_time2wait + 1028 - 4) * HZ); 1029 - 1030 - DEBUG2(printk("scsi%ld: ddb [%d] initiate" 1031 - " RELOGIN after %d seconds\n", 1032 - ha->host_no, 1033 - ddb_entry->fw_ddb_index, 1034 - ddb_entry->default_time2wait + 1035 - 4)); 1036 - break; 1037 1011 } 1038 - 1012 + case PROCESS_ALL_AENS: 1013 + default: 1039 1014 if (mbox_sts[1] == 0) { /* Global DB change. */ 1040 1015 qla4xxx_reinitialize_ddb_list(ha); 1041 1016 } else if (mbox_sts[1] == 1) { /* Specific device. */
+24 -10
drivers/scsi/qla4xxx/ql4_mbx.c
··· 32 32 u_long wait_count; 33 33 uint32_t intr_status; 34 34 unsigned long flags = 0; 35 + uint32_t dev_state; 35 36 36 37 /* Make sure that pointers are valid */ 37 38 if (!mbx_cmd || !mbx_sts) { ··· 41 40 return status; 42 41 } 43 42 44 - if (is_qla8022(ha) && 45 - test_bit(AF_FW_RECOVERY, &ha->flags)) { 46 - DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: prematurely " 47 - "completing mbx cmd as firmware recovery detected\n", 48 - ha->host_no, __func__)); 49 - return status; 43 + if (is_qla8022(ha)) { 44 + if (test_bit(AF_FW_RECOVERY, &ha->flags)) { 45 + DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: " 46 + "prematurely completing mbx cmd as firmware " 47 + "recovery detected\n", ha->host_no, __func__)); 48 + return status; 49 + } 50 + /* Do not send any mbx cmd if h/w is in failed state*/ 51 + qla4_8xxx_idc_lock(ha); 52 + dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 53 + qla4_8xxx_idc_unlock(ha); 54 + if (dev_state == QLA82XX_DEV_FAILED) { 55 + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in " 56 + "failed state, do not send any mailbox commands\n", 57 + ha->host_no, __func__); 58 + return status; 59 + } 50 60 } 51 61 52 62 if ((is_aer_supported(ha)) && ··· 151 139 if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && 152 140 test_bit(AF_INTERRUPTS_ON, &ha->flags) && 153 141 test_bit(AF_ONLINE, &ha->flags) && 154 - !test_bit(AF_HBA_GOING_AWAY, &ha->flags)) { 142 + !test_bit(AF_HA_REMOVAL, &ha->flags)) { 155 143 /* Do not poll for completion. Use completion queue */ 156 144 set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags); 157 145 wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ); ··· 407 395 /*memcpy(ha->alias, init_fw_cb->Alias, 408 396 min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ 409 397 410 - /* Save Command Line Paramater info */ 411 - ha->discovery_wait = ql4xdiscoverywait; 412 - 413 398 if (ha->acb_version == ACB_SUPPORTED) { 414 399 ha->ipv6_options = init_fw_cb->ipv6_opts; 415 400 ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts; ··· 475 466 __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB); 476 467 477 468 init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); 469 + 470 + /* Set bit for "serialize task mgmt" all other bits need to be zero */ 471 + init_fw_cb->add_fw_options = 0; 472 + init_fw_cb->add_fw_options |= 473 + __constant_cpu_to_le16(SERIALIZE_TASK_MGMT); 478 474 479 475 if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) 480 476 != QLA_SUCCESS) {
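qla4xxx now refuses to start any mailbox command once the ISP82xx device state reads back as failed; the IDC lock is held only long enough to sample the register. Shape of the guard, assuming the helpers shown in the hunk above:

	if (is_qla8022(ha)) {
		uint32_t dev_state;

		qla4_8xxx_idc_lock(ha);
		dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
		qla4_8xxx_idc_unlock(ha);

		if (dev_state == QLA82XX_DEV_FAILED)
			return status;	/* leave the mailbox untouched */
	}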
+1 -2
drivers/scsi/qla4xxx/ql4_nx.c
··· 2304 2304 void 2305 2305 qla4_8xxx_disable_intrs(struct scsi_qla_host *ha) 2306 2306 { 2307 - if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) 2307 + if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags)) 2308 2308 qla4_8xxx_mbx_intr_disable(ha); 2309 2309 2310 2310 spin_lock_irq(&ha->hardware_lock); 2311 2311 /* BIT 10 - set */ 2312 2312 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); 2313 2313 spin_unlock_irq(&ha->hardware_lock); 2314 - clear_bit(AF_INTERRUPTS_ON, &ha->flags); 2315 2314 } 2316 2315 2317 2316 struct ql4_init_msix_entry {
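The qla4_8xxx_disable_intrs() change replaces a separate test_bit()/clear_bit() pair with one test_and_clear_bit(): the check and the clear become a single atomic read-modify-write, so only one caller can observe AF_INTERRUPTS_ON set and the disable path runs at most once per enable.

	if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
		qla4_8xxx_mbx_intr_disable(ha);	/* cannot race with a second disable */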
+95 -51
drivers/scsi/qla4xxx/ql4_os.c
··· 29 29 /* 30 30 * Module parameter information and variables 31 31 */ 32 - int ql4xdiscoverywait = 60; 33 - module_param(ql4xdiscoverywait, int, S_IRUGO | S_IWUSR); 34 - MODULE_PARM_DESC(ql4xdiscoverywait, "Discovery wait time"); 35 - 36 32 int ql4xdontresethba = 0; 37 33 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); 38 34 MODULE_PARM_DESC(ql4xdontresethba, ··· 51 55 " 2 = enable MSI interrupt mechanism."); 52 56 53 57 #define QL4_DEF_QDEPTH 32 58 + static int ql4xmaxqdepth = QL4_DEF_QDEPTH; 59 + module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); 60 + MODULE_PARM_DESC(ql4xmaxqdepth, 61 + "Maximum queue depth to report for target devices.\n" 62 + " Default: 32."); 63 + 64 + static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 65 + module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 66 + MODULE_PARM_DESC(ql4xsess_recovery_tmo, 67 + "Target Session Recovery Timeout.\n" 68 + " Default: 30 sec."); 54 69 55 70 /* 56 71 * SCSI host template entry points ··· 172 165 DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout " 173 166 "of (%d) secs exhausted, marking device DEAD.\n", 174 167 ha->host_no, __func__, ddb_entry->fw_ddb_index, 175 - QL4_SESS_RECOVERY_TMO)); 168 + ddb_entry->sess->recovery_tmo)); 176 169 } 177 170 } 178 171 ··· 302 295 { 303 296 int err; 304 297 305 - ddb_entry->sess->recovery_tmo = QL4_SESS_RECOVERY_TMO; 298 + ddb_entry->sess->recovery_tmo = ql4xsess_recovery_tmo; 306 299 307 300 err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index); 308 301 if (err) { ··· 760 753 if (!pci_channel_offline(ha->pdev)) 761 754 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 762 755 763 - if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) { 764 - DEBUG2(ql4_printk(KERN_INFO, ha, "%s exited. HBA GOING AWAY\n", 765 - __func__)); 766 - return; 767 - } 768 - 769 756 if (is_qla8022(ha)) { 770 757 qla4_8xxx_watchdog(ha); 771 758 } ··· 1068 1067 1069 1068 /* Disable the board */ 1070 1069 ql4_printk(KERN_INFO, ha, "Disabling the board\n"); 1071 - set_bit(AF_HBA_GOING_AWAY, &ha->flags); 1072 1070 1073 1071 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 1074 1072 qla4xxx_mark_all_devices_missing(ha); ··· 1218 1218 return status; 1219 1219 } 1220 1220 1221 + static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) 1222 + { 1223 + struct ddb_entry *ddb_entry, *dtemp; 1224 + 1225 + list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) { 1226 + if ((atomic_read(&ddb_entry->state) == DDB_STATE_MISSING) || 1227 + (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD)) { 1228 + if (ddb_entry->fw_ddb_device_state == 1229 + DDB_DS_SESSION_ACTIVE) { 1230 + atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); 1231 + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 1232 + " marked ONLINE\n", ha->host_no, __func__, 1233 + ddb_entry->fw_ddb_index); 1234 + 1235 + iscsi_unblock_session(ddb_entry->sess); 1236 + } else 1237 + qla4xxx_relogin_device(ha, ddb_entry); 1238 + } 1239 + } 1240 + } 1241 + 1221 1242 void qla4xxx_wake_dpc(struct scsi_qla_host *ha) 1222 1243 { 1223 1244 if (ha->dpc_thread && ··· 1279 1258 ha->host_no, __func__, ha->flags)); 1280 1259 goto do_dpc_exit; 1281 1260 } 1282 - 1283 - /* HBA is in the process of being permanently disabled. 
1284 - * Don't process anything */ 1285 - if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) 1286 - return; 1287 1261 1288 1262 if (is_qla8022(ha)) { 1289 1263 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { ··· 1347 1331 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { 1348 1332 if (!test_bit(AF_LINK_UP, &ha->flags)) { 1349 1333 /* ---- link down? --- */ 1350 - list_for_each_entry_safe(ddb_entry, dtemp, 1351 - &ha->ddb_list, list) { 1352 - if (atomic_read(&ddb_entry->state) == 1353 - DDB_STATE_ONLINE) 1354 - qla4xxx_mark_device_missing(ha, 1355 - ddb_entry); 1356 - } 1334 + qla4xxx_mark_all_devices_missing(ha); 1357 1335 } else { 1358 1336 /* ---- link up? --- * 1359 1337 * F/W will auto login to all devices ONLY ONCE after ··· 1356 1346 * manually relogin to devices when recovering from 1357 1347 * connection failures, logouts, expired KATO, etc. */ 1358 1348 1359 - list_for_each_entry_safe(ddb_entry, dtemp, 1360 - &ha->ddb_list, list) { 1361 - if ((atomic_read(&ddb_entry->state) == 1362 - DDB_STATE_MISSING) || 1363 - (atomic_read(&ddb_entry->state) == 1364 - DDB_STATE_DEAD)) { 1365 - if (ddb_entry->fw_ddb_device_state == 1366 - DDB_DS_SESSION_ACTIVE) { 1367 - atomic_set(&ddb_entry->state, 1368 - DDB_STATE_ONLINE); 1369 - ql4_printk(KERN_INFO, ha, 1370 - "scsi%ld: %s: ddb[%d]" 1371 - " marked ONLINE\n", 1372 - ha->host_no, __func__, 1373 - ddb_entry->fw_ddb_index); 1374 - 1375 - iscsi_unblock_session( 1376 - ddb_entry->sess); 1377 - } else 1378 - qla4xxx_relogin_device( 1379 - ha, ddb_entry); 1380 - } 1381 - 1382 - } 1349 + qla4xxx_relogin_all_devices(ha); 1383 1350 } 1384 1351 } 1385 1352 ··· 1617 1630 uint8_t init_retry_count = 0; 1618 1631 char buf[34]; 1619 1632 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; 1633 + uint32_t dev_state; 1620 1634 1621 1635 if (pci_enable_device(pdev)) 1622 1636 return -1; ··· 1701 1713 status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); 1702 1714 while ((!test_bit(AF_ONLINE, &ha->flags)) && 1703 1715 init_retry_count++ < MAX_INIT_RETRIES) { 1716 + 1717 + if (is_qla8022(ha)) { 1718 + qla4_8xxx_idc_lock(ha); 1719 + dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 1720 + qla4_8xxx_idc_unlock(ha); 1721 + if (dev_state == QLA82XX_DEV_FAILED) { 1722 + ql4_printk(KERN_WARNING, ha, "%s: don't retry " 1723 + "initialize adapter. H/W is in failed state\n", 1724 + __func__); 1725 + break; 1726 + } 1727 + } 1704 1728 DEBUG2(printk("scsi: %s: retrying adapter initialization " 1705 1729 "(%d)\n", __func__, init_retry_count)); 1706 1730 ··· 1815 1815 } 1816 1816 1817 1817 /** 1818 + * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize 1819 + * @ha: pointer to adapter structure 1820 + * 1821 + * Mark the other ISP-4xxx port to indicate that the driver is being removed, 1822 + * so that the other port will not re-initialize while in the process of 1823 + * removing the ha due to driver unload or hba hotplug. 
1824 + **/ 1825 + static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) 1826 + { 1827 + struct scsi_qla_host *other_ha = NULL; 1828 + struct pci_dev *other_pdev = NULL; 1829 + int fn = ISP4XXX_PCI_FN_2; 1830 + 1831 + /*iscsi function numbers for ISP4xxx is 1 and 3*/ 1832 + if (PCI_FUNC(ha->pdev->devfn) & BIT_1) 1833 + fn = ISP4XXX_PCI_FN_1; 1834 + 1835 + other_pdev = 1836 + pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 1837 + ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 1838 + fn)); 1839 + 1840 + /* Get other_ha if other_pdev is valid and state is enable*/ 1841 + if (other_pdev) { 1842 + if (atomic_read(&other_pdev->enable_cnt)) { 1843 + other_ha = pci_get_drvdata(other_pdev); 1844 + if (other_ha) { 1845 + set_bit(AF_HA_REMOVAL, &other_ha->flags); 1846 + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: " 1847 + "Prevent %s reinit\n", __func__, 1848 + dev_name(&other_ha->pdev->dev))); 1849 + } 1850 + } 1851 + pci_dev_put(other_pdev); 1852 + } 1853 + } 1854 + 1855 + /** 1818 1856 * qla4xxx_remove_adapter - calback function to remove adapter. 1819 1857 * @pci_dev: PCI device pointer 1820 1858 **/ ··· 1862 1824 1863 1825 ha = pci_get_drvdata(pdev); 1864 1826 1865 - set_bit(AF_HBA_GOING_AWAY, &ha->flags); 1827 + if (!is_qla8022(ha)) 1828 + qla4xxx_prevent_other_port_reinit(ha); 1866 1829 1867 1830 /* remove devs from iscsi_sessions to scsi_devices */ 1868 1831 qla4xxx_free_ddb_list(ha); ··· 1907 1868 { 1908 1869 struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target); 1909 1870 struct ddb_entry *ddb = sess->dd_data; 1871 + int queue_depth = QL4_DEF_QDEPTH; 1910 1872 1911 1873 sdev->hostdata = ddb; 1912 1874 sdev->tagged_supported = 1; 1913 - scsi_activate_tcq(sdev, QL4_DEF_QDEPTH); 1875 + 1876 + if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) 1877 + queue_depth = ql4xmaxqdepth; 1878 + 1879 + scsi_activate_tcq(sdev, queue_depth); 1914 1880 return 0; 1915 1881 } 1916 1882
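On ISP4xxx the two iSCSI ports sit at PCI functions 1 and 3 of the same slot, so qla4xxx_prevent_other_port_reinit() locates the sibling function and flags it with AF_HA_REMOVAL before this port is torn down. Condensed sketch of the lookup (error handling trimmed, same helpers as the hunk above):

	int fn = (PCI_FUNC(ha->pdev->devfn) & BIT_1) ? ISP4XXX_PCI_FN_1
						     : ISP4XXX_PCI_FN_2;
	struct pci_dev *other_pdev = pci_get_domain_bus_and_slot(
			pci_domain_nr(ha->pdev->bus), ha->pdev->bus->number,
			PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), fn));

	if (other_pdev) {
		if (atomic_read(&other_pdev->enable_cnt)) {
			struct scsi_qla_host *other_ha = pci_get_drvdata(other_pdev);

			if (other_ha)
				set_bit(AF_HA_REMOVAL, &other_ha->flags);
		}
		pci_dev_put(other_pdev);	/* drop the reference from the lookup */
	}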
+1 -1
drivers/scsi/qla4xxx/ql4_version.h
··· 5 5 * See LICENSE.qla4xxx for copyright and licensing details. 6 6 */ 7 7 8 - #define QLA4XXX_DRIVER_VERSION "5.02.00-k5" 8 + #define QLA4XXX_DRIVER_VERSION "5.02.00-k6"
+1 -1
drivers/scsi/scsi_transport_iscsi.c
··· 1917 1917 #define iscsi_priv_session_rw_attr(field, format) \ 1918 1918 iscsi_priv_session_attr_show(field, format) \ 1919 1919 iscsi_priv_session_attr_store(field) \ 1920 - static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUGO, \ 1920 + static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \ 1921 1921 show_priv_session_##field, \ 1922 1922 store_priv_session_##field) 1923 1923 iscsi_priv_session_rw_attr(recovery_tmo, "%d");
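The one-character mode change above removes world write permission from the iscsi private-session attributes; in octal terms (standard values of these macros):

	/* S_IRUGO | S_IWUGO == 0666  world-writable sysfs attribute           */
	/* S_IRUGO | S_IWUSR == 0644  readable by everyone, writable by owner  */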
+16 -47
drivers/scsi/sd.c
··· 2027 2027 int old_rcd = sdkp->RCD; 2028 2028 int old_dpofua = sdkp->DPOFUA; 2029 2029 2030 - if (sdp->skip_ms_page_8) { 2031 - if (sdp->type == TYPE_RBC) 2032 - goto defaults; 2033 - else { 2034 - modepage = 0x3F; 2035 - dbd = 0; 2036 - } 2037 - } else if (sdp->type == TYPE_RBC) { 2030 + if (sdp->skip_ms_page_8) 2031 + goto defaults; 2032 + 2033 + if (sdp->type == TYPE_RBC) { 2038 2034 modepage = 6; 2039 2035 dbd = 8; 2040 2036 } else { ··· 2058 2062 */ 2059 2063 if (len < 3) 2060 2064 goto bad_sense; 2061 - else if (len > SD_BUF_SIZE) { 2062 - sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " 2063 - "data from %d to %d bytes\n", len, SD_BUF_SIZE); 2064 - len = SD_BUF_SIZE; 2065 - } 2065 + if (len > 20) 2066 + len = 20; 2067 + 2068 + /* Take headers and block descriptors into account */ 2069 + len += data.header_length + data.block_descriptor_length; 2070 + if (len > SD_BUF_SIZE) 2071 + goto bad_sense; 2066 2072 2067 2073 /* Get the data */ 2068 2074 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); ··· 2072 2074 if (scsi_status_is_good(res)) { 2073 2075 int offset = data.header_length + data.block_descriptor_length; 2074 2076 2075 - while (offset < len) { 2076 - u8 page_code = buffer[offset] & 0x3F; 2077 - u8 spf = buffer[offset] & 0x40; 2078 - 2079 - if (page_code == 8 || page_code == 6) { 2080 - /* We're interested only in the first 3 bytes. 2081 - */ 2082 - if (len - offset <= 2) { 2083 - sd_printk(KERN_ERR, sdkp, "Incomplete " 2084 - "mode parameter data\n"); 2085 - goto defaults; 2086 - } else { 2087 - modepage = page_code; 2088 - goto Page_found; 2089 - } 2090 - } else { 2091 - /* Go to the next page */ 2092 - if (spf && len - offset > 3) 2093 - offset += 4 + (buffer[offset+2] << 8) + 2094 - buffer[offset+3]; 2095 - else if (!spf && len - offset > 1) 2096 - offset += 2 + buffer[offset+1]; 2097 - else { 2098 - sd_printk(KERN_ERR, sdkp, "Incomplete " 2099 - "mode parameter data\n"); 2100 - goto defaults; 2101 - } 2102 - } 2077 + if (offset >= SD_BUF_SIZE - 2) { 2078 + sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n"); 2079 + goto defaults; 2103 2080 } 2104 2081 2105 - if (modepage == 0x3F) { 2106 - sd_printk(KERN_ERR, sdkp, "No Caching mode page " 2107 - "present\n"); 2108 - goto defaults; 2109 - } else if ((buffer[offset] & 0x3f) != modepage) { 2082 + if ((buffer[offset] & 0x3f) != modepage) { 2110 2083 sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); 2111 2084 goto defaults; 2112 2085 } 2113 - Page_found: 2086 + 2114 2087 if (modepage == 8) { 2115 2088 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); 2116 2089 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
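The revert restores the bounded second MODE SENSE in sd_read_cache_type(): only the first 20 bytes of mode-page data are requested, plus whatever mode parameter header and block descriptors the device reported, and anything that would not fit the local buffer is treated as bad sense. Condensed view of that length handling (variables as in the surrounding function; sketch only):

	if (len < 3)
		goto bad_sense;
	if (len > 20)
		len = 20;			/* the caching page itself is 20 bytes */

	/* take the mode parameter header and block descriptors into account */
	len += data.header_length + data.block_descriptor_length;
	if (len > SD_BUF_SIZE)
		goto bad_sense;			/* would not fit the local SD_BUF_SIZE buffer */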
+27 -21
drivers/scsi/ses.c
··· 35 35 36 36 struct ses_device { 37 37 unsigned char *page1; 38 + unsigned char *page1_types; 38 39 unsigned char *page2; 39 40 unsigned char *page10; 40 41 short page1_len; 42 + short page1_num_types; 41 43 short page2_len; 42 44 short page10_len; 43 45 }; ··· 112 110 int i, j, count = 0, descriptor = ecomp->number; 113 111 struct scsi_device *sdev = to_scsi_device(edev->edev.parent); 114 112 struct ses_device *ses_dev = edev->scratch; 115 - unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 113 + unsigned char *type_ptr = ses_dev->page1_types; 116 114 unsigned char *desc_ptr = ses_dev->page2 + 8; 117 115 118 116 /* Clear everything */ 119 117 memset(desc_ptr, 0, ses_dev->page2_len - 8); 120 - for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) { 118 + for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { 121 119 for (j = 0; j < type_ptr[1]; j++) { 122 120 desc_ptr += 4; 123 121 if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && ··· 142 140 int i, j, count = 0, descriptor = ecomp->number; 143 141 struct scsi_device *sdev = to_scsi_device(edev->edev.parent); 144 142 struct ses_device *ses_dev = edev->scratch; 145 - unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 143 + unsigned char *type_ptr = ses_dev->page1_types; 146 144 unsigned char *desc_ptr = ses_dev->page2 + 8; 147 145 148 146 ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); 149 147 150 - for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) { 148 + for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { 151 149 for (j = 0; j < type_ptr[1]; j++) { 152 150 desc_ptr += 4; 153 151 if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && ··· 360 358 unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL; 361 359 int i, j, page7_len, len, components; 362 360 struct ses_device *ses_dev = edev->scratch; 363 - int types = ses_dev->page1[10]; 361 + int types = ses_dev->page1_num_types; 364 362 unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); 365 363 366 364 if (!hdr_buf) ··· 392 390 len = (desc_ptr[2] << 8) + desc_ptr[3]; 393 391 /* skip past overall descriptor */ 394 392 desc_ptr += len + 4; 395 - if (ses_dev->page10) 396 - addl_desc_ptr = ses_dev->page10 + 8; 397 393 } 398 - type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 394 + if (ses_dev->page10) 395 + addl_desc_ptr = ses_dev->page10 + 8; 396 + type_ptr = ses_dev->page1_types; 399 397 components = 0; 400 398 for (i = 0; i < types; i++, type_ptr += 4) { 401 399 for (j = 0; j < type_ptr[1]; j++) { ··· 505 503 u32 result; 506 504 int i, types, len, components = 0; 507 505 int err = -ENOMEM; 506 + int num_enclosures; 508 507 struct enclosure_device *edev; 509 508 struct ses_component *scomp = NULL; 510 509 ··· 533 530 if (result) 534 531 goto recv_failed; 535 532 536 - if (hdr_buf[1] != 0) { 537 - /* FIXME: need subenclosure support; I've just never 538 - * seen a device with subenclosures and it makes the 539 - * traversal routines more complex */ 540 - sdev_printk(KERN_ERR, sdev, 541 - "FIXME driver has no support for subenclosures (%d)\n", 542 - hdr_buf[1]); 543 - goto err_free; 544 - } 545 - 546 533 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; 547 534 buf = kzalloc(len, GFP_KERNEL); 548 535 if (!buf) ··· 542 549 if (result) 543 550 goto recv_failed; 544 551 545 - types = buf[10]; 552 + types = 0; 546 553 547 - type_ptr = buf + 12 + buf[11]; 554 + /* we always have one main enclosure and the rest are referred 555 + * to as secondary subenclosures */ 556 + num_enclosures = buf[1] + 1; 548 557 
549 - for (i = 0; i < types; i++, type_ptr += 4) { 558 + /* begin at the enclosure descriptor */ 559 + type_ptr = buf + 8; 560 + /* skip all the enclosure descriptors */ 561 + for (i = 0; i < num_enclosures && type_ptr < buf + len; i++) { 562 + types += type_ptr[2]; 563 + type_ptr += type_ptr[3] + 4; 564 + } 565 + 566 + ses_dev->page1_types = type_ptr; 567 + ses_dev->page1_num_types = types; 568 + 569 + for (i = 0; i < types && type_ptr < buf + len; i++, type_ptr += 4) { 550 570 if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || 551 571 type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) 552 572 components += type_ptr[1];
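Subenclosure support changes how the SES configuration page (page 1) is walked: byte 1 gives the number of secondary subenclosures, each (sub)enclosure contributes one enclosure descriptor starting at byte 8, and the type descriptor header list only begins after the last of those descriptors. Annotated restatement of the loop from the hunk above ("buf" holds the page):

	types = 0;
	num_enclosures = buf[1] + 1;		/* primary + secondary subenclosures */
	type_ptr = buf + 8;			/* first enclosure descriptor */
	for (i = 0; i < num_enclosures && type_ptr < buf + len; i++) {
		types += type_ptr[2];		/* type descriptor headers it declares */
		type_ptr += type_ptr[3] + 4;	/* 4-byte header + descriptor text */
	}
	/* type_ptr now points at the concatenated type descriptor header list,
	 * cached as page1_types/page1_num_types for the control/status paths */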
+2
drivers/target/Kconfig
··· 29 29 Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered 30 30 passthrough access to Linux/SCSI device 31 31 32 + source "drivers/target/loopback/Kconfig" 33 + 32 34 endif
+5 -2
drivers/target/Makefile
··· 1 - EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/ 2 1 3 2 target_core_mod-y := target_core_configfs.o \ 4 3 target_core_device.o \ ··· 12 13 target_core_transport.o \ 13 14 target_core_cdb.o \ 14 15 target_core_ua.o \ 15 - target_core_rd.o 16 + target_core_rd.o \ 17 + target_core_stat.o 16 18 17 19 obj-$(CONFIG_TARGET_CORE) += target_core_mod.o 18 20 ··· 21 21 obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o 22 22 obj-$(CONFIG_TCM_FILEIO) += target_core_file.o 23 23 obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o 24 + 25 + # Fabric modules 26 + obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
+11
drivers/target/loopback/Kconfig
··· 1 + config LOOPBACK_TARGET 2 + tristate "TCM Virtual SAS target and Linux/SCSI LDD fabric loopback module" 3 + help 4 + Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD 5 + fabric loopback module. 6 + 7 + config LOOPBACK_TARGET_CDB_DEBUG 8 + bool "TCM loopback fabric module CDB debug code" 9 + depends on LOOPBACK_TARGET 10 + help 11 + Say Y here to enable the TCM loopback fabric module CDB debug code
+1
drivers/target/loopback/Makefile
··· 1 + obj-$(CONFIG_LOOPBACK_TARGET) += tcm_loop.o
+1579
drivers/target/loopback/tcm_loop.c
··· 1 + /******************************************************************************* 2 + * 3 + * This file contains the Linux/SCSI LLD virtual SCSI initiator driver 4 + * for emulated SAS initiator ports 5 + * 6 + * © Copyright 2011 RisingTide Systems LLC. 7 + * 8 + * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 9 + * 10 + * Author: Nicholas A. Bellinger <nab@risingtidesystems.com> 11 + * 12 + * This program is free software; you can redistribute it and/or modify 13 + * it under the terms of the GNU General Public License as published by 14 + * the Free Software Foundation; either version 2 of the License, or 15 + * (at your option) any later version. 16 + * 17 + * This program is distributed in the hope that it will be useful, 18 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 + * GNU General Public License for more details. 21 + ****************************************************************************/ 22 + 23 + #include <linux/module.h> 24 + #include <linux/moduleparam.h> 25 + #include <linux/init.h> 26 + #include <linux/slab.h> 27 + #include <linux/types.h> 28 + #include <linux/configfs.h> 29 + #include <scsi/scsi.h> 30 + #include <scsi/scsi_tcq.h> 31 + #include <scsi/scsi_host.h> 32 + #include <scsi/scsi_device.h> 33 + #include <scsi/scsi_cmnd.h> 34 + #include <scsi/libsas.h> /* For TASK_ATTR_* */ 35 + 36 + #include <target/target_core_base.h> 37 + #include <target/target_core_transport.h> 38 + #include <target/target_core_fabric_ops.h> 39 + #include <target/target_core_fabric_configfs.h> 40 + #include <target/target_core_fabric_lib.h> 41 + #include <target/target_core_configfs.h> 42 + #include <target/target_core_device.h> 43 + #include <target/target_core_tpg.h> 44 + #include <target/target_core_tmr.h> 45 + 46 + #include "tcm_loop.h" 47 + 48 + #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) 49 + 50 + /* Local pointer to allocated TCM configfs fabric module */ 51 + static struct target_fabric_configfs *tcm_loop_fabric_configfs; 52 + 53 + static struct kmem_cache *tcm_loop_cmd_cache; 54 + 55 + static int tcm_loop_hba_no_cnt; 56 + 57 + /* 58 + * Allocate a tcm_loop cmd descriptor from target_core_mod code 59 + * 60 + * Can be called from interrupt context in tcm_loop_queuecommand() below 61 + */ 62 + static struct se_cmd *tcm_loop_allocate_core_cmd( 63 + struct tcm_loop_hba *tl_hba, 64 + struct se_portal_group *se_tpg, 65 + struct scsi_cmnd *sc) 66 + { 67 + struct se_cmd *se_cmd; 68 + struct se_session *se_sess; 69 + struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus; 70 + struct tcm_loop_cmd *tl_cmd; 71 + int sam_task_attr; 72 + 73 + if (!tl_nexus) { 74 + scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" 75 + " does not exist\n"); 76 + set_host_byte(sc, DID_ERROR); 77 + return NULL; 78 + } 79 + se_sess = tl_nexus->se_sess; 80 + 81 + tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC); 82 + if (!tl_cmd) { 83 + printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n"); 84 + set_host_byte(sc, DID_ERROR); 85 + return NULL; 86 + } 87 + se_cmd = &tl_cmd->tl_se_cmd; 88 + /* 89 + * Save the pointer to struct scsi_cmnd *sc 90 + */ 91 + tl_cmd->sc = sc; 92 + /* 93 + * Locate the SAM Task Attr from struct scsi_cmnd * 94 + */ 95 + if (sc->device->tagged_supported) { 96 + switch (sc->tag) { 97 + case HEAD_OF_QUEUE_TAG: 98 + sam_task_attr = TASK_ATTR_HOQ; 99 + break; 100 + case ORDERED_QUEUE_TAG: 101 + sam_task_attr = 
TASK_ATTR_ORDERED; 102 + break; 103 + default: 104 + sam_task_attr = TASK_ATTR_SIMPLE; 105 + break; 106 + } 107 + } else 108 + sam_task_attr = TASK_ATTR_SIMPLE; 109 + 110 + /* 111 + * Initialize struct se_cmd descriptor from target_core_mod infrastructure 112 + */ 113 + transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 114 + scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr, 115 + &tl_cmd->tl_sense_buf[0]); 116 + 117 + /* 118 + * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi 119 + */ 120 + if (scsi_bidi_cmnd(sc)) 121 + T_TASK(se_cmd)->t_tasks_bidi = 1; 122 + /* 123 + * Locate the struct se_lun pointer and attach it to struct se_cmd 124 + */ 125 + if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) { 126 + kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); 127 + set_host_byte(sc, DID_NO_CONNECT); 128 + return NULL; 129 + } 130 + 131 + transport_device_setup_cmd(se_cmd); 132 + return se_cmd; 133 + } 134 + 135 + /* 136 + * Called by struct target_core_fabric_ops->new_cmd_map() 137 + * 138 + * Always called in process context. A non zero return value 139 + * here will signal to handle an exception based on the return code. 140 + */ 141 + static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) 142 + { 143 + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, 144 + struct tcm_loop_cmd, tl_se_cmd); 145 + struct scsi_cmnd *sc = tl_cmd->sc; 146 + void *mem_ptr, *mem_bidi_ptr = NULL; 147 + u32 sg_no_bidi = 0; 148 + int ret; 149 + /* 150 + * Allocate the necessary tasks to complete the received CDB+data 151 + */ 152 + ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd); 153 + if (ret == -1) { 154 + /* Out of Resources */ 155 + return PYX_TRANSPORT_LU_COMM_FAILURE; 156 + } else if (ret == -2) { 157 + /* 158 + * Handle case for SAM_STAT_RESERVATION_CONFLICT 159 + */ 160 + if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) 161 + return PYX_TRANSPORT_RESERVATION_CONFLICT; 162 + /* 163 + * Otherwise, return SAM_STAT_CHECK_CONDITION and return 164 + * sense data. 165 + */ 166 + return PYX_TRANSPORT_USE_SENSE_REASON; 167 + } 168 + /* 169 + * Setup the struct scatterlist memory from the received 170 + * struct scsi_cmnd. 171 + */ 172 + if (scsi_sg_count(sc)) { 173 + se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM; 174 + mem_ptr = (void *)scsi_sglist(sc); 175 + /* 176 + * For BIDI commands, pass in the extra READ buffer 177 + * to transport_generic_map_mem_to_cmd() below.. 178 + */ 179 + if (T_TASK(se_cmd)->t_tasks_bidi) { 180 + struct scsi_data_buffer *sdb = scsi_in(sc); 181 + 182 + mem_bidi_ptr = (void *)sdb->table.sgl; 183 + sg_no_bidi = sdb->table.nents; 184 + } 185 + } else { 186 + /* 187 + * Used for DMA_NONE 188 + */ 189 + mem_ptr = NULL; 190 + } 191 + /* 192 + * Map the SG memory into struct se_mem->page linked list using the same 193 + * physical memory at sg->page_link. 194 + */ 195 + ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr, 196 + scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi); 197 + if (ret < 0) 198 + return PYX_TRANSPORT_LU_COMM_FAILURE; 199 + 200 + return 0; 201 + } 202 + 203 + /* 204 + * Called from struct target_core_fabric_ops->check_stop_free() 205 + */ 206 + static void tcm_loop_check_stop_free(struct se_cmd *se_cmd) 207 + { 208 + /* 209 + * Do not release struct se_cmd's containing a valid TMR 210 + * pointer. These will be released directly in tcm_loop_device_reset() 211 + * with transport_generic_free_cmd(). 
212 + */ 213 + if (se_cmd->se_tmr_req) 214 + return; 215 + /* 216 + * Release the struct se_cmd, which will make a callback to release 217 + * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd() 218 + */ 219 + transport_generic_free_cmd(se_cmd, 0, 1, 0); 220 + } 221 + 222 + /* 223 + * Called from struct target_core_fabric_ops->release_cmd_to_pool() 224 + */ 225 + static void tcm_loop_deallocate_core_cmd(struct se_cmd *se_cmd) 226 + { 227 + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, 228 + struct tcm_loop_cmd, tl_se_cmd); 229 + 230 + kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); 231 + } 232 + 233 + static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer, 234 + char **start, off_t offset, 235 + int length, int inout) 236 + { 237 + return sprintf(buffer, "tcm_loop_proc_info()\n"); 238 + } 239 + 240 + static int tcm_loop_driver_probe(struct device *); 241 + static int tcm_loop_driver_remove(struct device *); 242 + 243 + static int pseudo_lld_bus_match(struct device *dev, 244 + struct device_driver *dev_driver) 245 + { 246 + return 1; 247 + } 248 + 249 + static struct bus_type tcm_loop_lld_bus = { 250 + .name = "tcm_loop_bus", 251 + .match = pseudo_lld_bus_match, 252 + .probe = tcm_loop_driver_probe, 253 + .remove = tcm_loop_driver_remove, 254 + }; 255 + 256 + static struct device_driver tcm_loop_driverfs = { 257 + .name = "tcm_loop", 258 + .bus = &tcm_loop_lld_bus, 259 + }; 260 + /* 261 + * Used with root_device_register() in tcm_loop_alloc_core_bus() below 262 + */ 263 + struct device *tcm_loop_primary; 264 + 265 + /* 266 + * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and 267 + * drivers/scsi/libiscsi.c:iscsi_change_queue_depth() 268 + */ 269 + static int tcm_loop_change_queue_depth( 270 + struct scsi_device *sdev, 271 + int depth, 272 + int reason) 273 + { 274 + switch (reason) { 275 + case SCSI_QDEPTH_DEFAULT: 276 + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); 277 + break; 278 + case SCSI_QDEPTH_QFULL: 279 + scsi_track_queue_full(sdev, depth); 280 + break; 281 + case SCSI_QDEPTH_RAMP_UP: 282 + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); 283 + break; 284 + default: 285 + return -EOPNOTSUPP; 286 + } 287 + return sdev->queue_depth; 288 + } 289 + 290 + /* 291 + * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data 292 + * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs) 293 + */ 294 + static int tcm_loop_queuecommand( 295 + struct Scsi_Host *sh, 296 + struct scsi_cmnd *sc) 297 + { 298 + struct se_cmd *se_cmd; 299 + struct se_portal_group *se_tpg; 300 + struct tcm_loop_hba *tl_hba; 301 + struct tcm_loop_tpg *tl_tpg; 302 + 303 + TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x" 304 + " scsi_buf_len: %u\n", sc->device->host->host_no, 305 + sc->device->id, sc->device->channel, sc->device->lun, 306 + sc->cmnd[0], scsi_bufflen(sc)); 307 + /* 308 + * Locate the tcm_loop_hba_t pointer 309 + */ 310 + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); 311 + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; 312 + se_tpg = &tl_tpg->tl_se_tpg; 313 + /* 314 + * Determine the SAM Task Attribute and allocate tl_cmd and 315 + * tl_cmd->tl_se_cmd from TCM infrastructure 316 + */ 317 + se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc); 318 + if (!se_cmd) { 319 + sc->scsi_done(sc); 320 + return 0; 321 + } 322 + /* 323 + * Queue up the newly allocated to be processed in TCM thread context. 
324 + */ 325 + transport_generic_handle_cdb_map(se_cmd); 326 + return 0; 327 + } 328 + 329 + /* 330 + * Called from SCSI EH process context to issue a LUN_RESET TMR 331 + * to struct scsi_device 332 + */ 333 + static int tcm_loop_device_reset(struct scsi_cmnd *sc) 334 + { 335 + struct se_cmd *se_cmd = NULL; 336 + struct se_portal_group *se_tpg; 337 + struct se_session *se_sess; 338 + struct tcm_loop_cmd *tl_cmd = NULL; 339 + struct tcm_loop_hba *tl_hba; 340 + struct tcm_loop_nexus *tl_nexus; 341 + struct tcm_loop_tmr *tl_tmr = NULL; 342 + struct tcm_loop_tpg *tl_tpg; 343 + int ret = FAILED; 344 + /* 345 + * Locate the tcm_loop_hba_t pointer 346 + */ 347 + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); 348 + /* 349 + * Locate the tl_nexus and se_sess pointers 350 + */ 351 + tl_nexus = tl_hba->tl_nexus; 352 + if (!tl_nexus) { 353 + printk(KERN_ERR "Unable to perform device reset without" 354 + " active I_T Nexus\n"); 355 + return FAILED; 356 + } 357 + se_sess = tl_nexus->se_sess; 358 + /* 359 + * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id 360 + */ 361 + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; 362 + se_tpg = &tl_tpg->tl_se_tpg; 363 + 364 + tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); 365 + if (!tl_cmd) { 366 + printk(KERN_ERR "Unable to allocate memory for tl_cmd\n"); 367 + return FAILED; 368 + } 369 + 370 + tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); 371 + if (!tl_tmr) { 372 + printk(KERN_ERR "Unable to allocate memory for tl_tmr\n"); 373 + goto release; 374 + } 375 + init_waitqueue_head(&tl_tmr->tl_tmr_wait); 376 + 377 + se_cmd = &tl_cmd->tl_se_cmd; 378 + /* 379 + * Initialize struct se_cmd descriptor from target_core_mod infrastructure 380 + */ 381 + transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, 382 + DMA_NONE, TASK_ATTR_SIMPLE, 383 + &tl_cmd->tl_sense_buf[0]); 384 + /* 385 + * Allocate the LUN_RESET TMR 386 + */ 387 + se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr, 388 + TMR_LUN_RESET); 389 + if (!se_cmd->se_tmr_req) 390 + goto release; 391 + /* 392 + * Locate the underlying TCM struct se_lun from sc->device->lun 393 + */ 394 + if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0) 395 + goto release; 396 + /* 397 + * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp() 398 + * to wake us up. 399 + */ 400 + transport_generic_handle_tmr(se_cmd); 401 + wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete)); 402 + /* 403 + * The TMR LUN_RESET has completed, check the response status and 404 + * then release allocations. 405 + */ 406 + ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? 
407 + SUCCESS : FAILED; 408 + release: 409 + if (se_cmd) 410 + transport_generic_free_cmd(se_cmd, 1, 1, 0); 411 + else 412 + kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); 413 + kfree(tl_tmr); 414 + return ret; 415 + } 416 + 417 + static int tcm_loop_slave_alloc(struct scsi_device *sd) 418 + { 419 + set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags); 420 + return 0; 421 + } 422 + 423 + static int tcm_loop_slave_configure(struct scsi_device *sd) 424 + { 425 + return 0; 426 + } 427 + 428 + static struct scsi_host_template tcm_loop_driver_template = { 429 + .proc_info = tcm_loop_proc_info, 430 + .proc_name = "tcm_loopback", 431 + .name = "TCM_Loopback", 432 + .queuecommand = tcm_loop_queuecommand, 433 + .change_queue_depth = tcm_loop_change_queue_depth, 434 + .eh_device_reset_handler = tcm_loop_device_reset, 435 + .can_queue = TL_SCSI_CAN_QUEUE, 436 + .this_id = -1, 437 + .sg_tablesize = TL_SCSI_SG_TABLESIZE, 438 + .cmd_per_lun = TL_SCSI_CMD_PER_LUN, 439 + .max_sectors = TL_SCSI_MAX_SECTORS, 440 + .use_clustering = DISABLE_CLUSTERING, 441 + .slave_alloc = tcm_loop_slave_alloc, 442 + .slave_configure = tcm_loop_slave_configure, 443 + .module = THIS_MODULE, 444 + }; 445 + 446 + static int tcm_loop_driver_probe(struct device *dev) 447 + { 448 + struct tcm_loop_hba *tl_hba; 449 + struct Scsi_Host *sh; 450 + int error; 451 + 452 + tl_hba = to_tcm_loop_hba(dev); 453 + 454 + sh = scsi_host_alloc(&tcm_loop_driver_template, 455 + sizeof(struct tcm_loop_hba)); 456 + if (!sh) { 457 + printk(KERN_ERR "Unable to allocate struct scsi_host\n"); 458 + return -ENODEV; 459 + } 460 + tl_hba->sh = sh; 461 + 462 + /* 463 + * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata 464 + */ 465 + *((struct tcm_loop_hba **)sh->hostdata) = tl_hba; 466 + /* 467 + * Setup single ID, Channel and LUN for now.. 468 + */ 469 + sh->max_id = 2; 470 + sh->max_lun = 0; 471 + sh->max_channel = 0; 472 + sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; 473 + 474 + error = scsi_add_host(sh, &tl_hba->dev); 475 + if (error) { 476 + printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); 477 + scsi_host_put(sh); 478 + return -ENODEV; 479 + } 480 + return 0; 481 + } 482 + 483 + static int tcm_loop_driver_remove(struct device *dev) 484 + { 485 + struct tcm_loop_hba *tl_hba; 486 + struct Scsi_Host *sh; 487 + 488 + tl_hba = to_tcm_loop_hba(dev); 489 + sh = tl_hba->sh; 490 + 491 + scsi_remove_host(sh); 492 + scsi_host_put(sh); 493 + return 0; 494 + } 495 + 496 + static void tcm_loop_release_adapter(struct device *dev) 497 + { 498 + struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev); 499 + 500 + kfree(tl_hba); 501 + } 502 + 503 + /* 504 + * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c 505 + */ 506 + static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id) 507 + { 508 + int ret; 509 + 510 + tl_hba->dev.bus = &tcm_loop_lld_bus; 511 + tl_hba->dev.parent = tcm_loop_primary; 512 + tl_hba->dev.release = &tcm_loop_release_adapter; 513 + dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id); 514 + 515 + ret = device_register(&tl_hba->dev); 516 + if (ret) { 517 + printk(KERN_ERR "device_register() failed for" 518 + " tl_hba->dev: %d\n", ret); 519 + return -ENODEV; 520 + } 521 + 522 + return 0; 523 + } 524 + 525 + /* 526 + * Called from tcm_loop_fabric_init() in tcl_loop_fabric.c to load the emulated 527 + * tcm_loop SCSI bus. 
528 + */ 529 + static int tcm_loop_alloc_core_bus(void) 530 + { 531 + int ret; 532 + 533 + tcm_loop_primary = root_device_register("tcm_loop_0"); 534 + if (IS_ERR(tcm_loop_primary)) { 535 + printk(KERN_ERR "Unable to allocate tcm_loop_primary\n"); 536 + return PTR_ERR(tcm_loop_primary); 537 + } 538 + 539 + ret = bus_register(&tcm_loop_lld_bus); 540 + if (ret) { 541 + printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n"); 542 + goto dev_unreg; 543 + } 544 + 545 + ret = driver_register(&tcm_loop_driverfs); 546 + if (ret) { 547 + printk(KERN_ERR "driver_register() failed for" 548 + "tcm_loop_driverfs\n"); 549 + goto bus_unreg; 550 + } 551 + 552 + printk(KERN_INFO "Initialized TCM Loop Core Bus\n"); 553 + return ret; 554 + 555 + bus_unreg: 556 + bus_unregister(&tcm_loop_lld_bus); 557 + dev_unreg: 558 + root_device_unregister(tcm_loop_primary); 559 + return ret; 560 + } 561 + 562 + static void tcm_loop_release_core_bus(void) 563 + { 564 + driver_unregister(&tcm_loop_driverfs); 565 + bus_unregister(&tcm_loop_lld_bus); 566 + root_device_unregister(tcm_loop_primary); 567 + 568 + printk(KERN_INFO "Releasing TCM Loop Core BUS\n"); 569 + } 570 + 571 + static char *tcm_loop_get_fabric_name(void) 572 + { 573 + return "loopback"; 574 + } 575 + 576 + static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg) 577 + { 578 + struct tcm_loop_tpg *tl_tpg = 579 + (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; 580 + struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 581 + /* 582 + * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba() 583 + * time based on the protocol dependent prefix of the passed configfs group. 584 + * 585 + * Based upon tl_proto_id, TCM_Loop emulates the requested fabric 586 + * ProtocolID using target_core_fabric_lib.c symbols. 587 + */ 588 + switch (tl_hba->tl_proto_id) { 589 + case SCSI_PROTOCOL_SAS: 590 + return sas_get_fabric_proto_ident(se_tpg); 591 + case SCSI_PROTOCOL_FCP: 592 + return fc_get_fabric_proto_ident(se_tpg); 593 + case SCSI_PROTOCOL_ISCSI: 594 + return iscsi_get_fabric_proto_ident(se_tpg); 595 + default: 596 + printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" 597 + " SAS emulation\n", tl_hba->tl_proto_id); 598 + break; 599 + } 600 + 601 + return sas_get_fabric_proto_ident(se_tpg); 602 + } 603 + 604 + static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) 605 + { 606 + struct tcm_loop_tpg *tl_tpg = 607 + (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; 608 + /* 609 + * Return the passed NAA identifier for the SAS Target Port 610 + */ 611 + return &tl_tpg->tl_hba->tl_wwn_address[0]; 612 + } 613 + 614 + static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg) 615 + { 616 + struct tcm_loop_tpg *tl_tpg = 617 + (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; 618 + /* 619 + * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83 620 + * to represent the SCSI Target Port. 
621 + */ 622 + return tl_tpg->tl_tpgt; 623 + } 624 + 625 + static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg) 626 + { 627 + return 1; 628 + } 629 + 630 + static u32 tcm_loop_get_pr_transport_id( 631 + struct se_portal_group *se_tpg, 632 + struct se_node_acl *se_nacl, 633 + struct t10_pr_registration *pr_reg, 634 + int *format_code, 635 + unsigned char *buf) 636 + { 637 + struct tcm_loop_tpg *tl_tpg = 638 + (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; 639 + struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 640 + 641 + switch (tl_hba->tl_proto_id) { 642 + case SCSI_PROTOCOL_SAS: 643 + return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 644 + format_code, buf); 645 + case SCSI_PROTOCOL_FCP: 646 + return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 647 + format_code, buf); 648 + case SCSI_PROTOCOL_ISCSI: 649 + return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 650 + format_code, buf); 651 + default: 652 + printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" 653 + " SAS emulation\n", tl_hba->tl_proto_id); 654 + break; 655 + } 656 + 657 + return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 658 + format_code, buf); 659 + } 660 + 661 + static u32 tcm_loop_get_pr_transport_id_len( 662 + struct se_portal_group *se_tpg, 663 + struct se_node_acl *se_nacl, 664 + struct t10_pr_registration *pr_reg, 665 + int *format_code) 666 + { 667 + struct tcm_loop_tpg *tl_tpg = 668 + (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; 669 + struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 670 + 671 + switch (tl_hba->tl_proto_id) { 672 + case SCSI_PROTOCOL_SAS: 673 + return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 674 + format_code); 675 + case SCSI_PROTOCOL_FCP: 676 + return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 677 + format_code); 678 + case SCSI_PROTOCOL_ISCSI: 679 + return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 680 + format_code); 681 + default: 682 + printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" 683 + " SAS emulation\n", tl_hba->tl_proto_id); 684 + break; 685 + } 686 + 687 + return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 688 + format_code); 689 + } 690 + 691 + /* 692 + * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above 693 + * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. 
694 + */ 695 + static char *tcm_loop_parse_pr_out_transport_id( 696 + struct se_portal_group *se_tpg, 697 + const char *buf, 698 + u32 *out_tid_len, 699 + char **port_nexus_ptr) 700 + { 701 + struct tcm_loop_tpg *tl_tpg = 702 + (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; 703 + struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 704 + 705 + switch (tl_hba->tl_proto_id) { 706 + case SCSI_PROTOCOL_SAS: 707 + return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 708 + port_nexus_ptr); 709 + case SCSI_PROTOCOL_FCP: 710 + return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 711 + port_nexus_ptr); 712 + case SCSI_PROTOCOL_ISCSI: 713 + return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 714 + port_nexus_ptr); 715 + default: 716 + printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" 717 + " SAS emulation\n", tl_hba->tl_proto_id); 718 + break; 719 + } 720 + 721 + return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 722 + port_nexus_ptr); 723 + } 724 + 725 + /* 726 + * Returning (1) here allows for target_core_mod struct se_node_acl to be generated 727 + * based upon the incoming fabric dependent SCSI Initiator Port 728 + */ 729 + static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg) 730 + { 731 + return 1; 732 + } 733 + 734 + static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg) 735 + { 736 + return 0; 737 + } 738 + 739 + /* 740 + * Allow I_T Nexus full READ-WRITE access without explict Initiator Node ACLs for 741 + * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest 742 + */ 743 + static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg) 744 + { 745 + return 0; 746 + } 747 + 748 + /* 749 + * Because TCM_Loop does not use explict ACLs and MappedLUNs, this will 750 + * never be called for TCM_Loop by target_core_fabric_configfs.c code. 751 + * It has been added here as a nop for target_fabric_tf_ops_check() 752 + */ 753 + static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg) 754 + { 755 + return 0; 756 + } 757 + 758 + static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl( 759 + struct se_portal_group *se_tpg) 760 + { 761 + struct tcm_loop_nacl *tl_nacl; 762 + 763 + tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL); 764 + if (!tl_nacl) { 765 + printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n"); 766 + return NULL; 767 + } 768 + 769 + return &tl_nacl->se_node_acl; 770 + } 771 + 772 + static void tcm_loop_tpg_release_fabric_acl( 773 + struct se_portal_group *se_tpg, 774 + struct se_node_acl *se_nacl) 775 + { 776 + struct tcm_loop_nacl *tl_nacl = container_of(se_nacl, 777 + struct tcm_loop_nacl, se_node_acl); 778 + 779 + kfree(tl_nacl); 780 + } 781 + 782 + static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg) 783 + { 784 + return 1; 785 + } 786 + 787 + static void tcm_loop_new_cmd_failure(struct se_cmd *se_cmd) 788 + { 789 + /* 790 + * Since TCM_loop is already passing struct scatterlist data from 791 + * struct scsi_cmnd, no more Linux/SCSI failure dependent state need 792 + * to be handled here. 793 + */ 794 + return; 795 + } 796 + 797 + static int tcm_loop_is_state_remove(struct se_cmd *se_cmd) 798 + { 799 + /* 800 + * Assume struct scsi_cmnd is not in remove state.. 
801 + */ 802 + return 0; 803 + } 804 + 805 + static int tcm_loop_sess_logged_in(struct se_session *se_sess) 806 + { 807 + /* 808 + * Assume that TL Nexus is always active 809 + */ 810 + return 1; 811 + } 812 + 813 + static u32 tcm_loop_sess_get_index(struct se_session *se_sess) 814 + { 815 + return 1; 816 + } 817 + 818 + static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl) 819 + { 820 + return; 821 + } 822 + 823 + static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd) 824 + { 825 + return 1; 826 + } 827 + 828 + static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) 829 + { 830 + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, 831 + struct tcm_loop_cmd, tl_se_cmd); 832 + 833 + return tl_cmd->sc_cmd_state; 834 + } 835 + 836 + static int tcm_loop_shutdown_session(struct se_session *se_sess) 837 + { 838 + return 0; 839 + } 840 + 841 + static void tcm_loop_close_session(struct se_session *se_sess) 842 + { 843 + return; 844 + }; 845 + 846 + static void tcm_loop_stop_session( 847 + struct se_session *se_sess, 848 + int sess_sleep, 849 + int conn_sleep) 850 + { 851 + return; 852 + } 853 + 854 + static void tcm_loop_fall_back_to_erl0(struct se_session *se_sess) 855 + { 856 + return; 857 + } 858 + 859 + static int tcm_loop_write_pending(struct se_cmd *se_cmd) 860 + { 861 + /* 862 + * Since Linux/SCSI has already sent down a struct scsi_cmnd 863 + * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array 864 + * memory, and memory has already been mapped to struct se_cmd->t_mem_list 865 + * format with transport_generic_map_mem_to_cmd(). 866 + * 867 + * We now tell TCM to add this WRITE CDB directly into the TCM storage 868 + * object execution queue. 869 + */ 870 + transport_generic_process_write(se_cmd); 871 + return 0; 872 + } 873 + 874 + static int tcm_loop_write_pending_status(struct se_cmd *se_cmd) 875 + { 876 + return 0; 877 + } 878 + 879 + static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) 880 + { 881 + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, 882 + struct tcm_loop_cmd, tl_se_cmd); 883 + struct scsi_cmnd *sc = tl_cmd->sc; 884 + 885 + TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p" 886 + " cdb: 0x%02x\n", sc, sc->cmnd[0]); 887 + 888 + sc->result = SAM_STAT_GOOD; 889 + set_host_byte(sc, DID_OK); 890 + sc->scsi_done(sc); 891 + return 0; 892 + } 893 + 894 + static int tcm_loop_queue_status(struct se_cmd *se_cmd) 895 + { 896 + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, 897 + struct tcm_loop_cmd, tl_se_cmd); 898 + struct scsi_cmnd *sc = tl_cmd->sc; 899 + 900 + TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p" 901 + " cdb: 0x%02x\n", sc, sc->cmnd[0]); 902 + 903 + if (se_cmd->sense_buffer && 904 + ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 905 + (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 906 + 907 + memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer, 908 + SCSI_SENSE_BUFFERSIZE); 909 + sc->result = SAM_STAT_CHECK_CONDITION; 910 + set_driver_byte(sc, DRIVER_SENSE); 911 + } else 912 + sc->result = se_cmd->scsi_status; 913 + 914 + set_host_byte(sc, DID_OK); 915 + sc->scsi_done(sc); 916 + return 0; 917 + } 918 + 919 + static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) 920 + { 921 + struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; 922 + struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr; 923 + /* 924 + * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead 925 + * and wake up the wait_queue_head_t in tcm_loop_device_reset() 926 + */ 927 + 
atomic_set(&tl_tmr->tmr_complete, 1); 928 + wake_up(&tl_tmr->tl_tmr_wait); 929 + return 0; 930 + } 931 + 932 + static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length) 933 + { 934 + return 0; 935 + } 936 + 937 + static u16 tcm_loop_get_fabric_sense_len(void) 938 + { 939 + return 0; 940 + } 941 + 942 + static u64 tcm_loop_pack_lun(unsigned int lun) 943 + { 944 + u64 result; 945 + 946 + /* LSB of lun into byte 1 big-endian */ 947 + result = ((lun & 0xff) << 8); 948 + /* use flat space addressing method */ 949 + result |= 0x40 | ((lun >> 8) & 0x3f); 950 + 951 + return cpu_to_le64(result); 952 + } 953 + 954 + static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) 955 + { 956 + switch (tl_hba->tl_proto_id) { 957 + case SCSI_PROTOCOL_SAS: 958 + return "SAS"; 959 + case SCSI_PROTOCOL_FCP: 960 + return "FCP"; 961 + case SCSI_PROTOCOL_ISCSI: 962 + return "iSCSI"; 963 + default: 964 + break; 965 + } 966 + 967 + return "Unknown"; 968 + } 969 + 970 + /* Start items for tcm_loop_port_cit */ 971 + 972 + static int tcm_loop_port_link( 973 + struct se_portal_group *se_tpg, 974 + struct se_lun *lun) 975 + { 976 + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, 977 + struct tcm_loop_tpg, tl_se_tpg); 978 + struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 979 + 980 + atomic_inc(&tl_tpg->tl_tpg_port_count); 981 + smp_mb__after_atomic_inc(); 982 + /* 983 + * Add Linux/SCSI struct scsi_device by HCTL 984 + */ 985 + scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun); 986 + 987 + printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n"); 988 + return 0; 989 + } 990 + 991 + static void tcm_loop_port_unlink( 992 + struct se_portal_group *se_tpg, 993 + struct se_lun *se_lun) 994 + { 995 + struct scsi_device *sd; 996 + struct tcm_loop_hba *tl_hba; 997 + struct tcm_loop_tpg *tl_tpg; 998 + 999 + tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); 1000 + tl_hba = tl_tpg->tl_hba; 1001 + 1002 + sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt, 1003 + se_lun->unpacked_lun); 1004 + if (!sd) { 1005 + printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:" 1006 + "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); 1007 + return; 1008 + } 1009 + /* 1010 + * Remove Linux/SCSI struct scsi_device by HCTL 1011 + */ 1012 + scsi_remove_device(sd); 1013 + scsi_device_put(sd); 1014 + 1015 + atomic_dec(&tl_tpg->tl_tpg_port_count); 1016 + smp_mb__after_atomic_dec(); 1017 + 1018 + printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n"); 1019 + } 1020 + 1021 + /* End items for tcm_loop_port_cit */ 1022 + 1023 + /* Start items for tcm_loop_nexus_cit */ 1024 + 1025 + static int tcm_loop_make_nexus( 1026 + struct tcm_loop_tpg *tl_tpg, 1027 + const char *name) 1028 + { 1029 + struct se_portal_group *se_tpg; 1030 + struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 1031 + struct tcm_loop_nexus *tl_nexus; 1032 + 1033 + if (tl_tpg->tl_hba->tl_nexus) { 1034 + printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n"); 1035 + return -EEXIST; 1036 + } 1037 + se_tpg = &tl_tpg->tl_se_tpg; 1038 + 1039 + tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL); 1040 + if (!tl_nexus) { 1041 + printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n"); 1042 + return -ENOMEM; 1043 + } 1044 + /* 1045 + * Initialize the struct se_session pointer 1046 + */ 1047 + tl_nexus->se_sess = transport_init_session(); 1048 + if (!tl_nexus->se_sess) 1049 + goto out; 1050 + /* 1051 + * Since we are running in 'demo mode' this call with generate a 1052 + * struct se_node_acl for 
the tcm_loop struct se_portal_group with the SCSI 1053 + * Initiator port name of the passed configfs group 'name'. 1054 + */ 1055 + tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl( 1056 + se_tpg, (unsigned char *)name); 1057 + if (!tl_nexus->se_sess->se_node_acl) { 1058 + transport_free_session(tl_nexus->se_sess); 1059 + goto out; 1060 + } 1061 + /* 1062 + * Now, register the SAS I_T Nexus as active with the call to 1063 + * transport_register_session() 1064 + */ 1065 + __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, 1066 + tl_nexus->se_sess, (void *)tl_nexus); 1067 + tl_tpg->tl_hba->tl_nexus = tl_nexus; 1068 + printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated" 1069 + " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), 1070 + name); 1071 + return 0; 1072 + 1073 + out: 1074 + kfree(tl_nexus); 1075 + return -ENOMEM; 1076 + } 1077 + 1078 + static int tcm_loop_drop_nexus( 1079 + struct tcm_loop_tpg *tpg) 1080 + { 1081 + struct se_session *se_sess; 1082 + struct tcm_loop_nexus *tl_nexus; 1083 + struct tcm_loop_hba *tl_hba = tpg->tl_hba; 1084 + 1085 + tl_nexus = tpg->tl_hba->tl_nexus; 1086 + if (!tl_nexus) 1087 + return -ENODEV; 1088 + 1089 + se_sess = tl_nexus->se_sess; 1090 + if (!se_sess) 1091 + return -ENODEV; 1092 + 1093 + if (atomic_read(&tpg->tl_tpg_port_count)) { 1094 + printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with" 1095 + " active TPG port count: %d\n", 1096 + atomic_read(&tpg->tl_tpg_port_count)); 1097 + return -EPERM; 1098 + } 1099 + 1100 + printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" 1101 + " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), 1102 + tl_nexus->se_sess->se_node_acl->initiatorname); 1103 + /* 1104 + * Release the SCSI I_T Nexus to the emulated SAS Target Port 1105 + */ 1106 + transport_deregister_session(tl_nexus->se_sess); 1107 + tpg->tl_hba->tl_nexus = NULL; 1108 + kfree(tl_nexus); 1109 + return 0; 1110 + } 1111 + 1112 + /* End items for tcm_loop_nexus_cit */ 1113 + 1114 + static ssize_t tcm_loop_tpg_show_nexus( 1115 + struct se_portal_group *se_tpg, 1116 + char *page) 1117 + { 1118 + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, 1119 + struct tcm_loop_tpg, tl_se_tpg); 1120 + struct tcm_loop_nexus *tl_nexus; 1121 + ssize_t ret; 1122 + 1123 + tl_nexus = tl_tpg->tl_hba->tl_nexus; 1124 + if (!tl_nexus) 1125 + return -ENODEV; 1126 + 1127 + ret = snprintf(page, PAGE_SIZE, "%s\n", 1128 + tl_nexus->se_sess->se_node_acl->initiatorname); 1129 + 1130 + return ret; 1131 + } 1132 + 1133 + static ssize_t tcm_loop_tpg_store_nexus( 1134 + struct se_portal_group *se_tpg, 1135 + const char *page, 1136 + size_t count) 1137 + { 1138 + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, 1139 + struct tcm_loop_tpg, tl_se_tpg); 1140 + struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 1141 + unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr; 1142 + int ret; 1143 + /* 1144 + * Shutdown the active I_T nexus if 'NULL' is passed.. 1145 + */ 1146 + if (!strncmp(page, "NULL", 4)) { 1147 + ret = tcm_loop_drop_nexus(tl_tpg); 1148 + return (!ret) ? 
count : ret; 1149 + } 1150 + /* 1151 + * Otherwise make sure the passed virtual Initiator port WWN matches 1152 + * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call 1153 + * tcm_loop_make_nexus() 1154 + */ 1155 + if (strlen(page) > TL_WWN_ADDR_LEN) { 1156 + printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds" 1157 + " max: %d\n", page, TL_WWN_ADDR_LEN); 1158 + return -EINVAL; 1159 + } 1160 + snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page); 1161 + 1162 + ptr = strstr(i_port, "naa."); 1163 + if (ptr) { 1164 + if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) { 1165 + printk(KERN_ERR "Passed SAS Initiator Port %s does not" 1166 + " match target port protoid: %s\n", i_port, 1167 + tcm_loop_dump_proto_id(tl_hba)); 1168 + return -EINVAL; 1169 + } 1170 + port_ptr = &i_port[0]; 1171 + goto check_newline; 1172 + } 1173 + ptr = strstr(i_port, "fc."); 1174 + if (ptr) { 1175 + if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) { 1176 + printk(KERN_ERR "Passed FCP Initiator Port %s does not" 1177 + " match target port protoid: %s\n", i_port, 1178 + tcm_loop_dump_proto_id(tl_hba)); 1179 + return -EINVAL; 1180 + } 1181 + port_ptr = &i_port[3]; /* Skip over "fc." */ 1182 + goto check_newline; 1183 + } 1184 + ptr = strstr(i_port, "iqn."); 1185 + if (ptr) { 1186 + if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) { 1187 + printk(KERN_ERR "Passed iSCSI Initiator Port %s does not" 1188 + " match target port protoid: %s\n", i_port, 1189 + tcm_loop_dump_proto_id(tl_hba)); 1190 + return -EINVAL; 1191 + } 1192 + port_ptr = &i_port[0]; 1193 + goto check_newline; 1194 + } 1195 + printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:" 1196 + " %s\n", i_port); 1197 + return -EINVAL; 1198 + /* 1199 + * Clear any trailing newline for the NAA WWN 1200 + */ 1201 + check_newline: 1202 + if (i_port[strlen(i_port)-1] == '\n') 1203 + i_port[strlen(i_port)-1] = '\0'; 1204 + 1205 + ret = tcm_loop_make_nexus(tl_tpg, port_ptr); 1206 + if (ret < 0) 1207 + return ret; 1208 + 1209 + return count; 1210 + } 1211 + 1212 + TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR); 1213 + 1214 + static struct configfs_attribute *tcm_loop_tpg_attrs[] = { 1215 + &tcm_loop_tpg_nexus.attr, 1216 + NULL, 1217 + }; 1218 + 1219 + /* Start items for tcm_loop_naa_cit */ 1220 + 1221 + struct se_portal_group *tcm_loop_make_naa_tpg( 1222 + struct se_wwn *wwn, 1223 + struct config_group *group, 1224 + const char *name) 1225 + { 1226 + struct tcm_loop_hba *tl_hba = container_of(wwn, 1227 + struct tcm_loop_hba, tl_hba_wwn); 1228 + struct tcm_loop_tpg *tl_tpg; 1229 + char *tpgt_str, *end_ptr; 1230 + int ret; 1231 + unsigned short int tpgt; 1232 + 1233 + tpgt_str = strstr(name, "tpgt_"); 1234 + if (!tpgt_str) { 1235 + printk(KERN_ERR "Unable to locate \"tpgt_#\" directory" 1236 + " group\n"); 1237 + return ERR_PTR(-EINVAL); 1238 + } 1239 + tpgt_str += 5; /* Skip ahead of "tpgt_" */ 1240 + tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0); 1241 + 1242 + if (tpgt > TL_TPGS_PER_HBA) { 1243 + printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:" 1244 + " %u\n", tpgt, TL_TPGS_PER_HBA); 1245 + return ERR_PTR(-EINVAL); 1246 + } 1247 + tl_tpg = &tl_hba->tl_hba_tpgs[tpgt]; 1248 + tl_tpg->tl_hba = tl_hba; 1249 + tl_tpg->tl_tpgt = tpgt; 1250 + /* 1251 + * Register the tl_tpg as a emulated SAS TCM Target Endpoint 1252 + */ 1253 + ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops, 1254 + wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg, 1255 + TRANSPORT_TPG_TYPE_NORMAL); 1256 + if (ret < 0) 1257 + return ERR_PTR(-ENOMEM); 1258 
+ 1259 + printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s" 1260 + " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), 1261 + config_item_name(&wwn->wwn_group.cg_item), tpgt); 1262 + 1263 + return &tl_tpg->tl_se_tpg; 1264 + } 1265 + 1266 + void tcm_loop_drop_naa_tpg( 1267 + struct se_portal_group *se_tpg) 1268 + { 1269 + struct se_wwn *wwn = se_tpg->se_tpg_wwn; 1270 + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, 1271 + struct tcm_loop_tpg, tl_se_tpg); 1272 + struct tcm_loop_hba *tl_hba; 1273 + unsigned short tpgt; 1274 + 1275 + tl_hba = tl_tpg->tl_hba; 1276 + tpgt = tl_tpg->tl_tpgt; 1277 + /* 1278 + * Release the I_T Nexus for the Virtual SAS link if present 1279 + */ 1280 + tcm_loop_drop_nexus(tl_tpg); 1281 + /* 1282 + * Deregister the tl_tpg as a emulated SAS TCM Target Endpoint 1283 + */ 1284 + core_tpg_deregister(se_tpg); 1285 + 1286 + printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s" 1287 + " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), 1288 + config_item_name(&wwn->wwn_group.cg_item), tpgt); 1289 + } 1290 + 1291 + /* End items for tcm_loop_naa_cit */ 1292 + 1293 + /* Start items for tcm_loop_cit */ 1294 + 1295 + struct se_wwn *tcm_loop_make_scsi_hba( 1296 + struct target_fabric_configfs *tf, 1297 + struct config_group *group, 1298 + const char *name) 1299 + { 1300 + struct tcm_loop_hba *tl_hba; 1301 + struct Scsi_Host *sh; 1302 + char *ptr; 1303 + int ret, off = 0; 1304 + 1305 + tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL); 1306 + if (!tl_hba) { 1307 + printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n"); 1308 + return ERR_PTR(-ENOMEM); 1309 + } 1310 + /* 1311 + * Determine the emulated Protocol Identifier and Target Port Name 1312 + * based on the incoming configfs directory name. 1313 + */ 1314 + ptr = strstr(name, "naa."); 1315 + if (ptr) { 1316 + tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS; 1317 + goto check_len; 1318 + } 1319 + ptr = strstr(name, "fc."); 1320 + if (ptr) { 1321 + tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP; 1322 + off = 3; /* Skip over "fc." 
*/ 1323 + goto check_len; 1324 + } 1325 + ptr = strstr(name, "iqn."); 1326 + if (ptr) { 1327 + tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI; 1328 + goto check_len; 1329 + } 1330 + 1331 + printk(KERN_ERR "Unable to locate prefix for emulated Target Port:" 1332 + " %s\n", name); 1333 + return ERR_PTR(-EINVAL); 1334 + 1335 + check_len: 1336 + if (strlen(name) > TL_WWN_ADDR_LEN) { 1337 + printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds" 1338 + " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba), 1339 + TL_WWN_ADDR_LEN); 1340 + kfree(tl_hba); 1341 + return ERR_PTR(-EINVAL); 1342 + } 1343 + snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]); 1344 + 1345 + /* 1346 + * Call device_register(tl_hba->dev) to register the emulated 1347 + * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after 1348 + * device_register() callbacks in tcm_loop_driver_probe() 1349 + */ 1350 + ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt); 1351 + if (ret) 1352 + goto out; 1353 + 1354 + sh = tl_hba->sh; 1355 + tcm_loop_hba_no_cnt++; 1356 + printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target" 1357 + " %s Address: %s at Linux/SCSI Host ID: %d\n", 1358 + tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); 1359 + 1360 + return &tl_hba->tl_hba_wwn; 1361 + out: 1362 + kfree(tl_hba); 1363 + return ERR_PTR(ret); 1364 + } 1365 + 1366 + void tcm_loop_drop_scsi_hba( 1367 + struct se_wwn *wwn) 1368 + { 1369 + struct tcm_loop_hba *tl_hba = container_of(wwn, 1370 + struct tcm_loop_hba, tl_hba_wwn); 1371 + int host_no = tl_hba->sh->host_no; 1372 + /* 1373 + * Call device_unregister() on the original tl_hba->dev. 1374 + * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will 1375 + * release *tl_hba; 1376 + */ 1377 + device_unregister(&tl_hba->dev); 1378 + 1379 + printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target" 1380 + " SAS Address: %s at Linux/SCSI Host ID: %d\n", 1381 + config_item_name(&wwn->wwn_group.cg_item), host_no); 1382 + } 1383 + 1384 + /* Start items for tcm_loop_cit */ 1385 + static ssize_t tcm_loop_wwn_show_attr_version( 1386 + struct target_fabric_configfs *tf, 1387 + char *page) 1388 + { 1389 + return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION); 1390 + } 1391 + 1392 + TF_WWN_ATTR_RO(tcm_loop, version); 1393 + 1394 + static struct configfs_attribute *tcm_loop_wwn_attrs[] = { 1395 + &tcm_loop_wwn_version.attr, 1396 + NULL, 1397 + }; 1398 + 1399 + /* End items for tcm_loop_cit */ 1400 + 1401 + static int tcm_loop_register_configfs(void) 1402 + { 1403 + struct target_fabric_configfs *fabric; 1404 + struct config_group *tf_cg; 1405 + int ret; 1406 + /* 1407 + * Set the TCM Loop HBA counter to zero 1408 + */ 1409 + tcm_loop_hba_no_cnt = 0; 1410 + /* 1411 + * Register the top level struct config_item_type with TCM core 1412 + */ 1413 + fabric = target_fabric_configfs_init(THIS_MODULE, "loopback"); 1414 + if (!fabric) { 1415 + printk(KERN_ERR "tcm_loop_register_configfs() failed!\n"); 1416 + return -1; 1417 + } 1418 + /* 1419 + * Setup the fabric API of function pointers used by target_core_mod 1420 + */ 1421 + fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name; 1422 + fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident; 1423 + fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn; 1424 + fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag; 1425 + fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth; 1426 + fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id; 1427 + 
fabric->tf_ops.tpg_get_pr_transport_id_len = 1428 + &tcm_loop_get_pr_transport_id_len; 1429 + fabric->tf_ops.tpg_parse_pr_out_transport_id = 1430 + &tcm_loop_parse_pr_out_transport_id; 1431 + fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode; 1432 + fabric->tf_ops.tpg_check_demo_mode_cache = 1433 + &tcm_loop_check_demo_mode_cache; 1434 + fabric->tf_ops.tpg_check_demo_mode_write_protect = 1435 + &tcm_loop_check_demo_mode_write_protect; 1436 + fabric->tf_ops.tpg_check_prod_mode_write_protect = 1437 + &tcm_loop_check_prod_mode_write_protect; 1438 + /* 1439 + * The TCM loopback fabric module runs in demo-mode to a local 1440 + * virtual SCSI device, so fabric dependent initator ACLs are 1441 + * not required. 1442 + */ 1443 + fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl; 1444 + fabric->tf_ops.tpg_release_fabric_acl = 1445 + &tcm_loop_tpg_release_fabric_acl; 1446 + fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index; 1447 + /* 1448 + * Since tcm_loop is mapping physical memory from Linux/SCSI 1449 + * struct scatterlist arrays for each struct scsi_cmnd I/O, 1450 + * we do not need TCM to allocate a iovec array for 1451 + * virtual memory address mappings 1452 + */ 1453 + fabric->tf_ops.alloc_cmd_iovecs = NULL; 1454 + /* 1455 + * Used for setting up remaining TCM resources in process context 1456 + */ 1457 + fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map; 1458 + fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free; 1459 + fabric->tf_ops.release_cmd_to_pool = &tcm_loop_deallocate_core_cmd; 1460 + fabric->tf_ops.release_cmd_direct = &tcm_loop_deallocate_core_cmd; 1461 + fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session; 1462 + fabric->tf_ops.close_session = &tcm_loop_close_session; 1463 + fabric->tf_ops.stop_session = &tcm_loop_stop_session; 1464 + fabric->tf_ops.fall_back_to_erl0 = &tcm_loop_fall_back_to_erl0; 1465 + fabric->tf_ops.sess_logged_in = &tcm_loop_sess_logged_in; 1466 + fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index; 1467 + fabric->tf_ops.sess_get_initiator_sid = NULL; 1468 + fabric->tf_ops.write_pending = &tcm_loop_write_pending; 1469 + fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status; 1470 + /* 1471 + * Not used for TCM loopback 1472 + */ 1473 + fabric->tf_ops.set_default_node_attributes = 1474 + &tcm_loop_set_default_node_attributes; 1475 + fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag; 1476 + fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state; 1477 + fabric->tf_ops.new_cmd_failure = &tcm_loop_new_cmd_failure; 1478 + fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; 1479 + fabric->tf_ops.queue_status = &tcm_loop_queue_status; 1480 + fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; 1481 + fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len; 1482 + fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len; 1483 + fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove; 1484 + fabric->tf_ops.pack_lun = &tcm_loop_pack_lun; 1485 + 1486 + tf_cg = &fabric->tf_group; 1487 + /* 1488 + * Setup function pointers for generic logic in target_core_fabric_configfs.c 1489 + */ 1490 + fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba; 1491 + fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba; 1492 + fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg; 1493 + fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg; 1494 + /* 1495 + * fabric_post_link() and fabric_pre_unlink() are used for 1496 + * registration and release of TCM Loop 
Virtual SCSI LUNs. 1497 + */ 1498 + fabric->tf_ops.fabric_post_link = &tcm_loop_port_link; 1499 + fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink; 1500 + fabric->tf_ops.fabric_make_np = NULL; 1501 + fabric->tf_ops.fabric_drop_np = NULL; 1502 + /* 1503 + * Setup default attribute lists for various fabric->tf_cit_tmpl 1504 + */ 1505 + TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; 1506 + TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; 1507 + TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; 1508 + TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 1509 + TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 1510 + /* 1511 + * Once fabric->tf_ops has been setup, now register the fabric for 1512 + * use within TCM 1513 + */ 1514 + ret = target_fabric_configfs_register(fabric); 1515 + if (ret < 0) { 1516 + printk(KERN_ERR "target_fabric_configfs_register() for" 1517 + " TCM_Loop failed!\n"); 1518 + target_fabric_configfs_free(fabric); 1519 + return -1; 1520 + } 1521 + /* 1522 + * Setup our local pointer to *fabric. 1523 + */ 1524 + tcm_loop_fabric_configfs = fabric; 1525 + printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->" 1526 + " tcm_loop_fabric_configfs\n"); 1527 + return 0; 1528 + } 1529 + 1530 + static void tcm_loop_deregister_configfs(void) 1531 + { 1532 + if (!tcm_loop_fabric_configfs) 1533 + return; 1534 + 1535 + target_fabric_configfs_deregister(tcm_loop_fabric_configfs); 1536 + tcm_loop_fabric_configfs = NULL; 1537 + printk(KERN_INFO "TCM_LOOP[0] - Cleared" 1538 + " tcm_loop_fabric_configfs\n"); 1539 + } 1540 + 1541 + static int __init tcm_loop_fabric_init(void) 1542 + { 1543 + int ret; 1544 + 1545 + tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache", 1546 + sizeof(struct tcm_loop_cmd), 1547 + __alignof__(struct tcm_loop_cmd), 1548 + 0, NULL); 1549 + if (!tcm_loop_cmd_cache) { 1550 + printk(KERN_ERR "kmem_cache_create() for" 1551 + " tcm_loop_cmd_cache failed\n"); 1552 + return -ENOMEM; 1553 + } 1554 + 1555 + ret = tcm_loop_alloc_core_bus(); 1556 + if (ret) 1557 + return ret; 1558 + 1559 + ret = tcm_loop_register_configfs(); 1560 + if (ret) { 1561 + tcm_loop_release_core_bus(); 1562 + return ret; 1563 + } 1564 + 1565 + return 0; 1566 + } 1567 + 1568 + static void __exit tcm_loop_fabric_exit(void) 1569 + { 1570 + tcm_loop_deregister_configfs(); 1571 + tcm_loop_release_core_bus(); 1572 + kmem_cache_destroy(tcm_loop_cmd_cache); 1573 + } 1574 + 1575 + MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module"); 1576 + MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>"); 1577 + MODULE_LICENSE("GPL"); 1578 + module_init(tcm_loop_fabric_init); 1579 + module_exit(tcm_loop_fabric_exit);
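
The flat-space LUN packing in tcm_loop_pack_lun() above is easy to check by hand. Below is a hypothetical userspace replica of the same bit manipulation, not part of the patch; the cpu_to_le64() byte swap is omitted, so values are shown in host order.

/*
 * Hypothetical userspace replica of the flat-space LUN packing done by
 * tcm_loop_pack_lun() above; the cpu_to_le64() byte swap is left out,
 * so results are shown in host order.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_lun_flat(unsigned int lun)
{
	uint64_t result;

	/* LSB of the LUN lands in byte 1, big-endian within the field */
	result = (uint64_t)(lun & 0xff) << 8;
	/* 0x40 selects the flat space addressing method; upper LUN bits go in byte 0 */
	result |= 0x40 | ((lun >> 8) & 0x3f);

	return result;
}

int main(void)
{
	assert(pack_lun_flat(0)     == 0x0040);
	assert(pack_lun_flat(5)     == 0x0540);
	assert(pack_lun_flat(0x123) == 0x2341);

	printf("LUN 5 packs to 0x%04llx\n",
	       (unsigned long long)pack_lun_flat(5));
	return 0;
}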
+77
drivers/target/loopback/tcm_loop.h
··· 1 + #define TCM_LOOP_VERSION "v2.1-rc1" 2 + #define TL_WWN_ADDR_LEN 256 3 + #define TL_TPGS_PER_HBA 32 4 + /* 5 + * Defaults for struct scsi_host_template tcm_loop_driver_template 6 + * 7 + * We use large can_queue and cmd_per_lun here and let TCM enforce 8 + * the underlying se_device_t->queue_depth. 9 + */ 10 + #define TL_SCSI_CAN_QUEUE 1024 11 + #define TL_SCSI_CMD_PER_LUN 1024 12 + #define TL_SCSI_MAX_SECTORS 1024 13 + #define TL_SCSI_SG_TABLESIZE 256 14 + /* 15 + * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len 16 + */ 17 + #define TL_SCSI_MAX_CMD_LEN 32 18 + 19 + #ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG 20 + # define TL_CDB_DEBUG(x...) printk(KERN_INFO x) 21 + #else 22 + # define TL_CDB_DEBUG(x...) 23 + #endif 24 + 25 + struct tcm_loop_cmd { 26 + /* State of Linux/SCSI CDB+Data descriptor */ 27 + u32 sc_cmd_state; 28 + /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */ 29 + struct scsi_cmnd *sc; 30 + struct list_head *tl_cmd_list; 31 + /* The TCM I/O descriptor that is accessed via container_of() */ 32 + struct se_cmd tl_se_cmd; 33 + /* Sense buffer that will be mapped into outgoing status */ 34 + unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER]; 35 + }; 36 + 37 + struct tcm_loop_tmr { 38 + atomic_t tmr_complete; 39 + wait_queue_head_t tl_tmr_wait; 40 + }; 41 + 42 + struct tcm_loop_nexus { 43 + int it_nexus_active; 44 + /* 45 + * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h 46 + */ 47 + struct scsi_host *sh; 48 + /* 49 + * Pointer to TCM session for I_T Nexus 50 + */ 51 + struct se_session *se_sess; 52 + }; 53 + 54 + struct tcm_loop_nacl { 55 + struct se_node_acl se_node_acl; 56 + }; 57 + 58 + struct tcm_loop_tpg { 59 + unsigned short tl_tpgt; 60 + atomic_t tl_tpg_port_count; 61 + struct se_portal_group tl_se_tpg; 62 + struct tcm_loop_hba *tl_hba; 63 + }; 64 + 65 + struct tcm_loop_hba { 66 + u8 tl_proto_id; 67 + unsigned char tl_wwn_address[TL_WWN_ADDR_LEN]; 68 + struct se_hba_s *se_hba; 69 + struct se_lun *tl_hba_lun; 70 + struct se_port *tl_hba_lun_sep; 71 + struct se_device_s *se_dev_hba_ptr; 72 + struct tcm_loop_nexus *tl_nexus; 73 + struct device dev; 74 + struct Scsi_Host *sh; 75 + struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA]; 76 + struct se_wwn tl_hba_wwn; 77 + };
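
struct tcm_loop_cmd above embeds the TCM descriptor (tl_se_cmd) rather than pointing to it, which is why tcm_loop.c recovers the outer structure with container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd). A minimal userspace sketch of that embedding pattern follows; the se_cmd type here is a stand-in, not the real TCM definition.

/*
 * Minimal userspace sketch of the container_of() embedding pattern used by
 * struct tcm_loop_cmd: TCM core hands back a struct se_cmd *, and the
 * fabric recovers its surrounding private descriptor from it.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_cmd { int unused; };		/* stand-in for the TCM core type */

struct tcm_loop_cmd {
	int sc_cmd_state;
	struct se_cmd tl_se_cmd;	/* embedded TCM descriptor */
};

int main(void)
{
	struct tcm_loop_cmd tl_cmd = { .sc_cmd_state = 42 };
	struct se_cmd *se_cmd = &tl_cmd.tl_se_cmd;	/* what TCM core sees */
	struct tcm_loop_cmd *back;

	/* same recovery step as tcm_loop_get_cmd_state() and friends */
	back = container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd);
	printf("sc_cmd_state = %d\n", back->sc_cmd_state);	/* prints 42 */
	return 0;
}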
+102 -15
drivers/target/target_core_configfs.c
··· 3 3 * 4 4 * This file contains ConfigFS logic for the Generic Target Engine project. 5 5 * 6 - * Copyright (c) 2008-2010 Rising Tide Systems 7 - * Copyright (c) 2008-2010 Linux-iSCSI.org 6 + * Copyright (c) 2008-2011 Rising Tide Systems 7 + * Copyright (c) 2008-2011 Linux-iSCSI.org 8 8 * 9 9 * Nicholas A. Bellinger <nab@kernel.org> 10 10 * ··· 50 50 #include "target_core_hba.h" 51 51 #include "target_core_pr.h" 52 52 #include "target_core_rd.h" 53 + #include "target_core_stat.h" 53 54 54 55 static struct list_head g_tf_list; 55 56 static struct mutex g_tf_lock; ··· 1452 1451 size_t count) 1453 1452 { 1454 1453 struct se_device *dev; 1455 - unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL; 1456 - unsigned char *isid = NULL; 1454 + unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; 1455 + unsigned char *t_fabric = NULL, *t_port = NULL; 1457 1456 char *orig, *ptr, *arg_p, *opts; 1458 1457 substring_t args[MAX_OPT_ARGS]; 1459 1458 unsigned long long tmp_ll; ··· 1489 1488 switch (token) { 1490 1489 case Opt_initiator_fabric: 1491 1490 i_fabric = match_strdup(&args[0]); 1491 + if (!i_fabric) { 1492 + ret = -ENOMEM; 1493 + goto out; 1494 + } 1492 1495 break; 1493 1496 case Opt_initiator_node: 1494 1497 i_port = match_strdup(&args[0]); 1498 + if (!i_port) { 1499 + ret = -ENOMEM; 1500 + goto out; 1501 + } 1495 1502 if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) { 1496 1503 printk(KERN_ERR "APTPL metadata initiator_node=" 1497 1504 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", ··· 1510 1501 break; 1511 1502 case Opt_initiator_sid: 1512 1503 isid = match_strdup(&args[0]); 1504 + if (!isid) { 1505 + ret = -ENOMEM; 1506 + goto out; 1507 + } 1513 1508 if (strlen(isid) > PR_REG_ISID_LEN) { 1514 1509 printk(KERN_ERR "APTPL metadata initiator_isid" 1515 1510 "= exceeds PR_REG_ISID_LEN: %d\n", ··· 1524 1511 break; 1525 1512 case Opt_sa_res_key: 1526 1513 arg_p = match_strdup(&args[0]); 1514 + if (!arg_p) { 1515 + ret = -ENOMEM; 1516 + goto out; 1517 + } 1527 1518 ret = strict_strtoull(arg_p, 0, &tmp_ll); 1528 1519 if (ret < 0) { 1529 1520 printk(KERN_ERR "strict_strtoull() failed for" ··· 1564 1547 */ 1565 1548 case Opt_target_fabric: 1566 1549 t_fabric = match_strdup(&args[0]); 1550 + if (!t_fabric) { 1551 + ret = -ENOMEM; 1552 + goto out; 1553 + } 1567 1554 break; 1568 1555 case Opt_target_node: 1569 1556 t_port = match_strdup(&args[0]); 1557 + if (!t_port) { 1558 + ret = -ENOMEM; 1559 + goto out; 1560 + } 1570 1561 if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) { 1571 1562 printk(KERN_ERR "APTPL metadata target_node=" 1572 1563 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", ··· 1617 1592 i_port, isid, mapped_lun, t_port, tpgt, target_lun, 1618 1593 res_holder, all_tg_pt, type); 1619 1594 out: 1595 + kfree(i_fabric); 1596 + kfree(i_port); 1597 + kfree(isid); 1598 + kfree(t_fabric); 1599 + kfree(t_port); 1620 1600 kfree(orig); 1621 1601 return (ret == 0) ? 
count : ret; 1622 1602 } ··· 1828 1798 return -EINVAL; 1829 1799 1830 1800 dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); 1831 - if (!(dev) || IS_ERR(dev)) 1801 + if (IS_ERR(dev)) 1802 + return PTR_ERR(dev); 1803 + else if (!dev) 1832 1804 return -EINVAL; 1833 1805 1834 1806 se_dev->se_dev_ptr = dev; ··· 2710 2678 2711 2679 /* End functions for struct config_item_type target_core_alua_cit */ 2712 2680 2681 + /* Start functions for struct config_item_type target_core_stat_cit */ 2682 + 2683 + static struct config_group *target_core_stat_mkdir( 2684 + struct config_group *group, 2685 + const char *name) 2686 + { 2687 + return ERR_PTR(-ENOSYS); 2688 + } 2689 + 2690 + static void target_core_stat_rmdir( 2691 + struct config_group *group, 2692 + struct config_item *item) 2693 + { 2694 + return; 2695 + } 2696 + 2697 + static struct configfs_group_operations target_core_stat_group_ops = { 2698 + .make_group = &target_core_stat_mkdir, 2699 + .drop_item = &target_core_stat_rmdir, 2700 + }; 2701 + 2702 + static struct config_item_type target_core_stat_cit = { 2703 + .ct_group_ops = &target_core_stat_group_ops, 2704 + .ct_owner = THIS_MODULE, 2705 + }; 2706 + 2707 + /* End functions for struct config_item_type target_core_stat_cit */ 2708 + 2713 2709 /* Start functions for struct config_item_type target_core_hba_cit */ 2714 2710 2715 2711 static struct config_group *target_core_make_subdev( ··· 2750 2690 struct config_item *hba_ci = &group->cg_item; 2751 2691 struct se_hba *hba = item_to_hba(hba_ci); 2752 2692 struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; 2693 + struct config_group *dev_stat_grp = NULL; 2694 + int errno = -ENOMEM, ret; 2753 2695 2754 - if (mutex_lock_interruptible(&hba->hba_access_mutex)) 2755 - return NULL; 2756 - 2696 + ret = mutex_lock_interruptible(&hba->hba_access_mutex); 2697 + if (ret) 2698 + return ERR_PTR(ret); 2757 2699 /* 2758 2700 * Locate the struct se_subsystem_api from parent's struct se_hba. 
2759 2701 */ ··· 2785 2723 se_dev->se_dev_hba = hba; 2786 2724 dev_cg = &se_dev->se_dev_group; 2787 2725 2788 - dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 6, 2726 + dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7, 2789 2727 GFP_KERNEL); 2790 2728 if (!(dev_cg->default_groups)) 2791 2729 goto out; ··· 2817 2755 &target_core_dev_wwn_cit); 2818 2756 config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group, 2819 2757 "alua", &target_core_alua_tg_pt_gps_cit); 2758 + config_group_init_type_name(&se_dev->dev_stat_grps.stat_group, 2759 + "statistics", &target_core_stat_cit); 2760 + 2820 2761 dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group; 2821 2762 dev_cg->default_groups[1] = &se_dev->se_dev_pr_group; 2822 2763 dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group; 2823 2764 dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group; 2824 - dev_cg->default_groups[4] = NULL; 2765 + dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group; 2766 + dev_cg->default_groups[5] = NULL; 2825 2767 /* 2826 - * Add core/$HBA/$DEV/alua/tg_pt_gps/default_tg_pt_gp 2768 + * Add core/$HBA/$DEV/alua/default_tg_pt_gp 2827 2769 */ 2828 2770 tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); 2829 2771 if (!(tg_pt_gp)) ··· 2847 2781 tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; 2848 2782 tg_pt_gp_cg->default_groups[1] = NULL; 2849 2783 T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp; 2784 + /* 2785 + * Add core/$HBA/$DEV/statistics/ default groups 2786 + */ 2787 + dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; 2788 + dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, 2789 + GFP_KERNEL); 2790 + if (!dev_stat_grp->default_groups) { 2791 + printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n"); 2792 + goto out; 2793 + } 2794 + target_stat_setup_dev_default_groups(se_dev); 2850 2795 2851 2796 printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" 2852 2797 " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); ··· 2869 2792 core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); 2870 2793 T10_ALUA(se_dev)->default_tg_pt_gp = NULL; 2871 2794 } 2795 + if (dev_stat_grp) 2796 + kfree(dev_stat_grp->default_groups); 2872 2797 if (tg_pt_gp_cg) 2873 2798 kfree(tg_pt_gp_cg->default_groups); 2874 2799 if (dev_cg) ··· 2880 2801 kfree(se_dev); 2881 2802 unlock: 2882 2803 mutex_unlock(&hba->hba_access_mutex); 2883 - return NULL; 2804 + return ERR_PTR(errno); 2884 2805 } 2885 2806 2886 2807 static void target_core_drop_subdev( ··· 2892 2813 struct se_hba *hba; 2893 2814 struct se_subsystem_api *t; 2894 2815 struct config_item *df_item; 2895 - struct config_group *dev_cg, *tg_pt_gp_cg; 2816 + struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp; 2896 2817 int i; 2897 2818 2898 2819 hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); ··· 2903 2824 spin_lock(&se_global->g_device_lock); 2904 2825 list_del(&se_dev->g_se_dev_list); 2905 2826 spin_unlock(&se_global->g_device_lock); 2827 + 2828 + dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; 2829 + for (i = 0; dev_stat_grp->default_groups[i]; i++) { 2830 + df_item = &dev_stat_grp->default_groups[i]->cg_item; 2831 + dev_stat_grp->default_groups[i] = NULL; 2832 + config_item_put(df_item); 2833 + } 2834 + kfree(dev_stat_grp->default_groups); 2906 2835 2907 2836 tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; 2908 2837 for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { ··· 3131 3044 3132 3045 /* Stop 
functions for struct config_item_type target_core_hba_cit */ 3133 3046 3134 - static int target_core_init_configfs(void) 3047 + static int __init target_core_init_configfs(void) 3135 3048 { 3136 3049 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; 3137 3050 struct config_group *lu_gp_cg = NULL; ··· 3263 3176 return -1; 3264 3177 } 3265 3178 3266 - static void target_core_exit_configfs(void) 3179 + static void __exit target_core_exit_configfs(void) 3267 3180 { 3268 3181 struct configfs_subsystem *subsys; 3269 3182 struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
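
Several configfs paths above switch from returning NULL to returning ERR_PTR(errno), with callers now using IS_ERR()/PTR_ERR() to propagate the error. The sketch below shows that convention with simplified userspace stand-ins; the real macros live in include/linux/err.h.

/*
 * Simplified userspace stand-ins for the ERR_PTR()/IS_ERR()/PTR_ERR()
 * convention adopted above; the real definitions are in include/linux/err.h.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)	{ return IS_ERR_VALUE((unsigned long)ptr); }

/* Toy allocator: encodes the errno in the returned pointer on failure */
static void *make_subdev(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return (void *)0x1000;		/* pretend the allocation succeeded */
}

int main(void)
{
	void *p = make_subdev(1);

	if (IS_ERR(p))
		printf("make_subdev failed: %ld\n", PTR_ERR(p));	/* -12 */
	return 0;
}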
+3 -37
drivers/target/target_core_device.c
··· 589 589 * Called with struct se_device->se_port_lock spinlock held. 590 590 */ 591 591 static void core_release_port(struct se_device *dev, struct se_port *port) 592 + __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock) 592 593 { 593 594 /* 594 595 * Wait for any port reference for PR ALL_TG_PT=1 operation ··· 780 779 return; 781 780 } 782 781 783 - /* 784 - * Called with struct se_hba->device_lock held. 785 - */ 786 - void se_clear_dev_ports(struct se_device *dev) 787 - { 788 - struct se_hba *hba = dev->se_hba; 789 - struct se_lun *lun; 790 - struct se_portal_group *tpg; 791 - struct se_port *sep, *sep_tmp; 792 - 793 - spin_lock(&dev->se_port_lock); 794 - list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { 795 - spin_unlock(&dev->se_port_lock); 796 - spin_unlock(&hba->device_lock); 797 - 798 - lun = sep->sep_lun; 799 - tpg = sep->sep_tpg; 800 - spin_lock(&lun->lun_sep_lock); 801 - if (lun->lun_se_dev == NULL) { 802 - spin_unlock(&lun->lun_sep_lock); 803 - continue; 804 - } 805 - spin_unlock(&lun->lun_sep_lock); 806 - 807 - core_dev_del_lun(tpg, lun->unpacked_lun); 808 - 809 - spin_lock(&hba->device_lock); 810 - spin_lock(&dev->se_port_lock); 811 - } 812 - spin_unlock(&dev->se_port_lock); 813 - 814 - return; 815 - } 816 - 817 782 /* se_free_virtual_device(): 818 783 * 819 784 * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. 820 785 */ 821 786 int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) 822 787 { 823 - spin_lock(&hba->device_lock); 824 - se_clear_dev_ports(dev); 825 - spin_unlock(&hba->device_lock); 788 + if (!list_empty(&dev->dev_sep_list)) 789 + dump_stack(); 826 790 827 791 core_alua_free_lu_gp_mem(dev); 828 792 se_release_device_for_hba(dev);
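
core_release_port() above gains __releases()/__acquires() sparse annotations because it drops and retakes dev->se_port_lock, which its caller holds. The following is a minimal sketch of how those annotations read outside the kernel, using a pthread mutex as a stand-in for the spinlock; the macro bodies mirror include/linux/compiler.h and expand to nothing in a normal, non-sparse build.

/*
 * Userspace sketch of the __releases()/__acquires() annotations added to
 * core_release_port(). Under sparse ("make C=1") they document that the
 * function drops and retakes a lock its caller holds; in an ordinary build
 * they compile away.
 */
#include <pthread.h>
#include <stdio.h>

#ifdef __CHECKER__
# define __releases(x)	__attribute__((context(x, 1, 0)))
# define __acquires(x)	__attribute__((context(x, 0, 1)))
#else
# define __releases(x)
# define __acquires(x)
#endif

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with port_lock held; drops it while waiting, then retakes it. */
static void release_port(void)
	__releases(&port_lock) __acquires(&port_lock)
{
	pthread_mutex_unlock(&port_lock);
	puts("waiting for outstanding port references...");
	pthread_mutex_lock(&port_lock);
}

int main(void)
{
	pthread_mutex_lock(&port_lock);
	release_port();
	pthread_mutex_unlock(&port_lock);
	return 0;
}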
+204 -5
drivers/target/target_core_fabric_configfs.c
··· 4 4 * This file contains generic fabric module configfs infrastructure for 5 5 * TCM v4.x code 6 6 * 7 - * Copyright (c) 2010 Rising Tide Systems 8 - * Copyright (c) 2010 Linux-iSCSI.org 7 + * Copyright (c) 2010,2011 Rising Tide Systems 8 + * Copyright (c) 2010,2011 Linux-iSCSI.org 9 9 * 10 - * Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org> 10 + * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org> 11 11 * 12 12 * This program is free software; you can redistribute it and/or modify 13 13 * it under the terms of the GNU General Public License as published by ··· 48 48 #include "target_core_alua.h" 49 49 #include "target_core_hba.h" 50 50 #include "target_core_pr.h" 51 + #include "target_core_stat.h" 51 52 52 53 #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ 53 54 static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ ··· 242 241 243 242 /* End of tfc_tpg_mappedlun_cit */ 244 243 244 + /* Start of tfc_tpg_mappedlun_port_cit */ 245 + 246 + static struct config_group *target_core_mappedlun_stat_mkdir( 247 + struct config_group *group, 248 + const char *name) 249 + { 250 + return ERR_PTR(-ENOSYS); 251 + } 252 + 253 + static void target_core_mappedlun_stat_rmdir( 254 + struct config_group *group, 255 + struct config_item *item) 256 + { 257 + return; 258 + } 259 + 260 + static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = { 261 + .make_group = target_core_mappedlun_stat_mkdir, 262 + .drop_item = target_core_mappedlun_stat_rmdir, 263 + }; 264 + 265 + TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops, 266 + NULL); 267 + 268 + /* End of tfc_tpg_mappedlun_port_cit */ 269 + 245 270 /* Start of tfc_tpg_nacl_attrib_cit */ 246 271 247 272 CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group); ··· 321 294 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 322 295 struct se_lun_acl *lacl; 323 296 struct config_item *acl_ci; 297 + struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; 324 298 char *buf; 325 299 unsigned long mapped_lun; 326 300 int ret = 0; ··· 358 330 359 331 lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, 360 332 config_item_name(acl_ci), &ret); 361 - if (!(lacl)) 333 + if (!(lacl)) { 334 + ret = -EINVAL; 362 335 goto out; 336 + } 337 + 338 + lacl_cg = &lacl->se_lun_group; 339 + lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 340 + GFP_KERNEL); 341 + if (!lacl_cg->default_groups) { 342 + printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n"); 343 + ret = -ENOMEM; 344 + goto out; 345 + } 363 346 364 347 config_group_init_type_name(&lacl->se_lun_group, name, 365 348 &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); 349 + config_group_init_type_name(&lacl->ml_stat_grps.stat_group, 350 + "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit); 351 + lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; 352 + lacl_cg->default_groups[1] = NULL; 353 + 354 + ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; 355 + ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, 356 + GFP_KERNEL); 357 + if (!ml_stat_grp->default_groups) { 358 + printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n"); 359 + ret = -ENOMEM; 360 + goto out; 361 + } 362 + target_stat_setup_mappedlun_default_groups(lacl); 366 363 367 364 kfree(buf); 368 365 return &lacl->se_lun_group; 369 366 out: 367 + if (lacl_cg) 368 + kfree(lacl_cg->default_groups); 370 369 kfree(buf); 371 370 return 
ERR_PTR(ret); 372 371 } ··· 402 347 struct config_group *group, 403 348 struct config_item *item) 404 349 { 350 + struct se_lun_acl *lacl = container_of(to_config_group(item), 351 + struct se_lun_acl, se_lun_group); 352 + struct config_item *df_item; 353 + struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; 354 + int i; 355 + 356 + ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; 357 + for (i = 0; ml_stat_grp->default_groups[i]; i++) { 358 + df_item = &ml_stat_grp->default_groups[i]->cg_item; 359 + ml_stat_grp->default_groups[i] = NULL; 360 + config_item_put(df_item); 361 + } 362 + kfree(ml_stat_grp->default_groups); 363 + 364 + lacl_cg = &lacl->se_lun_group; 365 + for (i = 0; lacl_cg->default_groups[i]; i++) { 366 + df_item = &lacl_cg->default_groups[i]->cg_item; 367 + lacl_cg->default_groups[i] = NULL; 368 + config_item_put(df_item); 369 + } 370 + kfree(lacl_cg->default_groups); 371 + 405 372 config_item_put(item); 406 373 } 407 374 ··· 453 376 454 377 /* End of tfc_tpg_nacl_base_cit */ 455 378 379 + /* Start of tfc_node_fabric_stats_cit */ 380 + /* 381 + * This is used as a placeholder for struct se_node_acl->acl_fabric_stat_group 382 + * to allow fabrics access to ->acl_fabric_stat_group->default_groups[] 383 + */ 384 + TF_CIT_SETUP(tpg_nacl_stat, NULL, NULL, NULL); 385 + 386 + /* End of tfc_wwn_fabric_stats_cit */ 387 + 456 388 /* Start of tfc_tpg_nacl_cit */ 457 389 458 390 static struct config_group *target_fabric_make_nodeacl( ··· 488 402 nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group; 489 403 nacl_cg->default_groups[1] = &se_nacl->acl_auth_group; 490 404 nacl_cg->default_groups[2] = &se_nacl->acl_param_group; 491 - nacl_cg->default_groups[3] = NULL; 405 + nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group; 406 + nacl_cg->default_groups[4] = NULL; 492 407 493 408 config_group_init_type_name(&se_nacl->acl_group, name, 494 409 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); ··· 499 412 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); 500 413 config_group_init_type_name(&se_nacl->acl_param_group, "param", 501 414 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); 415 + config_group_init_type_name(&se_nacl->acl_fabric_stat_group, 416 + "fabric_statistics", 417 + &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit); 502 418 503 419 return &se_nacl->acl_group; 504 420 } ··· 848 758 849 759 /* End of tfc_tpg_port_cit */ 850 760 761 + /* Start of tfc_tpg_port_stat_cit */ 762 + 763 + static struct config_group *target_core_port_stat_mkdir( 764 + struct config_group *group, 765 + const char *name) 766 + { 767 + return ERR_PTR(-ENOSYS); 768 + } 769 + 770 + static void target_core_port_stat_rmdir( 771 + struct config_group *group, 772 + struct config_item *item) 773 + { 774 + return; 775 + } 776 + 777 + static struct configfs_group_operations target_fabric_port_stat_group_ops = { 778 + .make_group = target_core_port_stat_mkdir, 779 + .drop_item = target_core_port_stat_rmdir, 780 + }; 781 + 782 + TF_CIT_SETUP(tpg_port_stat, NULL, &target_fabric_port_stat_group_ops, NULL); 783 + 784 + /* End of tfc_tpg_port_stat_cit */ 785 + 851 786 /* Start of tfc_tpg_lun_cit */ 852 787 853 788 static struct config_group *target_fabric_make_lun( ··· 883 768 struct se_portal_group *se_tpg = container_of(group, 884 769 struct se_portal_group, tpg_lun_group); 885 770 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 771 + struct config_group *lun_cg = NULL, *port_stat_grp = NULL; 886 772 unsigned long unpacked_lun; 773 + int errno; 887 774 888 775 if (strstr(name, "lun_") != name) { 889 776 printk(KERN_ERR 
"Unable to locate \'_\" in" ··· 899 782 if (!(lun)) 900 783 return ERR_PTR(-EINVAL); 901 784 785 + lun_cg = &lun->lun_group; 786 + lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 787 + GFP_KERNEL); 788 + if (!lun_cg->default_groups) { 789 + printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n"); 790 + return ERR_PTR(-ENOMEM); 791 + } 792 + 902 793 config_group_init_type_name(&lun->lun_group, name, 903 794 &TF_CIT_TMPL(tf)->tfc_tpg_port_cit); 795 + config_group_init_type_name(&lun->port_stat_grps.stat_group, 796 + "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit); 797 + lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; 798 + lun_cg->default_groups[1] = NULL; 799 + 800 + port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; 801 + port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, 802 + GFP_KERNEL); 803 + if (!port_stat_grp->default_groups) { 804 + printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n"); 805 + errno = -ENOMEM; 806 + goto out; 807 + } 808 + target_stat_setup_port_default_groups(lun); 904 809 905 810 return &lun->lun_group; 811 + out: 812 + if (lun_cg) 813 + kfree(lun_cg->default_groups); 814 + return ERR_PTR(errno); 906 815 } 907 816 908 817 static void target_fabric_drop_lun( 909 818 struct config_group *group, 910 819 struct config_item *item) 911 820 { 821 + struct se_lun *lun = container_of(to_config_group(item), 822 + struct se_lun, lun_group); 823 + struct config_item *df_item; 824 + struct config_group *lun_cg, *port_stat_grp; 825 + int i; 826 + 827 + port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; 828 + for (i = 0; port_stat_grp->default_groups[i]; i++) { 829 + df_item = &port_stat_grp->default_groups[i]->cg_item; 830 + port_stat_grp->default_groups[i] = NULL; 831 + config_item_put(df_item); 832 + } 833 + kfree(port_stat_grp->default_groups); 834 + 835 + lun_cg = &lun->lun_group; 836 + for (i = 0; lun_cg->default_groups[i]; i++) { 837 + df_item = &lun_cg->default_groups[i]->cg_item; 838 + lun_cg->default_groups[i] = NULL; 839 + config_item_put(df_item); 840 + } 841 + kfree(lun_cg->default_groups); 842 + 912 843 config_item_put(item); 913 844 } 914 845 ··· 1111 946 1112 947 /* End of tfc_tpg_cit */ 1113 948 949 + /* Start of tfc_wwn_fabric_stats_cit */ 950 + /* 951 + * This is used as a placeholder for struct se_wwn->fabric_stat_group 952 + * to allow fabrics access to ->fabric_stat_group->default_groups[] 953 + */ 954 + TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL); 955 + 956 + /* End of tfc_wwn_fabric_stats_cit */ 957 + 1114 958 /* Start of tfc_wwn_cit */ 1115 959 1116 960 static struct config_group *target_fabric_make_wwn( ··· 1140 966 return ERR_PTR(-EINVAL); 1141 967 1142 968 wwn->wwn_tf = tf; 969 + /* 970 + * Setup default groups from pre-allocated wwn->wwn_default_groups 971 + */ 972 + wwn->wwn_group.default_groups = wwn->wwn_default_groups; 973 + wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group; 974 + wwn->wwn_group.default_groups[1] = NULL; 975 + 1143 976 config_group_init_type_name(&wwn->wwn_group, name, 1144 977 &TF_CIT_TMPL(tf)->tfc_tpg_cit); 978 + config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", 979 + &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit); 1145 980 1146 981 return &wwn->wwn_group; 1147 982 } ··· 1159 976 struct config_group *group, 1160 977 struct config_item *item) 1161 978 { 979 + struct se_wwn *wwn = container_of(to_config_group(item), 980 + struct se_wwn, wwn_group); 981 + struct config_item *df_item; 982 + struct config_group *cg = 
&wwn->wwn_group; 983 + int i; 984 + 985 + for (i = 0; cg->default_groups[i]; i++) { 986 + df_item = &cg->default_groups[i]->cg_item; 987 + cg->default_groups[i] = NULL; 988 + config_item_put(df_item); 989 + } 990 + 1162 991 config_item_put(item); 1163 992 } 1164 993 ··· 1210 1015 { 1211 1016 target_fabric_setup_discovery_cit(tf); 1212 1017 target_fabric_setup_wwn_cit(tf); 1018 + target_fabric_setup_wwn_fabric_stats_cit(tf); 1213 1019 target_fabric_setup_tpg_cit(tf); 1214 1020 target_fabric_setup_tpg_base_cit(tf); 1215 1021 target_fabric_setup_tpg_port_cit(tf); 1022 + target_fabric_setup_tpg_port_stat_cit(tf); 1216 1023 target_fabric_setup_tpg_lun_cit(tf); 1217 1024 target_fabric_setup_tpg_np_cit(tf); 1218 1025 target_fabric_setup_tpg_np_base_cit(tf); ··· 1225 1028 target_fabric_setup_tpg_nacl_attrib_cit(tf); 1226 1029 target_fabric_setup_tpg_nacl_auth_cit(tf); 1227 1030 target_fabric_setup_tpg_nacl_param_cit(tf); 1031 + target_fabric_setup_tpg_nacl_stat_cit(tf); 1228 1032 target_fabric_setup_tpg_mappedlun_cit(tf); 1033 + target_fabric_setup_tpg_mappedlun_stat_cit(tf); 1229 1034 1230 1035 return 0; 1231 1036 }
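Most of the target_core_fabric_configfs.c additions follow one pattern: hang a "statistics" child group off an existing configfs group by allocating a NULL-terminated default_groups array and initialising the child with config_group_init_type_name(), then on ->drop_item() walk the array, clear each slot and config_item_put() the child before the parent itself is put. Below is a condensed sketch of that pattern under hypothetical names (struct example_parent, example_setup(), example_teardown()); the array here is sized by pointer, which is all a NULL-terminated pointer list needs:

#include <linux/slab.h>
#include <linux/configfs.h>

struct example_parent {
	struct config_group group;		/* parent group */
	struct config_group stat_group;		/* "statistics" child */
};

static int example_setup(struct example_parent *p, struct config_item_type *stat_cit)
{
	p->group.default_groups = kzalloc(2 * sizeof(struct config_group *),
					  GFP_KERNEL);
	if (!p->group.default_groups)
		return -ENOMEM;

	config_group_init_type_name(&p->stat_group, "statistics", stat_cit);
	p->group.default_groups[0] = &p->stat_group;
	p->group.default_groups[1] = NULL;	/* terminator */
	return 0;
}

static void example_teardown(struct example_parent *p)
{
	int i;

	for (i = 0; p->group.default_groups[i]; i++) {
		struct config_item *child = &p->group.default_groups[i]->cg_item;

		p->group.default_groups[i] = NULL;
		config_item_put(child);		/* drop the child's reference */
	}
	kfree(p->group.default_groups);
}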
+1
drivers/target/target_core_fabric_lib.c
··· 34 34 #include <target/target_core_base.h> 35 35 #include <target/target_core_device.h> 36 36 #include <target/target_core_transport.h> 37 + #include <target/target_core_fabric_lib.h> 37 38 #include <target/target_core_fabric_ops.h> 38 39 #include <target/target_core_configfs.h> 39 40
+22 -6
drivers/target/target_core_file.c
··· 134 134 mm_segment_t old_fs; 135 135 struct file *file; 136 136 struct inode *inode = NULL; 137 - int dev_flags = 0, flags; 137 + int dev_flags = 0, flags, ret = -EINVAL; 138 138 139 139 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 140 140 ··· 146 146 if (IS_ERR(dev_p)) { 147 147 printk(KERN_ERR "getname(%s) failed: %lu\n", 148 148 fd_dev->fd_dev_name, IS_ERR(dev_p)); 149 + ret = PTR_ERR(dev_p); 149 150 goto fail; 150 151 } 151 152 #if 0 ··· 166 165 flags |= O_SYNC; 167 166 168 167 file = filp_open(dev_p, flags, 0600); 169 - 170 - if (IS_ERR(file) || !file || !file->f_dentry) { 168 + if (IS_ERR(file)) { 169 + printk(KERN_ERR "filp_open(%s) failed\n", dev_p); 170 + ret = PTR_ERR(file); 171 + goto fail; 172 + } 173 + if (!file || !file->f_dentry) { 171 174 printk(KERN_ERR "filp_open(%s) failed\n", dev_p); 172 175 goto fail; 173 176 } ··· 246 241 fd_dev->fd_file = NULL; 247 242 } 248 243 putname(dev_p); 249 - return NULL; 244 + return ERR_PTR(ret); 250 245 } 251 246 252 247 /* fd_free_device(): (Part of se_subsystem_api_t template) ··· 514 509 static match_table_t tokens = { 515 510 {Opt_fd_dev_name, "fd_dev_name=%s"}, 516 511 {Opt_fd_dev_size, "fd_dev_size=%s"}, 517 - {Opt_fd_buffered_io, "fd_buffered_id=%d"}, 512 + {Opt_fd_buffered_io, "fd_buffered_io=%d"}, 518 513 {Opt_err, NULL} 519 514 }; 520 515 ··· 541 536 token = match_token(ptr, tokens, args); 542 537 switch (token) { 543 538 case Opt_fd_dev_name: 539 + arg_p = match_strdup(&args[0]); 540 + if (!arg_p) { 541 + ret = -ENOMEM; 542 + break; 543 + } 544 544 snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, 545 - "%s", match_strdup(&args[0])); 545 + "%s", arg_p); 546 + kfree(arg_p); 546 547 printk(KERN_INFO "FILEIO: Referencing Path: %s\n", 547 548 fd_dev->fd_dev_name); 548 549 fd_dev->fbd_flags |= FBDF_HAS_PATH; 549 550 break; 550 551 case Opt_fd_dev_size: 551 552 arg_p = match_strdup(&args[0]); 553 + if (!arg_p) { 554 + ret = -ENOMEM; 555 + break; 556 + } 552 557 ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); 558 + kfree(arg_p); 553 559 if (ret < 0) { 554 560 printk(KERN_ERR "strict_strtoull() failed for" 555 561 " fd_dev_size=\n");
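The fd_set_configfs_dev_params() hunks above also plug a leak: match_strdup() hands back a kmalloc'ed copy of the matched token, so it must be checked for NULL and kfree'd once the value has been copied out or converted; the old code passed it straight into snprintf() and dropped the pointer. A small sketch of the corrected pattern (example_parse_name() and its arguments are invented for illustration):

#include <linux/kernel.h>
#include <linux/parser.h>
#include <linux/slab.h>

static int example_parse_name(substring_t *arg, char *dst, size_t dst_len)
{
	char *tmp = match_strdup(arg);	/* kmalloc'ed copy of the token */

	if (!tmp)
		return -ENOMEM;
	snprintf(dst, dst_len, "%s", tmp);
	kfree(tmp);			/* previously leaked */
	return 0;
}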
+2 -13
drivers/target/target_core_hba.c
··· 151 151 int 152 152 core_delete_hba(struct se_hba *hba) 153 153 { 154 - struct se_device *dev, *dev_tmp; 155 - 156 - spin_lock(&hba->device_lock); 157 - list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) { 158 - 159 - se_clear_dev_ports(dev); 160 - spin_unlock(&hba->device_lock); 161 - 162 - se_release_device_for_hba(dev); 163 - 164 - spin_lock(&hba->device_lock); 165 - } 166 - spin_unlock(&hba->device_lock); 154 + if (!list_empty(&hba->hba_dev_list)) 155 + dump_stack(); 167 156 168 157 hba->transport->detach_hba(hba); 169 158
+17 -11
drivers/target/target_core_iblock.c
··· 129 129 struct request_queue *q; 130 130 struct queue_limits *limits; 131 131 u32 dev_flags = 0; 132 + int ret = -EINVAL; 132 133 133 134 if (!(ib_dev)) { 134 135 printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); 135 - return 0; 136 + return ERR_PTR(ret); 136 137 } 137 138 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 138 139 /* ··· 142 141 ib_dev->ibd_bio_set = bioset_create(32, 64); 143 142 if (!(ib_dev->ibd_bio_set)) { 144 143 printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); 145 - return 0; 144 + return ERR_PTR(-ENOMEM); 146 145 } 147 146 printk(KERN_INFO "IBLOCK: Created bio_set()\n"); 148 147 /* ··· 154 153 155 154 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, 156 155 FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); 157 - if (IS_ERR(bd)) 156 + if (IS_ERR(bd)) { 157 + ret = PTR_ERR(bd); 158 158 goto failed; 159 + } 159 160 /* 160 161 * Setup the local scope queue_limits from struct request_queue->limits 161 162 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. ··· 187 184 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM 188 185 * in ATA and we need to set TPE=1 189 186 */ 190 - if (blk_queue_discard(bdev_get_queue(bd))) { 191 - struct request_queue *q = bdev_get_queue(bd); 192 - 187 + if (blk_queue_discard(q)) { 193 188 DEV_ATTRIB(dev)->max_unmap_lba_count = 194 189 q->limits.max_discard_sectors; 195 190 /* ··· 213 212 ib_dev->ibd_bd = NULL; 214 213 ib_dev->ibd_major = 0; 215 214 ib_dev->ibd_minor = 0; 216 - return NULL; 215 + return ERR_PTR(ret); 217 216 } 218 217 219 218 static void iblock_free_device(void *p) ··· 468 467 const char *page, ssize_t count) 469 468 { 470 469 struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; 471 - char *orig, *ptr, *opts; 470 + char *orig, *ptr, *arg_p, *opts; 472 471 substring_t args[MAX_OPT_ARGS]; 473 472 int ret = 0, arg, token; 474 473 ··· 491 490 ret = -EEXIST; 492 491 goto out; 493 492 } 494 - 495 - ret = snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, 496 - "%s", match_strdup(&args[0])); 493 + arg_p = match_strdup(&args[0]); 494 + if (!arg_p) { 495 + ret = -ENOMEM; 496 + break; 497 + } 498 + snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, 499 + "%s", arg_p); 500 + kfree(arg_p); 497 501 printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", 498 502 ib_dev->ibd_udev_path); 499 503 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
+13 -9
drivers/target/target_core_pscsi.c
··· 441 441 struct pscsi_dev_virt *pdv, 442 442 struct se_subsystem_dev *se_dev, 443 443 struct se_hba *hba) 444 + __releases(sh->host_lock) 444 445 { 445 446 struct se_device *dev; 446 447 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; ··· 489 488 struct pscsi_dev_virt *pdv, 490 489 struct se_subsystem_dev *se_dev, 491 490 struct se_hba *hba) 491 + __releases(sh->host_lock) 492 492 { 493 493 struct se_device *dev; 494 494 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; ··· 524 522 struct pscsi_dev_virt *pdv, 525 523 struct se_subsystem_dev *se_dev, 526 524 struct se_hba *hba) 525 + __releases(sh->host_lock) 527 526 { 528 527 struct se_device *dev; 529 528 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; ··· 558 555 if (!(pdv)) { 559 556 printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" 560 557 " parameter\n"); 561 - return NULL; 558 + return ERR_PTR(-EINVAL); 562 559 } 563 560 /* 564 561 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the ··· 568 565 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { 569 566 printk(KERN_ERR "pSCSI: Unable to locate struct" 570 567 " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); 571 - return NULL; 568 + return ERR_PTR(-ENODEV); 572 569 } 573 570 /* 574 571 * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device ··· 577 574 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { 578 575 printk(KERN_ERR "pSCSI: udev_path attribute has not" 579 576 " been set before ENABLE=1\n"); 580 - return NULL; 577 + return ERR_PTR(-EINVAL); 581 578 } 582 579 /* 583 580 * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID, ··· 590 587 printk(KERN_ERR "pSCSI: Unable to set hba_mode" 591 588 " with active devices\n"); 592 589 spin_unlock(&hba->device_lock); 593 - return NULL; 590 + return ERR_PTR(-EEXIST); 594 591 } 595 592 spin_unlock(&hba->device_lock); 596 593 597 594 if (pscsi_pmode_enable_hba(hba, 1) != 1) 598 - return NULL; 595 + return ERR_PTR(-ENODEV); 599 596 600 597 legacy_mode_enable = 1; 601 598 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; ··· 605 602 if (!(sh)) { 606 603 printk(KERN_ERR "pSCSI: Unable to locate" 607 604 " pdv_host_id: %d\n", pdv->pdv_host_id); 608 - return NULL; 605 + return ERR_PTR(-ENODEV); 609 606 } 610 607 } 611 608 } else { 612 609 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { 613 610 printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" 614 611 " struct Scsi_Host exists\n"); 615 - return NULL; 612 + return ERR_PTR(-EEXIST); 616 613 } 617 614 } 618 615 ··· 647 644 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; 648 645 } 649 646 pdv->pdv_sd = NULL; 650 - return NULL; 647 + return ERR_PTR(-ENODEV); 651 648 } 652 649 return dev; 653 650 } ··· 663 660 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; 664 661 } 665 662 666 - return NULL; 663 + return ERR_PTR(-ENODEV); 667 664 } 668 665 669 666 /* pscsi_free_device(): (Part of se_subsystem_api_t template) ··· 819 816 if (!(pt->pscsi_cdb)) { 820 817 printk(KERN_ERR "pSCSI: Unable to allocate extended" 821 818 " pt->pscsi_cdb\n"); 819 + kfree(pt); 822 820 return NULL; 823 821 } 824 822 } else
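Besides switching its failure returns to ERR_PTR() with distinct errnos, the pscsi hunk above fixes a small error-path leak: when the extended CDB buffer cannot be allocated, the task structure allocated just before it is now freed before bailing out. The same unwind-on-error shape in miniature, with invented struct and function names:

#include <linux/slab.h>

struct example_task {
	unsigned char *ext_cdb;		/* second, dependent allocation */
};

static struct example_task *example_alloc_task(size_t cdb_len)
{
	struct example_task *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;

	t->ext_cdb = kzalloc(cdb_len, GFP_KERNEL);
	if (!t->ext_cdb) {
		kfree(t);		/* undo the first allocation */
		return NULL;
	}
	return t;
}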
+8 -7
drivers/target/target_core_rd.c
··· 150 150 if (rd_dev->rd_page_count <= 0) { 151 151 printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n", 152 152 rd_dev->rd_page_count); 153 - return -1; 153 + return -EINVAL; 154 154 } 155 155 total_sg_needed = rd_dev->rd_page_count; 156 156 ··· 160 160 if (!(sg_table)) { 161 161 printk(KERN_ERR "Unable to allocate memory for Ramdisk" 162 162 " scatterlist tables\n"); 163 - return -1; 163 + return -ENOMEM; 164 164 } 165 165 166 166 rd_dev->sg_table_array = sg_table; ··· 175 175 if (!(sg)) { 176 176 printk(KERN_ERR "Unable to allocate scatterlist array" 177 177 " for struct rd_dev\n"); 178 - return -1; 178 + return -ENOMEM; 179 179 } 180 180 181 181 sg_init_table((struct scatterlist *)&sg[0], sg_per_table); ··· 191 191 if (!(pg)) { 192 192 printk(KERN_ERR "Unable to allocate scatterlist" 193 193 " pages for struct rd_dev_sg_table\n"); 194 - return -1; 194 + return -ENOMEM; 195 195 } 196 196 sg_assign_page(&sg[j], pg); 197 197 sg[j].length = PAGE_SIZE; ··· 253 253 struct se_dev_limits dev_limits; 254 254 struct rd_dev *rd_dev = p; 255 255 struct rd_host *rd_host = hba->hba_ptr; 256 - int dev_flags = 0; 256 + int dev_flags = 0, ret; 257 257 char prod[16], rev[4]; 258 258 259 259 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 260 260 261 - if (rd_build_device_space(rd_dev) < 0) 261 + ret = rd_build_device_space(rd_dev); 262 + if (ret < 0) 262 263 goto fail; 263 264 264 265 snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP"); ··· 293 292 294 293 fail: 295 294 rd_release_device_space(rd_dev); 296 - return NULL; 295 + return ERR_PTR(ret); 297 296 } 298 297 299 298 static struct se_device *rd_DIRECT_create_virtdevice(
-2
drivers/target/target_core_rd.h
··· 14 14 #define RD_BLOCKSIZE 512 15 15 #define RD_MAX_SECTORS 1024 16 16 17 - extern struct kmem_cache *se_mem_cache; 18 - 19 17 /* Used in target_core_init_configfs() for virtual LUN 0 access */ 20 18 int __init rd_module_init(void); 21 19 void rd_module_exit(void);
+1810
drivers/target/target_core_stat.c
··· 1 + /******************************************************************************* 2 + * Filename: target_core_stat.c 3 + * 4 + * Copyright (c) 2011 Rising Tide Systems 5 + * Copyright (c) 2011 Linux-iSCSI.org 6 + * 7 + * Modern ConfigFS group context specific statistics based on original 8 + * target_core_mib.c code 9 + * 10 + * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved. 11 + * 12 + * Nicholas A. Bellinger <nab@linux-iscsi.org> 13 + * 14 + * This program is free software; you can redistribute it and/or modify 15 + * it under the terms of the GNU General Public License as published by 16 + * the Free Software Foundation; either version 2 of the License, or 17 + * (at your option) any later version. 18 + * 19 + * This program is distributed in the hope that it will be useful, 20 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 + * GNU General Public License for more details. 23 + * 24 + * You should have received a copy of the GNU General Public License 25 + * along with this program; if not, write to the Free Software 26 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 27 + * 28 + ******************************************************************************/ 29 + 30 + #include <linux/kernel.h> 31 + #include <linux/module.h> 32 + #include <linux/delay.h> 33 + #include <linux/timer.h> 34 + #include <linux/string.h> 35 + #include <linux/version.h> 36 + #include <generated/utsrelease.h> 37 + #include <linux/utsname.h> 38 + #include <linux/proc_fs.h> 39 + #include <linux/seq_file.h> 40 + #include <linux/blkdev.h> 41 + #include <linux/configfs.h> 42 + #include <scsi/scsi.h> 43 + #include <scsi/scsi_device.h> 44 + #include <scsi/scsi_host.h> 45 + 46 + #include <target/target_core_base.h> 47 + #include <target/target_core_transport.h> 48 + #include <target/target_core_fabric_ops.h> 49 + #include <target/target_core_configfs.h> 50 + #include <target/configfs_macros.h> 51 + 52 + #include "target_core_hba.h" 53 + 54 + #ifndef INITIAL_JIFFIES 55 + #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) 56 + #endif 57 + 58 + #define NONE "None" 59 + #define ISPRINT(a) ((a >= ' ') && (a <= '~')) 60 + 61 + #define SCSI_LU_INDEX 1 62 + #define LU_COUNT 1 63 + 64 + /* 65 + * SCSI Device Table 66 + */ 67 + 68 + CONFIGFS_EATTR_STRUCT(target_stat_scsi_dev, se_dev_stat_grps); 69 + #define DEV_STAT_SCSI_DEV_ATTR(_name, _mode) \ 70 + static struct target_stat_scsi_dev_attribute \ 71 + target_stat_scsi_dev_##_name = \ 72 + __CONFIGFS_EATTR(_name, _mode, \ 73 + target_stat_scsi_dev_show_attr_##_name, \ 74 + target_stat_scsi_dev_store_attr_##_name); 75 + 76 + #define DEV_STAT_SCSI_DEV_ATTR_RO(_name) \ 77 + static struct target_stat_scsi_dev_attribute \ 78 + target_stat_scsi_dev_##_name = \ 79 + __CONFIGFS_EATTR_RO(_name, \ 80 + target_stat_scsi_dev_show_attr_##_name); 81 + 82 + static ssize_t target_stat_scsi_dev_show_attr_inst( 83 + struct se_dev_stat_grps *sgrps, char *page) 84 + { 85 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 86 + struct se_subsystem_dev, dev_stat_grps); 87 + struct se_hba *hba = se_subdev->se_dev_hba; 88 + struct se_device *dev = se_subdev->se_dev_ptr; 89 + 90 + if (!dev) 91 + return -ENODEV; 92 + 93 + return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 94 + } 95 + DEV_STAT_SCSI_DEV_ATTR_RO(inst); 96 + 97 + static ssize_t target_stat_scsi_dev_show_attr_indx( 98 + struct se_dev_stat_grps *sgrps, char *page) 99 + { 100 + struct 
se_subsystem_dev *se_subdev = container_of(sgrps, 101 + struct se_subsystem_dev, dev_stat_grps); 102 + struct se_device *dev = se_subdev->se_dev_ptr; 103 + 104 + if (!dev) 105 + return -ENODEV; 106 + 107 + return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 108 + } 109 + DEV_STAT_SCSI_DEV_ATTR_RO(indx); 110 + 111 + static ssize_t target_stat_scsi_dev_show_attr_role( 112 + struct se_dev_stat_grps *sgrps, char *page) 113 + { 114 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 115 + struct se_subsystem_dev, dev_stat_grps); 116 + struct se_device *dev = se_subdev->se_dev_ptr; 117 + 118 + if (!dev) 119 + return -ENODEV; 120 + 121 + return snprintf(page, PAGE_SIZE, "Target\n"); 122 + } 123 + DEV_STAT_SCSI_DEV_ATTR_RO(role); 124 + 125 + static ssize_t target_stat_scsi_dev_show_attr_ports( 126 + struct se_dev_stat_grps *sgrps, char *page) 127 + { 128 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 129 + struct se_subsystem_dev, dev_stat_grps); 130 + struct se_device *dev = se_subdev->se_dev_ptr; 131 + 132 + if (!dev) 133 + return -ENODEV; 134 + 135 + return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count); 136 + } 137 + DEV_STAT_SCSI_DEV_ATTR_RO(ports); 138 + 139 + CONFIGFS_EATTR_OPS(target_stat_scsi_dev, se_dev_stat_grps, scsi_dev_group); 140 + 141 + static struct configfs_attribute *target_stat_scsi_dev_attrs[] = { 142 + &target_stat_scsi_dev_inst.attr, 143 + &target_stat_scsi_dev_indx.attr, 144 + &target_stat_scsi_dev_role.attr, 145 + &target_stat_scsi_dev_ports.attr, 146 + NULL, 147 + }; 148 + 149 + static struct configfs_item_operations target_stat_scsi_dev_attrib_ops = { 150 + .show_attribute = target_stat_scsi_dev_attr_show, 151 + .store_attribute = target_stat_scsi_dev_attr_store, 152 + }; 153 + 154 + static struct config_item_type target_stat_scsi_dev_cit = { 155 + .ct_item_ops = &target_stat_scsi_dev_attrib_ops, 156 + .ct_attrs = target_stat_scsi_dev_attrs, 157 + .ct_owner = THIS_MODULE, 158 + }; 159 + 160 + /* 161 + * SCSI Target Device Table 162 + */ 163 + 164 + CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_dev, se_dev_stat_grps); 165 + #define DEV_STAT_SCSI_TGT_DEV_ATTR(_name, _mode) \ 166 + static struct target_stat_scsi_tgt_dev_attribute \ 167 + target_stat_scsi_tgt_dev_##_name = \ 168 + __CONFIGFS_EATTR(_name, _mode, \ 169 + target_stat_scsi_tgt_dev_show_attr_##_name, \ 170 + target_stat_scsi_tgt_dev_store_attr_##_name); 171 + 172 + #define DEV_STAT_SCSI_TGT_DEV_ATTR_RO(_name) \ 173 + static struct target_stat_scsi_tgt_dev_attribute \ 174 + target_stat_scsi_tgt_dev_##_name = \ 175 + __CONFIGFS_EATTR_RO(_name, \ 176 + target_stat_scsi_tgt_dev_show_attr_##_name); 177 + 178 + static ssize_t target_stat_scsi_tgt_dev_show_attr_inst( 179 + struct se_dev_stat_grps *sgrps, char *page) 180 + { 181 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 182 + struct se_subsystem_dev, dev_stat_grps); 183 + struct se_hba *hba = se_subdev->se_dev_hba; 184 + struct se_device *dev = se_subdev->se_dev_ptr; 185 + 186 + if (!dev) 187 + return -ENODEV; 188 + 189 + return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 190 + } 191 + DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst); 192 + 193 + static ssize_t target_stat_scsi_tgt_dev_show_attr_indx( 194 + struct se_dev_stat_grps *sgrps, char *page) 195 + { 196 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 197 + struct se_subsystem_dev, dev_stat_grps); 198 + struct se_device *dev = se_subdev->se_dev_ptr; 199 + 200 + if (!dev) 201 + return -ENODEV; 202 + 203 + return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 
204 + } 205 + DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx); 206 + 207 + static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus( 208 + struct se_dev_stat_grps *sgrps, char *page) 209 + { 210 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 211 + struct se_subsystem_dev, dev_stat_grps); 212 + struct se_device *dev = se_subdev->se_dev_ptr; 213 + 214 + if (!dev) 215 + return -ENODEV; 216 + 217 + return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT); 218 + } 219 + DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus); 220 + 221 + static ssize_t target_stat_scsi_tgt_dev_show_attr_status( 222 + struct se_dev_stat_grps *sgrps, char *page) 223 + { 224 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 225 + struct se_subsystem_dev, dev_stat_grps); 226 + struct se_device *dev = se_subdev->se_dev_ptr; 227 + char status[16]; 228 + 229 + if (!dev) 230 + return -ENODEV; 231 + 232 + switch (dev->dev_status) { 233 + case TRANSPORT_DEVICE_ACTIVATED: 234 + strcpy(status, "activated"); 235 + break; 236 + case TRANSPORT_DEVICE_DEACTIVATED: 237 + strcpy(status, "deactivated"); 238 + break; 239 + case TRANSPORT_DEVICE_SHUTDOWN: 240 + strcpy(status, "shutdown"); 241 + break; 242 + case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: 243 + case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: 244 + strcpy(status, "offline"); 245 + break; 246 + default: 247 + sprintf(status, "unknown(%d)", dev->dev_status); 248 + break; 249 + } 250 + 251 + return snprintf(page, PAGE_SIZE, "%s\n", status); 252 + } 253 + DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status); 254 + 255 + static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus( 256 + struct se_dev_stat_grps *sgrps, char *page) 257 + { 258 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 259 + struct se_subsystem_dev, dev_stat_grps); 260 + struct se_device *dev = se_subdev->se_dev_ptr; 261 + int non_accessible_lus; 262 + 263 + if (!dev) 264 + return -ENODEV; 265 + 266 + switch (dev->dev_status) { 267 + case TRANSPORT_DEVICE_ACTIVATED: 268 + non_accessible_lus = 0; 269 + break; 270 + case TRANSPORT_DEVICE_DEACTIVATED: 271 + case TRANSPORT_DEVICE_SHUTDOWN: 272 + case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: 273 + case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: 274 + default: 275 + non_accessible_lus = 1; 276 + break; 277 + } 278 + 279 + return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus); 280 + } 281 + DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus); 282 + 283 + static ssize_t target_stat_scsi_tgt_dev_show_attr_resets( 284 + struct se_dev_stat_grps *sgrps, char *page) 285 + { 286 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 287 + struct se_subsystem_dev, dev_stat_grps); 288 + struct se_device *dev = se_subdev->se_dev_ptr; 289 + 290 + if (!dev) 291 + return -ENODEV; 292 + 293 + return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); 294 + } 295 + DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets); 296 + 297 + 298 + CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_dev, se_dev_stat_grps, scsi_tgt_dev_group); 299 + 300 + static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = { 301 + &target_stat_scsi_tgt_dev_inst.attr, 302 + &target_stat_scsi_tgt_dev_indx.attr, 303 + &target_stat_scsi_tgt_dev_num_lus.attr, 304 + &target_stat_scsi_tgt_dev_status.attr, 305 + &target_stat_scsi_tgt_dev_non_access_lus.attr, 306 + &target_stat_scsi_tgt_dev_resets.attr, 307 + NULL, 308 + }; 309 + 310 + static struct configfs_item_operations target_stat_scsi_tgt_dev_attrib_ops = { 311 + .show_attribute = target_stat_scsi_tgt_dev_attr_show, 312 + .store_attribute = target_stat_scsi_tgt_dev_attr_store, 313 + }; 314 + 315 
+ static struct config_item_type target_stat_scsi_tgt_dev_cit = { 316 + .ct_item_ops = &target_stat_scsi_tgt_dev_attrib_ops, 317 + .ct_attrs = target_stat_scsi_tgt_dev_attrs, 318 + .ct_owner = THIS_MODULE, 319 + }; 320 + 321 + /* 322 + * SCSI Logical Unit Table 323 + */ 324 + 325 + CONFIGFS_EATTR_STRUCT(target_stat_scsi_lu, se_dev_stat_grps); 326 + #define DEV_STAT_SCSI_LU_ATTR(_name, _mode) \ 327 + static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \ 328 + __CONFIGFS_EATTR(_name, _mode, \ 329 + target_stat_scsi_lu_show_attr_##_name, \ 330 + target_stat_scsi_lu_store_attr_##_name); 331 + 332 + #define DEV_STAT_SCSI_LU_ATTR_RO(_name) \ 333 + static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \ 334 + __CONFIGFS_EATTR_RO(_name, \ 335 + target_stat_scsi_lu_show_attr_##_name); 336 + 337 + static ssize_t target_stat_scsi_lu_show_attr_inst( 338 + struct se_dev_stat_grps *sgrps, char *page) 339 + { 340 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 341 + struct se_subsystem_dev, dev_stat_grps); 342 + struct se_hba *hba = se_subdev->se_dev_hba; 343 + struct se_device *dev = se_subdev->se_dev_ptr; 344 + 345 + if (!dev) 346 + return -ENODEV; 347 + 348 + return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 349 + } 350 + DEV_STAT_SCSI_LU_ATTR_RO(inst); 351 + 352 + static ssize_t target_stat_scsi_lu_show_attr_dev( 353 + struct se_dev_stat_grps *sgrps, char *page) 354 + { 355 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 356 + struct se_subsystem_dev, dev_stat_grps); 357 + struct se_device *dev = se_subdev->se_dev_ptr; 358 + 359 + if (!dev) 360 + return -ENODEV; 361 + 362 + return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 363 + } 364 + DEV_STAT_SCSI_LU_ATTR_RO(dev); 365 + 366 + static ssize_t target_stat_scsi_lu_show_attr_indx( 367 + struct se_dev_stat_grps *sgrps, char *page) 368 + { 369 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 370 + struct se_subsystem_dev, dev_stat_grps); 371 + struct se_device *dev = se_subdev->se_dev_ptr; 372 + 373 + if (!dev) 374 + return -ENODEV; 375 + 376 + return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX); 377 + } 378 + DEV_STAT_SCSI_LU_ATTR_RO(indx); 379 + 380 + static ssize_t target_stat_scsi_lu_show_attr_lun( 381 + struct se_dev_stat_grps *sgrps, char *page) 382 + { 383 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 384 + struct se_subsystem_dev, dev_stat_grps); 385 + struct se_device *dev = se_subdev->se_dev_ptr; 386 + 387 + if (!dev) 388 + return -ENODEV; 389 + /* FIXME: scsiLuDefaultLun */ 390 + return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0); 391 + } 392 + DEV_STAT_SCSI_LU_ATTR_RO(lun); 393 + 394 + static ssize_t target_stat_scsi_lu_show_attr_lu_name( 395 + struct se_dev_stat_grps *sgrps, char *page) 396 + { 397 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 398 + struct se_subsystem_dev, dev_stat_grps); 399 + struct se_device *dev = se_subdev->se_dev_ptr; 400 + 401 + if (!dev) 402 + return -ENODEV; 403 + /* scsiLuWwnName */ 404 + return snprintf(page, PAGE_SIZE, "%s\n", 405 + (strlen(DEV_T10_WWN(dev)->unit_serial)) ? 
406 + (char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None"); 407 + } 408 + DEV_STAT_SCSI_LU_ATTR_RO(lu_name); 409 + 410 + static ssize_t target_stat_scsi_lu_show_attr_vend( 411 + struct se_dev_stat_grps *sgrps, char *page) 412 + { 413 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 414 + struct se_subsystem_dev, dev_stat_grps); 415 + struct se_device *dev = se_subdev->se_dev_ptr; 416 + int j; 417 + char str[28]; 418 + 419 + if (!dev) 420 + return -ENODEV; 421 + /* scsiLuVendorId */ 422 + memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); 423 + for (j = 0; j < 8; j++) 424 + str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? 425 + DEV_T10_WWN(dev)->vendor[j] : 0x20; 426 + str[8] = 0; 427 + return snprintf(page, PAGE_SIZE, "%s\n", str); 428 + } 429 + DEV_STAT_SCSI_LU_ATTR_RO(vend); 430 + 431 + static ssize_t target_stat_scsi_lu_show_attr_prod( 432 + struct se_dev_stat_grps *sgrps, char *page) 433 + { 434 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 435 + struct se_subsystem_dev, dev_stat_grps); 436 + struct se_device *dev = se_subdev->se_dev_ptr; 437 + int j; 438 + char str[28]; 439 + 440 + if (!dev) 441 + return -ENODEV; 442 + 443 + /* scsiLuProductId */ 444 + memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); 445 + for (j = 0; j < 16; j++) 446 + str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? 447 + DEV_T10_WWN(dev)->model[j] : 0x20; 448 + str[16] = 0; 449 + return snprintf(page, PAGE_SIZE, "%s\n", str); 450 + } 451 + DEV_STAT_SCSI_LU_ATTR_RO(prod); 452 + 453 + static ssize_t target_stat_scsi_lu_show_attr_rev( 454 + struct se_dev_stat_grps *sgrps, char *page) 455 + { 456 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 457 + struct se_subsystem_dev, dev_stat_grps); 458 + struct se_device *dev = se_subdev->se_dev_ptr; 459 + int j; 460 + char str[28]; 461 + 462 + if (!dev) 463 + return -ENODEV; 464 + 465 + /* scsiLuRevisionId */ 466 + memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); 467 + for (j = 0; j < 4; j++) 468 + str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? 469 + DEV_T10_WWN(dev)->revision[j] : 0x20; 470 + str[4] = 0; 471 + return snprintf(page, PAGE_SIZE, "%s\n", str); 472 + } 473 + DEV_STAT_SCSI_LU_ATTR_RO(rev); 474 + 475 + static ssize_t target_stat_scsi_lu_show_attr_dev_type( 476 + struct se_dev_stat_grps *sgrps, char *page) 477 + { 478 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 479 + struct se_subsystem_dev, dev_stat_grps); 480 + struct se_device *dev = se_subdev->se_dev_ptr; 481 + 482 + if (!dev) 483 + return -ENODEV; 484 + 485 + /* scsiLuPeripheralType */ 486 + return snprintf(page, PAGE_SIZE, "%u\n", 487 + TRANSPORT(dev)->get_device_type(dev)); 488 + } 489 + DEV_STAT_SCSI_LU_ATTR_RO(dev_type); 490 + 491 + static ssize_t target_stat_scsi_lu_show_attr_status( 492 + struct se_dev_stat_grps *sgrps, char *page) 493 + { 494 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 495 + struct se_subsystem_dev, dev_stat_grps); 496 + struct se_device *dev = se_subdev->se_dev_ptr; 497 + 498 + if (!dev) 499 + return -ENODEV; 500 + 501 + /* scsiLuStatus */ 502 + return snprintf(page, PAGE_SIZE, "%s\n", 503 + (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ? 
504 + "available" : "notavailable"); 505 + } 506 + DEV_STAT_SCSI_LU_ATTR_RO(status); 507 + 508 + static ssize_t target_stat_scsi_lu_show_attr_state_bit( 509 + struct se_dev_stat_grps *sgrps, char *page) 510 + { 511 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 512 + struct se_subsystem_dev, dev_stat_grps); 513 + struct se_device *dev = se_subdev->se_dev_ptr; 514 + 515 + if (!dev) 516 + return -ENODEV; 517 + 518 + /* scsiLuState */ 519 + return snprintf(page, PAGE_SIZE, "exposed\n"); 520 + } 521 + DEV_STAT_SCSI_LU_ATTR_RO(state_bit); 522 + 523 + static ssize_t target_stat_scsi_lu_show_attr_num_cmds( 524 + struct se_dev_stat_grps *sgrps, char *page) 525 + { 526 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 527 + struct se_subsystem_dev, dev_stat_grps); 528 + struct se_device *dev = se_subdev->se_dev_ptr; 529 + 530 + if (!dev) 531 + return -ENODEV; 532 + 533 + /* scsiLuNumCommands */ 534 + return snprintf(page, PAGE_SIZE, "%llu\n", 535 + (unsigned long long)dev->num_cmds); 536 + } 537 + DEV_STAT_SCSI_LU_ATTR_RO(num_cmds); 538 + 539 + static ssize_t target_stat_scsi_lu_show_attr_read_mbytes( 540 + struct se_dev_stat_grps *sgrps, char *page) 541 + { 542 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 543 + struct se_subsystem_dev, dev_stat_grps); 544 + struct se_device *dev = se_subdev->se_dev_ptr; 545 + 546 + if (!dev) 547 + return -ENODEV; 548 + 549 + /* scsiLuReadMegaBytes */ 550 + return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20)); 551 + } 552 + DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes); 553 + 554 + static ssize_t target_stat_scsi_lu_show_attr_write_mbytes( 555 + struct se_dev_stat_grps *sgrps, char *page) 556 + { 557 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 558 + struct se_subsystem_dev, dev_stat_grps); 559 + struct se_device *dev = se_subdev->se_dev_ptr; 560 + 561 + if (!dev) 562 + return -ENODEV; 563 + 564 + /* scsiLuWrittenMegaBytes */ 565 + return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20)); 566 + } 567 + DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes); 568 + 569 + static ssize_t target_stat_scsi_lu_show_attr_resets( 570 + struct se_dev_stat_grps *sgrps, char *page) 571 + { 572 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 573 + struct se_subsystem_dev, dev_stat_grps); 574 + struct se_device *dev = se_subdev->se_dev_ptr; 575 + 576 + if (!dev) 577 + return -ENODEV; 578 + 579 + /* scsiLuInResets */ 580 + return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); 581 + } 582 + DEV_STAT_SCSI_LU_ATTR_RO(resets); 583 + 584 + static ssize_t target_stat_scsi_lu_show_attr_full_stat( 585 + struct se_dev_stat_grps *sgrps, char *page) 586 + { 587 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 588 + struct se_subsystem_dev, dev_stat_grps); 589 + struct se_device *dev = se_subdev->se_dev_ptr; 590 + 591 + if (!dev) 592 + return -ENODEV; 593 + 594 + /* FIXME: scsiLuOutTaskSetFullStatus */ 595 + return snprintf(page, PAGE_SIZE, "%u\n", 0); 596 + } 597 + DEV_STAT_SCSI_LU_ATTR_RO(full_stat); 598 + 599 + static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds( 600 + struct se_dev_stat_grps *sgrps, char *page) 601 + { 602 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 603 + struct se_subsystem_dev, dev_stat_grps); 604 + struct se_device *dev = se_subdev->se_dev_ptr; 605 + 606 + if (!dev) 607 + return -ENODEV; 608 + 609 + /* FIXME: scsiLuHSInCommands */ 610 + return snprintf(page, PAGE_SIZE, "%u\n", 0); 611 + } 612 + DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds); 613 + 614 + static ssize_t 
target_stat_scsi_lu_show_attr_creation_time( 615 + struct se_dev_stat_grps *sgrps, char *page) 616 + { 617 + struct se_subsystem_dev *se_subdev = container_of(sgrps, 618 + struct se_subsystem_dev, dev_stat_grps); 619 + struct se_device *dev = se_subdev->se_dev_ptr; 620 + 621 + if (!dev) 622 + return -ENODEV; 623 + 624 + /* scsiLuCreationTime */ 625 + return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time - 626 + INITIAL_JIFFIES) * 100 / HZ)); 627 + } 628 + DEV_STAT_SCSI_LU_ATTR_RO(creation_time); 629 + 630 + CONFIGFS_EATTR_OPS(target_stat_scsi_lu, se_dev_stat_grps, scsi_lu_group); 631 + 632 + static struct configfs_attribute *target_stat_scsi_lu_attrs[] = { 633 + &target_stat_scsi_lu_inst.attr, 634 + &target_stat_scsi_lu_dev.attr, 635 + &target_stat_scsi_lu_indx.attr, 636 + &target_stat_scsi_lu_lun.attr, 637 + &target_stat_scsi_lu_lu_name.attr, 638 + &target_stat_scsi_lu_vend.attr, 639 + &target_stat_scsi_lu_prod.attr, 640 + &target_stat_scsi_lu_rev.attr, 641 + &target_stat_scsi_lu_dev_type.attr, 642 + &target_stat_scsi_lu_status.attr, 643 + &target_stat_scsi_lu_state_bit.attr, 644 + &target_stat_scsi_lu_num_cmds.attr, 645 + &target_stat_scsi_lu_read_mbytes.attr, 646 + &target_stat_scsi_lu_write_mbytes.attr, 647 + &target_stat_scsi_lu_resets.attr, 648 + &target_stat_scsi_lu_full_stat.attr, 649 + &target_stat_scsi_lu_hs_num_cmds.attr, 650 + &target_stat_scsi_lu_creation_time.attr, 651 + NULL, 652 + }; 653 + 654 + static struct configfs_item_operations target_stat_scsi_lu_attrib_ops = { 655 + .show_attribute = target_stat_scsi_lu_attr_show, 656 + .store_attribute = target_stat_scsi_lu_attr_store, 657 + }; 658 + 659 + static struct config_item_type target_stat_scsi_lu_cit = { 660 + .ct_item_ops = &target_stat_scsi_lu_attrib_ops, 661 + .ct_attrs = target_stat_scsi_lu_attrs, 662 + .ct_owner = THIS_MODULE, 663 + }; 664 + 665 + /* 666 + * Called from target_core_configfs.c:target_core_make_subdev() to setup 667 + * the target statistics groups + configfs CITs located in target_core_stat.c 668 + */ 669 + void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev) 670 + { 671 + struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group; 672 + 673 + config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group, 674 + "scsi_dev", &target_stat_scsi_dev_cit); 675 + config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group, 676 + "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); 677 + config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group, 678 + "scsi_lu", &target_stat_scsi_lu_cit); 679 + 680 + dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group; 681 + dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group; 682 + dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group; 683 + dev_stat_grp->default_groups[3] = NULL; 684 + } 685 + 686 + /* 687 + * SCSI Port Table 688 + */ 689 + 690 + CONFIGFS_EATTR_STRUCT(target_stat_scsi_port, se_port_stat_grps); 691 + #define DEV_STAT_SCSI_PORT_ATTR(_name, _mode) \ 692 + static struct target_stat_scsi_port_attribute \ 693 + target_stat_scsi_port_##_name = \ 694 + __CONFIGFS_EATTR(_name, _mode, \ 695 + target_stat_scsi_port_show_attr_##_name, \ 696 + target_stat_scsi_port_store_attr_##_name); 697 + 698 + #define DEV_STAT_SCSI_PORT_ATTR_RO(_name) \ 699 + static struct target_stat_scsi_port_attribute \ 700 + target_stat_scsi_port_##_name = \ 701 + __CONFIGFS_EATTR_RO(_name, \ 702 + target_stat_scsi_port_show_attr_##_name); 703 + 
704 + static ssize_t target_stat_scsi_port_show_attr_inst( 705 + struct se_port_stat_grps *pgrps, char *page) 706 + { 707 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 708 + struct se_port *sep; 709 + struct se_device *dev = lun->lun_se_dev; 710 + struct se_hba *hba; 711 + ssize_t ret; 712 + 713 + spin_lock(&lun->lun_sep_lock); 714 + sep = lun->lun_sep; 715 + if (!sep) { 716 + spin_unlock(&lun->lun_sep_lock); 717 + return -ENODEV; 718 + } 719 + hba = dev->se_hba; 720 + ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 721 + spin_unlock(&lun->lun_sep_lock); 722 + return ret; 723 + } 724 + DEV_STAT_SCSI_PORT_ATTR_RO(inst); 725 + 726 + static ssize_t target_stat_scsi_port_show_attr_dev( 727 + struct se_port_stat_grps *pgrps, char *page) 728 + { 729 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 730 + struct se_port *sep; 731 + struct se_device *dev = lun->lun_se_dev; 732 + ssize_t ret; 733 + 734 + spin_lock(&lun->lun_sep_lock); 735 + sep = lun->lun_sep; 736 + if (!sep) { 737 + spin_unlock(&lun->lun_sep_lock); 738 + return -ENODEV; 739 + } 740 + ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 741 + spin_unlock(&lun->lun_sep_lock); 742 + return ret; 743 + } 744 + DEV_STAT_SCSI_PORT_ATTR_RO(dev); 745 + 746 + static ssize_t target_stat_scsi_port_show_attr_indx( 747 + struct se_port_stat_grps *pgrps, char *page) 748 + { 749 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 750 + struct se_port *sep; 751 + ssize_t ret; 752 + 753 + spin_lock(&lun->lun_sep_lock); 754 + sep = lun->lun_sep; 755 + if (!sep) { 756 + spin_unlock(&lun->lun_sep_lock); 757 + return -ENODEV; 758 + } 759 + ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); 760 + spin_unlock(&lun->lun_sep_lock); 761 + return ret; 762 + } 763 + DEV_STAT_SCSI_PORT_ATTR_RO(indx); 764 + 765 + static ssize_t target_stat_scsi_port_show_attr_role( 766 + struct se_port_stat_grps *pgrps, char *page) 767 + { 768 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 769 + struct se_device *dev = lun->lun_se_dev; 770 + struct se_port *sep; 771 + ssize_t ret; 772 + 773 + if (!dev) 774 + return -ENODEV; 775 + 776 + spin_lock(&lun->lun_sep_lock); 777 + sep = lun->lun_sep; 778 + if (!sep) { 779 + spin_unlock(&lun->lun_sep_lock); 780 + return -ENODEV; 781 + } 782 + ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index); 783 + spin_unlock(&lun->lun_sep_lock); 784 + return ret; 785 + } 786 + DEV_STAT_SCSI_PORT_ATTR_RO(role); 787 + 788 + static ssize_t target_stat_scsi_port_show_attr_busy_count( 789 + struct se_port_stat_grps *pgrps, char *page) 790 + { 791 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 792 + struct se_port *sep; 793 + ssize_t ret; 794 + 795 + spin_lock(&lun->lun_sep_lock); 796 + sep = lun->lun_sep; 797 + if (!sep) { 798 + spin_unlock(&lun->lun_sep_lock); 799 + return -ENODEV; 800 + } 801 + /* FIXME: scsiPortBusyStatuses */ 802 + ret = snprintf(page, PAGE_SIZE, "%u\n", 0); 803 + spin_unlock(&lun->lun_sep_lock); 804 + return ret; 805 + } 806 + DEV_STAT_SCSI_PORT_ATTR_RO(busy_count); 807 + 808 + CONFIGFS_EATTR_OPS(target_stat_scsi_port, se_port_stat_grps, scsi_port_group); 809 + 810 + static struct configfs_attribute *target_stat_scsi_port_attrs[] = { 811 + &target_stat_scsi_port_inst.attr, 812 + &target_stat_scsi_port_dev.attr, 813 + &target_stat_scsi_port_indx.attr, 814 + &target_stat_scsi_port_role.attr, 815 + &target_stat_scsi_port_busy_count.attr, 816 + NULL, 817 + }; 818 + 819 + 
static struct configfs_item_operations target_stat_scsi_port_attrib_ops = { 820 + .show_attribute = target_stat_scsi_port_attr_show, 821 + .store_attribute = target_stat_scsi_port_attr_store, 822 + }; 823 + 824 + static struct config_item_type target_stat_scsi_port_cit = { 825 + .ct_item_ops = &target_stat_scsi_port_attrib_ops, 826 + .ct_attrs = target_stat_scsi_port_attrs, 827 + .ct_owner = THIS_MODULE, 828 + }; 829 + 830 + /* 831 + * SCSI Target Port Table 832 + */ 833 + CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_port, se_port_stat_grps); 834 + #define DEV_STAT_SCSI_TGT_PORT_ATTR(_name, _mode) \ 835 + static struct target_stat_scsi_tgt_port_attribute \ 836 + target_stat_scsi_tgt_port_##_name = \ 837 + __CONFIGFS_EATTR(_name, _mode, \ 838 + target_stat_scsi_tgt_port_show_attr_##_name, \ 839 + target_stat_scsi_tgt_port_store_attr_##_name); 840 + 841 + #define DEV_STAT_SCSI_TGT_PORT_ATTR_RO(_name) \ 842 + static struct target_stat_scsi_tgt_port_attribute \ 843 + target_stat_scsi_tgt_port_##_name = \ 844 + __CONFIGFS_EATTR_RO(_name, \ 845 + target_stat_scsi_tgt_port_show_attr_##_name); 846 + 847 + static ssize_t target_stat_scsi_tgt_port_show_attr_inst( 848 + struct se_port_stat_grps *pgrps, char *page) 849 + { 850 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 851 + struct se_device *dev = lun->lun_se_dev; 852 + struct se_port *sep; 853 + struct se_hba *hba; 854 + ssize_t ret; 855 + 856 + spin_lock(&lun->lun_sep_lock); 857 + sep = lun->lun_sep; 858 + if (!sep) { 859 + spin_unlock(&lun->lun_sep_lock); 860 + return -ENODEV; 861 + } 862 + hba = dev->se_hba; 863 + ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 864 + spin_unlock(&lun->lun_sep_lock); 865 + return ret; 866 + } 867 + DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst); 868 + 869 + static ssize_t target_stat_scsi_tgt_port_show_attr_dev( 870 + struct se_port_stat_grps *pgrps, char *page) 871 + { 872 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 873 + struct se_device *dev = lun->lun_se_dev; 874 + struct se_port *sep; 875 + ssize_t ret; 876 + 877 + spin_lock(&lun->lun_sep_lock); 878 + sep = lun->lun_sep; 879 + if (!sep) { 880 + spin_unlock(&lun->lun_sep_lock); 881 + return -ENODEV; 882 + } 883 + ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 884 + spin_unlock(&lun->lun_sep_lock); 885 + return ret; 886 + } 887 + DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev); 888 + 889 + static ssize_t target_stat_scsi_tgt_port_show_attr_indx( 890 + struct se_port_stat_grps *pgrps, char *page) 891 + { 892 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 893 + struct se_port *sep; 894 + ssize_t ret; 895 + 896 + spin_lock(&lun->lun_sep_lock); 897 + sep = lun->lun_sep; 898 + if (!sep) { 899 + spin_unlock(&lun->lun_sep_lock); 900 + return -ENODEV; 901 + } 902 + ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); 903 + spin_unlock(&lun->lun_sep_lock); 904 + return ret; 905 + } 906 + DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx); 907 + 908 + static ssize_t target_stat_scsi_tgt_port_show_attr_name( 909 + struct se_port_stat_grps *pgrps, char *page) 910 + { 911 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 912 + struct se_port *sep; 913 + struct se_portal_group *tpg; 914 + ssize_t ret; 915 + 916 + spin_lock(&lun->lun_sep_lock); 917 + sep = lun->lun_sep; 918 + if (!sep) { 919 + spin_unlock(&lun->lun_sep_lock); 920 + return -ENODEV; 921 + } 922 + tpg = sep->sep_tpg; 923 + 924 + ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", 925 + TPG_TFO(tpg)->get_fabric_name(), 
sep->sep_index); 926 + spin_unlock(&lun->lun_sep_lock); 927 + return ret; 928 + } 929 + DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name); 930 + 931 + static ssize_t target_stat_scsi_tgt_port_show_attr_port_index( 932 + struct se_port_stat_grps *pgrps, char *page) 933 + { 934 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 935 + struct se_port *sep; 936 + struct se_portal_group *tpg; 937 + ssize_t ret; 938 + 939 + spin_lock(&lun->lun_sep_lock); 940 + sep = lun->lun_sep; 941 + if (!sep) { 942 + spin_unlock(&lun->lun_sep_lock); 943 + return -ENODEV; 944 + } 945 + tpg = sep->sep_tpg; 946 + 947 + ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", 948 + TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", 949 + TPG_TFO(tpg)->tpg_get_tag(tpg)); 950 + spin_unlock(&lun->lun_sep_lock); 951 + return ret; 952 + } 953 + DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index); 954 + 955 + static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds( 956 + struct se_port_stat_grps *pgrps, char *page) 957 + { 958 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 959 + struct se_port *sep; 960 + struct se_portal_group *tpg; 961 + ssize_t ret; 962 + 963 + spin_lock(&lun->lun_sep_lock); 964 + sep = lun->lun_sep; 965 + if (!sep) { 966 + spin_unlock(&lun->lun_sep_lock); 967 + return -ENODEV; 968 + } 969 + tpg = sep->sep_tpg; 970 + 971 + ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus); 972 + spin_unlock(&lun->lun_sep_lock); 973 + return ret; 974 + } 975 + DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds); 976 + 977 + static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes( 978 + struct se_port_stat_grps *pgrps, char *page) 979 + { 980 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 981 + struct se_port *sep; 982 + struct se_portal_group *tpg; 983 + ssize_t ret; 984 + 985 + spin_lock(&lun->lun_sep_lock); 986 + sep = lun->lun_sep; 987 + if (!sep) { 988 + spin_unlock(&lun->lun_sep_lock); 989 + return -ENODEV; 990 + } 991 + tpg = sep->sep_tpg; 992 + 993 + ret = snprintf(page, PAGE_SIZE, "%u\n", 994 + (u32)(sep->sep_stats.rx_data_octets >> 20)); 995 + spin_unlock(&lun->lun_sep_lock); 996 + return ret; 997 + } 998 + DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes); 999 + 1000 + static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes( 1001 + struct se_port_stat_grps *pgrps, char *page) 1002 + { 1003 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 1004 + struct se_port *sep; 1005 + struct se_portal_group *tpg; 1006 + ssize_t ret; 1007 + 1008 + spin_lock(&lun->lun_sep_lock); 1009 + sep = lun->lun_sep; 1010 + if (!sep) { 1011 + spin_unlock(&lun->lun_sep_lock); 1012 + return -ENODEV; 1013 + } 1014 + tpg = sep->sep_tpg; 1015 + 1016 + ret = snprintf(page, PAGE_SIZE, "%u\n", 1017 + (u32)(sep->sep_stats.tx_data_octets >> 20)); 1018 + spin_unlock(&lun->lun_sep_lock); 1019 + return ret; 1020 + } 1021 + DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes); 1022 + 1023 + static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds( 1024 + struct se_port_stat_grps *pgrps, char *page) 1025 + { 1026 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 1027 + struct se_port *sep; 1028 + struct se_portal_group *tpg; 1029 + ssize_t ret; 1030 + 1031 + spin_lock(&lun->lun_sep_lock); 1032 + sep = lun->lun_sep; 1033 + if (!sep) { 1034 + spin_unlock(&lun->lun_sep_lock); 1035 + return -ENODEV; 1036 + } 1037 + tpg = sep->sep_tpg; 1038 + 1039 + /* FIXME: scsiTgtPortHsInCommands */ 1040 + ret = snprintf(page, PAGE_SIZE, "%u\n", 0); 1041 + 
spin_unlock(&lun->lun_sep_lock); 1042 + return ret; 1043 + } 1044 + DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds); 1045 + 1046 + CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_port, se_port_stat_grps, 1047 + scsi_tgt_port_group); 1048 + 1049 + static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = { 1050 + &target_stat_scsi_tgt_port_inst.attr, 1051 + &target_stat_scsi_tgt_port_dev.attr, 1052 + &target_stat_scsi_tgt_port_indx.attr, 1053 + &target_stat_scsi_tgt_port_name.attr, 1054 + &target_stat_scsi_tgt_port_port_index.attr, 1055 + &target_stat_scsi_tgt_port_in_cmds.attr, 1056 + &target_stat_scsi_tgt_port_write_mbytes.attr, 1057 + &target_stat_scsi_tgt_port_read_mbytes.attr, 1058 + &target_stat_scsi_tgt_port_hs_in_cmds.attr, 1059 + NULL, 1060 + }; 1061 + 1062 + static struct configfs_item_operations target_stat_scsi_tgt_port_attrib_ops = { 1063 + .show_attribute = target_stat_scsi_tgt_port_attr_show, 1064 + .store_attribute = target_stat_scsi_tgt_port_attr_store, 1065 + }; 1066 + 1067 + static struct config_item_type target_stat_scsi_tgt_port_cit = { 1068 + .ct_item_ops = &target_stat_scsi_tgt_port_attrib_ops, 1069 + .ct_attrs = target_stat_scsi_tgt_port_attrs, 1070 + .ct_owner = THIS_MODULE, 1071 + }; 1072 + 1073 + /* 1074 + * SCSI Transport Table 1075 + o */ 1076 + 1077 + CONFIGFS_EATTR_STRUCT(target_stat_scsi_transport, se_port_stat_grps); 1078 + #define DEV_STAT_SCSI_TRANSPORT_ATTR(_name, _mode) \ 1079 + static struct target_stat_scsi_transport_attribute \ 1080 + target_stat_scsi_transport_##_name = \ 1081 + __CONFIGFS_EATTR(_name, _mode, \ 1082 + target_stat_scsi_transport_show_attr_##_name, \ 1083 + target_stat_scsi_transport_store_attr_##_name); 1084 + 1085 + #define DEV_STAT_SCSI_TRANSPORT_ATTR_RO(_name) \ 1086 + static struct target_stat_scsi_transport_attribute \ 1087 + target_stat_scsi_transport_##_name = \ 1088 + __CONFIGFS_EATTR_RO(_name, \ 1089 + target_stat_scsi_transport_show_attr_##_name); 1090 + 1091 + static ssize_t target_stat_scsi_transport_show_attr_inst( 1092 + struct se_port_stat_grps *pgrps, char *page) 1093 + { 1094 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 1095 + struct se_device *dev = lun->lun_se_dev; 1096 + struct se_port *sep; 1097 + struct se_hba *hba; 1098 + ssize_t ret; 1099 + 1100 + spin_lock(&lun->lun_sep_lock); 1101 + sep = lun->lun_sep; 1102 + if (!sep) { 1103 + spin_unlock(&lun->lun_sep_lock); 1104 + return -ENODEV; 1105 + } 1106 + 1107 + hba = dev->se_hba; 1108 + ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 1109 + spin_unlock(&lun->lun_sep_lock); 1110 + return ret; 1111 + } 1112 + DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst); 1113 + 1114 + static ssize_t target_stat_scsi_transport_show_attr_device( 1115 + struct se_port_stat_grps *pgrps, char *page) 1116 + { 1117 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 1118 + struct se_port *sep; 1119 + struct se_portal_group *tpg; 1120 + ssize_t ret; 1121 + 1122 + spin_lock(&lun->lun_sep_lock); 1123 + sep = lun->lun_sep; 1124 + if (!sep) { 1125 + spin_unlock(&lun->lun_sep_lock); 1126 + return -ENODEV; 1127 + } 1128 + tpg = sep->sep_tpg; 1129 + /* scsiTransportType */ 1130 + ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", 1131 + TPG_TFO(tpg)->get_fabric_name()); 1132 + spin_unlock(&lun->lun_sep_lock); 1133 + return ret; 1134 + } 1135 + DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device); 1136 + 1137 + static ssize_t target_stat_scsi_transport_show_attr_indx( 1138 + struct se_port_stat_grps *pgrps, char *page) 1139 + { 1140 + struct se_lun *lun = 
container_of(pgrps, struct se_lun, port_stat_grps); 1141 + struct se_port *sep; 1142 + struct se_portal_group *tpg; 1143 + ssize_t ret; 1144 + 1145 + spin_lock(&lun->lun_sep_lock); 1146 + sep = lun->lun_sep; 1147 + if (!sep) { 1148 + spin_unlock(&lun->lun_sep_lock); 1149 + return -ENODEV; 1150 + } 1151 + tpg = sep->sep_tpg; 1152 + ret = snprintf(page, PAGE_SIZE, "%u\n", 1153 + TPG_TFO(tpg)->tpg_get_inst_index(tpg)); 1154 + spin_unlock(&lun->lun_sep_lock); 1155 + return ret; 1156 + } 1157 + DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx); 1158 + 1159 + static ssize_t target_stat_scsi_transport_show_attr_dev_name( 1160 + struct se_port_stat_grps *pgrps, char *page) 1161 + { 1162 + struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 1163 + struct se_device *dev = lun->lun_se_dev; 1164 + struct se_port *sep; 1165 + struct se_portal_group *tpg; 1166 + struct t10_wwn *wwn; 1167 + ssize_t ret; 1168 + 1169 + spin_lock(&lun->lun_sep_lock); 1170 + sep = lun->lun_sep; 1171 + if (!sep) { 1172 + spin_unlock(&lun->lun_sep_lock); 1173 + return -ENODEV; 1174 + } 1175 + tpg = sep->sep_tpg; 1176 + wwn = DEV_T10_WWN(dev); 1177 + /* scsiTransportDevName */ 1178 + ret = snprintf(page, PAGE_SIZE, "%s+%s\n", 1179 + TPG_TFO(tpg)->tpg_get_wwn(tpg), 1180 + (strlen(wwn->unit_serial)) ? wwn->unit_serial : 1181 + wwn->vendor); 1182 + spin_unlock(&lun->lun_sep_lock); 1183 + return ret; 1184 + } 1185 + DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name); 1186 + 1187 + CONFIGFS_EATTR_OPS(target_stat_scsi_transport, se_port_stat_grps, 1188 + scsi_transport_group); 1189 + 1190 + static struct configfs_attribute *target_stat_scsi_transport_attrs[] = { 1191 + &target_stat_scsi_transport_inst.attr, 1192 + &target_stat_scsi_transport_device.attr, 1193 + &target_stat_scsi_transport_indx.attr, 1194 + &target_stat_scsi_transport_dev_name.attr, 1195 + NULL, 1196 + }; 1197 + 1198 + static struct configfs_item_operations target_stat_scsi_transport_attrib_ops = { 1199 + .show_attribute = target_stat_scsi_transport_attr_show, 1200 + .store_attribute = target_stat_scsi_transport_attr_store, 1201 + }; 1202 + 1203 + static struct config_item_type target_stat_scsi_transport_cit = { 1204 + .ct_item_ops = &target_stat_scsi_transport_attrib_ops, 1205 + .ct_attrs = target_stat_scsi_transport_attrs, 1206 + .ct_owner = THIS_MODULE, 1207 + }; 1208 + 1209 + /* 1210 + * Called from target_core_fabric_configfs.c:target_fabric_make_lun() to setup 1211 + * the target port statistics groups + configfs CITs located in target_core_stat.c 1212 + */ 1213 + void target_stat_setup_port_default_groups(struct se_lun *lun) 1214 + { 1215 + struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; 1216 + 1217 + config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group, 1218 + "scsi_port", &target_stat_scsi_port_cit); 1219 + config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group, 1220 + "scsi_tgt_port", &target_stat_scsi_tgt_port_cit); 1221 + config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group, 1222 + "scsi_transport", &target_stat_scsi_transport_cit); 1223 + 1224 + port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group; 1225 + port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group; 1226 + port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group; 1227 + port_stat_grp->default_groups[3] = NULL; 1228 + } 1229 + 1230 + /* 1231 + * SCSI Authorized Initiator Table 1232 + */ 1233 + 1234 + CONFIGFS_EATTR_STRUCT(target_stat_scsi_auth_intr, se_ml_stat_grps); 1235 
+ #define DEV_STAT_SCSI_AUTH_INTR_ATTR(_name, _mode) \ 1236 + static struct target_stat_scsi_auth_intr_attribute \ 1237 + target_stat_scsi_auth_intr_##_name = \ 1238 + __CONFIGFS_EATTR(_name, _mode, \ 1239 + target_stat_scsi_auth_intr_show_attr_##_name, \ 1240 + target_stat_scsi_auth_intr_store_attr_##_name); 1241 + 1242 + #define DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(_name) \ 1243 + static struct target_stat_scsi_auth_intr_attribute \ 1244 + target_stat_scsi_auth_intr_##_name = \ 1245 + __CONFIGFS_EATTR_RO(_name, \ 1246 + target_stat_scsi_auth_intr_show_attr_##_name); 1247 + 1248 + static ssize_t target_stat_scsi_auth_intr_show_attr_inst( 1249 + struct se_ml_stat_grps *lgrps, char *page) 1250 + { 1251 + struct se_lun_acl *lacl = container_of(lgrps, 1252 + struct se_lun_acl, ml_stat_grps); 1253 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1254 + struct se_dev_entry *deve; 1255 + struct se_portal_group *tpg; 1256 + ssize_t ret; 1257 + 1258 + spin_lock_irq(&nacl->device_list_lock); 1259 + deve = &nacl->device_list[lacl->mapped_lun]; 1260 + if (!deve->se_lun || !deve->se_lun_acl) { 1261 + spin_unlock_irq(&nacl->device_list_lock); 1262 + return -ENODEV; 1263 + } 1264 + tpg = nacl->se_tpg; 1265 + /* scsiInstIndex */ 1266 + ret = snprintf(page, PAGE_SIZE, "%u\n", 1267 + TPG_TFO(tpg)->tpg_get_inst_index(tpg)); 1268 + spin_unlock_irq(&nacl->device_list_lock); 1269 + return ret; 1270 + } 1271 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst); 1272 + 1273 + static ssize_t target_stat_scsi_auth_intr_show_attr_dev( 1274 + struct se_ml_stat_grps *lgrps, char *page) 1275 + { 1276 + struct se_lun_acl *lacl = container_of(lgrps, 1277 + struct se_lun_acl, ml_stat_grps); 1278 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1279 + struct se_dev_entry *deve; 1280 + struct se_lun *lun; 1281 + struct se_portal_group *tpg; 1282 + ssize_t ret; 1283 + 1284 + spin_lock_irq(&nacl->device_list_lock); 1285 + deve = &nacl->device_list[lacl->mapped_lun]; 1286 + if (!deve->se_lun || !deve->se_lun_acl) { 1287 + spin_unlock_irq(&nacl->device_list_lock); 1288 + return -ENODEV; 1289 + } 1290 + tpg = nacl->se_tpg; 1291 + lun = deve->se_lun; 1292 + /* scsiDeviceIndex */ 1293 + ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); 1294 + spin_unlock_irq(&nacl->device_list_lock); 1295 + return ret; 1296 + } 1297 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev); 1298 + 1299 + static ssize_t target_stat_scsi_auth_intr_show_attr_port( 1300 + struct se_ml_stat_grps *lgrps, char *page) 1301 + { 1302 + struct se_lun_acl *lacl = container_of(lgrps, 1303 + struct se_lun_acl, ml_stat_grps); 1304 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1305 + struct se_dev_entry *deve; 1306 + struct se_portal_group *tpg; 1307 + ssize_t ret; 1308 + 1309 + spin_lock_irq(&nacl->device_list_lock); 1310 + deve = &nacl->device_list[lacl->mapped_lun]; 1311 + if (!deve->se_lun || !deve->se_lun_acl) { 1312 + spin_unlock_irq(&nacl->device_list_lock); 1313 + return -ENODEV; 1314 + } 1315 + tpg = nacl->se_tpg; 1316 + /* scsiAuthIntrTgtPortIndex */ 1317 + ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); 1318 + spin_unlock_irq(&nacl->device_list_lock); 1319 + return ret; 1320 + } 1321 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port); 1322 + 1323 + static ssize_t target_stat_scsi_auth_intr_show_attr_indx( 1324 + struct se_ml_stat_grps *lgrps, char *page) 1325 + { 1326 + struct se_lun_acl *lacl = container_of(lgrps, 1327 + struct se_lun_acl, ml_stat_grps); 1328 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1329 + struct se_dev_entry *deve; 1330 + ssize_t 
ret; 1331 + 1332 + spin_lock_irq(&nacl->device_list_lock); 1333 + deve = &nacl->device_list[lacl->mapped_lun]; 1334 + if (!deve->se_lun || !deve->se_lun_acl) { 1335 + spin_unlock_irq(&nacl->device_list_lock); 1336 + return -ENODEV; 1337 + } 1338 + /* scsiAuthIntrIndex */ 1339 + ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); 1340 + spin_unlock_irq(&nacl->device_list_lock); 1341 + return ret; 1342 + } 1343 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx); 1344 + 1345 + static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port( 1346 + struct se_ml_stat_grps *lgrps, char *page) 1347 + { 1348 + struct se_lun_acl *lacl = container_of(lgrps, 1349 + struct se_lun_acl, ml_stat_grps); 1350 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1351 + struct se_dev_entry *deve; 1352 + ssize_t ret; 1353 + 1354 + spin_lock_irq(&nacl->device_list_lock); 1355 + deve = &nacl->device_list[lacl->mapped_lun]; 1356 + if (!deve->se_lun || !deve->se_lun_acl) { 1357 + spin_unlock_irq(&nacl->device_list_lock); 1358 + return -ENODEV; 1359 + } 1360 + /* scsiAuthIntrDevOrPort */ 1361 + ret = snprintf(page, PAGE_SIZE, "%u\n", 1); 1362 + spin_unlock_irq(&nacl->device_list_lock); 1363 + return ret; 1364 + } 1365 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port); 1366 + 1367 + static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name( 1368 + struct se_ml_stat_grps *lgrps, char *page) 1369 + { 1370 + struct se_lun_acl *lacl = container_of(lgrps, 1371 + struct se_lun_acl, ml_stat_grps); 1372 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1373 + struct se_dev_entry *deve; 1374 + ssize_t ret; 1375 + 1376 + spin_lock_irq(&nacl->device_list_lock); 1377 + deve = &nacl->device_list[lacl->mapped_lun]; 1378 + if (!deve->se_lun || !deve->se_lun_acl) { 1379 + spin_unlock_irq(&nacl->device_list_lock); 1380 + return -ENODEV; 1381 + } 1382 + /* scsiAuthIntrName */ 1383 + ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname); 1384 + spin_unlock_irq(&nacl->device_list_lock); 1385 + return ret; 1386 + } 1387 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name); 1388 + 1389 + static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx( 1390 + struct se_ml_stat_grps *lgrps, char *page) 1391 + { 1392 + struct se_lun_acl *lacl = container_of(lgrps, 1393 + struct se_lun_acl, ml_stat_grps); 1394 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1395 + struct se_dev_entry *deve; 1396 + ssize_t ret; 1397 + 1398 + spin_lock_irq(&nacl->device_list_lock); 1399 + deve = &nacl->device_list[lacl->mapped_lun]; 1400 + if (!deve->se_lun || !deve->se_lun_acl) { 1401 + spin_unlock_irq(&nacl->device_list_lock); 1402 + return -ENODEV; 1403 + } 1404 + /* FIXME: scsiAuthIntrLunMapIndex */ 1405 + ret = snprintf(page, PAGE_SIZE, "%u\n", 0); 1406 + spin_unlock_irq(&nacl->device_list_lock); 1407 + return ret; 1408 + } 1409 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx); 1410 + 1411 + static ssize_t target_stat_scsi_auth_intr_show_attr_att_count( 1412 + struct se_ml_stat_grps *lgrps, char *page) 1413 + { 1414 + struct se_lun_acl *lacl = container_of(lgrps, 1415 + struct se_lun_acl, ml_stat_grps); 1416 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1417 + struct se_dev_entry *deve; 1418 + ssize_t ret; 1419 + 1420 + spin_lock_irq(&nacl->device_list_lock); 1421 + deve = &nacl->device_list[lacl->mapped_lun]; 1422 + if (!deve->se_lun || !deve->se_lun_acl) { 1423 + spin_unlock_irq(&nacl->device_list_lock); 1424 + return -ENODEV; 1425 + } 1426 + /* scsiAuthIntrAttachedTimes */ 1427 + ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count); 1428 + 
spin_unlock_irq(&nacl->device_list_lock); 1429 + return ret; 1430 + } 1431 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count); 1432 + 1433 + static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds( 1434 + struct se_ml_stat_grps *lgrps, char *page) 1435 + { 1436 + struct se_lun_acl *lacl = container_of(lgrps, 1437 + struct se_lun_acl, ml_stat_grps); 1438 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1439 + struct se_dev_entry *deve; 1440 + ssize_t ret; 1441 + 1442 + spin_lock_irq(&nacl->device_list_lock); 1443 + deve = &nacl->device_list[lacl->mapped_lun]; 1444 + if (!deve->se_lun || !deve->se_lun_acl) { 1445 + spin_unlock_irq(&nacl->device_list_lock); 1446 + return -ENODEV; 1447 + } 1448 + /* scsiAuthIntrOutCommands */ 1449 + ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds); 1450 + spin_unlock_irq(&nacl->device_list_lock); 1451 + return ret; 1452 + } 1453 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds); 1454 + 1455 + static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes( 1456 + struct se_ml_stat_grps *lgrps, char *page) 1457 + { 1458 + struct se_lun_acl *lacl = container_of(lgrps, 1459 + struct se_lun_acl, ml_stat_grps); 1460 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1461 + struct se_dev_entry *deve; 1462 + ssize_t ret; 1463 + 1464 + spin_lock_irq(&nacl->device_list_lock); 1465 + deve = &nacl->device_list[lacl->mapped_lun]; 1466 + if (!deve->se_lun || !deve->se_lun_acl) { 1467 + spin_unlock_irq(&nacl->device_list_lock); 1468 + return -ENODEV; 1469 + } 1470 + /* scsiAuthIntrReadMegaBytes */ 1471 + ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20)); 1472 + spin_unlock_irq(&nacl->device_list_lock); 1473 + return ret; 1474 + } 1475 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes); 1476 + 1477 + static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes( 1478 + struct se_ml_stat_grps *lgrps, char *page) 1479 + { 1480 + struct se_lun_acl *lacl = container_of(lgrps, 1481 + struct se_lun_acl, ml_stat_grps); 1482 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1483 + struct se_dev_entry *deve; 1484 + ssize_t ret; 1485 + 1486 + spin_lock_irq(&nacl->device_list_lock); 1487 + deve = &nacl->device_list[lacl->mapped_lun]; 1488 + if (!deve->se_lun || !deve->se_lun_acl) { 1489 + spin_unlock_irq(&nacl->device_list_lock); 1490 + return -ENODEV; 1491 + } 1492 + /* scsiAuthIntrWrittenMegaBytes */ 1493 + ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20)); 1494 + spin_unlock_irq(&nacl->device_list_lock); 1495 + return ret; 1496 + } 1497 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes); 1498 + 1499 + static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds( 1500 + struct se_ml_stat_grps *lgrps, char *page) 1501 + { 1502 + struct se_lun_acl *lacl = container_of(lgrps, 1503 + struct se_lun_acl, ml_stat_grps); 1504 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1505 + struct se_dev_entry *deve; 1506 + ssize_t ret; 1507 + 1508 + spin_lock_irq(&nacl->device_list_lock); 1509 + deve = &nacl->device_list[lacl->mapped_lun]; 1510 + if (!deve->se_lun || !deve->se_lun_acl) { 1511 + spin_unlock_irq(&nacl->device_list_lock); 1512 + return -ENODEV; 1513 + } 1514 + /* FIXME: scsiAuthIntrHSOutCommands */ 1515 + ret = snprintf(page, PAGE_SIZE, "%u\n", 0); 1516 + spin_unlock_irq(&nacl->device_list_lock); 1517 + return ret; 1518 + } 1519 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds); 1520 + 1521 + static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time( 1522 + struct se_ml_stat_grps *lgrps, char *page) 1523 + { 1524 + struct se_lun_acl *lacl = 
container_of(lgrps, 1525 + struct se_lun_acl, ml_stat_grps); 1526 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1527 + struct se_dev_entry *deve; 1528 + ssize_t ret; 1529 + 1530 + spin_lock_irq(&nacl->device_list_lock); 1531 + deve = &nacl->device_list[lacl->mapped_lun]; 1532 + if (!deve->se_lun || !deve->se_lun_acl) { 1533 + spin_unlock_irq(&nacl->device_list_lock); 1534 + return -ENODEV; 1535 + } 1536 + /* scsiAuthIntrLastCreation */ 1537 + ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time - 1538 + INITIAL_JIFFIES) * 100 / HZ)); 1539 + spin_unlock_irq(&nacl->device_list_lock); 1540 + return ret; 1541 + } 1542 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time); 1543 + 1544 + static ssize_t target_stat_scsi_auth_intr_show_attr_row_status( 1545 + struct se_ml_stat_grps *lgrps, char *page) 1546 + { 1547 + struct se_lun_acl *lacl = container_of(lgrps, 1548 + struct se_lun_acl, ml_stat_grps); 1549 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1550 + struct se_dev_entry *deve; 1551 + ssize_t ret; 1552 + 1553 + spin_lock_irq(&nacl->device_list_lock); 1554 + deve = &nacl->device_list[lacl->mapped_lun]; 1555 + if (!deve->se_lun || !deve->se_lun_acl) { 1556 + spin_unlock_irq(&nacl->device_list_lock); 1557 + return -ENODEV; 1558 + } 1559 + /* FIXME: scsiAuthIntrRowStatus */ 1560 + ret = snprintf(page, PAGE_SIZE, "Ready\n"); 1561 + spin_unlock_irq(&nacl->device_list_lock); 1562 + return ret; 1563 + } 1564 + DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status); 1565 + 1566 + CONFIGFS_EATTR_OPS(target_stat_scsi_auth_intr, se_ml_stat_grps, 1567 + scsi_auth_intr_group); 1568 + 1569 + static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = { 1570 + &target_stat_scsi_auth_intr_inst.attr, 1571 + &target_stat_scsi_auth_intr_dev.attr, 1572 + &target_stat_scsi_auth_intr_port.attr, 1573 + &target_stat_scsi_auth_intr_indx.attr, 1574 + &target_stat_scsi_auth_intr_dev_or_port.attr, 1575 + &target_stat_scsi_auth_intr_intr_name.attr, 1576 + &target_stat_scsi_auth_intr_map_indx.attr, 1577 + &target_stat_scsi_auth_intr_att_count.attr, 1578 + &target_stat_scsi_auth_intr_num_cmds.attr, 1579 + &target_stat_scsi_auth_intr_read_mbytes.attr, 1580 + &target_stat_scsi_auth_intr_write_mbytes.attr, 1581 + &target_stat_scsi_auth_intr_hs_num_cmds.attr, 1582 + &target_stat_scsi_auth_intr_creation_time.attr, 1583 + &target_stat_scsi_auth_intr_row_status.attr, 1584 + NULL, 1585 + }; 1586 + 1587 + static struct configfs_item_operations target_stat_scsi_auth_intr_attrib_ops = { 1588 + .show_attribute = target_stat_scsi_auth_intr_attr_show, 1589 + .store_attribute = target_stat_scsi_auth_intr_attr_store, 1590 + }; 1591 + 1592 + static struct config_item_type target_stat_scsi_auth_intr_cit = { 1593 + .ct_item_ops = &target_stat_scsi_auth_intr_attrib_ops, 1594 + .ct_attrs = target_stat_scsi_auth_intr_attrs, 1595 + .ct_owner = THIS_MODULE, 1596 + }; 1597 + 1598 + /* 1599 + * SCSI Attached Initiator Port Table 1600 + */ 1601 + 1602 + CONFIGFS_EATTR_STRUCT(target_stat_scsi_att_intr_port, se_ml_stat_grps); 1603 + #define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR(_name, _mode) \ 1604 + static struct target_stat_scsi_att_intr_port_attribute \ 1605 + target_stat_scsi_att_intr_port_##_name = \ 1606 + __CONFIGFS_EATTR(_name, _mode, \ 1607 + target_stat_scsi_att_intr_port_show_attr_##_name, \ 1608 + target_stat_scsi_att_intr_port_store_attr_##_name); 1609 + 1610 + #define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(_name) \ 1611 + static struct target_stat_scsi_att_intr_port_attribute \ 1612 + target_stat_scsi_att_intr_port_##_name 
= \ 1613 + __CONFIGFS_EATTR_RO(_name, \ 1614 + target_stat_scsi_att_intr_port_show_attr_##_name); 1615 + 1616 + static ssize_t target_stat_scsi_att_intr_port_show_attr_inst( 1617 + struct se_ml_stat_grps *lgrps, char *page) 1618 + { 1619 + struct se_lun_acl *lacl = container_of(lgrps, 1620 + struct se_lun_acl, ml_stat_grps); 1621 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1622 + struct se_dev_entry *deve; 1623 + struct se_portal_group *tpg; 1624 + ssize_t ret; 1625 + 1626 + spin_lock_irq(&nacl->device_list_lock); 1627 + deve = &nacl->device_list[lacl->mapped_lun]; 1628 + if (!deve->se_lun || !deve->se_lun_acl) { 1629 + spin_unlock_irq(&nacl->device_list_lock); 1630 + return -ENODEV; 1631 + } 1632 + tpg = nacl->se_tpg; 1633 + /* scsiInstIndex */ 1634 + ret = snprintf(page, PAGE_SIZE, "%u\n", 1635 + TPG_TFO(tpg)->tpg_get_inst_index(tpg)); 1636 + spin_unlock_irq(&nacl->device_list_lock); 1637 + return ret; 1638 + } 1639 + DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst); 1640 + 1641 + static ssize_t target_stat_scsi_att_intr_port_show_attr_dev( 1642 + struct se_ml_stat_grps *lgrps, char *page) 1643 + { 1644 + struct se_lun_acl *lacl = container_of(lgrps, 1645 + struct se_lun_acl, ml_stat_grps); 1646 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1647 + struct se_dev_entry *deve; 1648 + struct se_lun *lun; 1649 + struct se_portal_group *tpg; 1650 + ssize_t ret; 1651 + 1652 + spin_lock_irq(&nacl->device_list_lock); 1653 + deve = &nacl->device_list[lacl->mapped_lun]; 1654 + if (!deve->se_lun || !deve->se_lun_acl) { 1655 + spin_unlock_irq(&nacl->device_list_lock); 1656 + return -ENODEV; 1657 + } 1658 + tpg = nacl->se_tpg; 1659 + lun = deve->se_lun; 1660 + /* scsiDeviceIndex */ 1661 + ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); 1662 + spin_unlock_irq(&nacl->device_list_lock); 1663 + return ret; 1664 + } 1665 + DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev); 1666 + 1667 + static ssize_t target_stat_scsi_att_intr_port_show_attr_port( 1668 + struct se_ml_stat_grps *lgrps, char *page) 1669 + { 1670 + struct se_lun_acl *lacl = container_of(lgrps, 1671 + struct se_lun_acl, ml_stat_grps); 1672 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1673 + struct se_dev_entry *deve; 1674 + struct se_portal_group *tpg; 1675 + ssize_t ret; 1676 + 1677 + spin_lock_irq(&nacl->device_list_lock); 1678 + deve = &nacl->device_list[lacl->mapped_lun]; 1679 + if (!deve->se_lun || !deve->se_lun_acl) { 1680 + spin_unlock_irq(&nacl->device_list_lock); 1681 + return -ENODEV; 1682 + } 1683 + tpg = nacl->se_tpg; 1684 + /* scsiPortIndex */ 1685 + ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); 1686 + spin_unlock_irq(&nacl->device_list_lock); 1687 + return ret; 1688 + } 1689 + DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port); 1690 + 1691 + static ssize_t target_stat_scsi_att_intr_port_show_attr_indx( 1692 + struct se_ml_stat_grps *lgrps, char *page) 1693 + { 1694 + struct se_lun_acl *lacl = container_of(lgrps, 1695 + struct se_lun_acl, ml_stat_grps); 1696 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1697 + struct se_session *se_sess; 1698 + struct se_portal_group *tpg; 1699 + ssize_t ret; 1700 + 1701 + spin_lock_irq(&nacl->nacl_sess_lock); 1702 + se_sess = nacl->nacl_sess; 1703 + if (!se_sess) { 1704 + spin_unlock_irq(&nacl->nacl_sess_lock); 1705 + return -ENODEV; 1706 + } 1707 + 1708 + tpg = nacl->se_tpg; 1709 + /* scsiAttIntrPortIndex */ 1710 + ret = snprintf(page, PAGE_SIZE, "%u\n", 1711 + TPG_TFO(tpg)->sess_get_index(se_sess)); 1712 + spin_unlock_irq(&nacl->nacl_sess_lock); 1713 + 
return ret; 1714 + } 1715 + DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(indx); 1716 + 1717 + static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx( 1718 + struct se_ml_stat_grps *lgrps, char *page) 1719 + { 1720 + struct se_lun_acl *lacl = container_of(lgrps, 1721 + struct se_lun_acl, ml_stat_grps); 1722 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1723 + struct se_dev_entry *deve; 1724 + ssize_t ret; 1725 + 1726 + spin_lock_irq(&nacl->device_list_lock); 1727 + deve = &nacl->device_list[lacl->mapped_lun]; 1728 + if (!deve->se_lun || !deve->se_lun_acl) { 1729 + spin_unlock_irq(&nacl->device_list_lock); 1730 + return -ENODEV; 1731 + } 1732 + /* scsiAttIntrPortAuthIntrIdx */ 1733 + ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); 1734 + spin_unlock_irq(&nacl->device_list_lock); 1735 + return ret; 1736 + } 1737 + DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx); 1738 + 1739 + static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident( 1740 + struct se_ml_stat_grps *lgrps, char *page) 1741 + { 1742 + struct se_lun_acl *lacl = container_of(lgrps, 1743 + struct se_lun_acl, ml_stat_grps); 1744 + struct se_node_acl *nacl = lacl->se_lun_nacl; 1745 + struct se_session *se_sess; 1746 + struct se_portal_group *tpg; 1747 + ssize_t ret; 1748 + unsigned char buf[64]; 1749 + 1750 + spin_lock_irq(&nacl->nacl_sess_lock); 1751 + se_sess = nacl->nacl_sess; 1752 + if (!se_sess) { 1753 + spin_unlock_irq(&nacl->nacl_sess_lock); 1754 + return -ENODEV; 1755 + } 1756 + 1757 + tpg = nacl->se_tpg; 1758 + /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ 1759 + memset(buf, 0, 64); 1760 + if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) 1761 + TPG_TFO(tpg)->sess_get_initiator_sid(se_sess, 1762 + (unsigned char *)&buf[0], 64); 1763 + 1764 + ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); 1765 + spin_unlock_irq(&nacl->nacl_sess_lock); 1766 + return ret; 1767 + } 1768 + DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_ident); 1769 + 1770 + CONFIGFS_EATTR_OPS(target_stat_scsi_att_intr_port, se_ml_stat_grps, 1771 + scsi_att_intr_port_group); 1772 + 1773 + static struct configfs_attribute *target_stat_scsi_ath_intr_port_attrs[] = { 1774 + &target_stat_scsi_att_intr_port_inst.attr, 1775 + &target_stat_scsi_att_intr_port_dev.attr, 1776 + &target_stat_scsi_att_intr_port_port.attr, 1777 + &target_stat_scsi_att_intr_port_indx.attr, 1778 + &target_stat_scsi_att_intr_port_port_auth_indx.attr, 1779 + &target_stat_scsi_att_intr_port_port_ident.attr, 1780 + NULL, 1781 + }; 1782 + 1783 + static struct configfs_item_operations target_stat_scsi_att_intr_port_attrib_ops = { 1784 + .show_attribute = target_stat_scsi_att_intr_port_attr_show, 1785 + .store_attribute = target_stat_scsi_att_intr_port_attr_store, 1786 + }; 1787 + 1788 + static struct config_item_type target_stat_scsi_att_intr_port_cit = { 1789 + .ct_item_ops = &target_stat_scsi_att_intr_port_attrib_ops, 1790 + .ct_attrs = target_stat_scsi_ath_intr_port_attrs, 1791 + .ct_owner = THIS_MODULE, 1792 + }; 1793 + 1794 + /* 1795 + * Called from target_core_fabric_configfs.c:target_fabric_make_mappedlun() to setup 1796 + * the target MappedLUN statistics groups + configfs CITs located in target_core_stat.c 1797 + */ 1798 + void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl) 1799 + { 1800 + struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; 1801 + 1802 + config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group, 1803 + "scsi_auth_intr", &target_stat_scsi_auth_intr_cit); 1804 + 
config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group, 1805 + "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit); 1806 + 1807 + ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group; 1808 + ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group; 1809 + ml_stat_grp->default_groups[2] = NULL; 1810 + }
+8
drivers/target/target_core_stat.h
··· 1 + #ifndef TARGET_CORE_STAT_H
2 + #define TARGET_CORE_STAT_H
3 + 
4 + extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
5 + extern void target_stat_setup_port_default_groups(struct se_lun *);
6 + extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
7 + 
8 + #endif /*** TARGET_CORE_STAT_H ***/
+16 -21
drivers/target/target_core_transport.c
··· 227 227 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); 228 228 static void transport_stop_all_task_timers(struct se_cmd *cmd); 229 229 230 - int transport_emulate_control_cdb(struct se_task *task); 231 - 232 230 int init_se_global(void) 233 231 { 234 232 struct se_global *global; ··· 1620 1622 const char *inquiry_prod, 1621 1623 const char *inquiry_rev) 1622 1624 { 1623 - int ret = 0, force_pt; 1625 + int force_pt; 1624 1626 struct se_device *dev; 1625 1627 1626 1628 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); ··· 1737 1739 } 1738 1740 scsi_dump_inquiry(dev); 1739 1741 1742 + return dev; 1740 1743 out: 1741 - if (!ret) 1742 - return dev; 1743 1744 kthread_stop(dev->process_thread); 1744 1745 1745 1746 spin_lock(&hba->device_lock); ··· 4356 4359 printk(KERN_ERR "Unable to allocate struct se_mem\n"); 4357 4360 goto out; 4358 4361 } 4359 - INIT_LIST_HEAD(&se_mem->se_list); 4360 - se_mem->se_len = (length > dma_size) ? dma_size : length; 4361 4362 4362 4363 /* #warning FIXME Allocate contigous pages for struct se_mem elements */ 4363 - se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0); 4364 + se_mem->se_page = alloc_pages(GFP_KERNEL, 0); 4364 4365 if (!(se_mem->se_page)) { 4365 4366 printk(KERN_ERR "alloc_pages() failed\n"); 4366 4367 goto out; ··· 4369 4374 printk(KERN_ERR "kmap_atomic() failed\n"); 4370 4375 goto out; 4371 4376 } 4377 + INIT_LIST_HEAD(&se_mem->se_list); 4378 + se_mem->se_len = (length > dma_size) ? dma_size : length; 4372 4379 memset(buf, 0, se_mem->se_len); 4373 4380 kunmap_atomic(buf, KM_IRQ0); 4374 4381 ··· 4389 4392 4390 4393 return 0; 4391 4394 out: 4395 + if (se_mem) 4396 + __free_pages(se_mem->se_page, 0); 4397 + kmem_cache_free(se_mem_cache, se_mem); 4392 4398 return -1; 4393 4399 } 4394 4400 4395 - extern u32 transport_calc_sg_num( 4401 + u32 transport_calc_sg_num( 4396 4402 struct se_task *task, 4397 4403 struct se_mem *in_se_mem, 4398 4404 u32 task_offset) ··· 5834 5834 int ret; 5835 5835 5836 5836 switch (tmr->function) { 5837 - case ABORT_TASK: 5837 + case TMR_ABORT_TASK: 5838 5838 ref_cmd = tmr->ref_cmd; 5839 5839 tmr->response = TMR_FUNCTION_REJECTED; 5840 5840 break; 5841 - case ABORT_TASK_SET: 5842 - case CLEAR_ACA: 5843 - case CLEAR_TASK_SET: 5841 + case TMR_ABORT_TASK_SET: 5842 + case TMR_CLEAR_ACA: 5843 + case TMR_CLEAR_TASK_SET: 5844 5844 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 5845 5845 break; 5846 - case LUN_RESET: 5846 + case TMR_LUN_RESET: 5847 5847 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 5848 5848 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : 5849 5849 TMR_FUNCTION_REJECTED; 5850 5850 break; 5851 - #if 0 5852 - case TARGET_WARM_RESET: 5853 - transport_generic_host_reset(dev->se_hba); 5851 + case TMR_TARGET_WARM_RESET: 5854 5852 tmr->response = TMR_FUNCTION_REJECTED; 5855 5853 break; 5856 - case TARGET_COLD_RESET: 5857 - transport_generic_host_reset(dev->se_hba); 5858 - transport_generic_cold_reset(dev->se_hba); 5854 + case TMR_TARGET_COLD_RESET: 5859 5855 tmr->response = TMR_FUNCTION_REJECTED; 5860 5856 break; 5861 - #endif 5862 5857 default: 5863 5858 printk(KERN_ERR "Uknown TMR function: 0x%02x.\n", 5864 5859 tmr->function);
+1
include/scsi/libiscsi_tcp.h
··· 47 47 struct scatterlist *sg;
48 48 void *sg_mapped;
49 49 unsigned int sg_offset;
50 + bool atomic_mapped;
50 51 
51 52 iscsi_segment_done_fn_t *done;
52 53 };
+1 -1
include/scsi/scsi_device.h
··· 462 462 }
463 463 static inline int scsi_device_enclosure(struct scsi_device *sdev)
464 464 {
465 - return sdev->inquiry[6] & (1<<6);
465 + return sdev->inquiry ? (sdev->inquiry[6] & (1<<6)) : 1;
466 466 }
467 467 
468 468 static inline int scsi_device_protection(struct scsi_device *sdev)
+52 -21
include/target/target_core_base.h
··· 9 9 #include <net/sock.h> 10 10 #include <net/tcp.h> 11 11 12 - #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" 12 + #define TARGET_CORE_MOD_VERSION "v4.0.0-rc7-ml" 13 13 #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) 14 14 15 15 /* Used by transport_generic_allocate_iovecs() */ ··· 239 239 } ____cacheline_aligned; 240 240 241 241 struct t10_alua_lu_gp_member { 242 - int lu_gp_assoc:1; 242 + bool lu_gp_assoc; 243 243 atomic_t lu_gp_mem_ref_cnt; 244 244 spinlock_t lu_gp_mem_lock; 245 245 struct t10_alua_lu_gp *lu_gp; ··· 271 271 } ____cacheline_aligned; 272 272 273 273 struct t10_alua_tg_pt_gp_member { 274 - int tg_pt_gp_assoc:1; 274 + bool tg_pt_gp_assoc; 275 275 atomic_t tg_pt_gp_mem_ref_cnt; 276 276 spinlock_t tg_pt_gp_mem_lock; 277 277 struct t10_alua_tg_pt_gp *tg_pt_gp; ··· 336 336 int pr_res_type; 337 337 int pr_res_scope; 338 338 /* Used for fabric initiator WWPNs using a ISID */ 339 - int isid_present_at_reg:1; 339 + bool isid_present_at_reg; 340 340 u32 pr_res_mapped_lun; 341 341 u32 pr_aptpl_target_lun; 342 342 u32 pr_res_generation; ··· 418 418 unsigned long long t_task_lba; 419 419 int t_tasks_failed; 420 420 int t_tasks_fua; 421 - int t_tasks_bidi:1; 421 + bool t_tasks_bidi; 422 422 u32 t_task_cdbs; 423 423 u32 t_tasks_check; 424 424 u32 t_tasks_no; ··· 470 470 u8 task_flags; 471 471 int task_error_status; 472 472 int task_state_flags; 473 - int task_padded_sg:1; 473 + bool task_padded_sg; 474 474 unsigned long long task_lba; 475 475 u32 task_no; 476 476 u32 task_sectors; ··· 494 494 struct list_head t_state_list; 495 495 } ____cacheline_aligned; 496 496 497 - #define TASK_CMD(task) ((struct se_cmd *)task->task_se_cmd) 498 - #define TASK_DEV(task) ((struct se_device *)task->se_dev) 497 + #define TASK_CMD(task) ((task)->task_se_cmd) 498 + #define TASK_DEV(task) ((task)->se_dev) 499 499 500 500 struct se_cmd { 501 501 /* SAM response code being sent to initiator */ ··· 551 551 void (*transport_complete_callback)(struct se_cmd *); 552 552 } ____cacheline_aligned; 553 553 554 - #define T_TASK(cmd) ((struct se_transport_task *)(cmd->t_task)) 555 - #define CMD_TFO(cmd) ((struct target_core_fabric_ops *)cmd->se_tfo) 554 + #define T_TASK(cmd) ((cmd)->t_task) 555 + #define CMD_TFO(cmd) ((cmd)->se_tfo) 556 556 557 557 struct se_tmr_req { 558 558 /* Task Management function to be preformed */ ··· 583 583 struct se_node_acl { 584 584 char initiatorname[TRANSPORT_IQN_LEN]; 585 585 /* Used to signal demo mode created ACL, disabled by default */ 586 - int dynamic_node_acl:1; 586 + bool dynamic_node_acl; 587 587 u32 queue_depth; 588 588 u32 acl_index; 589 589 u64 num_cmds; ··· 601 601 struct config_group acl_attrib_group; 602 602 struct config_group acl_auth_group; 603 603 struct config_group acl_param_group; 604 - struct config_group *acl_default_groups[4]; 604 + struct config_group acl_fabric_stat_group; 605 + struct config_group *acl_default_groups[5]; 605 606 struct list_head acl_list; 606 607 struct list_head acl_sess_list; 607 608 } ____cacheline_aligned; ··· 616 615 struct list_head sess_acl_list; 617 616 } ____cacheline_aligned; 618 617 619 - #define SE_SESS(cmd) ((struct se_session *)(cmd)->se_sess) 620 - #define SE_NODE_ACL(sess) ((struct se_node_acl *)(sess)->se_node_acl) 618 + #define SE_SESS(cmd) ((cmd)->se_sess) 619 + #define SE_NODE_ACL(sess) ((sess)->se_node_acl) 621 620 622 621 struct se_device; 623 622 struct se_transform_info; 624 623 struct scatterlist; 624 + 625 + struct se_ml_stat_grps { 626 + struct config_group stat_group; 627 + struct 
config_group scsi_auth_intr_group; 628 + struct config_group scsi_att_intr_port_group; 629 + }; 625 630 626 631 struct se_lun_acl { 627 632 char initiatorname[TRANSPORT_IQN_LEN]; ··· 636 629 struct se_lun *se_lun; 637 630 struct list_head lacl_list; 638 631 struct config_group se_lun_group; 632 + struct se_ml_stat_grps ml_stat_grps; 639 633 } ____cacheline_aligned; 640 634 635 + #define ML_STAT_GRPS(lacl) (&(lacl)->ml_stat_grps) 636 + 641 637 struct se_dev_entry { 642 - int def_pr_registered:1; 638 + bool def_pr_registered; 643 639 /* See transport_lunflags_table */ 644 640 u32 lun_flags; 645 641 u32 deve_cmds; ··· 703 693 struct config_group da_group; 704 694 } ____cacheline_aligned; 705 695 696 + struct se_dev_stat_grps { 697 + struct config_group stat_group; 698 + struct config_group scsi_dev_group; 699 + struct config_group scsi_tgt_dev_group; 700 + struct config_group scsi_lu_group; 701 + }; 702 + 706 703 struct se_subsystem_dev { 707 704 /* Used for struct se_subsystem_dev-->se_dev_alias, must be less than PAGE_SIZE */ 708 705 #define SE_DEV_ALIAS_LEN 512 ··· 733 716 struct config_group se_dev_group; 734 717 /* For T10 Reservations */ 735 718 struct config_group se_dev_pr_group; 719 + /* For target_core_stat.c groups */ 720 + struct se_dev_stat_grps dev_stat_grps; 736 721 } ____cacheline_aligned; 737 722 738 723 #define T10_ALUA(su_dev) (&(su_dev)->t10_alua) 739 724 #define T10_RES(su_dev) (&(su_dev)->t10_reservation) 740 725 #define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops) 726 + #define DEV_STAT_GRP(dev) (&(dev)->dev_stat_grps) 741 727 742 728 struct se_device { 743 729 /* Set to 1 if thread is NOT sleeping on thread_sem */ ··· 823 803 struct list_head g_se_dev_list; 824 804 } ____cacheline_aligned; 825 805 826 - #define SE_DEV(cmd) ((struct se_device *)(cmd)->se_lun->lun_se_dev) 827 - #define SU_DEV(dev) ((struct se_subsystem_dev *)(dev)->se_sub_dev) 806 + #define SE_DEV(cmd) ((cmd)->se_lun->lun_se_dev) 807 + #define SU_DEV(dev) ((dev)->se_sub_dev) 828 808 #define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib) 829 809 #define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn) 830 810 ··· 852 832 struct se_subsystem_api *transport; 853 833 } ____cacheline_aligned; 854 834 855 - #define SE_HBA(d) ((struct se_hba *)(d)->se_hba) 835 + #define SE_HBA(dev) ((dev)->se_hba) 836 + 837 + struct se_port_stat_grps { 838 + struct config_group stat_group; 839 + struct config_group scsi_port_group; 840 + struct config_group scsi_tgt_port_group; 841 + struct config_group scsi_transport_group; 842 + }; 856 843 857 844 struct se_lun { 858 845 /* See transport_lun_status_table */ ··· 875 848 struct list_head lun_cmd_list; 876 849 struct list_head lun_acl_list; 877 850 struct se_device *lun_se_dev; 851 + struct se_port *lun_sep; 878 852 struct config_group lun_group; 879 - struct se_port *lun_sep; 853 + struct se_port_stat_grps port_stat_grps; 880 854 } ____cacheline_aligned; 881 855 882 - #define SE_LUN(c) ((struct se_lun *)(c)->se_lun) 856 + #define SE_LUN(cmd) ((cmd)->se_lun) 857 + #define PORT_STAT_GRP(lun) (&(lun)->port_stat_grps) 883 858 884 859 struct scsi_port_stats { 885 860 u64 cmd_pdus; ··· 948 919 struct config_group tpg_param_group; 949 920 } ____cacheline_aligned; 950 921 951 - #define TPG_TFO(se_tpg) ((struct target_core_fabric_ops *)(se_tpg)->se_tpg_tfo) 922 + #define TPG_TFO(se_tpg) ((se_tpg)->se_tpg_tfo) 952 923 953 924 struct se_wwn { 954 925 struct target_fabric_configfs *wwn_tf; 955 926 struct config_group wwn_group; 927 + struct config_group *wwn_default_groups[2]; 
928 + struct config_group fabric_stat_group; 956 929 } ____cacheline_aligned; 957 930 958 931 struct se_global {
+4
include/target/target_core_configfs.h
··· 14 14 struct target_fabric_configfs_template {
15 15 struct config_item_type tfc_discovery_cit;
16 16 struct config_item_type tfc_wwn_cit;
17 + struct config_item_type tfc_wwn_fabric_stats_cit;
17 18 struct config_item_type tfc_tpg_cit;
18 19 struct config_item_type tfc_tpg_base_cit;
19 20 struct config_item_type tfc_tpg_lun_cit;
20 21 struct config_item_type tfc_tpg_port_cit;
22 + struct config_item_type tfc_tpg_port_stat_cit;
21 23 struct config_item_type tfc_tpg_np_cit;
22 24 struct config_item_type tfc_tpg_np_base_cit;
23 25 struct config_item_type tfc_tpg_attrib_cit;
··· 29 27 struct config_item_type tfc_tpg_nacl_attrib_cit;
30 28 struct config_item_type tfc_tpg_nacl_auth_cit;
31 29 struct config_item_type tfc_tpg_nacl_param_cit;
30 + struct config_item_type tfc_tpg_nacl_stat_cit;
32 31 struct config_item_type tfc_tpg_mappedlun_cit;
32 + struct config_item_type tfc_tpg_mappedlun_stat_cit;
33 33 };
34 34 
35 35 struct target_fabric_configfs {
+1 -1
include/target/target_core_fabric_ops.h
··· 8 8 * for scatterlist chaining using transport_do_task_sg_link(),
9 9 * disabled by default
10 10 */
11 - int task_sg_chaining:1;
11 + bool task_sg_chaining;
12 12 char *(*get_fabric_name)(void);
13 13 u8 (*get_fabric_proto_ident)(struct se_portal_group *);
14 14 char *(*tpg_get_wwn)(struct se_portal_group *);
+22 -30
include/target/target_core_tmr.h
··· 1 1 #ifndef TARGET_CORE_TMR_H
2 2 #define TARGET_CORE_TMR_H
3 3 
4 - /* task management function values */
5 - #ifdef ABORT_TASK
6 - #undef ABORT_TASK
7 - #endif /* ABORT_TASK */
8 - #define ABORT_TASK 1
9 - #ifdef ABORT_TASK_SET
10 - #undef ABORT_TASK_SET
11 - #endif /* ABORT_TASK_SET */
12 - #define ABORT_TASK_SET 2
13 - #ifdef CLEAR_ACA
14 - #undef CLEAR_ACA
15 - #endif /* CLEAR_ACA */
16 - #define CLEAR_ACA 3
17 - #ifdef CLEAR_TASK_SET
18 - #undef CLEAR_TASK_SET
19 - #endif /* CLEAR_TASK_SET */
20 - #define CLEAR_TASK_SET 4
21 - #define LUN_RESET 5
22 - #define TARGET_WARM_RESET 6
23 - #define TARGET_COLD_RESET 7
24 - #define TASK_REASSIGN 8
4 + /* fabric independent task management function values */
5 + enum tcm_tmreq_table {
6 + TMR_ABORT_TASK = 1,
7 + TMR_ABORT_TASK_SET = 2,
8 + TMR_CLEAR_ACA = 3,
9 + TMR_CLEAR_TASK_SET = 4,
10 + TMR_LUN_RESET = 5,
11 + TMR_TARGET_WARM_RESET = 6,
12 + TMR_TARGET_COLD_RESET = 7,
13 + TMR_FABRIC_TMR = 255,
14 + };
25 15 
26 - /* task management response values */
27 - #define TMR_FUNCTION_COMPLETE 0
28 - #define TMR_TASK_DOES_NOT_EXIST 1
29 - #define TMR_LUN_DOES_NOT_EXIST 2
30 - #define TMR_TASK_STILL_ALLEGIANT 3
31 - #define TMR_TASK_FAILOVER_NOT_SUPPORTED 4
32 - #define TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED 5
33 - #define TMR_FUNCTION_AUTHORIZATION_FAILED 6
34 - #define TMR_FUNCTION_REJECTED 255
16 + /* fabric independent task management response values */
17 + enum tcm_tmrsp_table {
18 + TMR_FUNCTION_COMPLETE = 0,
19 + TMR_TASK_DOES_NOT_EXIST = 1,
20 + TMR_LUN_DOES_NOT_EXIST = 2,
21 + TMR_TASK_STILL_ALLEGIANT = 3,
22 + TMR_TASK_FAILOVER_NOT_SUPPORTED = 4,
23 + TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 5,
24 + TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
25 + TMR_FUNCTION_REJECTED = 255,
26 + };
35 27 
36 28 extern struct kmem_cache *se_tmr_req_cache;
37 29 
+4
include/target/target_core_transport.h
··· 109 109 struct se_mem;
110 110 struct se_subsystem_api;
111 111 
112 + extern struct kmem_cache *se_mem_cache;
113 + 
112 114 extern int init_se_global(void);
113 115 extern void release_se_global(void);
114 116 extern void init_scsi_index_table(void);
··· 192 190 extern int transport_generic_do_tmr(struct se_cmd *);
193 191 /* From target_core_alua.c */
194 192 extern int core_alua_check_nonop_delay(struct se_cmd *);
193 + /* From target_core_cdb.c */
194 + extern int transport_emulate_control_cdb(struct se_task *);
195 195 
196 196 /*
197 197 * Each se_transport_task_t can have N number of possible struct se_task's